/*
 * QLogic qlge NIC HBA Driver
 * Copyright (c) 2003-2008 QLogic Corporation
 * See LICENSE.qlge for copyright and licensing details.
 * Author: Linux qlge network device driver by
 *         Ron Mercer <ron.mercer@qlogic.com>
 */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/pagemap.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/dmapool.h>
#include <linux/mempool.h>
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/interrupt.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <net/ipv6.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/if_arp.h>
#include <linux/if_ether.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/skbuff.h>
#include <linux/if_vlan.h>
#include <linux/delay.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <net/ip6_checksum.h>

#include "qlge.h"

char qlge_driver_name[] = DRV_NAME;
const char qlge_driver_version[] = DRV_VERSION;

MODULE_AUTHOR("Ron Mercer <ron.mercer@qlogic.com>");
MODULE_DESCRIPTION(DRV_STRING " ");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

static const u32 default_msg =
    NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK |
/* NETIF_MSG_TIMER |	*/
    NETIF_MSG_IFDOWN |
    NETIF_MSG_IFUP |
    NETIF_MSG_RX_ERR |
    NETIF_MSG_TX_ERR |
/*  NETIF_MSG_TX_QUEUED | */
/*  NETIF_MSG_INTR | NETIF_MSG_TX_DONE | NETIF_MSG_RX_STATUS | */
/* NETIF_MSG_PKTDATA | */
    NETIF_MSG_HW | NETIF_MSG_WOL | 0;

static int debug = 0x00007fff;	/* defaults above */
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");

#define MSIX_IRQ 0
#define MSI_IRQ 1
#define LEG_IRQ 2
static int qlge_irq_type = MSIX_IRQ;
module_param(qlge_irq_type, int, MSIX_IRQ);
MODULE_PARM_DESC(qlge_irq_type, "0 = MSI-X, 1 = MSI, 2 = Legacy.");

static struct pci_device_id qlge_pci_tbl[] __devinitdata = {
	{PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID_8012)},
	{PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID_8000)},
	/* required last entry */
	{0,}
};

MODULE_DEVICE_TABLE(pci, qlge_pci_tbl);

/* This hardware semaphore causes exclusive access to
 * resources shared between the NIC driver, MPI firmware,
 * FCOE firmware and the FC driver.
 */
static int ql_sem_trylock(struct ql_adapter *qdev, u32 sem_mask)
{
	u32 sem_bits = 0;

	switch (sem_mask) {
	case SEM_XGMAC0_MASK:
		sem_bits = SEM_SET << SEM_XGMAC0_SHIFT;
		break;
	case SEM_XGMAC1_MASK:
		sem_bits = SEM_SET << SEM_XGMAC1_SHIFT;
		break;
	case SEM_ICB_MASK:
		sem_bits = SEM_SET << SEM_ICB_SHIFT;
		break;
	case SEM_MAC_ADDR_MASK:
		sem_bits = SEM_SET << SEM_MAC_ADDR_SHIFT;
		break;
	case SEM_FLASH_MASK:
		sem_bits = SEM_SET << SEM_FLASH_SHIFT;
		break;
	case SEM_PROBE_MASK:
		sem_bits = SEM_SET << SEM_PROBE_SHIFT;
		break;
	case SEM_RT_IDX_MASK:
		sem_bits = SEM_SET << SEM_RT_IDX_SHIFT;
		break;
	case SEM_PROC_REG_MASK:
		sem_bits = SEM_SET << SEM_PROC_REG_SHIFT;
		break;
	default:
		QPRINTK(qdev, PROBE, ALERT, "Bad Semaphore mask!.\n");
		return -EINVAL;
	}

	ql_write32(qdev, SEM, sem_bits | sem_mask);
	return !(ql_read32(qdev, SEM) & sem_bits);
}

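/* Spin for up to 3ms (30 tries, 100us apart) waiting to acquire the
 * hardware semaphore.
 */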
int ql_sem_spinlock(struct ql_adapter *qdev, u32 sem_mask)
{
	unsigned int wait_count = 30;
	do {
		if (!ql_sem_trylock(qdev, sem_mask))
			return 0;
		udelay(100);
	} while (--wait_count);
	return -ETIMEDOUT;
}

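/* Release a previously acquired hardware semaphore. */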
void ql_sem_unlock(struct ql_adapter *qdev, u32 sem_mask)
{
	ql_write32(qdev, SEM, sem_mask);
	ql_read32(qdev, SEM);	/* flush */
}

/* This function waits for a specific bit to come ready
 * in a given register.  It is used mostly by the initialize
 * process, but is also used in kernel thread API such as
 * netdev->set_multi, netdev->set_mac_address, netdev->vlan_rx_add_vid.
 */
int ql_wait_reg_rdy(struct ql_adapter *qdev, u32 reg, u32 bit, u32 err_bit)
{
	u32 temp;
	int count = UDELAY_COUNT;

	while (count) {
		temp = ql_read32(qdev, reg);

		/* check for errors */
		if (temp & err_bit) {
			QPRINTK(qdev, PROBE, ALERT,
				"register 0x%.08x access error, value = 0x%.08x!.\n",
				reg, temp);
			return -EIO;
		} else if (temp & bit)
			return 0;
		udelay(UDELAY_DELAY);
		count--;
	}
	QPRINTK(qdev, PROBE, ALERT,
		"Timed out waiting for reg %x to come ready.\n", reg);
	return -ETIMEDOUT;
}

/* The CFG register is used to download TX and RX control blocks
 * to the chip. This function waits for an operation to complete.
 */
static int ql_wait_cfg(struct ql_adapter *qdev, u32 bit)
{
	int count = UDELAY_COUNT;
	u32 temp;

	while (count) {
		temp = ql_read32(qdev, CFG);
		if (temp & CFG_LE)
			return -EIO;
		if (!(temp & bit))
			return 0;
		udelay(UDELAY_DELAY);
		count--;
	}
	return -ETIMEDOUT;
}


/* Used to issue init control blocks to hw. Maps control block,
 * sets address, triggers download, waits for completion.
 */
int ql_write_cfg(struct ql_adapter *qdev, void *ptr, int size, u32 bit,
		 u16 q_id)
{
	u64 map;
	int status = 0;
	int direction;
	u32 mask;
	u32 value;

	direction =
	    (bit & (CFG_LRQ | CFG_LR | CFG_LCQ)) ? PCI_DMA_TODEVICE :
	    PCI_DMA_FROMDEVICE;

	map = pci_map_single(qdev->pdev, ptr, size, direction);
	if (pci_dma_mapping_error(qdev->pdev, map)) {
		QPRINTK(qdev, IFUP, ERR, "Couldn't map DMA area.\n");
		return -ENOMEM;
	}

	status = ql_sem_spinlock(qdev, SEM_ICB_MASK);
	if (status)
		return status;

	status = ql_wait_cfg(qdev, bit);
	if (status) {
		QPRINTK(qdev, IFUP, ERR,
			"Timed out waiting for CFG to come ready.\n");
		goto exit;
	}

	ql_write32(qdev, ICB_L, (u32) map);
	ql_write32(qdev, ICB_H, (u32) (map >> 32));

	mask = CFG_Q_MASK | (bit << 16);
	value = bit | (q_id << CFG_Q_SHIFT);
	ql_write32(qdev, CFG, (mask | value));

	/*
	 * Wait for the bit to clear after signaling hw.
	 */
	status = ql_wait_cfg(qdev, bit);
exit:
	ql_sem_unlock(qdev, SEM_ICB_MASK);	/* does flush too */
	pci_unmap_single(qdev->pdev, map, size, direction);
	return status;
}

/* Get a specific MAC address from the CAM.  Used for debug and reg dump. */
int ql_get_mac_addr_reg(struct ql_adapter *qdev, u32 type, u16 index,
			u32 *value)
{
	u32 offset = 0;
	int status;

	switch (type) {
	case MAC_ADDR_TYPE_MULTI_MAC:
	case MAC_ADDR_TYPE_CAM_MAC:
		{
			status =
			    ql_wait_reg_rdy(qdev,
				MAC_ADDR_IDX, MAC_ADDR_MW, 0);
			if (status)
				goto exit;
			ql_write32(qdev, MAC_ADDR_IDX, (offset++) |	/* offset */
				   (index << MAC_ADDR_IDX_SHIFT) |	/* index */
				   MAC_ADDR_ADR | MAC_ADDR_RS | type);	/* type */
			status =
			    ql_wait_reg_rdy(qdev,
				MAC_ADDR_IDX, MAC_ADDR_MR, 0);
			if (status)
				goto exit;
			*value++ = ql_read32(qdev, MAC_ADDR_DATA);
			status =
			    ql_wait_reg_rdy(qdev,
				MAC_ADDR_IDX, MAC_ADDR_MW, 0);
			if (status)
				goto exit;
			ql_write32(qdev, MAC_ADDR_IDX, (offset++) |	/* offset */
				   (index << MAC_ADDR_IDX_SHIFT) |	/* index */
				   MAC_ADDR_ADR | MAC_ADDR_RS | type);	/* type */
			status =
			    ql_wait_reg_rdy(qdev,
				MAC_ADDR_IDX, MAC_ADDR_MR, 0);
			if (status)
				goto exit;
			*value++ = ql_read32(qdev, MAC_ADDR_DATA);
			if (type == MAC_ADDR_TYPE_CAM_MAC) {
				status =
				    ql_wait_reg_rdy(qdev,
					MAC_ADDR_IDX, MAC_ADDR_MW, 0);
				if (status)
					goto exit;
				ql_write32(qdev, MAC_ADDR_IDX, (offset++) |	/* offset */
					   (index << MAC_ADDR_IDX_SHIFT) |	/* index */
					   MAC_ADDR_ADR | MAC_ADDR_RS | type);	/* type */
				status =
				    ql_wait_reg_rdy(qdev, MAC_ADDR_IDX,
						    MAC_ADDR_MR, 0);
				if (status)
					goto exit;
				*value++ = ql_read32(qdev, MAC_ADDR_DATA);
			}
			break;
		}
	case MAC_ADDR_TYPE_VLAN:
	case MAC_ADDR_TYPE_MULTI_FLTR:
	default:
		QPRINTK(qdev, IFUP, CRIT,
			"Address type %d not yet supported.\n", type);
		status = -EPERM;
	}
exit:
	return status;
}

/* Set up a MAC, multicast or VLAN address for the
 * inbound frame matching.
 */
static int ql_set_mac_addr_reg(struct ql_adapter *qdev, u8 *addr, u32 type,
			       u16 index)
{
	u32 offset = 0;
	int status = 0;

	switch (type) {
	case MAC_ADDR_TYPE_MULTI_MAC:
		{
			u32 upper = (addr[0] << 8) | addr[1];
			u32 lower = (addr[2] << 24) | (addr[3] << 16) |
					(addr[4] << 8) | (addr[5]);

			status =
				ql_wait_reg_rdy(qdev,
					MAC_ADDR_IDX, MAC_ADDR_MW, 0);
			if (status)
				goto exit;
			ql_write32(qdev, MAC_ADDR_IDX, (offset++) |
				(index << MAC_ADDR_IDX_SHIFT) |
				type | MAC_ADDR_E);
			ql_write32(qdev, MAC_ADDR_DATA, lower);
			status =
				ql_wait_reg_rdy(qdev,
					MAC_ADDR_IDX, MAC_ADDR_MW, 0);
			if (status)
				goto exit;
			ql_write32(qdev, MAC_ADDR_IDX, (offset++) |
				(index << MAC_ADDR_IDX_SHIFT) |
				type | MAC_ADDR_E);

			ql_write32(qdev, MAC_ADDR_DATA, upper);
			status =
				ql_wait_reg_rdy(qdev,
					MAC_ADDR_IDX, MAC_ADDR_MW, 0);
			if (status)
				goto exit;
			break;
		}
	case MAC_ADDR_TYPE_CAM_MAC:
		{
			u32 cam_output;
			u32 upper = (addr[0] << 8) | addr[1];
			u32 lower =
			    (addr[2] << 24) | (addr[3] << 16) | (addr[4] << 8) |
			    (addr[5]);

			QPRINTK(qdev, IFUP, DEBUG,
				"Adding %s address %pM"
				" at index %d in the CAM.\n",
				((type ==
				  MAC_ADDR_TYPE_MULTI_MAC) ? "MULTICAST" :
				 "UNICAST"), addr, index);

			status =
			    ql_wait_reg_rdy(qdev,
				MAC_ADDR_IDX, MAC_ADDR_MW, 0);
			if (status)
				goto exit;
			ql_write32(qdev, MAC_ADDR_IDX, (offset++) |	/* offset */
				   (index << MAC_ADDR_IDX_SHIFT) |	/* index */
				   type);	/* type */
			ql_write32(qdev, MAC_ADDR_DATA, lower);
			status =
			    ql_wait_reg_rdy(qdev,
				MAC_ADDR_IDX, MAC_ADDR_MW, 0);
			if (status)
				goto exit;
			ql_write32(qdev, MAC_ADDR_IDX, (offset++) |	/* offset */
				   (index << MAC_ADDR_IDX_SHIFT) |	/* index */
				   type);	/* type */
			ql_write32(qdev, MAC_ADDR_DATA, upper);
			status =
			    ql_wait_reg_rdy(qdev,
				MAC_ADDR_IDX, MAC_ADDR_MW, 0);
			if (status)
				goto exit;
			ql_write32(qdev, MAC_ADDR_IDX, (offset) |	/* offset */
				   (index << MAC_ADDR_IDX_SHIFT) |	/* index */
				   type);	/* type */
			/* This field should also include the queue id
			   and possibly the function id.  Right now we hardcode
			   the route field to NIC core.
			 */
			cam_output = (CAM_OUT_ROUTE_NIC |
				      (qdev->
				       func << CAM_OUT_FUNC_SHIFT) |
					(0 << CAM_OUT_CQ_ID_SHIFT));
			if (qdev->vlgrp)
				cam_output |= CAM_OUT_RV;
			/* route to NIC core */
			ql_write32(qdev, MAC_ADDR_DATA, cam_output);
			break;
		}
	case MAC_ADDR_TYPE_VLAN:
		{
			u32 enable_bit = *((u32 *) &addr[0]);
			/* For VLAN, the addr actually holds a bit that
			 * either enables or disables the vlan id we are
			 * addressing. It's either MAC_ADDR_E on or off.
			 * That's bit-27 we're talking about.
			 */
			QPRINTK(qdev, IFUP, INFO, "%s VLAN ID %d %s the CAM.\n",
				(enable_bit ? "Adding" : "Removing"),
				index, (enable_bit ? "to" : "from"));

			status =
			    ql_wait_reg_rdy(qdev,
				MAC_ADDR_IDX, MAC_ADDR_MW, 0);
			if (status)
				goto exit;
			ql_write32(qdev, MAC_ADDR_IDX, offset |	/* offset */
				   (index << MAC_ADDR_IDX_SHIFT) |	/* index */
				   type |	/* type */
				   enable_bit);	/* enable/disable */
			break;
		}
	case MAC_ADDR_TYPE_MULTI_FLTR:
	default:
		QPRINTK(qdev, IFUP, CRIT,
			"Address type %d not yet supported.\n", type);
		status = -EPERM;
	}
exit:
	return status;
}

/* Set or clear MAC address in hardware. We sometimes
 * have to clear it to prevent wrong frame routing
 * especially in a bonding environment.
 */
static int ql_set_mac_addr(struct ql_adapter *qdev, int set)
{
	int status;
	char zero_mac_addr[ETH_ALEN];
	char *addr;

	if (set) {
		addr = &qdev->ndev->dev_addr[0];
		QPRINTK(qdev, IFUP, DEBUG,
			"Set Mac addr %02x:%02x:%02x:%02x:%02x:%02x\n",
			addr[0], addr[1], addr[2], addr[3],
			addr[4], addr[5]);
	} else {
		memset(zero_mac_addr, 0, ETH_ALEN);
		addr = &zero_mac_addr[0];
		QPRINTK(qdev, IFUP, DEBUG,
			"Clearing MAC address on %s\n",
			qdev->ndev->name);
	}
	status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
	if (status)
		return status;
	status = ql_set_mac_addr_reg(qdev, (u8 *) addr,
			MAC_ADDR_TYPE_CAM_MAC, qdev->func * MAX_CQ);
	ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
	if (status)
		QPRINTK(qdev, IFUP, ERR, "Failed to init mac "
			"address.\n");
	return status;
}

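/* Link state transitions: program the MAC address into the CAM when the
 * link comes up, and clear it again when the link drops (see
 * ql_set_mac_addr() above).
 */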
void ql_link_on(struct ql_adapter *qdev)
{
	QPRINTK(qdev, LINK, ERR, "%s: Link is up.\n",
				 qdev->ndev->name);
	netif_carrier_on(qdev->ndev);
	ql_set_mac_addr(qdev, 1);
}

void ql_link_off(struct ql_adapter *qdev)
{
	QPRINTK(qdev, LINK, ERR, "%s: Link is down.\n",
				 qdev->ndev->name);
	netif_carrier_off(qdev->ndev);
	ql_set_mac_addr(qdev, 0);
}

/* Get a specific frame routing value from the CAM.
 * Used for debug and reg dump.
 */
int ql_get_routing_reg(struct ql_adapter *qdev, u32 index, u32 *value)
{
	int status = 0;

	status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MW, 0);
	if (status)
		goto exit;

	ql_write32(qdev, RT_IDX,
		   RT_IDX_TYPE_NICQ | RT_IDX_RS | (index << RT_IDX_IDX_SHIFT));
	status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MR, 0);
	if (status)
		goto exit;
	*value = ql_read32(qdev, RT_DATA);
exit:
	return status;
}

/* The NIC function for this chip has 16 routing indexes.  Each one can be used
 * to route different frame types to various inbound queues.  We send broadcast/
 * multicast/error frames to the default queue for slow handling,
 * and CAM hit/RSS frames to the fast handling queues.
 */
static int ql_set_routing_reg(struct ql_adapter *qdev, u32 index, u32 mask,
			      int enable)
{
	int status = -EINVAL; /* Return error if no mask match. */
	u32 value = 0;

	QPRINTK(qdev, IFUP, DEBUG,
		"%s %s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s mask %s the routing reg.\n",
		(enable ? "Adding" : "Removing"),
		((index == RT_IDX_ALL_ERR_SLOT) ? "MAC ERROR/ALL ERROR" : ""),
		((index == RT_IDX_IP_CSUM_ERR_SLOT) ? "IP CSUM ERROR" : ""),
		((index ==
		  RT_IDX_TCP_UDP_CSUM_ERR_SLOT) ? "TCP/UDP CSUM ERROR" : ""),
		((index == RT_IDX_BCAST_SLOT) ? "BROADCAST" : ""),
		((index == RT_IDX_MCAST_MATCH_SLOT) ? "MULTICAST MATCH" : ""),
		((index == RT_IDX_ALLMULTI_SLOT) ? "ALL MULTICAST MATCH" : ""),
		((index == RT_IDX_UNUSED6_SLOT) ? "UNUSED6" : ""),
		((index == RT_IDX_UNUSED7_SLOT) ? "UNUSED7" : ""),
		((index == RT_IDX_RSS_MATCH_SLOT) ? "RSS ALL/IPV4 MATCH" : ""),
		((index == RT_IDX_RSS_IPV6_SLOT) ? "RSS IPV6" : ""),
		((index == RT_IDX_RSS_TCP4_SLOT) ? "RSS TCP4" : ""),
		((index == RT_IDX_RSS_TCP6_SLOT) ? "RSS TCP6" : ""),
		((index == RT_IDX_CAM_HIT_SLOT) ? "CAM HIT" : ""),
		((index == RT_IDX_UNUSED013) ? "UNUSED13" : ""),
		((index == RT_IDX_UNUSED014) ? "UNUSED14" : ""),
		((index == RT_IDX_PROMISCUOUS_SLOT) ? "PROMISCUOUS" : ""),
		(enable ? "to" : "from"));

	switch (mask) {
	case RT_IDX_CAM_HIT:
		{
			value = RT_IDX_DST_CAM_Q |	/* dest */
			    RT_IDX_TYPE_NICQ |	/* type */
			    (RT_IDX_CAM_HIT_SLOT << RT_IDX_IDX_SHIFT);/* index */
			break;
		}
	case RT_IDX_VALID:	/* Promiscuous Mode frames. */
		{
			value = RT_IDX_DST_DFLT_Q |	/* dest */
			    RT_IDX_TYPE_NICQ |	/* type */
			    (RT_IDX_PROMISCUOUS_SLOT << RT_IDX_IDX_SHIFT);/* index */
			break;
		}
	case RT_IDX_ERR:	/* Pass up MAC,IP,TCP/UDP error frames. */
		{
			value = RT_IDX_DST_DFLT_Q |	/* dest */
			    RT_IDX_TYPE_NICQ |	/* type */
			    (RT_IDX_ALL_ERR_SLOT << RT_IDX_IDX_SHIFT);/* index */
			break;
		}
	case RT_IDX_BCAST:	/* Pass up Broadcast frames to default Q. */
		{
			value = RT_IDX_DST_DFLT_Q |	/* dest */
			    RT_IDX_TYPE_NICQ |	/* type */
			    (RT_IDX_BCAST_SLOT << RT_IDX_IDX_SHIFT);/* index */
			break;
		}
	case RT_IDX_MCAST:	/* Pass up All Multicast frames. */
		{
			value = RT_IDX_DST_DFLT_Q |	/* dest */
			    RT_IDX_TYPE_NICQ |	/* type */
			    (RT_IDX_ALLMULTI_SLOT << RT_IDX_IDX_SHIFT);/* index */
			break;
		}
	case RT_IDX_MCAST_MATCH:	/* Pass up matched Multicast frames. */
		{
			value = RT_IDX_DST_DFLT_Q |	/* dest */
			    RT_IDX_TYPE_NICQ |	/* type */
			    (RT_IDX_MCAST_MATCH_SLOT << RT_IDX_IDX_SHIFT);/* index */
			break;
		}
	case RT_IDX_RSS_MATCH:	/* Pass up matched RSS frames. */
		{
			value = RT_IDX_DST_RSS |	/* dest */
			    RT_IDX_TYPE_NICQ |	/* type */
			    (RT_IDX_RSS_MATCH_SLOT << RT_IDX_IDX_SHIFT);/* index */
			break;
		}
	case 0:		/* Clear the E-bit on an entry. */
		{
			value = RT_IDX_DST_DFLT_Q |	/* dest */
			    RT_IDX_TYPE_NICQ |	/* type */
			    (index << RT_IDX_IDX_SHIFT);/* index */
			break;
		}
	default:
		QPRINTK(qdev, IFUP, ERR, "Mask type %d not yet supported.\n",
			mask);
		status = -EPERM;
		goto exit;
	}

	if (value) {
		status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MW, 0);
		if (status)
			goto exit;
		value |= (enable ? RT_IDX_E : 0);
		ql_write32(qdev, RT_IDX, value);
		ql_write32(qdev, RT_DATA, enable ? mask : 0);
	}
exit:
	return status;
}

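/* Global interrupt enable/disable.  The bit is written along with its
 * mask in the upper 16 bits of the INTR_EN register.
 */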
static void ql_enable_interrupts(struct ql_adapter *qdev)
{
	ql_write32(qdev, INTR_EN, (INTR_EN_EI << 16) | INTR_EN_EI);
}

static void ql_disable_interrupts(struct ql_adapter *qdev)
{
	ql_write32(qdev, INTR_EN, (INTR_EN_EI << 16));
}

/* If we're running with multiple MSI-X vectors then we enable on the fly.
 * Otherwise, we may have multiple outstanding workers and don't want to
 * enable until the last one finishes. In this case, the irq_cnt gets
 * incremented every time we queue a worker and decremented every time
 * a worker finishes.  Once it hits zero we enable the interrupt.
 */
u32 ql_enable_completion_interrupt(struct ql_adapter *qdev, u32 intr)
{
	u32 var = 0;
	unsigned long hw_flags = 0;
	struct intr_context *ctx = qdev->intr_context + intr;

	if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags) && intr)) {
		/* Always enable if we're MSIX multi interrupts and
		 * it's not the default (zeroeth) interrupt.
		 */
		ql_write32(qdev, INTR_EN,
			   ctx->intr_en_mask);
		var = ql_read32(qdev, STS);
		return var;
	}

	spin_lock_irqsave(&qdev->hw_lock, hw_flags);
	if (atomic_dec_and_test(&ctx->irq_cnt)) {
		ql_write32(qdev, INTR_EN,
			   ctx->intr_en_mask);
		var = ql_read32(qdev, STS);
	}
	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
	return var;
}

static u32 ql_disable_completion_interrupt(struct ql_adapter *qdev, u32 intr)
{
	u32 var = 0;
	struct intr_context *ctx;

	/* HW disables for us if we're MSIX multi interrupts and
	 * it's not the default (zeroeth) interrupt.
	 */
	if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags) && intr))
		return 0;

	ctx = qdev->intr_context + intr;
	spin_lock(&qdev->hw_lock);
	if (!atomic_read(&ctx->irq_cnt)) {
		ql_write32(qdev, INTR_EN,
			   ctx->intr_dis_mask);
		var = ql_read32(qdev, STS);
	}
	atomic_inc(&ctx->irq_cnt);
	spin_unlock(&qdev->hw_lock);
	return var;
}

static void ql_enable_all_completion_interrupts(struct ql_adapter *qdev)
{
	int i;
	for (i = 0; i < qdev->intr_count; i++) {
		/* The enable call does a atomic_dec_and_test
		 * and enables only if the result is zero.
		 * So we precharge it here.
		 */
		if (unlikely(!test_bit(QL_MSIX_ENABLED, &qdev->flags) ||
			i == 0))
			atomic_set(&qdev->intr_context[i].irq_cnt, 1);
		ql_enable_completion_interrupt(qdev, i);
	}

}

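/* Check the flash signature string and verify the 16-bit checksum over the
 * image.  Returns nonzero on a bad signature or checksum.
 */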
static int ql_validate_flash(struct ql_adapter *qdev, u32 size, const char *str)
{
	int status, i;
	u16 csum = 0;
	__le16 *flash = (__le16 *)&qdev->flash;

	status = strncmp((char *)&qdev->flash, str, 4);
	if (status) {
		QPRINTK(qdev, IFUP, ERR, "Invalid flash signature.\n");
		return status;
	}

	for (i = 0; i < size; i++)
		csum += le16_to_cpu(*flash++);

	if (csum)
		QPRINTK(qdev, IFUP, ERR,
			"Invalid flash checksum, csum = 0x%.04x.\n", csum);

	return csum;
}

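/* Read one 32-bit word of flash through the FLASH_ADDR/FLASH_DATA
 * register pair.
 */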
static int ql_read_flash_word(struct ql_adapter *qdev, int offset, __le32 *data)
{
	int status = 0;
	/* wait for reg to come ready */
	status = ql_wait_reg_rdy(qdev,
			FLASH_ADDR, FLASH_ADDR_RDY, FLASH_ADDR_ERR);
	if (status)
		goto exit;
	/* set up for reg read */
	ql_write32(qdev, FLASH_ADDR, FLASH_ADDR_R | offset);
	/* wait for reg to come ready */
	status = ql_wait_reg_rdy(qdev,
			FLASH_ADDR, FLASH_ADDR_RDY, FLASH_ADDR_ERR);
	if (status)
		goto exit;
	/* This data is stored on flash as an array of
	 * __le32.  Since ql_read32() returns cpu endian
	 * we need to swap it back.
	 */
	*data = cpu_to_le32(ql_read32(qdev, FLASH_DATA));
exit:
	return status;
}

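/* Read and validate the 8000-series flash parameter block, then pull the
 * factory (or BOFM-modified) MAC address out of it.
 */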
static int ql_get_8000_flash_params(struct ql_adapter *qdev)
{
	u32 i, size;
	int status;
	__le32 *p = (__le32 *)&qdev->flash;
	u32 offset;
	u8 mac_addr[6];

	/* Get flash offset for function and adjust
	 * for dword access.
	 */
	if (!qdev->port)
		offset = FUNC0_FLASH_OFFSET / sizeof(u32);
	else
		offset = FUNC1_FLASH_OFFSET / sizeof(u32);

	if (ql_sem_spinlock(qdev, SEM_FLASH_MASK))
		return -ETIMEDOUT;

	size = sizeof(struct flash_params_8000) / sizeof(u32);
	for (i = 0; i < size; i++, p++) {
		status = ql_read_flash_word(qdev, i+offset, p);
		if (status) {
			QPRINTK(qdev, IFUP, ERR, "Error reading flash.\n");
			goto exit;
		}
	}

	status = ql_validate_flash(qdev,
			sizeof(struct flash_params_8000) / sizeof(u16),
			"8000");
	if (status) {
		QPRINTK(qdev, IFUP, ERR, "Invalid flash.\n");
		status = -EINVAL;
		goto exit;
	}

	/* Extract either manufacturer or BOFM modified
	 * MAC address.
	 */
	if (qdev->flash.flash_params_8000.data_type1 == 2)
		memcpy(mac_addr,
			qdev->flash.flash_params_8000.mac_addr1,
			qdev->ndev->addr_len);
	else
		memcpy(mac_addr,
			qdev->flash.flash_params_8000.mac_addr,
			qdev->ndev->addr_len);

	if (!is_valid_ether_addr(mac_addr)) {
		QPRINTK(qdev, IFUP, ERR, "Invalid MAC address.\n");
		status = -EINVAL;
		goto exit;
	}

	memcpy(qdev->ndev->dev_addr,
		mac_addr,
		qdev->ndev->addr_len);

exit:
	ql_sem_unlock(qdev, SEM_FLASH_MASK);
	return status;
}

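/* Read and validate the 8012-series flash parameter block and copy out the
 * MAC address.
 */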
static int ql_get_8012_flash_params(struct ql_adapter *qdev)
{
	int i;
	int status;
	__le32 *p = (__le32 *)&qdev->flash;
	u32 offset = 0;
	u32 size = sizeof(struct flash_params_8012) / sizeof(u32);

	/* Second function's parameters follow the first
	 * function's.
	 */
	if (qdev->port)
		offset = size;

	if (ql_sem_spinlock(qdev, SEM_FLASH_MASK))
		return -ETIMEDOUT;

	for (i = 0; i < size; i++, p++) {
		status = ql_read_flash_word(qdev, i+offset, p);
		if (status) {
			QPRINTK(qdev, IFUP, ERR, "Error reading flash.\n");
			goto exit;
		}

	}

	status = ql_validate_flash(qdev,
			sizeof(struct flash_params_8012) / sizeof(u16),
			"8012");
	if (status) {
		QPRINTK(qdev, IFUP, ERR, "Invalid flash.\n");
		status = -EINVAL;
		goto exit;
	}

	if (!is_valid_ether_addr(qdev->flash.flash_params_8012.mac_addr)) {
		status = -EINVAL;
		goto exit;
	}

	memcpy(qdev->ndev->dev_addr,
		qdev->flash.flash_params_8012.mac_addr,
		qdev->ndev->addr_len);

exit:
	ql_sem_unlock(qdev, SEM_FLASH_MASK);
	return status;
}

/* xgmac register are located behind the xgmac_addr and xgmac_data
 * register pair.  Each read/write requires us to wait for the ready
 * bit before reading/writing the data.
 */
static int ql_write_xgmac_reg(struct ql_adapter *qdev, u32 reg, u32 data)
{
	int status;
	/* wait for reg to come ready */
	status = ql_wait_reg_rdy(qdev,
			XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
	if (status)
		return status;
	/* write the data to the data reg */
	ql_write32(qdev, XGMAC_DATA, data);
	/* trigger the write */
	ql_write32(qdev, XGMAC_ADDR, reg);
	return status;
}

/* xgmac register are located behind the xgmac_addr and xgmac_data
 * register pair.  Each read/write requires us to wait for the ready
 * bit before reading/writing the data.
 */
int ql_read_xgmac_reg(struct ql_adapter *qdev, u32 reg, u32 *data)
{
	int status = 0;
	/* wait for reg to come ready */
	status = ql_wait_reg_rdy(qdev,
			XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
	if (status)
		goto exit;
	/* set up for reg read */
	ql_write32(qdev, XGMAC_ADDR, reg | XGMAC_ADDR_R);
	/* wait for reg to come ready */
	status = ql_wait_reg_rdy(qdev,
			XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
	if (status)
		goto exit;
	/* get the data */
	*data = ql_read32(qdev, XGMAC_DATA);
exit:
	return status;
}

/* This is used for reading the 64-bit statistics regs. */
int ql_read_xgmac_reg64(struct ql_adapter *qdev, u32 reg, u64 *data)
{
	int status = 0;
	u32 hi = 0;
	u32 lo = 0;

	status = ql_read_xgmac_reg(qdev, reg, &lo);
	if (status)
		goto exit;

	status = ql_read_xgmac_reg(qdev, reg + 4, &hi);
	if (status)
		goto exit;

	*data = (u64) lo | ((u64) hi << 32);

exit:
	return status;
}

static int ql_8000_port_initialize(struct ql_adapter *qdev)
{
	int status;
	/*
	 * Get MPI firmware version for driver banner
	 * and ethtool info.
	 */
	status = ql_mb_about_fw(qdev);
	if (status)
		goto exit;
	status = ql_mb_get_fw_state(qdev);
	if (status)
		goto exit;
	/* Wake up a worker to get/set the TX/RX frame sizes. */
	queue_delayed_work(qdev->workqueue, &qdev->mpi_port_cfg_work, 0);
exit:
	return status;
}

/* Take the MAC Core out of reset.
 * Enable statistics counting.
 * Take the transmitter/receiver out of reset.
 * This functionality may be done in the MPI firmware at a
 * later date.
 */
static int ql_8012_port_initialize(struct ql_adapter *qdev)
{
	int status = 0;
	u32 data;

	if (ql_sem_trylock(qdev, qdev->xg_sem_mask)) {
		/* Another function has the semaphore, so
		 * wait for the port init bit to come ready.
		 */
		QPRINTK(qdev, LINK, INFO,
			"Another function has the semaphore, so wait for the port init bit to come ready.\n");
		status = ql_wait_reg_rdy(qdev, STS, qdev->port_init, 0);
		if (status) {
			QPRINTK(qdev, LINK, CRIT,
				"Port initialize timed out.\n");
		}
		return status;
	}

	QPRINTK(qdev, LINK, INFO, "Got xgmac semaphore!.\n");
	/* Set the core reset. */
	status = ql_read_xgmac_reg(qdev, GLOBAL_CFG, &data);
	if (status)
		goto end;
	data |= GLOBAL_CFG_RESET;
	status = ql_write_xgmac_reg(qdev, GLOBAL_CFG, data);
	if (status)
		goto end;

	/* Clear the core reset and turn on jumbo for receiver. */
	data &= ~GLOBAL_CFG_RESET;	/* Clear core reset. */
	data |= GLOBAL_CFG_JUMBO;	/* Turn on jumbo. */
	data |= GLOBAL_CFG_TX_STAT_EN;
	data |= GLOBAL_CFG_RX_STAT_EN;
	status = ql_write_xgmac_reg(qdev, GLOBAL_CFG, data);
	if (status)
		goto end;

	/* Enable transmitter, and clear its reset. */
	status = ql_read_xgmac_reg(qdev, TX_CFG, &data);
	if (status)
		goto end;
	data &= ~TX_CFG_RESET;	/* Clear the TX MAC reset. */
	data |= TX_CFG_EN;	/* Enable the transmitter. */
	status = ql_write_xgmac_reg(qdev, TX_CFG, data);
	if (status)
		goto end;

	/* Enable receiver and clear its reset. */
	status = ql_read_xgmac_reg(qdev, RX_CFG, &data);
	if (status)
		goto end;
	data &= ~RX_CFG_RESET;	/* Clear the RX MAC reset. */
	data |= RX_CFG_EN;	/* Enable the receiver. */
	status = ql_write_xgmac_reg(qdev, RX_CFG, data);
	if (status)
		goto end;

	/* Turn on jumbo. */
	status =
	    ql_write_xgmac_reg(qdev, MAC_TX_PARAMS, MAC_TX_PARAMS_JUMBO | (0x2580 << 16));
	if (status)
		goto end;
	status =
	    ql_write_xgmac_reg(qdev, MAC_RX_PARAMS, 0x2580);
	if (status)
		goto end;

	/* Signal to the world that the port is enabled. */
	ql_write32(qdev, STS, ((qdev->port_init << 16) | qdev->port_init));
end:
	ql_sem_unlock(qdev, qdev->xg_sem_mask);
	return status;
}

static inline unsigned int ql_lbq_block_size(struct ql_adapter *qdev)
{
	return PAGE_SIZE << qdev->lbq_buf_order;
}

/* Get the next large buffer. */
static struct bq_desc *ql_get_curr_lbuf(struct rx_ring *rx_ring)
{
	struct bq_desc *lbq_desc = &rx_ring->lbq[rx_ring->lbq_curr_idx];
	rx_ring->lbq_curr_idx++;
	if (rx_ring->lbq_curr_idx == rx_ring->lbq_len)
		rx_ring->lbq_curr_idx = 0;
	rx_ring->lbq_free_cnt++;
	return lbq_desc;
}

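/* Get the next large buffer chunk and sync it for CPU access.  The master
 * page is unmapped once its last chunk has been consumed.
 */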
static struct bq_desc *ql_get_curr_lchunk(struct ql_adapter *qdev,
		struct rx_ring *rx_ring)
{
	struct bq_desc *lbq_desc = ql_get_curr_lbuf(rx_ring);

	pci_dma_sync_single_for_cpu(qdev->pdev,
					pci_unmap_addr(lbq_desc, mapaddr),
					rx_ring->lbq_buf_size,
					PCI_DMA_FROMDEVICE);

	/* If it's the last chunk of our master page then
	 * we unmap it.
	 */
	if ((lbq_desc->p.pg_chunk.offset + rx_ring->lbq_buf_size)
					== ql_lbq_block_size(qdev))
		pci_unmap_page(qdev->pdev,
				lbq_desc->p.pg_chunk.map,
				ql_lbq_block_size(qdev),
				PCI_DMA_FROMDEVICE);
	return lbq_desc;
}

/* Get the next small buffer. */
static struct bq_desc *ql_get_curr_sbuf(struct rx_ring *rx_ring)
{
	struct bq_desc *sbq_desc = &rx_ring->sbq[rx_ring->sbq_curr_idx];
	rx_ring->sbq_curr_idx++;
	if (rx_ring->sbq_curr_idx == rx_ring->sbq_len)
		rx_ring->sbq_curr_idx = 0;
	rx_ring->sbq_free_cnt++;
	return sbq_desc;
}

/* Update an rx ring index. */
static void ql_update_cq(struct rx_ring *rx_ring)
{
	rx_ring->cnsmr_idx++;
	rx_ring->curr_entry++;
	if (unlikely(rx_ring->cnsmr_idx == rx_ring->cq_len)) {
		rx_ring->cnsmr_idx = 0;
		rx_ring->curr_entry = rx_ring->cq_base;
	}
}

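/* Tell the chip how far we have consumed this completion queue. */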
static void ql_write_cq_idx(struct rx_ring *rx_ring)
{
	ql_write_db_reg(rx_ring->cnsmr_idx, rx_ring->cnsmr_idx_db_reg);
}

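/* Hand out the next chunk of the current master page, allocating and
 * mapping a fresh page when the previous one has been used up.
 */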
static int ql_get_next_chunk(struct ql_adapter *qdev, struct rx_ring *rx_ring,
						struct bq_desc *lbq_desc)
{
	if (!rx_ring->pg_chunk.page) {
		u64 map;
		rx_ring->pg_chunk.page = alloc_pages(__GFP_COLD | __GFP_COMP |
						GFP_ATOMIC,
						qdev->lbq_buf_order);
		if (unlikely(!rx_ring->pg_chunk.page)) {
			QPRINTK(qdev, DRV, ERR,
				"page allocation failed.\n");
			return -ENOMEM;
		}
		rx_ring->pg_chunk.offset = 0;
		map = pci_map_page(qdev->pdev, rx_ring->pg_chunk.page,
					0, ql_lbq_block_size(qdev),
					PCI_DMA_FROMDEVICE);
		if (pci_dma_mapping_error(qdev->pdev, map)) {
			__free_pages(rx_ring->pg_chunk.page,
					qdev->lbq_buf_order);
			QPRINTK(qdev, DRV, ERR,
				"PCI mapping failed.\n");
			return -ENOMEM;
		}
		rx_ring->pg_chunk.map = map;
		rx_ring->pg_chunk.va = page_address(rx_ring->pg_chunk.page);
	}

	/* Copy the current master pg_chunk info
	 * to the current descriptor.
	 */
	lbq_desc->p.pg_chunk = rx_ring->pg_chunk;

	/* Adjust the master page chunk for next
	 * buffer get.
	 */
	rx_ring->pg_chunk.offset += rx_ring->lbq_buf_size;
	if (rx_ring->pg_chunk.offset == ql_lbq_block_size(qdev)) {
		rx_ring->pg_chunk.page = NULL;
		lbq_desc->p.pg_chunk.last_flag = 1;
	} else {
		rx_ring->pg_chunk.va += rx_ring->lbq_buf_size;
		get_page(rx_ring->pg_chunk.page);
		lbq_desc->p.pg_chunk.last_flag = 0;
	}
	return 0;
}
/* Process (refill) a large buffer queue. */
static void ql_update_lbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
{
	u32 clean_idx = rx_ring->lbq_clean_idx;
	u32 start_idx = clean_idx;
	struct bq_desc *lbq_desc;
	u64 map;
	int i;

	while (rx_ring->lbq_free_cnt > 32) {
		for (i = 0; i < 16; i++) {
			QPRINTK(qdev, RX_STATUS, DEBUG,
				"lbq: try cleaning clean_idx = %d.\n",
				clean_idx);
			lbq_desc = &rx_ring->lbq[clean_idx];
			if (ql_get_next_chunk(qdev, rx_ring, lbq_desc)) {
				QPRINTK(qdev, IFUP, ERR,
					"Could not get a page chunk.\n");
				return;
			}

			map = lbq_desc->p.pg_chunk.map +
				lbq_desc->p.pg_chunk.offset;
			pci_unmap_addr_set(lbq_desc, mapaddr, map);
			pci_unmap_len_set(lbq_desc, maplen,
					rx_ring->lbq_buf_size);
			*lbq_desc->addr = cpu_to_le64(map);

			pci_dma_sync_single_for_device(qdev->pdev, map,
						rx_ring->lbq_buf_size,
						PCI_DMA_FROMDEVICE);
			clean_idx++;
			if (clean_idx == rx_ring->lbq_len)
				clean_idx = 0;
		}

		rx_ring->lbq_clean_idx = clean_idx;
		rx_ring->lbq_prod_idx += 16;
		if (rx_ring->lbq_prod_idx == rx_ring->lbq_len)
			rx_ring->lbq_prod_idx = 0;
		rx_ring->lbq_free_cnt -= 16;
	}

	if (start_idx != clean_idx) {
		QPRINTK(qdev, RX_STATUS, DEBUG,
			"lbq: updating prod idx = %d.\n",
			rx_ring->lbq_prod_idx);
		ql_write_db_reg(rx_ring->lbq_prod_idx,
				rx_ring->lbq_prod_idx_db_reg);
	}
}

/* Process (refill) a small buffer queue. */
static void ql_update_sbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
{
	u32 clean_idx = rx_ring->sbq_clean_idx;
	u32 start_idx = clean_idx;
	struct bq_desc *sbq_desc;
	u64 map;
	int i;

	while (rx_ring->sbq_free_cnt > 16) {
		for (i = 0; i < 16; i++) {
			sbq_desc = &rx_ring->sbq[clean_idx];
			QPRINTK(qdev, RX_STATUS, DEBUG,
				"sbq: try cleaning clean_idx = %d.\n",
				clean_idx);
			if (sbq_desc->p.skb == NULL) {
				QPRINTK(qdev, RX_STATUS, DEBUG,
					"sbq: getting new skb for index %d.\n",
					sbq_desc->index);
				sbq_desc->p.skb =
				    netdev_alloc_skb(qdev->ndev,
						     SMALL_BUFFER_SIZE);
				if (sbq_desc->p.skb == NULL) {
					QPRINTK(qdev, PROBE, ERR,
						"Couldn't get an skb.\n");
					rx_ring->sbq_clean_idx = clean_idx;
					return;
				}
				skb_reserve(sbq_desc->p.skb, QLGE_SB_PAD);
				map = pci_map_single(qdev->pdev,
						     sbq_desc->p.skb->data,
						     rx_ring->sbq_buf_size,
						     PCI_DMA_FROMDEVICE);
				if (pci_dma_mapping_error(qdev->pdev, map)) {
					QPRINTK(qdev, IFUP, ERR, "PCI mapping failed.\n");
					rx_ring->sbq_clean_idx = clean_idx;
					dev_kfree_skb_any(sbq_desc->p.skb);
					sbq_desc->p.skb = NULL;
					return;
				}
				pci_unmap_addr_set(sbq_desc, mapaddr, map);
				pci_unmap_len_set(sbq_desc, maplen,
						  rx_ring->sbq_buf_size);
				*sbq_desc->addr = cpu_to_le64(map);
			}

			clean_idx++;
			if (clean_idx == rx_ring->sbq_len)
				clean_idx = 0;
		}
		rx_ring->sbq_clean_idx = clean_idx;
		rx_ring->sbq_prod_idx += 16;
		if (rx_ring->sbq_prod_idx == rx_ring->sbq_len)
			rx_ring->sbq_prod_idx = 0;
		rx_ring->sbq_free_cnt -= 16;
	}

	if (start_idx != clean_idx) {
		QPRINTK(qdev, RX_STATUS, DEBUG,
			"sbq: updating prod idx = %d.\n",
			rx_ring->sbq_prod_idx);
		ql_write_db_reg(rx_ring->sbq_prod_idx,
				rx_ring->sbq_prod_idx_db_reg);
	}
}

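/* Refill both the small and large buffer queues for this rx ring. */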
static void ql_update_buffer_queues(struct ql_adapter *qdev,
				    struct rx_ring *rx_ring)
{
	ql_update_sbq(qdev, rx_ring);
	ql_update_lbq(qdev, rx_ring);
}

/* Unmaps tx buffers.  Can be called from send() if a pci mapping
 * fails at some stage, or from the interrupt when a tx completes.
 */
static void ql_unmap_send(struct ql_adapter *qdev,
			  struct tx_ring_desc *tx_ring_desc, int mapped)
{
	int i;
	for (i = 0; i < mapped; i++) {
		if (i == 0 || (i == 7 && mapped > 7)) {
			/*
			 * Unmap the skb->data area, or the
			 * external sglist (AKA the Outbound
			 * Address List (OAL)).
			 * If its the zeroeth element, then it's
			 * the skb->data area.  If it's the 7th
			 * element and there is more than 6 frags,
			 * then its an OAL.
			 */
			if (i == 7) {
				QPRINTK(qdev, TX_DONE, DEBUG,
					"unmapping OAL area.\n");
			}
			pci_unmap_single(qdev->pdev,
					 pci_unmap_addr(&tx_ring_desc->map[i],
							mapaddr),
					 pci_unmap_len(&tx_ring_desc->map[i],
						       maplen),
					 PCI_DMA_TODEVICE);
		} else {
			QPRINTK(qdev, TX_DONE, DEBUG, "unmapping frag %d.\n",
				i);
			pci_unmap_page(qdev->pdev,
				       pci_unmap_addr(&tx_ring_desc->map[i],
						      mapaddr),
				       pci_unmap_len(&tx_ring_desc->map[i],
						     maplen), PCI_DMA_TODEVICE);
		}
	}

}

/* Map the buffers for this transmit.  This will return
 * NETDEV_TX_BUSY or NETDEV_TX_OK based on success.
 */
static int ql_map_send(struct ql_adapter *qdev,
		       struct ob_mac_iocb_req *mac_iocb_ptr,
		       struct sk_buff *skb, struct tx_ring_desc *tx_ring_desc)
{
	int len = skb_headlen(skb);
	dma_addr_t map;
	int frag_idx, err, map_idx = 0;
	struct tx_buf_desc *tbd = mac_iocb_ptr->tbd;
	int frag_cnt = skb_shinfo(skb)->nr_frags;

	if (frag_cnt) {
		QPRINTK(qdev, TX_QUEUED, DEBUG, "frag_cnt = %d.\n", frag_cnt);
	}
	/*
	 * Map the skb buffer first.
	 */
	map = pci_map_single(qdev->pdev, skb->data, len, PCI_DMA_TODEVICE);

	err = pci_dma_mapping_error(qdev->pdev, map);
	if (err) {
		QPRINTK(qdev, TX_QUEUED, ERR,
			"PCI mapping failed with error: %d\n", err);

		return NETDEV_TX_BUSY;
	}

	tbd->len = cpu_to_le32(len);
	tbd->addr = cpu_to_le64(map);
	pci_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr, map);
	pci_unmap_len_set(&tx_ring_desc->map[map_idx], maplen, len);
	map_idx++;

	/*
	 * This loop fills the remainder of the 8 address descriptors
	 * in the IOCB.  If there are more than 7 fragments, then the
	 * eighth address desc will point to an external list (OAL).
	 * When this happens, the remainder of the frags will be stored
	 * in this list.
	 */
	for (frag_idx = 0; frag_idx < frag_cnt; frag_idx++, map_idx++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[frag_idx];
		tbd++;
		if (frag_idx == 6 && frag_cnt > 7) {
			/* Let's tack on an sglist.
			 * Our control block will now
			 * look like this:
			 * iocb->seg[0] = skb->data
			 * iocb->seg[1] = frag[0]
			 * iocb->seg[2] = frag[1]
			 * iocb->seg[3] = frag[2]
			 * iocb->seg[4] = frag[3]
			 * iocb->seg[5] = frag[4]
			 * iocb->seg[6] = frag[5]
			 * iocb->seg[7] = ptr to OAL (external sglist)
			 * oal->seg[0] = frag[6]
			 * oal->seg[1] = frag[7]
			 * oal->seg[2] = frag[8]
			 * oal->seg[3] = frag[9]
			 * oal->seg[4] = frag[10]
			 *      etc...
			 */
			/* Tack on the OAL in the eighth segment of IOCB. */
			map = pci_map_single(qdev->pdev, &tx_ring_desc->oal,
					     sizeof(struct oal),
					     PCI_DMA_TODEVICE);
			err = pci_dma_mapping_error(qdev->pdev, map);
			if (err) {
				QPRINTK(qdev, TX_QUEUED, ERR,
					"PCI mapping outbound address list with error: %d\n",
					err);
				goto map_error;
			}

			tbd->addr = cpu_to_le64(map);
			/*
			 * The length is the number of fragments
			 * that remain to be mapped times the length
			 * of our sglist (OAL).
			 */
			tbd->len =
			    cpu_to_le32((sizeof(struct tx_buf_desc) *
					 (frag_cnt - frag_idx)) | TX_DESC_C);
			pci_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr,
					   map);
			pci_unmap_len_set(&tx_ring_desc->map[map_idx], maplen,
					  sizeof(struct oal));
			tbd = (struct tx_buf_desc *)&tx_ring_desc->oal;
			map_idx++;
		}

		map =
		    pci_map_page(qdev->pdev, frag->page,
				 frag->page_offset, frag->size,
				 PCI_DMA_TODEVICE);

		err = pci_dma_mapping_error(qdev->pdev, map);
		if (err) {
			QPRINTK(qdev, TX_QUEUED, ERR,
				"PCI mapping frags failed with error: %d.\n",
				err);
			goto map_error;
		}

		tbd->addr = cpu_to_le64(map);
		tbd->len = cpu_to_le32(frag->size);
		pci_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr, map);
		pci_unmap_len_set(&tx_ring_desc->map[map_idx], maplen,
				  frag->size);

	}
	/* Save the number of segments we've mapped. */
	tx_ring_desc->map_cnt = map_idx;
	/* Terminate the last segment. */
	tbd->len = cpu_to_le32(le32_to_cpu(tbd->len) | TX_DESC_E);
	return NETDEV_TX_OK;

map_error:
	/*
	 * If the first frag mapping failed, then i will be zero.
	 * This causes the unmap of the skb->data area.  Otherwise
	 * we pass in the number of frags that mapped successfully
	 * so they can be umapped.
	 */
	ql_unmap_send(qdev, tx_ring_desc, map_idx);
	return NETDEV_TX_BUSY;
}

/* Process an inbound completion from an rx ring. */
static void ql_process_mac_rx_gro_page(struct ql_adapter *qdev,
					struct rx_ring *rx_ring,
					struct ib_mac_iocb_rsp *ib_mac_rsp,
					u32 length,
					u16 vlan_id)
{
	struct sk_buff *skb;
	struct bq_desc *lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
	struct skb_frag_struct *rx_frag;
	int nr_frags;
	struct napi_struct *napi = &rx_ring->napi;

	napi->dev = qdev->ndev;

	skb = napi_get_frags(napi);
	if (!skb) {
		QPRINTK(qdev, DRV, ERR, "Couldn't get an skb, exiting.\n");
		rx_ring->rx_dropped++;
		put_page(lbq_desc->p.pg_chunk.page);
		return;
	}
	prefetch(lbq_desc->p.pg_chunk.va);
	rx_frag = skb_shinfo(skb)->frags;
	nr_frags = skb_shinfo(skb)->nr_frags;
	rx_frag += nr_frags;
	rx_frag->page = lbq_desc->p.pg_chunk.page;
	rx_frag->page_offset = lbq_desc->p.pg_chunk.offset;
	rx_frag->size = length;

	skb->len += length;
	skb->data_len += length;
	skb->truesize += length;
	skb_shinfo(skb)->nr_frags++;

	rx_ring->rx_packets++;
	rx_ring->rx_bytes += length;
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	skb_record_rx_queue(skb, rx_ring->cq_id);
	if (qdev->vlgrp && (vlan_id != 0xffff))
		vlan_gro_frags(&rx_ring->napi, qdev->vlgrp, vlan_id);
	else
		napi_gro_frags(napi);
}

/* Process an inbound completion from an rx ring. */
static void ql_process_mac_rx_page(struct ql_adapter *qdev,
					struct rx_ring *rx_ring,
					struct ib_mac_iocb_rsp *ib_mac_rsp,
					u32 length,
					u16 vlan_id)
{
	struct net_device *ndev = qdev->ndev;
	struct sk_buff *skb = NULL;
	void *addr;
	struct bq_desc *lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
	struct napi_struct *napi = &rx_ring->napi;

	skb = netdev_alloc_skb(ndev, length);
	if (!skb) {
		QPRINTK(qdev, DRV, ERR, "Couldn't get an skb, "
			"need to unwind!.\n");
		rx_ring->rx_dropped++;
		put_page(lbq_desc->p.pg_chunk.page);
		return;
	}

	addr = lbq_desc->p.pg_chunk.va;
	prefetch(addr);


	/* Frame error, so drop the packet. */
	if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
		QPRINTK(qdev, DRV, ERR, "Receive error, flags2 = 0x%x\n",
				ib_mac_rsp->flags2);
		rx_ring->rx_errors++;
		goto err_out;
	}

	/* The max framesize filter on this chip is set higher than
	 * MTU since FCoE uses 2k frames.
	 */
	if (skb->len > ndev->mtu + ETH_HLEN) {
		QPRINTK(qdev, DRV, ERR, "Segment too small, dropping.\n");
		rx_ring->rx_dropped++;
		goto err_out;
	}
	memcpy(skb_put(skb, ETH_HLEN), addr, ETH_HLEN);
	QPRINTK(qdev, RX_STATUS, DEBUG,
		"%d bytes of headers and data in large. Chain "
		"page to new skb and pull tail.\n", length);
	skb_fill_page_desc(skb, 0, lbq_desc->p.pg_chunk.page,
				lbq_desc->p.pg_chunk.offset+ETH_HLEN,
				length-ETH_HLEN);
	skb->len += length-ETH_HLEN;
	skb->data_len += length-ETH_HLEN;
	skb->truesize += length-ETH_HLEN;

	rx_ring->rx_packets++;
	rx_ring->rx_bytes += skb->len;
	skb->protocol = eth_type_trans(skb, ndev);
	skb->ip_summed = CHECKSUM_NONE;

	if (qdev->rx_csum &&
		!(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
		/* TCP frame. */
		if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
			QPRINTK(qdev, RX_STATUS, DEBUG,
					"TCP checksum done!\n");
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		} else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
				(ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
			/* Unfragmented ipv4 UDP frame. */
			struct iphdr *iph = (struct iphdr *) skb->data;
			if (!(iph->frag_off &
				cpu_to_be16(IP_MF|IP_OFFSET))) {
				skb->ip_summed = CHECKSUM_UNNECESSARY;
				QPRINTK(qdev, RX_STATUS, DEBUG,
						"TCP checksum done!\n");
			}
		}
	}

	skb_record_rx_queue(skb, rx_ring->cq_id);
	if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
		if (qdev->vlgrp && (vlan_id != 0xffff))
			vlan_gro_receive(napi, qdev->vlgrp, vlan_id, skb);
		else
			napi_gro_receive(napi, skb);
	} else {
		if (qdev->vlgrp && (vlan_id != 0xffff))
			vlan_hwaccel_receive_skb(skb, qdev->vlgrp, vlan_id);
		else
			netif_receive_skb(skb);
	}
	return;
err_out:
	dev_kfree_skb_any(skb);
	put_page(lbq_desc->p.pg_chunk.page);
}

/* Process an inbound completion from an rx ring. */
static void ql_process_mac_rx_skb(struct ql_adapter *qdev,
					struct rx_ring *rx_ring,
					struct ib_mac_iocb_rsp *ib_mac_rsp,
					u32 length,
					u16 vlan_id)
{
	struct net_device *ndev = qdev->ndev;
	struct sk_buff *skb = NULL;
	struct sk_buff *new_skb = NULL;
	struct bq_desc *sbq_desc = ql_get_curr_sbuf(rx_ring);

	skb = sbq_desc->p.skb;
	/* Allocate new_skb and copy */
	new_skb = netdev_alloc_skb(qdev->ndev, length + NET_IP_ALIGN);
	if (new_skb == NULL) {
		QPRINTK(qdev, PROBE, ERR,
			"No skb available, drop the packet.\n");
		rx_ring->rx_dropped++;
		return;
	}
	skb_reserve(new_skb, NET_IP_ALIGN);
	memcpy(skb_put(new_skb, length), skb->data, length);
	skb = new_skb;

	/* Frame error, so drop the packet. */
	if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
		QPRINTK(qdev, DRV, ERR, "Receive error, flags2 = 0x%x\n",
				ib_mac_rsp->flags2);
		dev_kfree_skb_any(skb);
		rx_ring->rx_errors++;
		return;
	}

	/* loopback self test for ethtool */
	if (test_bit(QL_SELFTEST, &qdev->flags)) {
		ql_check_lb_frame(qdev, skb);
		dev_kfree_skb_any(skb);
		return;
	}

	/* The max framesize filter on this chip is set higher than
	 * MTU since FCoE uses 2k frames.
	 */
	if (skb->len > ndev->mtu + ETH_HLEN) {
		dev_kfree_skb_any(skb);
		rx_ring->rx_dropped++;
		return;
	}

	prefetch(skb->data);
	skb->dev = ndev;
	if (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) {
		QPRINTK(qdev, RX_STATUS, DEBUG, "%s%s%s Multicast.\n",
			(ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
			IB_MAC_IOCB_RSP_M_HASH ? "Hash" : "",
			(ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
			IB_MAC_IOCB_RSP_M_REG ? "Registered" : "",
			(ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
			IB_MAC_IOCB_RSP_M_PROM ? "Promiscuous" : "");
	}
	if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_P)
		QPRINTK(qdev, RX_STATUS, DEBUG, "Promiscuous Packet.\n");

	rx_ring->rx_packets++;
	rx_ring->rx_bytes += skb->len;
	skb->protocol = eth_type_trans(skb, ndev);
	skb->ip_summed = CHECKSUM_NONE;

	/* If rx checksum is on, and there are no
	 * csum or frame errors.
	 */
	if (qdev->rx_csum &&
		!(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
		/* TCP frame. */
		if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
			QPRINTK(qdev, RX_STATUS, DEBUG,
					"TCP checksum done!\n");
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		} else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
				(ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
			/* Unfragmented ipv4 UDP frame. */
			struct iphdr *iph = (struct iphdr *) skb->data;
			if (!(iph->frag_off &
				cpu_to_be16(IP_MF|IP_OFFSET))) {
				skb->ip_summed = CHECKSUM_UNNECESSARY;
				QPRINTK(qdev, RX_STATUS, DEBUG,
						"TCP checksum done!\n");
			}
		}
	}

	skb_record_rx_queue(skb, rx_ring->cq_id);
	if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
		if (qdev->vlgrp && (vlan_id != 0xffff))
			vlan_gro_receive(&rx_ring->napi, qdev->vlgrp,
						vlan_id, skb);
		else
			napi_gro_receive(&rx_ring->napi, skb);
	} else {
		if (qdev->vlgrp && (vlan_id != 0xffff))
			vlan_hwaccel_receive_skb(skb, qdev->vlgrp, vlan_id);
		else
			netif_receive_skb(skb);
	}
}

static void ql_realign_skb(struct sk_buff *skb, int len)
{
	void *temp_addr = skb->data;

	/* Undo the skb_reserve(skb,32) we did before
	 * giving to hardware, and realign data on
	 * a 2-byte boundary.
	 */
	skb->data -= QLGE_SB_PAD - NET_IP_ALIGN;
	skb->tail -= QLGE_SB_PAD - NET_IP_ALIGN;
	skb_copy_to_linear_data(skb, temp_addr,
		(unsigned int)len);
}

1698/*
1699 * This function builds an skb for the given inbound
1700 * completion. It will be rewritten for readability in the near
 1701 * future, but for now it works well.
1702 */
1703static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev,
1704 struct rx_ring *rx_ring,
1705 struct ib_mac_iocb_rsp *ib_mac_rsp)
1706{
1707 struct bq_desc *lbq_desc;
1708 struct bq_desc *sbq_desc;
1709 struct sk_buff *skb = NULL;
1710 u32 length = le32_to_cpu(ib_mac_rsp->data_len);
1711 u32 hdr_len = le32_to_cpu(ib_mac_rsp->hdr_len);
1712
1713 /*
1714 * Handle the header buffer if present.
1715 */
1716 if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV &&
1717 ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
1718 QPRINTK(qdev, RX_STATUS, DEBUG, "Header of %d bytes in small buffer.\n", hdr_len);
1719 /*
1720 * Headers fit nicely into a small buffer.
1721 */
1722 sbq_desc = ql_get_curr_sbuf(rx_ring);
1723 pci_unmap_single(qdev->pdev,
1724 pci_unmap_addr(sbq_desc, mapaddr),
1725 pci_unmap_len(sbq_desc, maplen),
1726 PCI_DMA_FROMDEVICE);
1727 skb = sbq_desc->p.skb;
1728 ql_realign_skb(skb, hdr_len);
1729 skb_put(skb, hdr_len);
1730 sbq_desc->p.skb = NULL;
1731 }
1732
1733 /*
1734 * Handle the data buffer(s).
1735 */
1736 if (unlikely(!length)) { /* Is there data too? */
1737 QPRINTK(qdev, RX_STATUS, DEBUG,
1738 "No Data buffer in this packet.\n");
1739 return skb;
1740 }
1741
1742 if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DS) {
1743 if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
1744 QPRINTK(qdev, RX_STATUS, DEBUG,
1745 "Headers in small, data of %d bytes in small, combine them.\n", length);
1746 /*
1747 * Data is less than small buffer size so it's
1748 * stuffed in a small buffer.
1749 * For this case we append the data
1750 * from the "data" small buffer to the "header" small
1751 * buffer.
1752 */
1753 sbq_desc = ql_get_curr_sbuf(rx_ring);
1754 pci_dma_sync_single_for_cpu(qdev->pdev,
1755 pci_unmap_addr
1756 (sbq_desc, mapaddr),
1757 pci_unmap_len
1758 (sbq_desc, maplen),
1759 PCI_DMA_FROMDEVICE);
1760 memcpy(skb_put(skb, length),
1761 sbq_desc->p.skb->data, length);
1762 pci_dma_sync_single_for_device(qdev->pdev,
1763 pci_unmap_addr
1764 (sbq_desc,
1765 mapaddr),
1766 pci_unmap_len
1767 (sbq_desc,
1768 maplen),
1769 PCI_DMA_FROMDEVICE);
1770 } else {
1771 QPRINTK(qdev, RX_STATUS, DEBUG,
1772 "%d bytes in a single small buffer.\n", length);
1773 sbq_desc = ql_get_curr_sbuf(rx_ring);
1774 skb = sbq_desc->p.skb;
1775 ql_realign_skb(skb, length);
1776 skb_put(skb, length);
1777 pci_unmap_single(qdev->pdev,
1778 pci_unmap_addr(sbq_desc,
1779 mapaddr),
1780 pci_unmap_len(sbq_desc,
1781 maplen),
1782 PCI_DMA_FROMDEVICE);
1783 sbq_desc->p.skb = NULL;
1784 }
1785 } else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) {
1786 if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
1787 QPRINTK(qdev, RX_STATUS, DEBUG,
1788 "Header in small, %d bytes in large. Chain large to small!\n", length);
1789 /*
1790 * The data is in a single large buffer. We
1791 * chain it to the header buffer's skb and let
1792 * it rip.
1793 */
Ron Mercer7c734352009-10-19 03:32:19 +00001794 lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001795 QPRINTK(qdev, RX_STATUS, DEBUG,
Ron Mercer7c734352009-10-19 03:32:19 +00001796 "Chaining page at offset = %d,"
1797 "for %d bytes to skb.\n",
1798 lbq_desc->p.pg_chunk.offset, length);
1799 skb_fill_page_desc(skb, 0, lbq_desc->p.pg_chunk.page,
1800 lbq_desc->p.pg_chunk.offset,
1801 length);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001802 skb->len += length;
1803 skb->data_len += length;
1804 skb->truesize += length;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001805 } else {
1806 /*
1807 * The headers and data are in a single large buffer. We
1808 * copy it to a new skb and let it go. This can happen with
1809 * jumbo mtu on a non-TCP/UDP frame.
1810 */
Ron Mercer7c734352009-10-19 03:32:19 +00001811 lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001812 skb = netdev_alloc_skb(qdev->ndev, length);
1813 if (skb == NULL) {
1814 QPRINTK(qdev, PROBE, DEBUG,
1815 "No skb available, drop the packet.\n");
1816 return NULL;
1817 }
Ron Mercer4055c7d2009-01-04 17:07:09 -08001818 pci_unmap_page(qdev->pdev,
1819 pci_unmap_addr(lbq_desc,
1820 mapaddr),
1821 pci_unmap_len(lbq_desc, maplen),
1822 PCI_DMA_FROMDEVICE);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001823 skb_reserve(skb, NET_IP_ALIGN);
1824 QPRINTK(qdev, RX_STATUS, DEBUG,
1825 "%d bytes of headers and data in large. Chain page to new skb and pull tail.\n", length);
Ron Mercer7c734352009-10-19 03:32:19 +00001826 skb_fill_page_desc(skb, 0,
1827 lbq_desc->p.pg_chunk.page,
1828 lbq_desc->p.pg_chunk.offset,
1829 length);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001830 skb->len += length;
1831 skb->data_len += length;
1832 skb->truesize += length;
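			/* The whole remaining payload fit in this single page
			 * chunk, so zero out the residual length.
			 */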
1833 length -= length;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001834 __pskb_pull_tail(skb,
1835 (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ?
1836 VLAN_ETH_HLEN : ETH_HLEN);
1837 }
1838 } else {
1839 /*
1840 * The data is in a chain of large buffers
1841 * pointed to by a small buffer. We loop
 1842		 * through and chain them to our small header
1843 * buffer's skb.
1844 * frags: There are 18 max frags and our small
1845 * buffer will hold 32 of them. The thing is,
1846 * we'll use 3 max for our 9000 byte jumbo
1847 * frames. If the MTU goes up we could
1848 * eventually be in trouble.
1849 */
Ron Mercer7c734352009-10-19 03:32:19 +00001850 int size, i = 0;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001851 sbq_desc = ql_get_curr_sbuf(rx_ring);
1852 pci_unmap_single(qdev->pdev,
1853 pci_unmap_addr(sbq_desc, mapaddr),
1854 pci_unmap_len(sbq_desc, maplen),
1855 PCI_DMA_FROMDEVICE);
1856 if (!(ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS)) {
1857 /*
 1858			 * This is a non-TCP/UDP IP frame, so
1859 * the headers aren't split into a small
1860 * buffer. We have to use the small buffer
1861 * that contains our sg list as our skb to
1862 * send upstairs. Copy the sg list here to
1863 * a local buffer and use it to find the
1864 * pages to chain.
1865 */
1866 QPRINTK(qdev, RX_STATUS, DEBUG,
1867 "%d bytes of headers & data in chain of large.\n", length);
1868 skb = sbq_desc->p.skb;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001869 sbq_desc->p.skb = NULL;
1870 skb_reserve(skb, NET_IP_ALIGN);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001871 }
1872 while (length > 0) {
Ron Mercer7c734352009-10-19 03:32:19 +00001873 lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
1874 size = (length < rx_ring->lbq_buf_size) ? length :
1875 rx_ring->lbq_buf_size;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001876
1877 QPRINTK(qdev, RX_STATUS, DEBUG,
1878 "Adding page %d to skb for %d bytes.\n",
1879 i, size);
Ron Mercer7c734352009-10-19 03:32:19 +00001880 skb_fill_page_desc(skb, i,
1881 lbq_desc->p.pg_chunk.page,
1882 lbq_desc->p.pg_chunk.offset,
1883 size);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001884 skb->len += size;
1885 skb->data_len += size;
1886 skb->truesize += size;
1887 length -= size;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001888 i++;
1889 }
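		/* Pull the Ethernet (or VLAN) header into the linear
		 * area so the stack can parse it.
		 */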
1890 __pskb_pull_tail(skb, (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ?
1891 VLAN_ETH_HLEN : ETH_HLEN);
1892 }
1893 return skb;
1894}
1895
1896/* Process an inbound completion from an rx ring. */
Ron Mercer4f848c02010-01-02 10:37:43 +00001897static void ql_process_mac_split_rx_intr(struct ql_adapter *qdev,
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001898 struct rx_ring *rx_ring,
Ron Mercer4f848c02010-01-02 10:37:43 +00001899 struct ib_mac_iocb_rsp *ib_mac_rsp,
1900 u16 vlan_id)
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001901{
1902 struct net_device *ndev = qdev->ndev;
1903 struct sk_buff *skb = NULL;
1904
1905 QL_DUMP_IB_MAC_RSP(ib_mac_rsp);
1906
1907 skb = ql_build_rx_skb(qdev, rx_ring, ib_mac_rsp);
1908 if (unlikely(!skb)) {
1909 QPRINTK(qdev, RX_STATUS, DEBUG,
1910 "No skb available, drop packet.\n");
Ron Mercer885ee392009-11-03 13:49:31 +00001911 rx_ring->rx_dropped++;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001912 return;
1913 }
1914
Ron Mercera32959c2009-06-09 05:39:27 +00001915 /* Frame error, so drop the packet. */
1916 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
1917 QPRINTK(qdev, DRV, ERR, "Receive error, flags2 = 0x%x\n",
1918 ib_mac_rsp->flags2);
1919 dev_kfree_skb_any(skb);
Ron Mercer885ee392009-11-03 13:49:31 +00001920 rx_ring->rx_errors++;
Ron Mercera32959c2009-06-09 05:39:27 +00001921 return;
1922 }
Ron Mercerec33a492009-06-09 05:39:28 +00001923
1924 /* The max framesize filter on this chip is set higher than
1925 * MTU since FCoE uses 2k frames.
1926 */
1927 if (skb->len > ndev->mtu + ETH_HLEN) {
1928 dev_kfree_skb_any(skb);
Ron Mercer885ee392009-11-03 13:49:31 +00001929 rx_ring->rx_dropped++;
Ron Mercerec33a492009-06-09 05:39:28 +00001930 return;
1931 }
1932
Ron Mercer9dfbbaa2009-10-30 12:13:33 +00001933 /* loopback self test for ethtool */
1934 if (test_bit(QL_SELFTEST, &qdev->flags)) {
1935 ql_check_lb_frame(qdev, skb);
1936 dev_kfree_skb_any(skb);
1937 return;
1938 }
1939
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001940 prefetch(skb->data);
1941 skb->dev = ndev;
1942 if (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) {
1943 QPRINTK(qdev, RX_STATUS, DEBUG, "%s%s%s Multicast.\n",
1944 (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1945 IB_MAC_IOCB_RSP_M_HASH ? "Hash" : "",
1946 (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1947 IB_MAC_IOCB_RSP_M_REG ? "Registered" : "",
1948 (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1949 IB_MAC_IOCB_RSP_M_PROM ? "Promiscuous" : "");
Ron Mercer885ee392009-11-03 13:49:31 +00001950 rx_ring->rx_multicast++;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001951 }
1952 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_P) {
1953 QPRINTK(qdev, RX_STATUS, DEBUG, "Promiscuous Packet.\n");
1954 }
Ron Mercerd555f592009-03-09 10:59:19 +00001955
Ron Mercerd555f592009-03-09 10:59:19 +00001956 skb->protocol = eth_type_trans(skb, ndev);
1957 skb->ip_summed = CHECKSUM_NONE;
1958
1959 /* If rx checksum is on, and there are no
1960 * csum or frame errors.
1961 */
1962 if (qdev->rx_csum &&
Ron Mercerd555f592009-03-09 10:59:19 +00001963 !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
1964 /* TCP frame. */
1965 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
1966 QPRINTK(qdev, RX_STATUS, DEBUG,
1967 "TCP checksum done!\n");
1968 skb->ip_summed = CHECKSUM_UNNECESSARY;
1969 } else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
1970 (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
1971 /* Unfragmented ipv4 UDP frame. */
1972 struct iphdr *iph = (struct iphdr *) skb->data;
1973 if (!(iph->frag_off &
1974 cpu_to_be16(IP_MF|IP_OFFSET))) {
1975 skb->ip_summed = CHECKSUM_UNNECESSARY;
1976 QPRINTK(qdev, RX_STATUS, DEBUG,
1977 "TCP checksum done!\n");
1978 }
1979 }
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001980 }
Ron Mercerd555f592009-03-09 10:59:19 +00001981
Ron Mercer885ee392009-11-03 13:49:31 +00001982 rx_ring->rx_packets++;
1983 rx_ring->rx_bytes += skb->len;
Ron Mercerb2014ff2009-08-27 11:02:09 +00001984 skb_record_rx_queue(skb, rx_ring->cq_id);
Ron Mercer22bdd4f2009-03-09 10:59:20 +00001985 if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
1986 if (qdev->vlgrp &&
1987 (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) &&
1988 (vlan_id != 0))
1989 vlan_gro_receive(&rx_ring->napi, qdev->vlgrp,
1990 vlan_id, skb);
1991 else
1992 napi_gro_receive(&rx_ring->napi, skb);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001993 } else {
Ron Mercer22bdd4f2009-03-09 10:59:20 +00001994 if (qdev->vlgrp &&
1995 (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) &&
1996 (vlan_id != 0))
1997 vlan_hwaccel_receive_skb(skb, qdev->vlgrp, vlan_id);
1998 else
1999 netif_receive_skb(skb);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002000 }
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002001}
2002
Ron Mercer4f848c02010-01-02 10:37:43 +00002003/* Process an inbound completion from an rx ring. */
2004static unsigned long ql_process_mac_rx_intr(struct ql_adapter *qdev,
2005 struct rx_ring *rx_ring,
2006 struct ib_mac_iocb_rsp *ib_mac_rsp)
2007{
2008 u32 length = le32_to_cpu(ib_mac_rsp->data_len);
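	/* A vlan_id of 0xffff means the frame carried no VLAN tag. */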
2009 u16 vlan_id = (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ?
2010 ((le16_to_cpu(ib_mac_rsp->vlan_id) &
2011 IB_MAC_IOCB_RSP_VLAN_MASK)) : 0xffff;
2012
2013 QL_DUMP_IB_MAC_RSP(ib_mac_rsp);
2014
2015 if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV) {
2016 /* The data and headers are split into
2017 * separate buffers.
2018 */
2019 ql_process_mac_split_rx_intr(qdev, rx_ring, ib_mac_rsp,
2020 vlan_id);
2021 } else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DS) {
2022 /* The data fit in a single small buffer.
2023 * Allocate a new skb, copy the data and
2024 * return the buffer to the free pool.
2025 */
2026 ql_process_mac_rx_skb(qdev, rx_ring, ib_mac_rsp,
2027 length, vlan_id);
Ron Mercer63526712010-01-02 10:37:44 +00002028 } else if ((ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) &&
2029 !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK) &&
2030 (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T)) {
2031 /* TCP packet in a page chunk that's been checksummed.
2032 * Tack it on to our GRO skb and let it go.
2033 */
2034 ql_process_mac_rx_gro_page(qdev, rx_ring, ib_mac_rsp,
2035 length, vlan_id);
Ron Mercer4f848c02010-01-02 10:37:43 +00002036 } else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) {
2037 /* Non-TCP packet in a page chunk. Allocate an
2038 * skb, tack it on frags, and send it up.
2039 */
2040 ql_process_mac_rx_page(qdev, rx_ring, ib_mac_rsp,
2041 length, vlan_id);
2042 } else {
2043 struct bq_desc *lbq_desc;
2044
2045 /* Free small buffer that holds the IAL */
2046 lbq_desc = ql_get_curr_sbuf(rx_ring);
2047 QPRINTK(qdev, RX_ERR, ERR, "Dropping frame, len %d > mtu %d\n",
2048 length, qdev->ndev->mtu);
2049
2050 /* Unwind the large buffers for this frame. */
2051 while (length > 0) {
2052 lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
2053 length -= (length < rx_ring->lbq_buf_size) ?
2054 length : rx_ring->lbq_buf_size;
2055 put_page(lbq_desc->p.pg_chunk.page);
2056 }
2057 }
2058
2059 return (unsigned long)length;
2060}
2061
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002062/* Process an outbound completion from an rx ring. */
2063static void ql_process_mac_tx_intr(struct ql_adapter *qdev,
2064 struct ob_mac_iocb_rsp *mac_rsp)
2065{
2066 struct tx_ring *tx_ring;
2067 struct tx_ring_desc *tx_ring_desc;
2068
2069 QL_DUMP_OB_MAC_RSP(mac_rsp);
2070 tx_ring = &qdev->tx_ring[mac_rsp->txq_idx];
2071 tx_ring_desc = &tx_ring->q[mac_rsp->tid];
2072 ql_unmap_send(qdev, tx_ring_desc, tx_ring_desc->map_cnt);
Ron Mercer885ee392009-11-03 13:49:31 +00002073 tx_ring->tx_bytes += (tx_ring_desc->skb)->len;
2074 tx_ring->tx_packets++;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002075 dev_kfree_skb(tx_ring_desc->skb);
2076 tx_ring_desc->skb = NULL;
2077
2078 if (unlikely(mac_rsp->flags1 & (OB_MAC_IOCB_RSP_E |
2079 OB_MAC_IOCB_RSP_S |
2080 OB_MAC_IOCB_RSP_L |
2081 OB_MAC_IOCB_RSP_P | OB_MAC_IOCB_RSP_B))) {
2082 if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_E) {
2083 QPRINTK(qdev, TX_DONE, WARNING,
2084 "Total descriptor length did not match transfer length.\n");
2085 }
2086 if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_S) {
2087 QPRINTK(qdev, TX_DONE, WARNING,
2088 "Frame too short to be legal, not sent.\n");
2089 }
2090 if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_L) {
2091 QPRINTK(qdev, TX_DONE, WARNING,
2092 "Frame too long, but sent anyway.\n");
2093 }
2094 if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_B) {
2095 QPRINTK(qdev, TX_DONE, WARNING,
2096 "PCI backplane error. Frame not sent.\n");
2097 }
2098 }
2099 atomic_inc(&tx_ring->tx_count);
2100}
2101
2102/* Fire up a handler to reset the MPI processor. */
2103void ql_queue_fw_error(struct ql_adapter *qdev)
2104{
Ron Mercer6a473302009-07-02 06:06:12 +00002105 ql_link_off(qdev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002106 queue_delayed_work(qdev->workqueue, &qdev->mpi_reset_work, 0);
2107}
2108
2109void ql_queue_asic_error(struct ql_adapter *qdev)
2110{
Ron Mercer6a473302009-07-02 06:06:12 +00002111 ql_link_off(qdev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002112 ql_disable_interrupts(qdev);
Ron Mercer6497b602009-02-12 16:37:13 -08002113 /* Clear adapter up bit to signal the recovery
2114 * process that it shouldn't kill the reset worker
2115 * thread
2116 */
2117 clear_bit(QL_ADAPTER_UP, &qdev->flags);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002118 queue_delayed_work(qdev->workqueue, &qdev->asic_reset_work, 0);
2119}
2120
2121static void ql_process_chip_ae_intr(struct ql_adapter *qdev,
2122 struct ib_ae_iocb_rsp *ib_ae_rsp)
2123{
2124 switch (ib_ae_rsp->event) {
2125 case MGMT_ERR_EVENT:
2126 QPRINTK(qdev, RX_ERR, ERR,
2127 "Management Processor Fatal Error.\n");
2128 ql_queue_fw_error(qdev);
2129 return;
2130
2131 case CAM_LOOKUP_ERR_EVENT:
2132 QPRINTK(qdev, LINK, ERR,
2133 "Multiple CAM hits lookup occurred.\n");
2134 QPRINTK(qdev, DRV, ERR, "This event shouldn't occur.\n");
2135 ql_queue_asic_error(qdev);
2136 return;
2137
2138 case SOFT_ECC_ERROR_EVENT:
2139 QPRINTK(qdev, RX_ERR, ERR, "Soft ECC error detected.\n");
2140 ql_queue_asic_error(qdev);
2141 break;
2142
2143 case PCI_ERR_ANON_BUF_RD:
2144 QPRINTK(qdev, RX_ERR, ERR,
2145 "PCI error occurred when reading anonymous buffers from rx_ring %d.\n",
2146 ib_ae_rsp->q_id);
2147 ql_queue_asic_error(qdev);
2148 break;
2149
2150 default:
2151 QPRINTK(qdev, DRV, ERR, "Unexpected event %d.\n",
2152 ib_ae_rsp->event);
2153 ql_queue_asic_error(qdev);
2154 break;
2155 }
2156}
2157
2158static int ql_clean_outbound_rx_ring(struct rx_ring *rx_ring)
2159{
2160 struct ql_adapter *qdev = rx_ring->qdev;
Ron Mercerba7cd3b2009-01-09 11:31:49 +00002161 u32 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002162 struct ob_mac_iocb_rsp *net_rsp = NULL;
2163 int count = 0;
2164
Ron Mercer1e213302009-03-09 10:59:21 +00002165 struct tx_ring *tx_ring;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002166 /* While there are entries in the completion queue. */
2167 while (prod != rx_ring->cnsmr_idx) {
2168
2169 QPRINTK(qdev, RX_STATUS, DEBUG,
2170 "cq_id = %d, prod = %d, cnsmr = %d.\n.", rx_ring->cq_id,
2171 prod, rx_ring->cnsmr_idx);
2172
2173 net_rsp = (struct ob_mac_iocb_rsp *)rx_ring->curr_entry;
2174 rmb();
2175 switch (net_rsp->opcode) {
2176
2177 case OPCODE_OB_MAC_TSO_IOCB:
2178 case OPCODE_OB_MAC_IOCB:
2179 ql_process_mac_tx_intr(qdev, net_rsp);
2180 break;
2181 default:
2182 QPRINTK(qdev, RX_STATUS, DEBUG,
2183 "Hit default case, not handled! dropping the packet, opcode = %x.\n",
2184 net_rsp->opcode);
2185 }
2186 count++;
2187 ql_update_cq(rx_ring);
Ron Mercerba7cd3b2009-01-09 11:31:49 +00002188 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002189 }
2190 ql_write_cq_idx(rx_ring);
Ron Mercer1e213302009-03-09 10:59:21 +00002191	tx_ring = net_rsp ? &qdev->tx_ring[net_rsp->txq_idx] : NULL;
 2192	if (net_rsp != NULL &&
 2193	    __netif_subqueue_stopped(qdev->ndev, tx_ring->wq_id)) {
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002194 if (atomic_read(&tx_ring->queue_stopped) &&
2195 (atomic_read(&tx_ring->tx_count) > (tx_ring->wq_len / 4)))
2196 /*
2197 * The queue got stopped because the tx_ring was full.
2198 * Wake it up, because it's now at least 25% empty.
2199 */
Ron Mercer1e213302009-03-09 10:59:21 +00002200 netif_wake_subqueue(qdev->ndev, tx_ring->wq_id);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002201 }
2202
2203 return count;
2204}
2205
2206static int ql_clean_inbound_rx_ring(struct rx_ring *rx_ring, int budget)
2207{
2208 struct ql_adapter *qdev = rx_ring->qdev;
Ron Mercerba7cd3b2009-01-09 11:31:49 +00002209 u32 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002210 struct ql_net_rsp_iocb *net_rsp;
2211 int count = 0;
2212
2213 /* While there are entries in the completion queue. */
2214 while (prod != rx_ring->cnsmr_idx) {
2215
2216 QPRINTK(qdev, RX_STATUS, DEBUG,
2217 "cq_id = %d, prod = %d, cnsmr = %d.\n.", rx_ring->cq_id,
2218 prod, rx_ring->cnsmr_idx);
2219
2220 net_rsp = rx_ring->curr_entry;
2221 rmb();
2222 switch (net_rsp->opcode) {
2223 case OPCODE_IB_MAC_IOCB:
2224 ql_process_mac_rx_intr(qdev, rx_ring,
2225 (struct ib_mac_iocb_rsp *)
2226 net_rsp);
2227 break;
2228
2229 case OPCODE_IB_AE_IOCB:
2230 ql_process_chip_ae_intr(qdev, (struct ib_ae_iocb_rsp *)
2231 net_rsp);
2232 break;
2233 default:
2234 {
2235 QPRINTK(qdev, RX_STATUS, DEBUG,
2236 "Hit default case, not handled! dropping the packet, opcode = %x.\n",
2237 net_rsp->opcode);
2238 }
2239 }
2240 count++;
2241 ql_update_cq(rx_ring);
Ron Mercerba7cd3b2009-01-09 11:31:49 +00002242 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002243 if (count == budget)
2244 break;
2245 }
2246 ql_update_buffer_queues(qdev, rx_ring);
2247 ql_write_cq_idx(rx_ring);
2248 return count;
2249}
2250
2251static int ql_napi_poll_msix(struct napi_struct *napi, int budget)
2252{
2253 struct rx_ring *rx_ring = container_of(napi, struct rx_ring, napi);
2254 struct ql_adapter *qdev = rx_ring->qdev;
Ron Mercer39aa8162009-08-27 11:02:11 +00002255 struct rx_ring *trx_ring;
2256 int i, work_done = 0;
2257 struct intr_context *ctx = &qdev->intr_context[rx_ring->cq_id];
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002258
2259 QPRINTK(qdev, RX_STATUS, DEBUG, "Enter, NAPI POLL cq_id = %d.\n",
2260 rx_ring->cq_id);
2261
Ron Mercer39aa8162009-08-27 11:02:11 +00002262 /* Service the TX rings first. They start
2263 * right after the RSS rings. */
2264 for (i = qdev->rss_ring_count; i < qdev->rx_ring_count; i++) {
2265 trx_ring = &qdev->rx_ring[i];
2266 /* If this TX completion ring belongs to this vector and
2267 * it's not empty then service it.
2268 */
2269 if ((ctx->irq_mask & (1 << trx_ring->cq_id)) &&
2270 (ql_read_sh_reg(trx_ring->prod_idx_sh_reg) !=
2271 trx_ring->cnsmr_idx)) {
2272 QPRINTK(qdev, INTR, DEBUG,
2273 "%s: Servicing TX completion ring %d.\n",
2274 __func__, trx_ring->cq_id);
2275 ql_clean_outbound_rx_ring(trx_ring);
2276 }
2277 }
2278
2279 /*
2280 * Now service the RSS ring if it's active.
2281 */
2282 if (ql_read_sh_reg(rx_ring->prod_idx_sh_reg) !=
2283 rx_ring->cnsmr_idx) {
2284 QPRINTK(qdev, INTR, DEBUG,
2285 "%s: Servicing RX completion ring %d.\n",
2286 __func__, rx_ring->cq_id);
2287 work_done = ql_clean_inbound_rx_ring(rx_ring, budget);
2288 }
2289
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002290 if (work_done < budget) {
Ron Mercer22bdd4f2009-03-09 10:59:20 +00002291 napi_complete(napi);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002292 ql_enable_completion_interrupt(qdev, rx_ring->irq);
2293 }
2294 return work_done;
2295}
2296
Ron Mercer01e6b952009-10-30 12:13:34 +00002297static void qlge_vlan_rx_register(struct net_device *ndev, struct vlan_group *grp)
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002298{
2299 struct ql_adapter *qdev = netdev_priv(ndev);
2300
2301 qdev->vlgrp = grp;
2302 if (grp) {
2303 QPRINTK(qdev, IFUP, DEBUG, "Turning on VLAN in NIC_RCV_CFG.\n");
2304 ql_write32(qdev, NIC_RCV_CFG, NIC_RCV_CFG_VLAN_MASK |
2305 NIC_RCV_CFG_VLAN_MATCH_AND_NON);
2306 } else {
2307 QPRINTK(qdev, IFUP, DEBUG,
2308 "Turning off VLAN in NIC_RCV_CFG.\n");
2309 ql_write32(qdev, NIC_RCV_CFG, NIC_RCV_CFG_VLAN_MASK);
2310 }
2311}
2312
Ron Mercer01e6b952009-10-30 12:13:34 +00002313static void qlge_vlan_rx_add_vid(struct net_device *ndev, u16 vid)
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002314{
2315 struct ql_adapter *qdev = netdev_priv(ndev);
2316 u32 enable_bit = MAC_ADDR_E;
Ron Mercercc288f52009-02-23 10:42:14 +00002317 int status;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002318
Ron Mercercc288f52009-02-23 10:42:14 +00002319 status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
2320 if (status)
2321 return;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002322 if (ql_set_mac_addr_reg
2323 (qdev, (u8 *) &enable_bit, MAC_ADDR_TYPE_VLAN, vid)) {
2324 QPRINTK(qdev, IFUP, ERR, "Failed to init vlan address.\n");
2325 }
Ron Mercercc288f52009-02-23 10:42:14 +00002326 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002327}
2328
Ron Mercer01e6b952009-10-30 12:13:34 +00002329static void qlge_vlan_rx_kill_vid(struct net_device *ndev, u16 vid)
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002330{
2331 struct ql_adapter *qdev = netdev_priv(ndev);
2332 u32 enable_bit = 0;
Ron Mercercc288f52009-02-23 10:42:14 +00002333 int status;
2334
2335 status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
2336 if (status)
2337 return;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002338
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002339 if (ql_set_mac_addr_reg
2340 (qdev, (u8 *) &enable_bit, MAC_ADDR_TYPE_VLAN, vid)) {
2341 QPRINTK(qdev, IFUP, ERR, "Failed to clear vlan address.\n");
2342 }
Ron Mercercc288f52009-02-23 10:42:14 +00002343 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002344
2345}
2346
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002347/* MSI-X Multiple Vector Interrupt Handler for inbound completions. */
2348static irqreturn_t qlge_msix_rx_isr(int irq, void *dev_id)
2349{
2350 struct rx_ring *rx_ring = dev_id;
Ben Hutchings288379f2009-01-19 16:43:59 -08002351 napi_schedule(&rx_ring->napi);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002352 return IRQ_HANDLED;
2353}
2354
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002355/* This handles a fatal error, MPI activity, and the default
2356 * rx_ring in an MSI-X multiple vector environment.
2357 * In MSI/Legacy environment it also process the rest of
2358 * the rx_rings.
2359 */
2360static irqreturn_t qlge_isr(int irq, void *dev_id)
2361{
2362 struct rx_ring *rx_ring = dev_id;
2363 struct ql_adapter *qdev = rx_ring->qdev;
2364 struct intr_context *intr_context = &qdev->intr_context[0];
2365 u32 var;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002366 int work_done = 0;
2367
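	/* If our completion interrupt is already disabled (irq_cnt is
	 * non-zero) this interrupt belongs to another device sharing
	 * the line.
	 */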
Ron Mercerbb0d2152008-10-20 10:30:26 -07002368 spin_lock(&qdev->hw_lock);
2369 if (atomic_read(&qdev->intr_context[0].irq_cnt)) {
2370 QPRINTK(qdev, INTR, DEBUG, "Shared Interrupt, Not ours!\n");
2371 spin_unlock(&qdev->hw_lock);
2372 return IRQ_NONE;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002373 }
Ron Mercerbb0d2152008-10-20 10:30:26 -07002374 spin_unlock(&qdev->hw_lock);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002375
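	/* Disabling the interrupt returns the chip status register,
	 * which is checked below for fatal errors and MPI activity.
	 */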
Ron Mercerbb0d2152008-10-20 10:30:26 -07002376 var = ql_disable_completion_interrupt(qdev, intr_context->intr);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002377
2378 /*
2379 * Check for fatal error.
2380 */
2381 if (var & STS_FE) {
2382 ql_queue_asic_error(qdev);
2383 QPRINTK(qdev, INTR, ERR, "Got fatal error, STS = %x.\n", var);
2384 var = ql_read32(qdev, ERR_STS);
2385 QPRINTK(qdev, INTR, ERR,
2386 "Resetting chip. Error Status Register = 0x%x\n", var);
2387 return IRQ_HANDLED;
2388 }
2389
2390 /*
2391 * Check MPI processor activity.
2392 */
Ron Mercer5ee22a52009-10-05 11:46:48 +00002393 if ((var & STS_PI) &&
2394 (ql_read32(qdev, INTR_MASK) & INTR_MASK_PI)) {
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002395 /*
2396 * We've got an async event or mailbox completion.
2397 * Handle it and clear the source of the interrupt.
2398 */
2399 QPRINTK(qdev, INTR, ERR, "Got MPI processor interrupt.\n");
2400 ql_disable_completion_interrupt(qdev, intr_context->intr);
Ron Mercer5ee22a52009-10-05 11:46:48 +00002401 ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16));
2402 queue_delayed_work_on(smp_processor_id(),
2403 qdev->workqueue, &qdev->mpi_work, 0);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002404 work_done++;
2405 }
2406
2407 /*
Ron Mercer39aa8162009-08-27 11:02:11 +00002408 * Get the bit-mask that shows the active queues for this
2409 * pass. Compare it to the queues that this irq services
2410 * and call napi if there's a match.
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002411 */
Ron Mercer39aa8162009-08-27 11:02:11 +00002412 var = ql_read32(qdev, ISR1);
2413 if (var & intr_context->irq_mask) {
Ron Mercer32a5b2a2009-11-03 13:49:29 +00002414 QPRINTK(qdev, INTR, INFO,
Ron Mercer39aa8162009-08-27 11:02:11 +00002415 "Waking handler for rx_ring[0].\n");
2416 ql_disable_completion_interrupt(qdev, intr_context->intr);
Ron Mercer32a5b2a2009-11-03 13:49:29 +00002417 napi_schedule(&rx_ring->napi);
2418 work_done++;
2419 }
Ron Mercerbb0d2152008-10-20 10:30:26 -07002420 ql_enable_completion_interrupt(qdev, intr_context->intr);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002421 return work_done ? IRQ_HANDLED : IRQ_NONE;
2422}
2423
2424static int ql_tso(struct sk_buff *skb, struct ob_mac_tso_iocb_req *mac_iocb_ptr)
2425{
2426
2427 if (skb_is_gso(skb)) {
2428 int err;
2429 if (skb_header_cloned(skb)) {
2430 err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
2431 if (err)
2432 return err;
2433 }
2434
2435 mac_iocb_ptr->opcode = OPCODE_OB_MAC_TSO_IOCB;
2436 mac_iocb_ptr->flags3 |= OB_MAC_TSO_IOCB_IC;
2437 mac_iocb_ptr->frame_len = cpu_to_le32((u32) skb->len);
2438 mac_iocb_ptr->total_hdrs_len =
2439 cpu_to_le16(skb_transport_offset(skb) + tcp_hdrlen(skb));
2440 mac_iocb_ptr->net_trans_offset =
2441 cpu_to_le16(skb_network_offset(skb) |
2442 skb_transport_offset(skb)
2443 << OB_MAC_TRANSPORT_HDR_SHIFT);
2444 mac_iocb_ptr->mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
2445 mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_LSO;
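		/* Seed the TCP checksum with the pseudo-header sum so the
		 * hardware can finish the per-segment checksums during LSO.
		 */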
2446 if (likely(skb->protocol == htons(ETH_P_IP))) {
2447 struct iphdr *iph = ip_hdr(skb);
2448 iph->check = 0;
2449 mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP4;
2450 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
2451 iph->daddr, 0,
2452 IPPROTO_TCP,
2453 0);
2454 } else if (skb->protocol == htons(ETH_P_IPV6)) {
2455 mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP6;
2456 tcp_hdr(skb)->check =
2457 ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
2458 &ipv6_hdr(skb)->daddr,
2459 0, IPPROTO_TCP, 0);
2460 }
2461 return 1;
2462 }
2463 return 0;
2464}
2465
2466static void ql_hw_csum_setup(struct sk_buff *skb,
2467 struct ob_mac_tso_iocb_req *mac_iocb_ptr)
2468{
2469 int len;
2470 struct iphdr *iph = ip_hdr(skb);
Ron Mercerfd2df4f2009-01-05 18:18:45 -08002471 __sum16 *check;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002472 mac_iocb_ptr->opcode = OPCODE_OB_MAC_TSO_IOCB;
2473 mac_iocb_ptr->frame_len = cpu_to_le32((u32) skb->len);
2474 mac_iocb_ptr->net_trans_offset =
2475 cpu_to_le16(skb_network_offset(skb) |
2476 skb_transport_offset(skb) << OB_MAC_TRANSPORT_HDR_SHIFT);
2477
2478 mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP4;
2479 len = (ntohs(iph->tot_len) - (iph->ihl << 2));
2480 if (likely(iph->protocol == IPPROTO_TCP)) {
2481 check = &(tcp_hdr(skb)->check);
2482 mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_TC;
2483 mac_iocb_ptr->total_hdrs_len =
2484 cpu_to_le16(skb_transport_offset(skb) +
2485 (tcp_hdr(skb)->doff << 2));
2486 } else {
2487 check = &(udp_hdr(skb)->check);
2488 mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_UC;
2489 mac_iocb_ptr->total_hdrs_len =
2490 cpu_to_le16(skb_transport_offset(skb) +
2491 sizeof(struct udphdr));
2492 }
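	/* Pre-load the pseudo-header checksum; the hardware computes
	 * and inserts the final checksum over the payload.
	 */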
2493 *check = ~csum_tcpudp_magic(iph->saddr,
2494 iph->daddr, len, iph->protocol, 0);
2495}
2496
Stephen Hemminger613573252009-08-31 19:50:58 +00002497static netdev_tx_t qlge_send(struct sk_buff *skb, struct net_device *ndev)
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002498{
2499 struct tx_ring_desc *tx_ring_desc;
2500 struct ob_mac_iocb_req *mac_iocb_ptr;
2501 struct ql_adapter *qdev = netdev_priv(ndev);
2502 int tso;
2503 struct tx_ring *tx_ring;
Ron Mercer1e213302009-03-09 10:59:21 +00002504 u32 tx_ring_idx = (u32) skb->queue_mapping;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002505
2506 tx_ring = &qdev->tx_ring[tx_ring_idx];
2507
Ron Mercer74c50b42009-03-09 10:59:27 +00002508 if (skb_padto(skb, ETH_ZLEN))
2509 return NETDEV_TX_OK;
2510
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002511 if (unlikely(atomic_read(&tx_ring->tx_count) < 2)) {
2512 QPRINTK(qdev, TX_QUEUED, INFO,
2513 "%s: shutting down tx queue %d du to lack of resources.\n",
2514 __func__, tx_ring_idx);
Ron Mercer1e213302009-03-09 10:59:21 +00002515 netif_stop_subqueue(ndev, tx_ring->wq_id);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002516 atomic_inc(&tx_ring->queue_stopped);
Ron Mercer885ee392009-11-03 13:49:31 +00002517 tx_ring->tx_errors++;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002518 return NETDEV_TX_BUSY;
2519 }
2520 tx_ring_desc = &tx_ring->q[tx_ring->prod_idx];
2521 mac_iocb_ptr = tx_ring_desc->queue_entry;
Ron Mercere3324712009-07-02 06:06:13 +00002522 memset((void *)mac_iocb_ptr, 0, sizeof(*mac_iocb_ptr));
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002523
2524 mac_iocb_ptr->opcode = OPCODE_OB_MAC_IOCB;
2525 mac_iocb_ptr->tid = tx_ring_desc->index;
 2526	/* Store the tx queue index in the IOCB so the completion
 2527	 * handler can find the corresponding tx_ring.
 2528	 */
2529 mac_iocb_ptr->txq_idx = tx_ring_idx;
2530 tx_ring_desc->skb = skb;
2531
2532 mac_iocb_ptr->frame_len = cpu_to_le16((u16) skb->len);
2533
2534 if (qdev->vlgrp && vlan_tx_tag_present(skb)) {
2535 QPRINTK(qdev, TX_QUEUED, DEBUG, "Adding a vlan tag %d.\n",
2536 vlan_tx_tag_get(skb));
2537 mac_iocb_ptr->flags3 |= OB_MAC_IOCB_V;
2538 mac_iocb_ptr->vlan_tci = cpu_to_le16(vlan_tx_tag_get(skb));
2539 }
2540 tso = ql_tso(skb, (struct ob_mac_tso_iocb_req *)mac_iocb_ptr);
2541 if (tso < 0) {
2542 dev_kfree_skb_any(skb);
2543 return NETDEV_TX_OK;
2544 } else if (unlikely(!tso) && (skb->ip_summed == CHECKSUM_PARTIAL)) {
2545 ql_hw_csum_setup(skb,
2546 (struct ob_mac_tso_iocb_req *)mac_iocb_ptr);
2547 }
Ron Mercer0d979f72009-02-12 16:38:03 -08002548 if (ql_map_send(qdev, mac_iocb_ptr, skb, tx_ring_desc) !=
2549 NETDEV_TX_OK) {
2550 QPRINTK(qdev, TX_QUEUED, ERR,
2551 "Could not map the segments.\n");
Ron Mercer885ee392009-11-03 13:49:31 +00002552 tx_ring->tx_errors++;
Ron Mercer0d979f72009-02-12 16:38:03 -08002553 return NETDEV_TX_BUSY;
2554 }
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002555 QL_DUMP_OB_MAC_IOCB(mac_iocb_ptr);
2556 tx_ring->prod_idx++;
2557 if (tx_ring->prod_idx == tx_ring->wq_len)
2558 tx_ring->prod_idx = 0;
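	/* Make sure the IOCB contents are written to memory before the
	 * doorbell write tells the chip to fetch it.
	 */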
2559 wmb();
2560
2561 ql_write_db_reg(tx_ring->prod_idx, tx_ring->prod_idx_db_reg);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002562 QPRINTK(qdev, TX_QUEUED, DEBUG, "tx queued, slot %d, len %d\n",
2563 tx_ring->prod_idx, skb->len);
2564
2565 atomic_dec(&tx_ring->tx_count);
2566 return NETDEV_TX_OK;
2567}
2568
Ron Mercer9dfbbaa2009-10-30 12:13:33 +00002569
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002570static void ql_free_shadow_space(struct ql_adapter *qdev)
2571{
2572 if (qdev->rx_ring_shadow_reg_area) {
2573 pci_free_consistent(qdev->pdev,
2574 PAGE_SIZE,
2575 qdev->rx_ring_shadow_reg_area,
2576 qdev->rx_ring_shadow_reg_dma);
2577 qdev->rx_ring_shadow_reg_area = NULL;
2578 }
2579 if (qdev->tx_ring_shadow_reg_area) {
2580 pci_free_consistent(qdev->pdev,
2581 PAGE_SIZE,
2582 qdev->tx_ring_shadow_reg_area,
2583 qdev->tx_ring_shadow_reg_dma);
2584 qdev->tx_ring_shadow_reg_area = NULL;
2585 }
2586}
2587
2588static int ql_alloc_shadow_space(struct ql_adapter *qdev)
2589{
2590 qdev->rx_ring_shadow_reg_area =
2591 pci_alloc_consistent(qdev->pdev,
2592 PAGE_SIZE, &qdev->rx_ring_shadow_reg_dma);
2593 if (qdev->rx_ring_shadow_reg_area == NULL) {
2594 QPRINTK(qdev, IFUP, ERR,
2595 "Allocation of RX shadow space failed.\n");
2596 return -ENOMEM;
2597 }
Ron Mercerb25215d2009-03-09 10:59:24 +00002598 memset(qdev->rx_ring_shadow_reg_area, 0, PAGE_SIZE);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002599 qdev->tx_ring_shadow_reg_area =
2600 pci_alloc_consistent(qdev->pdev, PAGE_SIZE,
2601 &qdev->tx_ring_shadow_reg_dma);
2602 if (qdev->tx_ring_shadow_reg_area == NULL) {
2603 QPRINTK(qdev, IFUP, ERR,
2604 "Allocation of TX shadow space failed.\n");
2605 goto err_wqp_sh_area;
2606 }
Ron Mercerb25215d2009-03-09 10:59:24 +00002607 memset(qdev->tx_ring_shadow_reg_area, 0, PAGE_SIZE);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002608 return 0;
2609
2610err_wqp_sh_area:
2611 pci_free_consistent(qdev->pdev,
2612 PAGE_SIZE,
2613 qdev->rx_ring_shadow_reg_area,
2614 qdev->rx_ring_shadow_reg_dma);
2615 return -ENOMEM;
2616}
2617
2618static void ql_init_tx_ring(struct ql_adapter *qdev, struct tx_ring *tx_ring)
2619{
2620 struct tx_ring_desc *tx_ring_desc;
2621 int i;
2622 struct ob_mac_iocb_req *mac_iocb_ptr;
2623
2624 mac_iocb_ptr = tx_ring->wq_base;
2625 tx_ring_desc = tx_ring->q;
2626 for (i = 0; i < tx_ring->wq_len; i++) {
2627 tx_ring_desc->index = i;
2628 tx_ring_desc->skb = NULL;
2629 tx_ring_desc->queue_entry = mac_iocb_ptr;
2630 mac_iocb_ptr++;
2631 tx_ring_desc++;
2632 }
2633 atomic_set(&tx_ring->tx_count, tx_ring->wq_len);
2634 atomic_set(&tx_ring->queue_stopped, 0);
2635}
2636
2637static void ql_free_tx_resources(struct ql_adapter *qdev,
2638 struct tx_ring *tx_ring)
2639{
2640 if (tx_ring->wq_base) {
2641 pci_free_consistent(qdev->pdev, tx_ring->wq_size,
2642 tx_ring->wq_base, tx_ring->wq_base_dma);
2643 tx_ring->wq_base = NULL;
2644 }
2645 kfree(tx_ring->q);
2646 tx_ring->q = NULL;
2647}
2648
2649static int ql_alloc_tx_resources(struct ql_adapter *qdev,
2650 struct tx_ring *tx_ring)
2651{
2652 tx_ring->wq_base =
2653 pci_alloc_consistent(qdev->pdev, tx_ring->wq_size,
2654 &tx_ring->wq_base_dma);
2655
Joe Perches8e95a202009-12-03 07:58:21 +00002656 if ((tx_ring->wq_base == NULL) ||
2657 tx_ring->wq_base_dma & WQ_ADDR_ALIGN) {
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002658 QPRINTK(qdev, IFUP, ERR, "tx_ring alloc failed.\n");
2659 return -ENOMEM;
2660 }
2661 tx_ring->q =
2662 kmalloc(tx_ring->wq_len * sizeof(struct tx_ring_desc), GFP_KERNEL);
2663 if (tx_ring->q == NULL)
2664 goto err;
2665
2666 return 0;
2667err:
2668 pci_free_consistent(qdev->pdev, tx_ring->wq_size,
2669 tx_ring->wq_base, tx_ring->wq_base_dma);
2670 return -ENOMEM;
2671}
2672
Stephen Hemminger8668ae92008-11-21 17:29:50 -08002673static void ql_free_lbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring)
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002674{
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002675 struct bq_desc *lbq_desc;
2676
Ron Mercer7c734352009-10-19 03:32:19 +00002677 uint32_t curr_idx, clean_idx;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002678
Ron Mercer7c734352009-10-19 03:32:19 +00002679 curr_idx = rx_ring->lbq_curr_idx;
2680 clean_idx = rx_ring->lbq_clean_idx;
2681 while (curr_idx != clean_idx) {
2682 lbq_desc = &rx_ring->lbq[curr_idx];
2683
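		/* Only the chunk flagged as the last piece of a page block
		 * owns the DMA mapping for the whole block, so unmap here.
		 */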
2684 if (lbq_desc->p.pg_chunk.last_flag) {
2685 pci_unmap_page(qdev->pdev,
2686 lbq_desc->p.pg_chunk.map,
2687 ql_lbq_block_size(qdev),
2688 PCI_DMA_FROMDEVICE);
2689 lbq_desc->p.pg_chunk.last_flag = 0;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002690 }
Ron Mercer7c734352009-10-19 03:32:19 +00002691
2692 put_page(lbq_desc->p.pg_chunk.page);
2693 lbq_desc->p.pg_chunk.page = NULL;
2694
2695 if (++curr_idx == rx_ring->lbq_len)
2696 curr_idx = 0;
2697
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002698 }
2699}
2700
Stephen Hemminger8668ae92008-11-21 17:29:50 -08002701static void ql_free_sbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring)
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002702{
2703 int i;
2704 struct bq_desc *sbq_desc;
2705
2706 for (i = 0; i < rx_ring->sbq_len; i++) {
2707 sbq_desc = &rx_ring->sbq[i];
2708 if (sbq_desc == NULL) {
2709 QPRINTK(qdev, IFUP, ERR, "sbq_desc %d is NULL.\n", i);
2710 return;
2711 }
2712 if (sbq_desc->p.skb) {
2713 pci_unmap_single(qdev->pdev,
2714 pci_unmap_addr(sbq_desc, mapaddr),
2715 pci_unmap_len(sbq_desc, maplen),
2716 PCI_DMA_FROMDEVICE);
2717 dev_kfree_skb(sbq_desc->p.skb);
2718 sbq_desc->p.skb = NULL;
2719 }
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002720 }
2721}
2722
Ron Mercer4545a3f2009-02-23 10:42:17 +00002723/* Free all large and small rx buffers associated
2724 * with the completion queues for this device.
2725 */
2726static void ql_free_rx_buffers(struct ql_adapter *qdev)
2727{
2728 int i;
2729 struct rx_ring *rx_ring;
2730
2731 for (i = 0; i < qdev->rx_ring_count; i++) {
2732 rx_ring = &qdev->rx_ring[i];
2733 if (rx_ring->lbq)
2734 ql_free_lbq_buffers(qdev, rx_ring);
2735 if (rx_ring->sbq)
2736 ql_free_sbq_buffers(qdev, rx_ring);
2737 }
2738}
2739
2740static void ql_alloc_rx_buffers(struct ql_adapter *qdev)
2741{
2742 struct rx_ring *rx_ring;
2743 int i;
2744
2745 for (i = 0; i < qdev->rx_ring_count; i++) {
2746 rx_ring = &qdev->rx_ring[i];
2747 if (rx_ring->type != TX_Q)
2748 ql_update_buffer_queues(qdev, rx_ring);
2749 }
2750}
2751
2752static void ql_init_lbq_ring(struct ql_adapter *qdev,
2753 struct rx_ring *rx_ring)
2754{
2755 int i;
2756 struct bq_desc *lbq_desc;
2757 __le64 *bq = rx_ring->lbq_base;
2758
2759 memset(rx_ring->lbq, 0, rx_ring->lbq_len * sizeof(struct bq_desc));
2760 for (i = 0; i < rx_ring->lbq_len; i++) {
2761 lbq_desc = &rx_ring->lbq[i];
2762 memset(lbq_desc, 0, sizeof(*lbq_desc));
2763 lbq_desc->index = i;
2764 lbq_desc->addr = bq;
2765 bq++;
2766 }
2767}
2768
2769static void ql_init_sbq_ring(struct ql_adapter *qdev,
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002770 struct rx_ring *rx_ring)
2771{
2772 int i;
2773 struct bq_desc *sbq_desc;
Ron Mercer2c9a0d42009-01-05 18:19:20 -08002774 __le64 *bq = rx_ring->sbq_base;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002775
Ron Mercer4545a3f2009-02-23 10:42:17 +00002776 memset(rx_ring->sbq, 0, rx_ring->sbq_len * sizeof(struct bq_desc));
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002777 for (i = 0; i < rx_ring->sbq_len; i++) {
2778 sbq_desc = &rx_ring->sbq[i];
Ron Mercer4545a3f2009-02-23 10:42:17 +00002779 memset(sbq_desc, 0, sizeof(*sbq_desc));
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002780 sbq_desc->index = i;
Ron Mercer2c9a0d42009-01-05 18:19:20 -08002781 sbq_desc->addr = bq;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002782 bq++;
2783 }
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002784}
2785
2786static void ql_free_rx_resources(struct ql_adapter *qdev,
2787 struct rx_ring *rx_ring)
2788{
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002789 /* Free the small buffer queue. */
2790 if (rx_ring->sbq_base) {
2791 pci_free_consistent(qdev->pdev,
2792 rx_ring->sbq_size,
2793 rx_ring->sbq_base, rx_ring->sbq_base_dma);
2794 rx_ring->sbq_base = NULL;
2795 }
2796
2797 /* Free the small buffer queue control blocks. */
2798 kfree(rx_ring->sbq);
2799 rx_ring->sbq = NULL;
2800
2801 /* Free the large buffer queue. */
2802 if (rx_ring->lbq_base) {
2803 pci_free_consistent(qdev->pdev,
2804 rx_ring->lbq_size,
2805 rx_ring->lbq_base, rx_ring->lbq_base_dma);
2806 rx_ring->lbq_base = NULL;
2807 }
2808
2809 /* Free the large buffer queue control blocks. */
2810 kfree(rx_ring->lbq);
2811 rx_ring->lbq = NULL;
2812
2813 /* Free the rx queue. */
2814 if (rx_ring->cq_base) {
2815 pci_free_consistent(qdev->pdev,
2816 rx_ring->cq_size,
2817 rx_ring->cq_base, rx_ring->cq_base_dma);
2818 rx_ring->cq_base = NULL;
2819 }
2820}
2821
 2822/* Allocate queues and buffers for this completion queue based
2823 * on the values in the parameter structure. */
2824static int ql_alloc_rx_resources(struct ql_adapter *qdev,
2825 struct rx_ring *rx_ring)
2826{
2827
2828 /*
2829 * Allocate the completion queue for this rx_ring.
2830 */
2831 rx_ring->cq_base =
2832 pci_alloc_consistent(qdev->pdev, rx_ring->cq_size,
2833 &rx_ring->cq_base_dma);
2834
2835 if (rx_ring->cq_base == NULL) {
2836 QPRINTK(qdev, IFUP, ERR, "rx_ring alloc failed.\n");
2837 return -ENOMEM;
2838 }
2839
2840 if (rx_ring->sbq_len) {
2841 /*
2842 * Allocate small buffer queue.
2843 */
2844 rx_ring->sbq_base =
2845 pci_alloc_consistent(qdev->pdev, rx_ring->sbq_size,
2846 &rx_ring->sbq_base_dma);
2847
2848 if (rx_ring->sbq_base == NULL) {
2849 QPRINTK(qdev, IFUP, ERR,
2850 "Small buffer queue allocation failed.\n");
2851 goto err_mem;
2852 }
2853
2854 /*
2855 * Allocate small buffer queue control blocks.
2856 */
2857 rx_ring->sbq =
2858 kmalloc(rx_ring->sbq_len * sizeof(struct bq_desc),
2859 GFP_KERNEL);
2860 if (rx_ring->sbq == NULL) {
2861 QPRINTK(qdev, IFUP, ERR,
2862 "Small buffer queue control block allocation failed.\n");
2863 goto err_mem;
2864 }
2865
Ron Mercer4545a3f2009-02-23 10:42:17 +00002866 ql_init_sbq_ring(qdev, rx_ring);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002867 }
2868
2869 if (rx_ring->lbq_len) {
2870 /*
2871 * Allocate large buffer queue.
2872 */
2873 rx_ring->lbq_base =
2874 pci_alloc_consistent(qdev->pdev, rx_ring->lbq_size,
2875 &rx_ring->lbq_base_dma);
2876
2877 if (rx_ring->lbq_base == NULL) {
2878 QPRINTK(qdev, IFUP, ERR,
2879 "Large buffer queue allocation failed.\n");
2880 goto err_mem;
2881 }
2882 /*
2883 * Allocate large buffer queue control blocks.
2884 */
2885 rx_ring->lbq =
2886 kmalloc(rx_ring->lbq_len * sizeof(struct bq_desc),
2887 GFP_KERNEL);
2888 if (rx_ring->lbq == NULL) {
2889 QPRINTK(qdev, IFUP, ERR,
2890 "Large buffer queue control block allocation failed.\n");
2891 goto err_mem;
2892 }
2893
Ron Mercer4545a3f2009-02-23 10:42:17 +00002894 ql_init_lbq_ring(qdev, rx_ring);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002895 }
2896
2897 return 0;
2898
2899err_mem:
2900 ql_free_rx_resources(qdev, rx_ring);
2901 return -ENOMEM;
2902}
2903
2904static void ql_tx_ring_clean(struct ql_adapter *qdev)
2905{
2906 struct tx_ring *tx_ring;
2907 struct tx_ring_desc *tx_ring_desc;
2908 int i, j;
2909
2910 /*
2911 * Loop through all queues and free
2912 * any resources.
2913 */
2914 for (j = 0; j < qdev->tx_ring_count; j++) {
2915 tx_ring = &qdev->tx_ring[j];
2916 for (i = 0; i < tx_ring->wq_len; i++) {
2917 tx_ring_desc = &tx_ring->q[i];
2918 if (tx_ring_desc && tx_ring_desc->skb) {
2919 QPRINTK(qdev, IFDOWN, ERR,
2920 "Freeing lost SKB %p, from queue %d, index %d.\n",
2921 tx_ring_desc->skb, j,
2922 tx_ring_desc->index);
2923 ql_unmap_send(qdev, tx_ring_desc,
2924 tx_ring_desc->map_cnt);
2925 dev_kfree_skb(tx_ring_desc->skb);
2926 tx_ring_desc->skb = NULL;
2927 }
2928 }
2929 }
2930}
2931
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002932static void ql_free_mem_resources(struct ql_adapter *qdev)
2933{
2934 int i;
2935
2936 for (i = 0; i < qdev->tx_ring_count; i++)
2937 ql_free_tx_resources(qdev, &qdev->tx_ring[i]);
2938 for (i = 0; i < qdev->rx_ring_count; i++)
2939 ql_free_rx_resources(qdev, &qdev->rx_ring[i]);
2940 ql_free_shadow_space(qdev);
2941}
2942
2943static int ql_alloc_mem_resources(struct ql_adapter *qdev)
2944{
2945 int i;
2946
2947 /* Allocate space for our shadow registers and such. */
2948 if (ql_alloc_shadow_space(qdev))
2949 return -ENOMEM;
2950
2951 for (i = 0; i < qdev->rx_ring_count; i++) {
2952 if (ql_alloc_rx_resources(qdev, &qdev->rx_ring[i]) != 0) {
2953 QPRINTK(qdev, IFUP, ERR,
2954 "RX resource allocation failed.\n");
2955 goto err_mem;
2956 }
2957 }
2958 /* Allocate tx queue resources */
2959 for (i = 0; i < qdev->tx_ring_count; i++) {
2960 if (ql_alloc_tx_resources(qdev, &qdev->tx_ring[i]) != 0) {
2961 QPRINTK(qdev, IFUP, ERR,
2962 "TX resource allocation failed.\n");
2963 goto err_mem;
2964 }
2965 }
2966 return 0;
2967
2968err_mem:
2969 ql_free_mem_resources(qdev);
2970 return -ENOMEM;
2971}
2972
2973/* Set up the rx ring control block and pass it to the chip.
2974 * The control block is defined as
2975 * "Completion Queue Initialization Control Block", or cqicb.
2976 */
2977static int ql_start_rx_ring(struct ql_adapter *qdev, struct rx_ring *rx_ring)
2978{
2979 struct cqicb *cqicb = &rx_ring->cqicb;
2980 void *shadow_reg = qdev->rx_ring_shadow_reg_area +
Ron Mercerb8facca2009-06-10 15:49:34 +00002981 (rx_ring->cq_id * RX_RING_SHADOW_SPACE);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002982 u64 shadow_reg_dma = qdev->rx_ring_shadow_reg_dma +
Ron Mercerb8facca2009-06-10 15:49:34 +00002983 (rx_ring->cq_id * RX_RING_SHADOW_SPACE);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002984 void __iomem *doorbell_area =
2985 qdev->doorbell_area + (DB_PAGE_SIZE * (128 + rx_ring->cq_id));
2986 int err = 0;
2987 u16 bq_len;
Ron Mercerd4a4aba2009-03-09 10:59:28 +00002988 u64 tmp;
Ron Mercerb8facca2009-06-10 15:49:34 +00002989 __le64 *base_indirect_ptr;
2990 int page_entries;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002991
2992 /* Set up the shadow registers for this ring. */
2993 rx_ring->prod_idx_sh_reg = shadow_reg;
2994 rx_ring->prod_idx_sh_reg_dma = shadow_reg_dma;
Ron Mercer7c734352009-10-19 03:32:19 +00002995 *rx_ring->prod_idx_sh_reg = 0;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002996 shadow_reg += sizeof(u64);
2997 shadow_reg_dma += sizeof(u64);
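	/* The rest of the shadow area holds the indirect lists of
	 * DB page addresses for the large and small buffer queues.
	 */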
2998 rx_ring->lbq_base_indirect = shadow_reg;
2999 rx_ring->lbq_base_indirect_dma = shadow_reg_dma;
Ron Mercerb8facca2009-06-10 15:49:34 +00003000 shadow_reg += (sizeof(u64) * MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len));
3001 shadow_reg_dma += (sizeof(u64) * MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len));
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003002 rx_ring->sbq_base_indirect = shadow_reg;
3003 rx_ring->sbq_base_indirect_dma = shadow_reg_dma;
3004
3005 /* PCI doorbell mem area + 0x00 for consumer index register */
Stephen Hemminger8668ae92008-11-21 17:29:50 -08003006 rx_ring->cnsmr_idx_db_reg = (u32 __iomem *) doorbell_area;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003007 rx_ring->cnsmr_idx = 0;
3008 rx_ring->curr_entry = rx_ring->cq_base;
3009
3010 /* PCI doorbell mem area + 0x04 for valid register */
3011 rx_ring->valid_db_reg = doorbell_area + 0x04;
3012
3013 /* PCI doorbell mem area + 0x18 for large buffer consumer */
Stephen Hemminger8668ae92008-11-21 17:29:50 -08003014 rx_ring->lbq_prod_idx_db_reg = (u32 __iomem *) (doorbell_area + 0x18);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003015
3016 /* PCI doorbell mem area + 0x1c */
Stephen Hemminger8668ae92008-11-21 17:29:50 -08003017 rx_ring->sbq_prod_idx_db_reg = (u32 __iomem *) (doorbell_area + 0x1c);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003018
3019 memset((void *)cqicb, 0, sizeof(struct cqicb));
3020 cqicb->msix_vect = rx_ring->irq;
3021
Ron Mercer459caf52009-01-04 17:08:11 -08003022 bq_len = (rx_ring->cq_len == 65536) ? 0 : (u16) rx_ring->cq_len;
3023 cqicb->len = cpu_to_le16(bq_len | LEN_V | LEN_CPP_CONT);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003024
Ron Mercer97345522009-01-09 11:31:50 +00003025 cqicb->addr = cpu_to_le64(rx_ring->cq_base_dma);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003026
Ron Mercer97345522009-01-09 11:31:50 +00003027 cqicb->prod_idx_addr = cpu_to_le64(rx_ring->prod_idx_sh_reg_dma);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003028
3029 /*
3030 * Set up the control block load flags.
3031 */
3032 cqicb->flags = FLAGS_LC | /* Load queue base address */
3033 FLAGS_LV | /* Load MSI-X vector */
3034 FLAGS_LI; /* Load irq delay values */
3035 if (rx_ring->lbq_len) {
3036 cqicb->flags |= FLAGS_LL; /* Load lbq values */
Joe Perchesa419aef2009-08-18 11:18:35 -07003037 tmp = (u64)rx_ring->lbq_base_dma;
Ron Mercerb8facca2009-06-10 15:49:34 +00003038 base_indirect_ptr = (__le64 *) rx_ring->lbq_base_indirect;
3039 page_entries = 0;
3040 do {
3041 *base_indirect_ptr = cpu_to_le64(tmp);
3042 tmp += DB_PAGE_SIZE;
3043 base_indirect_ptr++;
3044 page_entries++;
3045 } while (page_entries < MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len));
Ron Mercer97345522009-01-09 11:31:50 +00003046 cqicb->lbq_addr =
3047 cpu_to_le64(rx_ring->lbq_base_indirect_dma);
Ron Mercer459caf52009-01-04 17:08:11 -08003048 bq_len = (rx_ring->lbq_buf_size == 65536) ? 0 :
3049 (u16) rx_ring->lbq_buf_size;
3050 cqicb->lbq_buf_size = cpu_to_le16(bq_len);
3051 bq_len = (rx_ring->lbq_len == 65536) ? 0 :
3052 (u16) rx_ring->lbq_len;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003053 cqicb->lbq_len = cpu_to_le16(bq_len);
Ron Mercer4545a3f2009-02-23 10:42:17 +00003054 rx_ring->lbq_prod_idx = 0;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003055 rx_ring->lbq_curr_idx = 0;
Ron Mercer4545a3f2009-02-23 10:42:17 +00003056 rx_ring->lbq_clean_idx = 0;
3057 rx_ring->lbq_free_cnt = rx_ring->lbq_len;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003058 }
3059 if (rx_ring->sbq_len) {
3060 cqicb->flags |= FLAGS_LS; /* Load sbq values */
Joe Perchesa419aef2009-08-18 11:18:35 -07003061 tmp = (u64)rx_ring->sbq_base_dma;
Ron Mercerb8facca2009-06-10 15:49:34 +00003062 base_indirect_ptr = (__le64 *) rx_ring->sbq_base_indirect;
3063 page_entries = 0;
3064 do {
3065 *base_indirect_ptr = cpu_to_le64(tmp);
3066 tmp += DB_PAGE_SIZE;
3067 base_indirect_ptr++;
3068 page_entries++;
3069 } while (page_entries < MAX_DB_PAGES_PER_BQ(rx_ring->sbq_len));
Ron Mercer97345522009-01-09 11:31:50 +00003070 cqicb->sbq_addr =
3071 cpu_to_le64(rx_ring->sbq_base_indirect_dma);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003072 cqicb->sbq_buf_size =
Ron Mercer52e55f32009-10-10 09:35:07 +00003073 cpu_to_le16((u16)(rx_ring->sbq_buf_size));
Ron Mercer459caf52009-01-04 17:08:11 -08003074 bq_len = (rx_ring->sbq_len == 65536) ? 0 :
3075 (u16) rx_ring->sbq_len;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003076 cqicb->sbq_len = cpu_to_le16(bq_len);
Ron Mercer4545a3f2009-02-23 10:42:17 +00003077 rx_ring->sbq_prod_idx = 0;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003078 rx_ring->sbq_curr_idx = 0;
Ron Mercer4545a3f2009-02-23 10:42:17 +00003079 rx_ring->sbq_clean_idx = 0;
3080 rx_ring->sbq_free_cnt = rx_ring->sbq_len;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003081 }
3082 switch (rx_ring->type) {
3083 case TX_Q:
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003084 cqicb->irq_delay = cpu_to_le16(qdev->tx_coalesce_usecs);
3085 cqicb->pkt_delay = cpu_to_le16(qdev->tx_max_coalesced_frames);
3086 break;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003087 case RX_Q:
3088 /* Inbound completion handling rx_rings run in
3089 * separate NAPI contexts.
3090 */
3091 netif_napi_add(qdev->ndev, &rx_ring->napi, ql_napi_poll_msix,
3092 64);
3093 cqicb->irq_delay = cpu_to_le16(qdev->rx_coalesce_usecs);
3094 cqicb->pkt_delay = cpu_to_le16(qdev->rx_max_coalesced_frames);
3095 break;
3096 default:
3097 QPRINTK(qdev, IFUP, DEBUG, "Invalid rx_ring->type = %d.\n",
3098 rx_ring->type);
3099 }
Ron Mercer49740972009-02-26 10:08:36 +00003100 QPRINTK(qdev, IFUP, DEBUG, "Initializing rx work queue.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003101 err = ql_write_cfg(qdev, cqicb, sizeof(struct cqicb),
3102 CFG_LCQ, rx_ring->cq_id);
3103 if (err) {
3104 QPRINTK(qdev, IFUP, ERR, "Failed to load CQICB.\n");
3105 return err;
3106 }
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003107 return err;
3108}
3109
3110static int ql_start_tx_ring(struct ql_adapter *qdev, struct tx_ring *tx_ring)
3111{
3112 struct wqicb *wqicb = (struct wqicb *)tx_ring;
3113 void __iomem *doorbell_area =
3114 qdev->doorbell_area + (DB_PAGE_SIZE * tx_ring->wq_id);
3115 void *shadow_reg = qdev->tx_ring_shadow_reg_area +
3116 (tx_ring->wq_id * sizeof(u64));
3117 u64 shadow_reg_dma = qdev->tx_ring_shadow_reg_dma +
3118 (tx_ring->wq_id * sizeof(u64));
3119 int err = 0;
3120
3121 /*
3122 * Assign doorbell registers for this tx_ring.
3123 */
3124 /* TX PCI doorbell mem area for tx producer index */
Stephen Hemminger8668ae92008-11-21 17:29:50 -08003125 tx_ring->prod_idx_db_reg = (u32 __iomem *) doorbell_area;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003126 tx_ring->prod_idx = 0;
3127 /* TX PCI doorbell mem area + 0x04 */
3128 tx_ring->valid_db_reg = doorbell_area + 0x04;
3129
3130 /*
3131 * Assign shadow registers for this tx_ring.
3132 */
3133 tx_ring->cnsmr_idx_sh_reg = shadow_reg;
3134 tx_ring->cnsmr_idx_sh_reg_dma = shadow_reg_dma;
3135
3136 wqicb->len = cpu_to_le16(tx_ring->wq_len | Q_LEN_V | Q_LEN_CPP_CONT);
3137 wqicb->flags = cpu_to_le16(Q_FLAGS_LC |
3138 Q_FLAGS_LB | Q_FLAGS_LI | Q_FLAGS_LO);
3139 wqicb->cq_id_rss = cpu_to_le16(tx_ring->cq_id);
3140 wqicb->rid = 0;
Ron Mercer97345522009-01-09 11:31:50 +00003141 wqicb->addr = cpu_to_le64(tx_ring->wq_base_dma);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003142
Ron Mercer97345522009-01-09 11:31:50 +00003143 wqicb->cnsmr_idx_addr = cpu_to_le64(tx_ring->cnsmr_idx_sh_reg_dma);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003144
3145 ql_init_tx_ring(qdev, tx_ring);
3146
Ron Mercere3324712009-07-02 06:06:13 +00003147 err = ql_write_cfg(qdev, wqicb, sizeof(*wqicb), CFG_LRQ,
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003148 (u16) tx_ring->wq_id);
3149 if (err) {
3150 QPRINTK(qdev, IFUP, ERR, "Failed to load tx_ring.\n");
3151 return err;
3152 }
Ron Mercer49740972009-02-26 10:08:36 +00003153 QPRINTK(qdev, IFUP, DEBUG, "Successfully loaded WQICB.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003154 return err;
3155}
3156
3157static void ql_disable_msix(struct ql_adapter *qdev)
3158{
3159 if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
3160 pci_disable_msix(qdev->pdev);
3161 clear_bit(QL_MSIX_ENABLED, &qdev->flags);
3162 kfree(qdev->msi_x_entry);
3163 qdev->msi_x_entry = NULL;
3164 } else if (test_bit(QL_MSI_ENABLED, &qdev->flags)) {
3165 pci_disable_msi(qdev->pdev);
3166 clear_bit(QL_MSI_ENABLED, &qdev->flags);
3167 }
3168}
3169
Ron Mercera4ab6132009-08-27 11:02:10 +00003170/* We start by trying to get the number of vectors
3171 * stored in qdev->intr_count. If we don't get that
3172 * many then we reduce the count and try again.
3173 */
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003174static void ql_enable_msix(struct ql_adapter *qdev)
3175{
Ron Mercera4ab6132009-08-27 11:02:10 +00003176 int i, err;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003177
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003178 /* Get the MSIX vectors. */
Ron Mercera5a62a12009-11-11 12:54:05 +00003179 if (qlge_irq_type == MSIX_IRQ) {
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003180 /* Try to alloc space for the msix struct,
3181 * if it fails then go to MSI/legacy.
3182 */
Ron Mercera4ab6132009-08-27 11:02:10 +00003183 qdev->msi_x_entry = kcalloc(qdev->intr_count,
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003184 sizeof(struct msix_entry),
3185 GFP_KERNEL);
3186 if (!qdev->msi_x_entry) {
Ron Mercera5a62a12009-11-11 12:54:05 +00003187 qlge_irq_type = MSI_IRQ;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003188 goto msi;
3189 }
3190
Ron Mercera4ab6132009-08-27 11:02:10 +00003191 for (i = 0; i < qdev->intr_count; i++)
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003192 qdev->msi_x_entry[i].entry = i;
3193
Ron Mercera4ab6132009-08-27 11:02:10 +00003194 /* Loop to get our vectors. We start with
3195 * what we want and settle for what we get.
3196 */
3197 do {
3198 err = pci_enable_msix(qdev->pdev,
3199 qdev->msi_x_entry, qdev->intr_count);
3200 if (err > 0)
3201 qdev->intr_count = err;
3202 } while (err > 0);
3203
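 /* err == 0 means every requested vector was granted; a negative
  * err means MSI-X could not be enabled at all, so fall back to MSI. */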
3204 if (err < 0) {
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003205 kfree(qdev->msi_x_entry);
3206 qdev->msi_x_entry = NULL;
3207 QPRINTK(qdev, IFUP, WARNING,
3208 "MSI-X Enable failed, trying MSI.\n");
Ron Mercera4ab6132009-08-27 11:02:10 +00003209 qdev->intr_count = 1;
Ron Mercera5a62a12009-11-11 12:54:05 +00003210 qlge_irq_type = MSI_IRQ;
Ron Mercera4ab6132009-08-27 11:02:10 +00003211 } else if (err == 0) {
3212 set_bit(QL_MSIX_ENABLED, &qdev->flags);
3213 QPRINTK(qdev, IFUP, INFO,
3214 "MSI-X Enabled, got %d vectors.\n",
3215 qdev->intr_count);
3216 return;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003217 }
3218 }
3219msi:
Ron Mercera4ab6132009-08-27 11:02:10 +00003220 qdev->intr_count = 1;
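 /* MSI and legacy INTx modes both run with a single vector. */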
Ron Mercera5a62a12009-11-11 12:54:05 +00003221 if (qlge_irq_type == MSI_IRQ) {
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003222 if (!pci_enable_msi(qdev->pdev)) {
3223 set_bit(QL_MSI_ENABLED, &qdev->flags);
3224 QPRINTK(qdev, IFUP, INFO,
3225 "Running with MSI interrupts.\n");
3226 return;
3227 }
3228 }
Ron Mercera5a62a12009-11-11 12:54:05 +00003229 qlge_irq_type = LEG_IRQ;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003230 QPRINTK(qdev, IFUP, DEBUG, "Running with legacy interrupts.\n");
3231}
3232
Ron Mercer39aa8162009-08-27 11:02:11 +00003233/* Each vector services 1 RSS ring and 1 or more
3234 * TX completion rings. This function loops through
3235 * the TX completion rings and assigns the vector that
3236 * will service it. An example would be if there are
3237 * 2 vectors (so 2 RSS rings) and 8 TX completion rings.
3238 * This would mean that vector 0 would service RSS ring 0
3239 * and TX completion rings 0,1,2 and 3. Vector 1 would
3240 * service RSS ring 1 and TX completion rings 4,5,6 and 7.
3241 */
3242static void ql_set_tx_vect(struct ql_adapter *qdev)
3243{
3244 int i, j, vect;
3245 u32 tx_rings_per_vector = qdev->tx_ring_count / qdev->intr_count;
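 /* Note: integer division. This assumes intr_count divides
  * tx_ring_count evenly (both default to the online CPU count). */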
3246
3247 if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
3248 /* Assign irq vectors to TX completion rx_rings. */
3249 for (vect = 0, j = 0, i = qdev->rss_ring_count;
3250 i < qdev->rx_ring_count; i++) {
3251 if (j == tx_rings_per_vector) {
3252 vect++;
3253 j = 0;
3254 }
3255 qdev->rx_ring[i].irq = vect;
3256 j++;
3257 }
3258 } else {
3259 /* For single vector all rings have an irq
3260 * of zero.
3261 */
3262 for (i = 0; i < qdev->rx_ring_count; i++)
3263 qdev->rx_ring[i].irq = 0;
3264 }
3265}
3266
3267/* Set the interrupt mask for this vector. Each vector
3268 * will service 1 RSS ring and 1 or more TX completion
3269 * rings. This function sets up a bit mask per vector
3270 * that indicates which rings it services.
3271 */
3272static void ql_set_irq_mask(struct ql_adapter *qdev, struct intr_context *ctx)
3273{
3274 int j, vect = ctx->intr;
3275 u32 tx_rings_per_vector = qdev->tx_ring_count / qdev->intr_count;
3276
3277 if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
3278 /* Add the RSS ring serviced by this vector
3279 * to the mask.
3280 */
3281 ctx->irq_mask = (1 << qdev->rx_ring[vect].cq_id);
3282 /* Add the TX ring(s) serviced by this vector
3283 * to the mask. */
3284 for (j = 0; j < tx_rings_per_vector; j++) {
3285 ctx->irq_mask |=
3286 (1 << qdev->rx_ring[qdev->rss_ring_count +
3287 (vect * tx_rings_per_vector) + j].cq_id);
3288 }
3289 } else {
3290 /* For single vector we just shift each queue's
3291 * ID into the mask.
3292 */
3293 for (j = 0; j < qdev->rx_ring_count; j++)
3294 ctx->irq_mask |= (1 << qdev->rx_ring[j].cq_id);
3295 }
3296}
3297
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003298/*
3299 * Here we build the intr_context structures based on
3300 * our rx_ring count and intr vector count.
3301 * The intr_context structure is used to hook each vector
3302 * to possibly different handlers.
3303 */
3304static void ql_resolve_queues_to_irqs(struct ql_adapter *qdev)
3305{
3306 int i = 0;
3307 struct intr_context *intr_context = &qdev->intr_context[0];
3308
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003309 if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
3310 /* Each rx_ring has its
3311 * own intr_context since we have separate
3312 * vectors for each queue.
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003313 */
3314 for (i = 0; i < qdev->intr_count; i++, intr_context++) {
3315 qdev->rx_ring[i].irq = i;
3316 intr_context->intr = i;
3317 intr_context->qdev = qdev;
Ron Mercer39aa8162009-08-27 11:02:11 +00003318 /* Set up this vector's bit-mask that indicates
3319 * which queues it services.
3320 */
3321 ql_set_irq_mask(qdev, intr_context);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003322 /*
3323 * We set up each vector's enable/disable/read bits so
3324 * there are no bit/mask calculations in the critical path.
3325 */
3326 intr_context->intr_en_mask =
3327 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
3328 INTR_EN_TYPE_ENABLE | INTR_EN_IHD_MASK | INTR_EN_IHD
3329 | i;
3330 intr_context->intr_dis_mask =
3331 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
3332 INTR_EN_TYPE_DISABLE | INTR_EN_IHD_MASK |
3333 INTR_EN_IHD | i;
3334 intr_context->intr_read_mask =
3335 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
3336 INTR_EN_TYPE_READ | INTR_EN_IHD_MASK | INTR_EN_IHD |
3337 i;
Ron Mercer39aa8162009-08-27 11:02:11 +00003338 if (i == 0) {
3339 /* The first vector/queue handles
3340 * broadcast/multicast, fatal errors,
3341 * and firmware events. This is in addition
3342 * to normal inbound NAPI processing.
3343 */
3344 intr_context->handler = qlge_isr;
3345 sprintf(intr_context->name, "%s-rx-%d",
3346 qdev->ndev->name, i);
3347 } else {
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003348 /*
3349 * Inbound queues handle unicast frames only.
3350 */
3351 intr_context->handler = qlge_msix_rx_isr;
Jesper Dangaard Brouerc2249692009-01-09 03:14:47 +00003352 sprintf(intr_context->name, "%s-rx-%d",
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003353 qdev->ndev->name, i);
3354 }
3355 }
3356 } else {
3357 /*
3358 * All rx_rings use the same intr_context since
3359 * there is only one vector.
3360 */
3361 intr_context->intr = 0;
3362 intr_context->qdev = qdev;
3363 /*
3364 * We set up each vector's enable/disable/read bits so
3365 * there are no bit/mask calculations in the critical path.
3366 */
3367 intr_context->intr_en_mask =
3368 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK | INTR_EN_TYPE_ENABLE;
3369 intr_context->intr_dis_mask =
3370 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
3371 INTR_EN_TYPE_DISABLE;
3372 intr_context->intr_read_mask =
3373 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK | INTR_EN_TYPE_READ;
3374 /*
3375 * Single interrupt means one handler for all rings.
3376 */
3377 intr_context->handler = qlge_isr;
3378 sprintf(intr_context->name, "%s-single_irq", qdev->ndev->name);
Ron Mercer39aa8162009-08-27 11:02:11 +00003379 /* Set up this vector's bit-mask that indicates
3380 * which queues it services. In this case there is
3381 * a single vector so it will service all RSS and
3382 * TX completion rings.
3383 */
3384 ql_set_irq_mask(qdev, intr_context);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003385 }
Ron Mercer39aa8162009-08-27 11:02:11 +00003386 /* Tell the TX completion rings which MSIx vector
3387 * they will be using.
3388 */
3389 ql_set_tx_vect(qdev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003390}
3391
3392static void ql_free_irq(struct ql_adapter *qdev)
3393{
3394 int i;
3395 struct intr_context *intr_context = &qdev->intr_context[0];
3396
3397 for (i = 0; i < qdev->intr_count; i++, intr_context++) {
3398 if (intr_context->hooked) {
3399 if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
3400 free_irq(qdev->msi_x_entry[i].vector,
3401 &qdev->rx_ring[i]);
Ron Mercer49740972009-02-26 10:08:36 +00003402 QPRINTK(qdev, IFDOWN, DEBUG,
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003403 "freeing msix interrupt %d.\n", i);
3404 } else {
3405 free_irq(qdev->pdev->irq, &qdev->rx_ring[0]);
Ron Mercer49740972009-02-26 10:08:36 +00003406 QPRINTK(qdev, IFDOWN, DEBUG,
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003407 "freeing msi interrupt %d.\n", i);
3408 }
3409 }
3410 }
3411 ql_disable_msix(qdev);
3412}
3413
3414static int ql_request_irq(struct ql_adapter *qdev)
3415{
3416 int i;
3417 int status = 0;
3418 struct pci_dev *pdev = qdev->pdev;
3419 struct intr_context *intr_context = &qdev->intr_context[0];
3420
3421 ql_resolve_queues_to_irqs(qdev);
3422
3423 for (i = 0; i < qdev->intr_count; i++, intr_context++) {
3424 atomic_set(&intr_context->irq_cnt, 0);
3425 if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
3426 status = request_irq(qdev->msi_x_entry[i].vector,
3427 intr_context->handler,
3428 0,
3429 intr_context->name,
3430 &qdev->rx_ring[i]);
3431 if (status) {
3432 QPRINTK(qdev, IFUP, ERR,
3433 "Failed request for MSIX interrupt %d.\n",
3434 i);
3435 goto err_irq;
3436 } else {
Ron Mercer49740972009-02-26 10:08:36 +00003437 QPRINTK(qdev, IFUP, DEBUG,
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003438 "Hooked intr %d, queue type %s%s%s, with name %s.\n",
3439 i,
3440 qdev->rx_ring[i].type ==
3441 DEFAULT_Q ? "DEFAULT_Q" : "",
3442 qdev->rx_ring[i].type ==
3443 TX_Q ? "TX_Q" : "",
3444 qdev->rx_ring[i].type ==
3445 RX_Q ? "RX_Q" : "", intr_context->name);
3446 }
3447 } else {
3448 QPRINTK(qdev, IFUP, DEBUG,
3449 "trying msi or legacy interrupts.\n");
3450 QPRINTK(qdev, IFUP, DEBUG,
3451 "%s: irq = %d.\n", __func__, pdev->irq);
3452 QPRINTK(qdev, IFUP, DEBUG,
3453 "%s: context->name = %s.\n", __func__,
3454 intr_context->name);
3455 QPRINTK(qdev, IFUP, DEBUG,
3456 "%s: dev_id = 0x%p.\n", __func__,
3457 &qdev->rx_ring[0]);
3458 status =
3459 request_irq(pdev->irq, qlge_isr,
3460 test_bit(QL_MSI_ENABLED,
3461 &qdev->
3462 flags) ? 0 : IRQF_SHARED,
3463 intr_context->name, &qdev->rx_ring[0]);
3464 if (status)
3465 goto err_irq;
3466
3467 QPRINTK(qdev, IFUP, DEBUG,
3468 "Hooked intr %d, queue type %s%s%s, with name %s.\n",
3469 i,
3470 qdev->rx_ring[0].type ==
3471 DEFAULT_Q ? "DEFAULT_Q" : "",
3472 qdev->rx_ring[0].type == TX_Q ? "TX_Q" : "",
3473 qdev->rx_ring[0].type == RX_Q ? "RX_Q" : "",
3474 intr_context->name);
3475 }
3476 intr_context->hooked = 1;
3477 }
3478 return status;
3479err_irq:
3480 QPRINTK(qdev, IFUP, ERR, "Failed to get the interrupts!!!\n");
3481 ql_free_irq(qdev);
3482 return status;
3483}
3484
3485static int ql_start_rss(struct ql_adapter *qdev)
3486{
Ron Mercer541ae282009-10-08 09:54:37 +00003487 u8 init_hash_seed[] = {0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2,
3488 0x41, 0x67, 0x25, 0x3d, 0x43, 0xa3, 0x8f,
3489 0xb0, 0xd0, 0xca, 0x2b, 0xcb, 0xae, 0x7b,
3490 0x30, 0xb4, 0x77, 0xcb, 0x2d, 0xa3, 0x80,
3491 0x30, 0xf2, 0x0c, 0x6a, 0x42, 0xb7, 0x3b,
3492 0xbe, 0xac, 0x01, 0xfa};
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003493 struct ricb *ricb = &qdev->ricb;
3494 int status = 0;
3495 int i;
3496 u8 *hash_id = (u8 *) ricb->hash_cq_id;
3497
Ron Mercere3324712009-07-02 06:06:13 +00003498 memset((void *)ricb, 0, sizeof(*ricb));
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003499
Ron Mercerb2014ff2009-08-27 11:02:09 +00003500 ricb->base_cq = RSS_L4K;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003501 ricb->flags =
Ron Mercer541ae282009-10-08 09:54:37 +00003502 (RSS_L6K | RSS_LI | RSS_LB | RSS_LM | RSS_RT4 | RSS_RT6);
3503 ricb->mask = cpu_to_le16((u16)(0x3ff));
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003504
3505 /*
3506 * Fill out the Indirection Table.
3507 */
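 /* i & (rss_ring_count - 1) equals i % rss_ring_count only when
  * rss_ring_count is a power of two; each of the 1024 hash buckets
  * maps to one of the RSS completion queues. */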
Ron Mercer541ae282009-10-08 09:54:37 +00003508 for (i = 0; i < 1024; i++)
3509 hash_id[i] = (i & (qdev->rss_ring_count - 1));
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003510
Ron Mercer541ae282009-10-08 09:54:37 +00003511 memcpy((void *)&ricb->ipv6_hash_key[0], init_hash_seed, 40);
3512 memcpy((void *)&ricb->ipv4_hash_key[0], init_hash_seed, 16);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003513
Ron Mercer49740972009-02-26 10:08:36 +00003514 QPRINTK(qdev, IFUP, DEBUG, "Initializing RSS.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003515
Ron Mercere3324712009-07-02 06:06:13 +00003516 status = ql_write_cfg(qdev, ricb, sizeof(*ricb), CFG_LR, 0);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003517 if (status) {
3518 QPRINTK(qdev, IFUP, ERR, "Failed to load RICB.\n");
3519 return status;
3520 }
Ron Mercer49740972009-02-26 10:08:36 +00003521 QPRINTK(qdev, IFUP, DEBUG, "Successfully loaded RICB.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003522 return status;
3523}
3524
Ron Mercera5f59dc2009-07-02 06:06:07 +00003525static int ql_clear_routing_entries(struct ql_adapter *qdev)
3526{
3527 int i, status = 0;
3528
3529 status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
3530 if (status)
3531 return status;
3532 /* Clear all the entries in the routing table. */
3533 for (i = 0; i < 16; i++) {
3534 status = ql_set_routing_reg(qdev, i, 0, 0);
3535 if (status) {
3536 QPRINTK(qdev, IFUP, ERR,
3537 "Failed to init routing register for CAM "
3538 "packets.\n");
3539 break;
3540 }
3541 }
3542 ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
3543 return status;
3544}
3545
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003546/* Initialize the frame-to-queue routing. */
3547static int ql_route_initialize(struct ql_adapter *qdev)
3548{
3549 int status = 0;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003550
3551 /* Clear all the entries in the routing table. */
Ron Mercera5f59dc2009-07-02 06:06:07 +00003552 status = ql_clear_routing_entries(qdev);
3553 if (status)
Ron Mercerfd21cf52009-09-29 08:39:22 +00003554 return status;
3555
3556 status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
3557 if (status)
3558 return status;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003559
3560 status = ql_set_routing_reg(qdev, RT_IDX_ALL_ERR_SLOT, RT_IDX_ERR, 1);
3561 if (status) {
3562 QPRINTK(qdev, IFUP, ERR,
3563 "Failed to init routing register for error packets.\n");
Ron Mercer8587ea32009-02-23 10:42:15 +00003564 goto exit;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003565 }
3566 status = ql_set_routing_reg(qdev, RT_IDX_BCAST_SLOT, RT_IDX_BCAST, 1);
3567 if (status) {
3568 QPRINTK(qdev, IFUP, ERR,
3569 "Failed to init routing register for broadcast packets.\n");
Ron Mercer8587ea32009-02-23 10:42:15 +00003570 goto exit;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003571 }
3572 /* If we have more than one inbound queue, then turn on RSS in the
3573 * routing block.
3574 */
3575 if (qdev->rss_ring_count > 1) {
3576 status = ql_set_routing_reg(qdev, RT_IDX_RSS_MATCH_SLOT,
3577 RT_IDX_RSS_MATCH, 1);
3578 if (status) {
3579 QPRINTK(qdev, IFUP, ERR,
3580 "Failed to init routing register for MATCH RSS packets.\n");
Ron Mercer8587ea32009-02-23 10:42:15 +00003581 goto exit;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003582 }
3583 }
3584
3585 status = ql_set_routing_reg(qdev, RT_IDX_CAM_HIT_SLOT,
3586 RT_IDX_CAM_HIT, 1);
Ron Mercer8587ea32009-02-23 10:42:15 +00003587 if (status)
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003588 QPRINTK(qdev, IFUP, ERR,
3589 "Failed to init routing register for CAM packets.\n");
Ron Mercer8587ea32009-02-23 10:42:15 +00003590exit:
3591 ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003592 return status;
3593}
3594
Ron Mercer2ee1e272009-03-03 12:10:33 +00003595int ql_cam_route_initialize(struct ql_adapter *qdev)
Ron Mercerbb58b5b2009-02-23 10:42:13 +00003596{
Ron Mercer7fab3bf2009-07-02 06:06:11 +00003597 int status, set;
Ron Mercerbb58b5b2009-02-23 10:42:13 +00003598
Ron Mercer7fab3bf2009-07-02 06:06:11 +00003599 /* Check if the link is up and use that to
3600 * determine whether we are setting or clearing
3601 * the MAC address in the CAM.
3602 */
3603 set = ql_read32(qdev, STS);
3604 set &= qdev->port_link_up;
3605 status = ql_set_mac_addr(qdev, set);
Ron Mercerbb58b5b2009-02-23 10:42:13 +00003606 if (status) {
3607 QPRINTK(qdev, IFUP, ERR, "Failed to init mac address.\n");
3608 return status;
3609 }
3610
3611 status = ql_route_initialize(qdev);
3612 if (status)
3613 QPRINTK(qdev, IFUP, ERR, "Failed to init routing table.\n");
3614
3615 return status;
3616}
3617
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003618static int ql_adapter_initialize(struct ql_adapter *qdev)
3619{
3620 u32 value, mask;
3621 int i;
3622 int status = 0;
3623
3624 /*
3625 * Set up the System register to halt on errors.
3626 */
3627 value = SYS_EFE | SYS_FAE;
3628 mask = value << 16;
3629 ql_write32(qdev, SYS, mask | value);
3630
Ron Mercerc9cf0a02009-03-09 10:59:22 +00003631 /* Set the default queue, and VLAN behavior. */
3632 value = NIC_RCV_CFG_DFQ | NIC_RCV_CFG_RV;
3633 mask = NIC_RCV_CFG_DFQ_MASK | (NIC_RCV_CFG_RV << 16);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003634 ql_write32(qdev, NIC_RCV_CFG, (mask | value));
3635
3636 /* Set the MPI interrupt to enabled. */
3637 ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16) | INTR_MASK_PI);
3638
3639 /* Enable the function, set pagesize, enable error checking. */
3640 value = FSC_FE | FSC_EPC_INBOUND | FSC_EPC_OUTBOUND |
Ron Mercer572c5262010-01-02 10:37:42 +00003641 FSC_EC | FSC_VM_PAGE_4K;
3642 value |= SPLT_SETTING;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003643
3644 /* Set/clear header splitting. */
3645 mask = FSC_VM_PAGESIZE_MASK |
3646 FSC_DBL_MASK | FSC_DBRST_MASK | (value << 16);
3647 ql_write32(qdev, FSC, mask | value);
3648
Ron Mercer572c5262010-01-02 10:37:42 +00003649 ql_write32(qdev, SPLT_HDR, SPLT_LEN);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003650
Ron Mercera3b71932009-10-08 09:54:38 +00003651 /* Set RX packet routing to use port/pci function on which the
3652 * packet arrived, in addition to the usual frame routing.
3653 * This is helpful on bonding where both interfaces can have
3654 * the same MAC address.
3655 */
3656 ql_write32(qdev, RST_FO, RST_FO_RR_MASK | RST_FO_RR_RCV_FUNC_CQ);
Ron Mercerbc083ce2009-10-21 11:07:40 +00003657 /* Reroute all packets to our Interface.
3658 * They may have been routed to MPI firmware
3659 * due to WOL.
3660 */
3661 value = ql_read32(qdev, MGMT_RCV_CFG);
3662 value &= ~MGMT_RCV_CFG_RM;
3663 mask = 0xffff0000;
3664
3665 /* Sticky reg needs clearing due to WOL. */
3666 ql_write32(qdev, MGMT_RCV_CFG, mask);
3667 ql_write32(qdev, MGMT_RCV_CFG, mask | value);
3668
3669 /* Default WOL is enabled on Mezz cards */
3670 if (qdev->pdev->subsystem_device == 0x0068 ||
3671 qdev->pdev->subsystem_device == 0x0180)
3672 qdev->wol = WAKE_MAGIC;
Ron Mercera3b71932009-10-08 09:54:38 +00003673
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003674 /* Start up the rx queues. */
3675 for (i = 0; i < qdev->rx_ring_count; i++) {
3676 status = ql_start_rx_ring(qdev, &qdev->rx_ring[i]);
3677 if (status) {
3678 QPRINTK(qdev, IFUP, ERR,
3679 "Failed to start rx ring[%d].\n", i);
3680 return status;
3681 }
3682 }
3683
3684 /* If there is more than one inbound completion queue
3685 * then download a RICB to configure RSS.
3686 */
3687 if (qdev->rss_ring_count > 1) {
3688 status = ql_start_rss(qdev);
3689 if (status) {
3690 QPRINTK(qdev, IFUP, ERR, "Failed to start RSS.\n");
3691 return status;
3692 }
3693 }
3694
3695 /* Start up the tx queues. */
3696 for (i = 0; i < qdev->tx_ring_count; i++) {
3697 status = ql_start_tx_ring(qdev, &qdev->tx_ring[i]);
3698 if (status) {
3699 QPRINTK(qdev, IFUP, ERR,
3700 "Failed to start tx ring[%d].\n", i);
3701 return status;
3702 }
3703 }
3704
Ron Mercerb0c2aad2009-02-26 10:08:35 +00003705 /* Initialize the port and set the max framesize. */
3706 status = qdev->nic_ops->port_initialize(qdev);
Ron Mercer80928862009-10-10 09:35:09 +00003707 if (status)
3708 QPRINTK(qdev, IFUP, ERR, "Failed to start port.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003709
Ron Mercerbb58b5b2009-02-23 10:42:13 +00003710 /* Set up the MAC address and frame routing filter. */
3711 status = ql_cam_route_initialize(qdev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003712 if (status) {
Ron Mercerbb58b5b2009-02-23 10:42:13 +00003713 QPRINTK(qdev, IFUP, ERR,
3714 "Failed to init CAM/Routing tables.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003715 return status;
3716 }
3717
3718 /* Start NAPI for the RSS queues. */
Ron Mercerb2014ff2009-08-27 11:02:09 +00003719 for (i = 0; i < qdev->rss_ring_count; i++) {
Ron Mercer49740972009-02-26 10:08:36 +00003720 QPRINTK(qdev, IFUP, DEBUG, "Enabling NAPI for rx_ring[%d].\n",
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003721 i);
3722 napi_enable(&qdev->rx_ring[i].napi);
3723 }
3724
3725 return status;
3726}
3727
3728/* Issue soft reset to chip. */
3729static int ql_adapter_reset(struct ql_adapter *qdev)
3730{
3731 u32 value;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003732 int status = 0;
Ron Mercera5f59dc2009-07-02 06:06:07 +00003733 unsigned long end_jiffies;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003734
Ron Mercera5f59dc2009-07-02 06:06:07 +00003735 /* Clear all the entries in the routing table. */
3736 status = ql_clear_routing_entries(qdev);
3737 if (status) {
3738 QPRINTK(qdev, IFUP, ERR, "Failed to clear routing bits.\n");
3739 return status;
3740 }
3741
3742 end_jiffies = jiffies +
3743 max((unsigned long)1, usecs_to_jiffies(30));
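 /* Allow at least one jiffy for the function-reset bit to clear. */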
Ron Mercer84087f42009-10-08 09:54:41 +00003744
3745 /* Stop management traffic. */
3746 ql_mb_set_mgmnt_traffic_ctl(qdev, MB_SET_MPI_TFK_STOP);
3747
3748 /* Wait for the NIC and MGMNT FIFOs to empty. */
3749 ql_wait_fifo_empty(qdev);
3750
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003751 ql_write32(qdev, RST_FO, (RST_FO_FR << 16) | RST_FO_FR);
Ron Mercera75ee7f2009-03-09 10:59:18 +00003752
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003753 do {
3754 value = ql_read32(qdev, RST_FO);
3755 if ((value & RST_FO_FR) == 0)
3756 break;
Ron Mercera75ee7f2009-03-09 10:59:18 +00003757 cpu_relax();
3758 } while (time_before(jiffies, end_jiffies));
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003759
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003760 if (value & RST_FO_FR) {
3761 QPRINTK(qdev, IFDOWN, ERR,
Jean Delvare3ac49a12009-06-04 16:20:28 +02003762 "ETIMEDOUT!!! errored out of resetting the chip!\n");
Ron Mercera75ee7f2009-03-09 10:59:18 +00003763 status = -ETIMEDOUT;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003764 }
3765
Ron Mercer84087f42009-10-08 09:54:41 +00003766 /* Resume management traffic. */
3767 ql_mb_set_mgmnt_traffic_ctl(qdev, MB_SET_MPI_TFK_RESUME);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003768 return status;
3769}
3770
3771static void ql_display_dev_info(struct net_device *ndev)
3772{
3773 struct ql_adapter *qdev = (struct ql_adapter *)netdev_priv(ndev);
3774
3775 QPRINTK(qdev, PROBE, INFO,
Ron Mercere4552f52009-06-09 05:39:32 +00003776 "Function #%d, Port %d, NIC Roll %d, NIC Rev = %d, "
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003777 "XG Roll = %d, XG Rev = %d.\n",
3778 qdev->func,
Ron Mercere4552f52009-06-09 05:39:32 +00003779 qdev->port,
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003780 qdev->chip_rev_id & 0x0000000f,
3781 qdev->chip_rev_id >> 4 & 0x0000000f,
3782 qdev->chip_rev_id >> 8 & 0x0000000f,
3783 qdev->chip_rev_id >> 12 & 0x0000000f);
Johannes Berg7c510e42008-10-27 17:47:26 -07003784 QPRINTK(qdev, PROBE, INFO, "MAC address %pM\n", ndev->dev_addr);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003785}
3786
Ron Mercerbc083ce2009-10-21 11:07:40 +00003787int ql_wol(struct ql_adapter *qdev)
3788{
3789 int status = 0;
3790 u32 wol = MB_WOL_DISABLE;
3791
3792 /* The CAM is still intact after a reset, but if we
3793 * are doing WOL, then we may need to program the
3794 * routing regs. We would also need to issue the mailbox
3795 * commands to instruct the MPI what to do per the ethtool
3796 * settings.
3797 */
3798
3799 if (qdev->wol & (WAKE_ARP | WAKE_MAGICSECURE | WAKE_PHY | WAKE_UCAST |
3800 WAKE_MCAST | WAKE_BCAST)) {
3801 QPRINTK(qdev, IFDOWN, ERR,
3802 "Unsupported WOL paramter. qdev->wol = 0x%x.\n",
3803 qdev->wol);
3804 return -EINVAL;
3805 }
3806
3807 if (qdev->wol & WAKE_MAGIC) {
3808 status = ql_mb_wol_set_magic(qdev, 1);
3809 if (status) {
3810 QPRINTK(qdev, IFDOWN, ERR,
3811 "Failed to set magic packet on %s.\n",
3812 qdev->ndev->name);
3813 return status;
3814 } else
3815 QPRINTK(qdev, DRV, INFO,
3816 "Enabled magic packet successfully on %s.\n",
3817 qdev->ndev->name);
3818
3819 wol |= MB_WOL_MAGIC_PKT;
3820 }
3821
3822 if (qdev->wol) {
Ron Mercerbc083ce2009-10-21 11:07:40 +00003823 wol |= MB_WOL_MODE_ON;
3824 status = ql_mb_wol_mode(qdev, wol);
3825 QPRINTK(qdev, DRV, ERR, "WOL %s (wol code 0x%x) on %s\n",
3826 (status == 0) ? "Successfully set" : "Failed", wol,
3827 qdev->ndev->name);
3828 }
3829
3830 return status;
3831}
3832
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003833static int ql_adapter_down(struct ql_adapter *qdev)
3834{
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003835 int i, status = 0;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003836
Ron Mercer6a473302009-07-02 06:06:12 +00003837 ql_link_off(qdev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003838
Ron Mercer6497b602009-02-12 16:37:13 -08003839 /* Don't kill the reset worker thread if we
3840 * are in the process of recovery.
3841 */
3842 if (test_bit(QL_ADAPTER_UP, &qdev->flags))
3843 cancel_delayed_work_sync(&qdev->asic_reset_work);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003844 cancel_delayed_work_sync(&qdev->mpi_reset_work);
3845 cancel_delayed_work_sync(&qdev->mpi_work);
Ron Mercer2ee1e272009-03-03 12:10:33 +00003846 cancel_delayed_work_sync(&qdev->mpi_idc_work);
Ron Mercerbcc2cb3b2009-03-02 08:07:32 +00003847 cancel_delayed_work_sync(&qdev->mpi_port_cfg_work);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003848
Ron Mercer39aa8162009-08-27 11:02:11 +00003849 for (i = 0; i < qdev->rss_ring_count; i++)
3850 napi_disable(&qdev->rx_ring[i].napi);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003851
3852 clear_bit(QL_ADAPTER_UP, &qdev->flags);
3853
3854 ql_disable_interrupts(qdev);
3855
3856 ql_tx_ring_clean(qdev);
3857
Ron Mercer6b318cb2009-03-09 10:59:26 +00003858 /* Call netif_napi_del() from a common point.
3859 */
Ron Mercerb2014ff2009-08-27 11:02:09 +00003860 for (i = 0; i < qdev->rss_ring_count; i++)
Ron Mercer6b318cb2009-03-09 10:59:26 +00003861 netif_napi_del(&qdev->rx_ring[i].napi);
3862
Ron Mercer4545a3f2009-02-23 10:42:17 +00003863 ql_free_rx_buffers(qdev);
David S. Miller2d6a5e92009-03-17 15:01:30 -07003864
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003865 status = ql_adapter_reset(qdev);
3866 if (status)
3867 QPRINTK(qdev, IFDOWN, ERR, "reset(func #%d) FAILED!\n",
3868 qdev->func);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003869 return status;
3870}
3871
3872static int ql_adapter_up(struct ql_adapter *qdev)
3873{
3874 int err = 0;
3875
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003876 err = ql_adapter_initialize(qdev);
3877 if (err) {
3878 QPRINTK(qdev, IFUP, INFO, "Unable to initialize adapter.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003879 goto err_init;
3880 }
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003881 set_bit(QL_ADAPTER_UP, &qdev->flags);
Ron Mercer4545a3f2009-02-23 10:42:17 +00003882 ql_alloc_rx_buffers(qdev);
Ron Mercer8b007de2009-07-02 06:06:08 +00003883 /* If the port is initialized and the
3884 * link is up then turn on the carrier.
3885 */
3886 if ((ql_read32(qdev, STS) & qdev->port_init) &&
3887 (ql_read32(qdev, STS) & qdev->port_link_up))
Ron Mercer6a473302009-07-02 06:06:12 +00003888 ql_link_on(qdev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003889 ql_enable_interrupts(qdev);
3890 ql_enable_all_completion_interrupts(qdev);
Ron Mercer1e213302009-03-09 10:59:21 +00003891 netif_tx_start_all_queues(qdev->ndev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003892
3893 return 0;
3894err_init:
3895 ql_adapter_reset(qdev);
3896 return err;
3897}
3898
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003899static void ql_release_adapter_resources(struct ql_adapter *qdev)
3900{
3901 ql_free_mem_resources(qdev);
3902 ql_free_irq(qdev);
3903}
3904
3905static int ql_get_adapter_resources(struct ql_adapter *qdev)
3906{
3907 int status = 0;
3908
3909 if (ql_alloc_mem_resources(qdev)) {
3910 QPRINTK(qdev, IFUP, ERR, "Unable to allocate memory.\n");
3911 return -ENOMEM;
3912 }
3913 status = ql_request_irq(qdev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003914 return status;
3915}
3916
3917static int qlge_close(struct net_device *ndev)
3918{
3919 struct ql_adapter *qdev = netdev_priv(ndev);
3920
3921 /*
3922 * Wait for device to recover from a reset.
3923 * (Rarely happens, but possible.)
3924 */
3925 while (!test_bit(QL_ADAPTER_UP, &qdev->flags))
3926 msleep(1);
3927 ql_adapter_down(qdev);
3928 ql_release_adapter_resources(qdev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003929 return 0;
3930}
3931
3932static int ql_configure_rings(struct ql_adapter *qdev)
3933{
3934 int i;
3935 struct rx_ring *rx_ring;
3936 struct tx_ring *tx_ring;
Ron Mercera4ab6132009-08-27 11:02:10 +00003937 int cpu_cnt = min(MAX_CPUS, (int)num_online_cpus());
Ron Mercer7c734352009-10-19 03:32:19 +00003938 unsigned int lbq_buf_len = (qdev->ndev->mtu > 1500) ?
3939 LARGE_BUFFER_MAX_SIZE : LARGE_BUFFER_MIN_SIZE;
3940
3941 qdev->lbq_buf_order = get_order(lbq_buf_len);
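 /* get_order() converts the large buffer size into the page
  * allocation order used when the large buffer queues are refilled. */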
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003942
Ron Mercera4ab6132009-08-27 11:02:10 +00003943 /* In a perfect world we have one RSS ring for each CPU
3944 * and each has its own vector. To do that we ask for
3945 * cpu_cnt vectors. ql_enable_msix() will adjust the
3946 * vector count to what we actually get. We then
3947 * allocate an RSS ring for each.
3948 * Essentially, we are doing min(cpu_count, msix_vector_count).
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003949 */
Ron Mercera4ab6132009-08-27 11:02:10 +00003950 qdev->intr_count = cpu_cnt;
3951 ql_enable_msix(qdev);
3952 /* Adjust the RSS ring count to the actual vector count. */
3953 qdev->rss_ring_count = qdev->intr_count;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003954 qdev->tx_ring_count = cpu_cnt;
Ron Mercerb2014ff2009-08-27 11:02:09 +00003955 qdev->rx_ring_count = qdev->tx_ring_count + qdev->rss_ring_count;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003956
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003957 for (i = 0; i < qdev->tx_ring_count; i++) {
3958 tx_ring = &qdev->tx_ring[i];
Ron Mercere3324712009-07-02 06:06:13 +00003959 memset((void *)tx_ring, 0, sizeof(*tx_ring));
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003960 tx_ring->qdev = qdev;
3961 tx_ring->wq_id = i;
3962 tx_ring->wq_len = qdev->tx_ring_size;
3963 tx_ring->wq_size =
3964 tx_ring->wq_len * sizeof(struct ob_mac_iocb_req);
3965
3966 /*
3967 * The completion queue IDs for the tx rings start
Ron Mercer39aa8162009-08-27 11:02:11 +00003968 * immediately after the rss rings.
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003969 */
Ron Mercer39aa8162009-08-27 11:02:11 +00003970 tx_ring->cq_id = qdev->rss_ring_count + i;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003971 }
3972
3973 for (i = 0; i < qdev->rx_ring_count; i++) {
3974 rx_ring = &qdev->rx_ring[i];
Ron Mercere3324712009-07-02 06:06:13 +00003975 memset((void *)rx_ring, 0, sizeof(*rx_ring));
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003976 rx_ring->qdev = qdev;
3977 rx_ring->cq_id = i;
3978 rx_ring->cpu = i % cpu_cnt; /* CPU to run handler on. */
Ron Mercerb2014ff2009-08-27 11:02:09 +00003979 if (i < qdev->rss_ring_count) {
Ron Mercer39aa8162009-08-27 11:02:11 +00003980 /*
3981 * Inbound (RSS) queues.
3982 */
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003983 rx_ring->cq_len = qdev->rx_ring_size;
3984 rx_ring->cq_size =
3985 rx_ring->cq_len * sizeof(struct ql_net_rsp_iocb);
3986 rx_ring->lbq_len = NUM_LARGE_BUFFERS;
3987 rx_ring->lbq_size =
Ron Mercer2c9a0d42009-01-05 18:19:20 -08003988 rx_ring->lbq_len * sizeof(__le64);
Ron Mercer7c734352009-10-19 03:32:19 +00003989 rx_ring->lbq_buf_size = (u16)lbq_buf_len;
3990 QPRINTK(qdev, IFUP, DEBUG,
3991 "lbq_buf_size %d, order = %d\n",
3992 rx_ring->lbq_buf_size, qdev->lbq_buf_order);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003993 rx_ring->sbq_len = NUM_SMALL_BUFFERS;
3994 rx_ring->sbq_size =
Ron Mercer2c9a0d42009-01-05 18:19:20 -08003995 rx_ring->sbq_len * sizeof(__le64);
Ron Mercer52e55f32009-10-10 09:35:07 +00003996 rx_ring->sbq_buf_size = SMALL_BUF_MAP_SIZE;
Ron Mercerb2014ff2009-08-27 11:02:09 +00003997 rx_ring->type = RX_Q;
3998 } else {
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003999 /*
4000 * Outbound queue handles outbound completions only.
4001 */
4002 /* outbound cq is same size as tx_ring it services. */
4003 rx_ring->cq_len = qdev->tx_ring_size;
4004 rx_ring->cq_size =
4005 rx_ring->cq_len * sizeof(struct ql_net_rsp_iocb);
4006 rx_ring->lbq_len = 0;
4007 rx_ring->lbq_size = 0;
4008 rx_ring->lbq_buf_size = 0;
4009 rx_ring->sbq_len = 0;
4010 rx_ring->sbq_size = 0;
4011 rx_ring->sbq_buf_size = 0;
4012 rx_ring->type = TX_Q;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004013 }
4014 }
4015 return 0;
4016}
4017
4018static int qlge_open(struct net_device *ndev)
4019{
4020 int err = 0;
4021 struct ql_adapter *qdev = netdev_priv(ndev);
4022
Ron Mercer74e12432009-11-11 12:54:04 +00004023 err = ql_adapter_reset(qdev);
4024 if (err)
4025 return err;
4026
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004027 err = ql_configure_rings(qdev);
4028 if (err)
4029 return err;
4030
4031 err = ql_get_adapter_resources(qdev);
4032 if (err)
4033 goto error_up;
4034
4035 err = ql_adapter_up(qdev);
4036 if (err)
4037 goto error_up;
4038
4039 return err;
4040
4041error_up:
4042 ql_release_adapter_resources(qdev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004043 return err;
4044}
4045
Ron Mercer7c734352009-10-19 03:32:19 +00004046static int ql_change_rx_buffers(struct ql_adapter *qdev)
4047{
4048 struct rx_ring *rx_ring;
4049 int i, status;
4050 u32 lbq_buf_len;
4051
4052 /* Wait for an outstanding reset to complete. */
4053 if (!test_bit(QL_ADAPTER_UP, &qdev->flags)) {
4054 int i = 3;
4055 while (i-- && !test_bit(QL_ADAPTER_UP, &qdev->flags)) {
4056 QPRINTK(qdev, IFUP, ERR,
4057 "Waiting for adapter UP...\n");
4058 ssleep(1);
4059 }
4060
4061 if (!i) {
4062 QPRINTK(qdev, IFUP, ERR,
4063 "Timed out waiting for adapter UP\n");
4064 return -ETIMEDOUT;
4065 }
4066 }
4067
4068 status = ql_adapter_down(qdev);
4069 if (status)
4070 goto error;
4071
4072 /* Get the new rx buffer size. */
4073 lbq_buf_len = (qdev->ndev->mtu > 1500) ?
4074 LARGE_BUFFER_MAX_SIZE : LARGE_BUFFER_MIN_SIZE;
4075 qdev->lbq_buf_order = get_order(lbq_buf_len);
4076
4077 for (i = 0; i < qdev->rss_ring_count; i++) {
4078 rx_ring = &qdev->rx_ring[i];
4079 /* Set the new size. */
4080 rx_ring->lbq_buf_size = lbq_buf_len;
4081 }
4082
4083 status = ql_adapter_up(qdev);
4084 if (status)
4085 goto error;
4086
4087 return status;
4088error:
4089 QPRINTK(qdev, IFUP, ALERT,
4090 "Driver up/down cycle failed, closing device.\n");
4091 set_bit(QL_ADAPTER_UP, &qdev->flags);
4092 dev_close(qdev->ndev);
4093 return status;
4094}
4095
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004096static int qlge_change_mtu(struct net_device *ndev, int new_mtu)
4097{
4098 struct ql_adapter *qdev = netdev_priv(ndev);
Ron Mercer7c734352009-10-19 03:32:19 +00004099 int status;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004100
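 /* Only the standard (1500) and jumbo (9000) MTU sizes are
  * supported; any other value is rejected below. */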
4101 if (ndev->mtu == 1500 && new_mtu == 9000) {
4102 QPRINTK(qdev, IFUP, ERR, "Changing to jumbo MTU.\n");
4103 } else if (ndev->mtu == 9000 && new_mtu == 1500) {
4104 QPRINTK(qdev, IFUP, ERR, "Changing to normal MTU.\n");
4105 } else if ((ndev->mtu == 1500 && new_mtu == 1500) ||
4106 (ndev->mtu == 9000 && new_mtu == 9000)) {
4107 return 0;
4108 } else
4109 return -EINVAL;
Ron Mercer7c734352009-10-19 03:32:19 +00004110
4111 queue_delayed_work(qdev->workqueue,
4112 &qdev->mpi_port_cfg_work, 3*HZ);
4113
4114 if (!netif_running(qdev->ndev)) {
4115 ndev->mtu = new_mtu;
4116 return 0;
4117 }
4118
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004119 ndev->mtu = new_mtu;
Ron Mercer7c734352009-10-19 03:32:19 +00004120 status = ql_change_rx_buffers(qdev);
4121 if (status) {
4122 QPRINTK(qdev, IFUP, ERR,
4123 "Changing MTU failed.\n");
4124 }
4125
4126 return status;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004127}
4128
4129static struct net_device_stats *qlge_get_stats(struct net_device
4130 *ndev)
4131{
Ron Mercer885ee392009-11-03 13:49:31 +00004132 struct ql_adapter *qdev = netdev_priv(ndev);
4133 struct rx_ring *rx_ring = &qdev->rx_ring[0];
4134 struct tx_ring *tx_ring = &qdev->tx_ring[0];
4135 unsigned long pkts, mcast, dropped, errors, bytes;
4136 int i;
4137
4138 /* Get RX stats. */
4139 pkts = mcast = dropped = errors = bytes = 0;
4140 for (i = 0; i < qdev->rss_ring_count; i++, rx_ring++) {
4141 pkts += rx_ring->rx_packets;
4142 bytes += rx_ring->rx_bytes;
4143 dropped += rx_ring->rx_dropped;
4144 errors += rx_ring->rx_errors;
4145 mcast += rx_ring->rx_multicast;
4146 }
4147 ndev->stats.rx_packets = pkts;
4148 ndev->stats.rx_bytes = bytes;
4149 ndev->stats.rx_dropped = dropped;
4150 ndev->stats.rx_errors = errors;
4151 ndev->stats.multicast = mcast;
4152
4153 /* Get TX stats. */
4154 pkts = errors = bytes = 0;
4155 for (i = 0; i < qdev->tx_ring_count; i++, tx_ring++) {
4156 pkts += tx_ring->tx_packets;
4157 bytes += tx_ring->tx_bytes;
4158 errors += tx_ring->tx_errors;
4159 }
4160 ndev->stats.tx_packets = pkts;
4161 ndev->stats.tx_bytes = bytes;
4162 ndev->stats.tx_errors = errors;
Ajit Khapardebcc90f52009-10-07 02:46:09 +00004163 return &ndev->stats;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004164}
4165
4166static void qlge_set_multicast_list(struct net_device *ndev)
4167{
4168 struct ql_adapter *qdev = (struct ql_adapter *)netdev_priv(ndev);
4169 struct dev_mc_list *mc_ptr;
Ron Mercercc288f52009-02-23 10:42:14 +00004170 int i, status;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004171
Ron Mercercc288f52009-02-23 10:42:14 +00004172 status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
4173 if (status)
4174 return;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004175 /*
4176 * Set or clear promiscuous mode if a
4177 * transition is taking place.
4178 */
4179 if (ndev->flags & IFF_PROMISC) {
4180 if (!test_bit(QL_PROMISCUOUS, &qdev->flags)) {
4181 if (ql_set_routing_reg
4182 (qdev, RT_IDX_PROMISCUOUS_SLOT, RT_IDX_VALID, 1)) {
4183 QPRINTK(qdev, HW, ERR,
4184 "Failed to set promiscous mode.\n");
4185 } else {
4186 set_bit(QL_PROMISCUOUS, &qdev->flags);
4187 }
4188 }
4189 } else {
4190 if (test_bit(QL_PROMISCUOUS, &qdev->flags)) {
4191 if (ql_set_routing_reg
4192 (qdev, RT_IDX_PROMISCUOUS_SLOT, RT_IDX_VALID, 0)) {
4193 QPRINTK(qdev, HW, ERR,
4194 "Failed to clear promiscous mode.\n");
4195 } else {
4196 clear_bit(QL_PROMISCUOUS, &qdev->flags);
4197 }
4198 }
4199 }
4200
4201 /*
4202 * Set or clear all multicast mode if a
4203 * transition is taking place.
4204 */
4205 if ((ndev->flags & IFF_ALLMULTI) ||
4206 (ndev->mc_count > MAX_MULTICAST_ENTRIES)) {
4207 if (!test_bit(QL_ALLMULTI, &qdev->flags)) {
4208 if (ql_set_routing_reg
4209 (qdev, RT_IDX_ALLMULTI_SLOT, RT_IDX_MCAST, 1)) {
4210 QPRINTK(qdev, HW, ERR,
4211 "Failed to set all-multi mode.\n");
4212 } else {
4213 set_bit(QL_ALLMULTI, &qdev->flags);
4214 }
4215 }
4216 } else {
4217 if (test_bit(QL_ALLMULTI, &qdev->flags)) {
4218 if (ql_set_routing_reg
4219 (qdev, RT_IDX_ALLMULTI_SLOT, RT_IDX_MCAST, 0)) {
4220 QPRINTK(qdev, HW, ERR,
4221 "Failed to clear all-multi mode.\n");
4222 } else {
4223 clear_bit(QL_ALLMULTI, &qdev->flags);
4224 }
4225 }
4226 }
4227
4228 if (ndev->mc_count) {
Ron Mercercc288f52009-02-23 10:42:14 +00004229 status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
4230 if (status)
4231 goto exit;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004232 for (i = 0, mc_ptr = ndev->mc_list; mc_ptr;
4233 i++, mc_ptr = mc_ptr->next)
4234 if (ql_set_mac_addr_reg(qdev, (u8 *) mc_ptr->dmi_addr,
4235 MAC_ADDR_TYPE_MULTI_MAC, i)) {
4236 QPRINTK(qdev, HW, ERR,
4237 "Failed to loadmulticast address.\n");
Ron Mercercc288f52009-02-23 10:42:14 +00004238 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004239 goto exit;
4240 }
Ron Mercercc288f52009-02-23 10:42:14 +00004241 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004242 if (ql_set_routing_reg
4243 (qdev, RT_IDX_MCAST_MATCH_SLOT, RT_IDX_MCAST_MATCH, 1)) {
4244 QPRINTK(qdev, HW, ERR,
4245 "Failed to set multicast match mode.\n");
4246 } else {
4247 set_bit(QL_ALLMULTI, &qdev->flags);
4248 }
4249 }
4250exit:
Ron Mercer8587ea32009-02-23 10:42:15 +00004251 ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004252}
4253
4254static int qlge_set_mac_address(struct net_device *ndev, void *p)
4255{
4256 struct ql_adapter *qdev = (struct ql_adapter *)netdev_priv(ndev);
4257 struct sockaddr *addr = p;
Ron Mercercc288f52009-02-23 10:42:14 +00004258 int status;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004259
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004260 if (!is_valid_ether_addr(addr->sa_data))
4261 return -EADDRNOTAVAIL;
4262 memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len);
4263
Ron Mercercc288f52009-02-23 10:42:14 +00004264 status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
4265 if (status)
4266 return status;
Ron Mercercc288f52009-02-23 10:42:14 +00004267 status = ql_set_mac_addr_reg(qdev, (u8 *) ndev->dev_addr,
4268 MAC_ADDR_TYPE_CAM_MAC, qdev->func * MAX_CQ);
Ron Mercercc288f52009-02-23 10:42:14 +00004269 if (status)
4270 QPRINTK(qdev, HW, ERR, "Failed to load MAC address.\n");
4271 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
4272 return status;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004273}
4274
4275static void qlge_tx_timeout(struct net_device *ndev)
4276{
4277 struct ql_adapter *qdev = (struct ql_adapter *)netdev_priv(ndev);
Ron Mercer6497b602009-02-12 16:37:13 -08004278 ql_queue_asic_error(qdev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004279}
4280
4281static void ql_asic_reset_work(struct work_struct *work)
4282{
4283 struct ql_adapter *qdev =
4284 container_of(work, struct ql_adapter, asic_reset_work.work);
Ron Mercerdb988122009-03-09 10:59:17 +00004285 int status;
Ron Mercerf2c0d8d2009-09-29 08:39:24 +00004286 rtnl_lock();
Ron Mercerdb988122009-03-09 10:59:17 +00004287 status = ql_adapter_down(qdev);
4288 if (status)
4289 goto error;
4290
4291 status = ql_adapter_up(qdev);
4292 if (status)
4293 goto error;
Ron Mercer2cd6dba2009-10-08 09:54:42 +00004294
4295 /* Restore rx mode. */
4296 clear_bit(QL_ALLMULTI, &qdev->flags);
4297 clear_bit(QL_PROMISCUOUS, &qdev->flags);
4298 qlge_set_multicast_list(qdev->ndev);
4299
Ron Mercerf2c0d8d2009-09-29 08:39:24 +00004300 rtnl_unlock();
Ron Mercerdb988122009-03-09 10:59:17 +00004301 return;
4302error:
4303 QPRINTK(qdev, IFUP, ALERT,
4304 "Driver up/down cycle failed, closing device\n");
Ron Mercerf2c0d8d2009-09-29 08:39:24 +00004305
Ron Mercerdb988122009-03-09 10:59:17 +00004306 set_bit(QL_ADAPTER_UP, &qdev->flags);
4307 dev_close(qdev->ndev);
4308 rtnl_unlock();
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004309}
4310
Ron Mercerb0c2aad2009-02-26 10:08:35 +00004311static struct nic_operations qla8012_nic_ops = {
4312 .get_flash = ql_get_8012_flash_params,
4313 .port_initialize = ql_8012_port_initialize,
4314};
4315
Ron Mercercdca8d02009-03-02 08:07:31 +00004316static struct nic_operations qla8000_nic_ops = {
4317 .get_flash = ql_get_8000_flash_params,
4318 .port_initialize = ql_8000_port_initialize,
4319};
4320
Ron Mercere4552f52009-06-09 05:39:32 +00004321/* Find the pcie function number for the other NIC
4322 * on this chip. Since both NIC functions share a
4323 * common firmware we have the lowest enabled function
4324 * do any common work. Examples would be resetting
4325 * after a fatal firmware error, or doing a firmware
4326 * coredump.
4327 */
4328static int ql_get_alt_pcie_func(struct ql_adapter *qdev)
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004329{
Ron Mercere4552f52009-06-09 05:39:32 +00004330 int status = 0;
4331 u32 temp;
4332 u32 nic_func1, nic_func2;
4333
4334 status = ql_read_mpi_reg(qdev, MPI_TEST_FUNC_PORT_CFG,
4335 &temp);
4336 if (status)
4337 return status;
4338
4339 nic_func1 = ((temp >> MPI_TEST_NIC1_FUNC_SHIFT) &
4340 MPI_TEST_NIC_FUNC_MASK);
4341 nic_func2 = ((temp >> MPI_TEST_NIC2_FUNC_SHIFT) &
4342 MPI_TEST_NIC_FUNC_MASK);
4343
4344 if (qdev->func == nic_func1)
4345 qdev->alt_func = nic_func2;
4346 else if (qdev->func == nic_func2)
4347 qdev->alt_func = nic_func1;
4348 else
4349 status = -EIO;
4350
4351 return status;
4352}
4353
4354static int ql_get_board_info(struct ql_adapter *qdev)
4355{
4356 int status;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004357 qdev->func =
4358 (ql_read32(qdev, STS) & STS_FUNC_ID_MASK) >> STS_FUNC_ID_SHIFT;
Ron Mercere4552f52009-06-09 05:39:32 +00004359 if (qdev->func > 3)
4360 return -EIO;
4361
4362 status = ql_get_alt_pcie_func(qdev);
4363 if (status)
4364 return status;
4365
4366 qdev->port = (qdev->func < qdev->alt_func) ? 0 : 1;
4367 if (qdev->port) {
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004368 qdev->xg_sem_mask = SEM_XGMAC1_MASK;
4369 qdev->port_link_up = STS_PL1;
4370 qdev->port_init = STS_PI1;
4371 qdev->mailbox_in = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC2_MBI;
4372 qdev->mailbox_out = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC2_MBO;
4373 } else {
4374 qdev->xg_sem_mask = SEM_XGMAC0_MASK;
4375 qdev->port_link_up = STS_PL0;
4376 qdev->port_init = STS_PI0;
4377 qdev->mailbox_in = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC0_MBI;
4378 qdev->mailbox_out = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC0_MBO;
4379 }
4380 qdev->chip_rev_id = ql_read32(qdev, REV_ID);
Ron Mercerb0c2aad2009-02-26 10:08:35 +00004381 qdev->device_id = qdev->pdev->device;
4382 if (qdev->device_id == QLGE_DEVICE_ID_8012)
4383 qdev->nic_ops = &qla8012_nic_ops;
Ron Mercercdca8d02009-03-02 08:07:31 +00004384 else if (qdev->device_id == QLGE_DEVICE_ID_8000)
4385 qdev->nic_ops = &qla8000_nic_ops;
Ron Mercere4552f52009-06-09 05:39:32 +00004386 return status;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004387}
4388
4389static void ql_release_all(struct pci_dev *pdev)
4390{
4391 struct net_device *ndev = pci_get_drvdata(pdev);
4392 struct ql_adapter *qdev = netdev_priv(ndev);
4393
4394 if (qdev->workqueue) {
4395 destroy_workqueue(qdev->workqueue);
4396 qdev->workqueue = NULL;
4397 }
Ron Mercer39aa8162009-08-27 11:02:11 +00004398
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004399 if (qdev->reg_base)
Stephen Hemminger8668ae92008-11-21 17:29:50 -08004400 iounmap(qdev->reg_base);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004401 if (qdev->doorbell_area)
4402 iounmap(qdev->doorbell_area);
4403 pci_release_regions(pdev);
4404 pci_set_drvdata(pdev, NULL);
4405}
4406
4407static int __devinit ql_init_device(struct pci_dev *pdev,
4408 struct net_device *ndev, int cards_found)
4409{
4410 struct ql_adapter *qdev = netdev_priv(ndev);
Ron Mercer1d1023d2009-10-10 09:35:03 +00004411 int err = 0;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004412
Ron Mercere3324712009-07-02 06:06:13 +00004413 memset((void *)qdev, 0, sizeof(*qdev));
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004414 err = pci_enable_device(pdev);
4415 if (err) {
4416 dev_err(&pdev->dev, "PCI device enable failed.\n");
4417 return err;
4418 }
4419
Ron Mercerebd6e772009-09-29 08:39:25 +00004420 qdev->ndev = ndev;
4421 qdev->pdev = pdev;
4422 pci_set_drvdata(pdev, ndev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004423
Ron Mercerbc9167f2009-10-10 09:35:04 +00004424 /* Set PCIe read request size */
4425 err = pcie_set_readrq(pdev, 4096);
4426 if (err) {
4427 dev_err(&pdev->dev, "Set readrq failed.\n");
4428 goto err_out;
4429 }
4430
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004431 err = pci_request_regions(pdev, DRV_NAME);
4432 if (err) {
4433 dev_err(&pdev->dev, "PCI region request failed.\n");
Ron Mercerebd6e772009-09-29 08:39:25 +00004434 return err;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004435 }
4436
4437 pci_set_master(pdev);
Yang Hongyang6a355282009-04-06 19:01:13 -07004438 if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004439 set_bit(QL_DMA64, &qdev->flags);
Yang Hongyang6a355282009-04-06 19:01:13 -07004440 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004441 } else {
Yang Hongyang284901a2009-04-06 19:01:15 -07004442 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004443 if (!err)
Yang Hongyang284901a2009-04-06 19:01:15 -07004444 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004445 }
4446
4447 if (err) {
4448 dev_err(&pdev->dev, "No usable DMA configuration.\n");
4449 goto err_out;
4450 }
4451
Ron Mercer73475332009-11-06 07:44:58 +00004452 /* Set PCIe reset type for EEH to fundamental. */
4453 pdev->needs_freset = 1;
Ron Mercer6d190c62009-10-28 08:39:20 +00004454 pci_save_state(pdev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004455 qdev->reg_base =
4456 ioremap_nocache(pci_resource_start(pdev, 1),
4457 pci_resource_len(pdev, 1));
4458 if (!qdev->reg_base) {
4459 dev_err(&pdev->dev, "Register mapping failed.\n");
4460 err = -ENOMEM;
4461 goto err_out;
4462 }
4463
4464 qdev->doorbell_area_size = pci_resource_len(pdev, 3);
4465 qdev->doorbell_area =
4466 ioremap_nocache(pci_resource_start(pdev, 3),
4467 pci_resource_len(pdev, 3));
4468 if (!qdev->doorbell_area) {
4469 dev_err(&pdev->dev, "Doorbell register mapping failed.\n");
4470 err = -ENOMEM;
4471 goto err_out;
4472 }
4473
Ron Mercere4552f52009-06-09 05:39:32 +00004474 err = ql_get_board_info(qdev);
4475 if (err) {
4476 dev_err(&pdev->dev, "Register access failed.\n");
4477 err = -EIO;
4478 goto err_out;
4479 }
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004480 qdev->msg_enable = netif_msg_init(debug, default_msg);
4481 spin_lock_init(&qdev->hw_lock);
4482 spin_lock_init(&qdev->stats_lock);
4483
4484 /* make sure the EEPROM is good */
Ron Mercerb0c2aad2009-02-26 10:08:35 +00004485 err = qdev->nic_ops->get_flash(qdev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004486 if (err) {
4487 dev_err(&pdev->dev, "Invalid FLASH.\n");
4488 goto err_out;
4489 }
4490
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004491 memcpy(ndev->perm_addr, ndev->dev_addr, ndev->addr_len);
4492
4493 /* Set up the default ring sizes. */
4494 qdev->tx_ring_size = NUM_TX_RING_ENTRIES;
4495 qdev->rx_ring_size = NUM_RX_RING_ENTRIES;
4496
4497 /* Set up the coalescing parameters. */
4498 qdev->rx_coalesce_usecs = DFLT_COALESCE_WAIT;
4499 qdev->tx_coalesce_usecs = DFLT_COALESCE_WAIT;
4500 qdev->rx_max_coalesced_frames = DFLT_INTER_FRAME_WAIT;
4501 qdev->tx_max_coalesced_frames = DFLT_INTER_FRAME_WAIT;
4502
4503 /*
4504 * Set up the operating parameters.
4505 */
4506 qdev->rx_csum = 1;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004507 qdev->workqueue = create_singlethread_workqueue(ndev->name);
4508 INIT_DELAYED_WORK(&qdev->asic_reset_work, ql_asic_reset_work);
4509 INIT_DELAYED_WORK(&qdev->mpi_reset_work, ql_mpi_reset_work);
4510 INIT_DELAYED_WORK(&qdev->mpi_work, ql_mpi_work);
Ron Mercerbcc2cb3b2009-03-02 08:07:32 +00004511 INIT_DELAYED_WORK(&qdev->mpi_port_cfg_work, ql_mpi_port_cfg_work);
Ron Mercer2ee1e272009-03-03 12:10:33 +00004512 INIT_DELAYED_WORK(&qdev->mpi_idc_work, ql_mpi_idc_work);
Ron Mercerbcc2cb3b2009-03-02 08:07:32 +00004513 init_completion(&qdev->ide_completion);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004514
4515 if (!cards_found) {
4516 dev_info(&pdev->dev, "%s\n", DRV_STRING);
4517 dev_info(&pdev->dev, "Driver name: %s, Version: %s.\n",
4518 DRV_NAME, DRV_VERSION);
4519 }
4520 return 0;
4521err_out:
4522 ql_release_all(pdev);
4523 pci_disable_device(pdev);
4524 return err;
4525}
4526
Stephen Hemminger25ed7842008-11-21 17:29:16 -08004527static const struct net_device_ops qlge_netdev_ops = {
4528 .ndo_open = qlge_open,
4529 .ndo_stop = qlge_close,
4530 .ndo_start_xmit = qlge_send,
4531 .ndo_change_mtu = qlge_change_mtu,
4532 .ndo_get_stats = qlge_get_stats,
4533 .ndo_set_multicast_list = qlge_set_multicast_list,
4534 .ndo_set_mac_address = qlge_set_mac_address,
4535 .ndo_validate_addr = eth_validate_addr,
4536 .ndo_tx_timeout = qlge_tx_timeout,
Ron Mercer01e6b952009-10-30 12:13:34 +00004537 .ndo_vlan_rx_register = qlge_vlan_rx_register,
4538 .ndo_vlan_rx_add_vid = qlge_vlan_rx_add_vid,
4539 .ndo_vlan_rx_kill_vid = qlge_vlan_rx_kill_vid,
Stephen Hemminger25ed7842008-11-21 17:29:16 -08004540};
4541
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004542static int __devinit qlge_probe(struct pci_dev *pdev,
4543 const struct pci_device_id *pci_entry)
4544{
4545 struct net_device *ndev = NULL;
4546 struct ql_adapter *qdev = NULL;
4547 static int cards_found = 0;
4548 int err = 0;
4549
Ron Mercer1e213302009-03-09 10:59:21 +00004550 ndev = alloc_etherdev_mq(sizeof(struct ql_adapter),
4551 min(MAX_CPUS, (int)num_online_cpus()));
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004552 if (!ndev)
4553 return -ENOMEM;
4554
4555 err = ql_init_device(pdev, ndev, cards_found);
4556 if (err < 0) {
4557 free_netdev(ndev);
4558 return err;
4559 }
4560
4561 qdev = netdev_priv(ndev);
4562 SET_NETDEV_DEV(ndev, &pdev->dev);
4563 ndev->features = (0
4564 | NETIF_F_IP_CSUM
4565 | NETIF_F_SG
4566 | NETIF_F_TSO
4567 | NETIF_F_TSO6
4568 | NETIF_F_TSO_ECN
4569 | NETIF_F_HW_VLAN_TX
4570 | NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER);
Ron Mercer22bdd4f2009-03-09 10:59:20 +00004571 ndev->features |= NETIF_F_GRO;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004572
4573 if (test_bit(QL_DMA64, &qdev->flags))
4574 ndev->features |= NETIF_F_HIGHDMA;
4575
4576 /*
4577 * Set up net_device structure.
4578 */
4579 ndev->tx_queue_len = qdev->tx_ring_size;
4580 ndev->irq = pdev->irq;
Stephen Hemminger25ed7842008-11-21 17:29:16 -08004581
4582 ndev->netdev_ops = &qlge_netdev_ops;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004583 SET_ETHTOOL_OPS(ndev, &qlge_ethtool_ops);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004584 ndev->watchdog_timeo = 10 * HZ;
Stephen Hemminger25ed7842008-11-21 17:29:16 -08004585
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004586 err = register_netdev(ndev);
4587 if (err) {
4588 dev_err(&pdev->dev, "net device registration failed.\n");
4589 ql_release_all(pdev);
4590 pci_disable_device(pdev);
4591 return err;
4592 }
Ron Mercer6a473302009-07-02 06:06:12 +00004593 ql_link_off(qdev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004594 ql_display_dev_info(ndev);
Ron Mercer9dfbbaa2009-10-30 12:13:33 +00004595 atomic_set(&qdev->lb_count, 0);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004596 cards_found++;
4597 return 0;
4598}
4599
Ron Mercer9dfbbaa2009-10-30 12:13:33 +00004600netdev_tx_t ql_lb_send(struct sk_buff *skb, struct net_device *ndev)
4601{
4602 return qlge_send(skb, ndev);
4603}
4604
4605int ql_clean_lb_rx_ring(struct rx_ring *rx_ring, int budget)
4606{
4607 return ql_clean_inbound_rx_ring(rx_ring, budget);
4608}
4609
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004610static void __devexit qlge_remove(struct pci_dev *pdev)
4611{
4612 struct net_device *ndev = pci_get_drvdata(pdev);
4613 unregister_netdev(ndev);
4614 ql_release_all(pdev);
4615 pci_disable_device(pdev);
4616 free_netdev(ndev);
4617}
4618
Ron Mercer6d190c62009-10-28 08:39:20 +00004619/* Clean up resources without touching hardware. */
4620static void ql_eeh_close(struct net_device *ndev)
4621{
4622 int i;
4623 struct ql_adapter *qdev = netdev_priv(ndev);
4624
4625 if (netif_carrier_ok(ndev)) {
4626 netif_carrier_off(ndev);
4627 netif_stop_queue(ndev);
4628 }
4629
4630 if (test_bit(QL_ADAPTER_UP, &qdev->flags))
4631 cancel_delayed_work_sync(&qdev->asic_reset_work);
4632 cancel_delayed_work_sync(&qdev->mpi_reset_work);
4633 cancel_delayed_work_sync(&qdev->mpi_work);
4634 cancel_delayed_work_sync(&qdev->mpi_idc_work);
4635 cancel_delayed_work_sync(&qdev->mpi_port_cfg_work);
4636
4637 for (i = 0; i < qdev->rss_ring_count; i++)
4638 netif_napi_del(&qdev->rx_ring[i].napi);
4639
4640 clear_bit(QL_ADAPTER_UP, &qdev->flags);
4641 ql_tx_ring_clean(qdev);
4642 ql_free_rx_buffers(qdev);
4643 ql_release_adapter_resources(qdev);
4644}
4645
/*
 * This callback is called by the PCI subsystem whenever
 * a PCI bus error is detected.
 */
static pci_ers_result_t qlge_io_error_detected(struct pci_dev *pdev,
					       enum pci_channel_state state)
{
	struct net_device *ndev = pci_get_drvdata(pdev);

	switch (state) {
	case pci_channel_io_normal:
		return PCI_ERS_RESULT_CAN_RECOVER;
	case pci_channel_io_frozen:
		netif_device_detach(ndev);
		if (netif_running(ndev))
			ql_eeh_close(ndev);
		pci_disable_device(pdev);
		return PCI_ERS_RESULT_NEED_RESET;
	case pci_channel_io_perm_failure:
		dev_err(&pdev->dev,
			"%s: pci_channel_io_perm_failure.\n", __func__);
		return PCI_ERS_RESULT_DISCONNECT;
	}

	/* Request a slot reset. */
	return PCI_ERS_RESULT_NEED_RESET;
}

/*
 * This callback is called after the PCI bus has been reset.
 * Basically, this tries to restart the card from scratch.
 * This is a shortened version of the device probe/discovery code;
 * it resembles the first half of the qlge_probe() routine.
 */
static pci_ers_result_t qlge_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *ndev = pci_get_drvdata(pdev);
	struct ql_adapter *qdev = netdev_priv(ndev);

	pdev->error_state = pci_channel_io_normal;

	pci_restore_state(pdev);
	if (pci_enable_device(pdev)) {
		QPRINTK(qdev, IFUP, ERR,
			"Cannot re-enable PCI device after reset.\n");
		return PCI_ERS_RESULT_DISCONNECT;
	}
	pci_set_master(pdev);
	return PCI_ERS_RESULT_RECOVERED;
}

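/* Final stage of EEH recovery: reset the chip and, if the interface was
 * up before the error, reopen it and re-attach the net device.
 */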
static void qlge_io_resume(struct pci_dev *pdev)
{
	struct net_device *ndev = pci_get_drvdata(pdev);
	struct ql_adapter *qdev = netdev_priv(ndev);
	int err = 0;

	if (ql_adapter_reset(qdev))
		QPRINTK(qdev, DRV, ERR, "reset FAILED!\n");
	if (netif_running(ndev)) {
		err = qlge_open(ndev);
		if (err) {
			QPRINTK(qdev, IFUP, ERR,
				"Device initialization failed after reset.\n");
			return;
		}
	} else {
		QPRINTK(qdev, IFUP, ERR,
			"Device was not running prior to EEH.\n");
	}
	netif_device_attach(ndev);
}

static struct pci_error_handlers qlge_err_handler = {
	.error_detected = qlge_io_error_detected,
	.slot_reset = qlge_io_slot_reset,
	.resume = qlge_io_resume,
};

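/* Also used by qlge_shutdown() below: take the interface down, arm
 * Wake-on-LAN and put the device into the requested low-power state.
 */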
static int qlge_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *ndev = pci_get_drvdata(pdev);
	struct ql_adapter *qdev = netdev_priv(ndev);
	int err;

	netif_device_detach(ndev);

	if (netif_running(ndev)) {
		err = ql_adapter_down(qdev);
		if (err)
			return err;
	}

	ql_wol(qdev);
	err = pci_save_state(pdev);
	if (err)
		return err;

	pci_disable_device(pdev);

	pci_set_power_state(pdev, pci_choose_state(pdev, state));

	return 0;
}

#ifdef CONFIG_PM
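/* Power the device back up, restore its PCI state, disarm Wake-on-LAN and,
 * if the interface was up when we suspended, bring the adapter back up.
 */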
static int qlge_resume(struct pci_dev *pdev)
{
	struct net_device *ndev = pci_get_drvdata(pdev);
	struct ql_adapter *qdev = netdev_priv(ndev);
	int err;

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	err = pci_enable_device(pdev);
	if (err) {
		QPRINTK(qdev, IFUP, ERR, "Cannot enable PCI device from suspend\n");
		return err;
	}
	pci_set_master(pdev);

	pci_enable_wake(pdev, PCI_D3hot, 0);
	pci_enable_wake(pdev, PCI_D3cold, 0);

	if (netif_running(ndev)) {
		err = ql_adapter_up(qdev);
		if (err)
			return err;
	}

	netif_device_attach(ndev);

	return 0;
}
#endif /* CONFIG_PM */

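/* Reuse the suspend path on shutdown/reboot so Wake-on-LAN stays armed. */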
static void qlge_shutdown(struct pci_dev *pdev)
{
	qlge_suspend(pdev, PMSG_SUSPEND);
}

static struct pci_driver qlge_driver = {
	.name = DRV_NAME,
	.id_table = qlge_pci_tbl,
	.probe = qlge_probe,
	.remove = __devexit_p(qlge_remove),
#ifdef CONFIG_PM
	.suspend = qlge_suspend,
	.resume = qlge_resume,
#endif
	.shutdown = qlge_shutdown,
	.err_handler = &qlge_err_handler
};

static int __init qlge_init_module(void)
{
	return pci_register_driver(&qlge_driver);
}

static void __exit qlge_exit(void)
{
	pci_unregister_driver(&qlge_driver);
}

module_init(qlge_init_module);
module_exit(qlge_exit);