/*
 * QLogic qlge NIC HBA Driver
 * Copyright (c) 2003-2008 QLogic Corporation
 * See LICENSE.qlge for copyright and licensing details.
 * Author: Linux qlge network device driver by
 *         Ron Mercer <ron.mercer@qlogic.com>
 */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/pagemap.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/dmapool.h>
#include <linux/mempool.h>
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/interrupt.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <net/ipv6.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/if_arp.h>
#include <linux/if_ether.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/skbuff.h>
#include <linux/if_vlan.h>
#include <linux/delay.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <net/ip6_checksum.h>

#include "qlge.h"

char qlge_driver_name[] = DRV_NAME;
const char qlge_driver_version[] = DRV_VERSION;

MODULE_AUTHOR("Ron Mercer <ron.mercer@qlogic.com>");
MODULE_DESCRIPTION(DRV_STRING " ");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

static const u32 default_msg =
    NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK |
/* NETIF_MSG_TIMER |	*/
    NETIF_MSG_IFDOWN |
    NETIF_MSG_IFUP |
    NETIF_MSG_RX_ERR |
    NETIF_MSG_TX_ERR |
/*  NETIF_MSG_TX_QUEUED | */
/*  NETIF_MSG_INTR | NETIF_MSG_TX_DONE | NETIF_MSG_RX_STATUS | */
/* NETIF_MSG_PKTDATA | */
    NETIF_MSG_HW | NETIF_MSG_WOL | 0;

static int debug = 0x00007fff;	/* defaults above */
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");

#define MSIX_IRQ 0
#define MSI_IRQ 1
#define LEG_IRQ 2
static int qlge_irq_type = MSIX_IRQ;
module_param(qlge_irq_type, int, 0);
MODULE_PARM_DESC(qlge_irq_type, "0 = MSI-X, 1 = MSI, 2 = Legacy.");

static int qlge_mpi_coredump;
module_param(qlge_mpi_coredump, int, 0);
MODULE_PARM_DESC(qlge_mpi_coredump,
		"Option to enable MPI firmware dump. "
		"Default is OFF - Do not allocate memory.");

static int qlge_force_coredump;
module_param(qlge_force_coredump, int, 0);
MODULE_PARM_DESC(qlge_force_coredump,
		"Option to allow force of firmware core dump. "
		"Default is OFF - Do not allow.");

static DEFINE_PCI_DEVICE_TABLE(qlge_pci_tbl) = {
	{PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID_8012)},
	{PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID_8000)},
	/* required last entry */
	{0,}
};

MODULE_DEVICE_TABLE(pci, qlge_pci_tbl);

/* This hardware semaphore causes exclusive access to
 * resources shared between the NIC driver, MPI firmware,
 * FCOE firmware and the FC driver.
 */
static int ql_sem_trylock(struct ql_adapter *qdev, u32 sem_mask)
{
	u32 sem_bits = 0;

	switch (sem_mask) {
	case SEM_XGMAC0_MASK:
		sem_bits = SEM_SET << SEM_XGMAC0_SHIFT;
		break;
	case SEM_XGMAC1_MASK:
		sem_bits = SEM_SET << SEM_XGMAC1_SHIFT;
		break;
	case SEM_ICB_MASK:
		sem_bits = SEM_SET << SEM_ICB_SHIFT;
		break;
	case SEM_MAC_ADDR_MASK:
		sem_bits = SEM_SET << SEM_MAC_ADDR_SHIFT;
		break;
	case SEM_FLASH_MASK:
		sem_bits = SEM_SET << SEM_FLASH_SHIFT;
		break;
	case SEM_PROBE_MASK:
		sem_bits = SEM_SET << SEM_PROBE_SHIFT;
		break;
	case SEM_RT_IDX_MASK:
		sem_bits = SEM_SET << SEM_RT_IDX_SHIFT;
		break;
	case SEM_PROC_REG_MASK:
		sem_bits = SEM_SET << SEM_PROC_REG_SHIFT;
		break;
	default:
		netif_alert(qdev, probe, qdev->ndev, "bad semaphore mask!\n");
		return -EINVAL;
	}

	ql_write32(qdev, SEM, sem_bits | sem_mask);
	return !(ql_read32(qdev, SEM) & sem_bits);
}

int ql_sem_spinlock(struct ql_adapter *qdev, u32 sem_mask)
{
	unsigned int wait_count = 30;
	do {
		if (!ql_sem_trylock(qdev, sem_mask))
			return 0;
		udelay(100);
	} while (--wait_count);
	return -ETIMEDOUT;
}

void ql_sem_unlock(struct ql_adapter *qdev, u32 sem_mask)
{
	ql_write32(qdev, SEM, sem_mask);
	ql_read32(qdev, SEM);	/* flush */
}
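
/*
 * Usage sketch (illustrative only): shared-resource access is
 * bracketed by lock/unlock on the resource's semaphore mask, as the
 * flash readers below do:
 *
 *	if (ql_sem_spinlock(qdev, SEM_FLASH_MASK))
 *		return -ETIMEDOUT;
 *	...access the flash registers...
 *	ql_sem_unlock(qdev, SEM_FLASH_MASK);
 */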

/* This function waits for a specific bit to come ready
 * in a given register.  It is used mostly by the initialize
 * process, but is also used in kernel thread API such as
 * netdev->set_multi, netdev->set_mac_address, netdev->vlan_rx_add_vid.
 */
int ql_wait_reg_rdy(struct ql_adapter *qdev, u32 reg, u32 bit, u32 err_bit)
{
	u32 temp;
	int count = UDELAY_COUNT;

	while (count) {
		temp = ql_read32(qdev, reg);

		/* check for errors */
		if (temp & err_bit) {
			netif_alert(qdev, probe, qdev->ndev,
				    "register 0x%.08x access error, value = 0x%.08x!\n",
				    reg, temp);
			return -EIO;
		} else if (temp & bit)
			return 0;
		udelay(UDELAY_DELAY);
		count--;
	}
	netif_alert(qdev, probe, qdev->ndev,
		    "Timed out waiting for reg %x to come ready.\n", reg);
	return -ETIMEDOUT;
}
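
/*
 * Timing note (assumes the UDELAY_COUNT and UDELAY_DELAY values from
 * qlge.h): the poll above busy-waits at most UDELAY_COUNT *
 * UDELAY_DELAY microseconds before returning -ETIMEDOUT.
 */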

/* The CFG register is used to download TX and RX control blocks
 * to the chip. This function waits for an operation to complete.
 */
static int ql_wait_cfg(struct ql_adapter *qdev, u32 bit)
{
	int count = UDELAY_COUNT;
	u32 temp;

	while (count) {
		temp = ql_read32(qdev, CFG);
		if (temp & CFG_LE)
			return -EIO;
		if (!(temp & bit))
			return 0;
		udelay(UDELAY_DELAY);
		count--;
	}
	return -ETIMEDOUT;
}

/* Used to issue init control blocks to hw.  Maps control block,
 * sets address, triggers download, waits for completion.
 */
int ql_write_cfg(struct ql_adapter *qdev, void *ptr, int size, u32 bit,
		 u16 q_id)
{
	u64 map;
	int status = 0;
	int direction;
	u32 mask;
	u32 value;

	direction =
	    (bit & (CFG_LRQ | CFG_LR | CFG_LCQ)) ? PCI_DMA_TODEVICE :
	    PCI_DMA_FROMDEVICE;

	map = pci_map_single(qdev->pdev, ptr, size, direction);
	if (pci_dma_mapping_error(qdev->pdev, map)) {
		netif_err(qdev, ifup, qdev->ndev, "Couldn't map DMA area.\n");
		return -ENOMEM;
	}

	status = ql_sem_spinlock(qdev, SEM_ICB_MASK);
	if (status)
		goto lock_failed;

	status = ql_wait_cfg(qdev, bit);
	if (status) {
		netif_err(qdev, ifup, qdev->ndev,
			  "Timed out waiting for CFG to come ready.\n");
		goto exit;
	}

	ql_write32(qdev, ICB_L, (u32) map);
	ql_write32(qdev, ICB_H, (u32) (map >> 32));

	mask = CFG_Q_MASK | (bit << 16);
	value = bit | (q_id << CFG_Q_SHIFT);
	ql_write32(qdev, CFG, (mask | value));

	/*
	 * Wait for the bit to clear after signaling hw.
	 */
	status = ql_wait_cfg(qdev, bit);
exit:
	ql_sem_unlock(qdev, SEM_ICB_MASK);	/* does flush too */
lock_failed:
	pci_unmap_single(qdev->pdev, map, size, direction);
	return status;
}
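
/*
 * Usage sketch (illustrative; the cqicb member and exact call shape
 * are assumed from qlge.h): a completion queue init control block is
 * downloaded with:
 *
 *	status = ql_write_cfg(qdev, &rx_ring->cqicb,
 *			      sizeof(struct cqicb), CFG_LCQ,
 *			      rx_ring->cq_id);
 */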

/* Get a specific MAC address from the CAM.  Used for debug and reg dump. */
int ql_get_mac_addr_reg(struct ql_adapter *qdev, u32 type, u16 index,
			u32 *value)
{
	u32 offset = 0;
	int status;

	switch (type) {
	case MAC_ADDR_TYPE_MULTI_MAC:
	case MAC_ADDR_TYPE_CAM_MAC:
		{
			status =
			    ql_wait_reg_rdy(qdev,
				MAC_ADDR_IDX, MAC_ADDR_MW, 0);
			if (status)
				goto exit;
			ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
				   (index << MAC_ADDR_IDX_SHIFT) | /* index */
				   MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
			status =
			    ql_wait_reg_rdy(qdev,
				MAC_ADDR_IDX, MAC_ADDR_MR, 0);
			if (status)
				goto exit;
			*value++ = ql_read32(qdev, MAC_ADDR_DATA);
			status =
			    ql_wait_reg_rdy(qdev,
				MAC_ADDR_IDX, MAC_ADDR_MW, 0);
			if (status)
				goto exit;
			ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
				   (index << MAC_ADDR_IDX_SHIFT) | /* index */
				   MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
			status =
			    ql_wait_reg_rdy(qdev,
				MAC_ADDR_IDX, MAC_ADDR_MR, 0);
			if (status)
				goto exit;
			*value++ = ql_read32(qdev, MAC_ADDR_DATA);
			if (type == MAC_ADDR_TYPE_CAM_MAC) {
				status =
				    ql_wait_reg_rdy(qdev,
					MAC_ADDR_IDX, MAC_ADDR_MW, 0);
				if (status)
					goto exit;
				ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
					   (index << MAC_ADDR_IDX_SHIFT) | /* index */
					   MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
				status =
				    ql_wait_reg_rdy(qdev, MAC_ADDR_IDX,
					MAC_ADDR_MR, 0);
				if (status)
					goto exit;
				*value++ = ql_read32(qdev, MAC_ADDR_DATA);
			}
			break;
		}
	case MAC_ADDR_TYPE_VLAN:
	case MAC_ADDR_TYPE_MULTI_FLTR:
	default:
		netif_crit(qdev, ifup, qdev->ndev,
			   "Address type %d not yet supported.\n", type);
		status = -EPERM;
	}
exit:
	return status;
}

/* Set up a MAC, multicast or VLAN address for the
 * inbound frame matching.
 */
static int ql_set_mac_addr_reg(struct ql_adapter *qdev, u8 *addr, u32 type,
			       u16 index)
{
	u32 offset = 0;
	int status = 0;

	switch (type) {
	case MAC_ADDR_TYPE_MULTI_MAC:
		{
			u32 upper = (addr[0] << 8) | addr[1];
			u32 lower = (addr[2] << 24) | (addr[3] << 16) |
					(addr[4] << 8) | (addr[5]);

			status =
				ql_wait_reg_rdy(qdev,
					MAC_ADDR_IDX, MAC_ADDR_MW, 0);
			if (status)
				goto exit;
			ql_write32(qdev, MAC_ADDR_IDX, (offset++) |
				(index << MAC_ADDR_IDX_SHIFT) |
				type | MAC_ADDR_E);
			ql_write32(qdev, MAC_ADDR_DATA, lower);
			status =
				ql_wait_reg_rdy(qdev,
					MAC_ADDR_IDX, MAC_ADDR_MW, 0);
			if (status)
				goto exit;
			ql_write32(qdev, MAC_ADDR_IDX, (offset++) |
				(index << MAC_ADDR_IDX_SHIFT) |
				type | MAC_ADDR_E);

			ql_write32(qdev, MAC_ADDR_DATA, upper);
			status =
				ql_wait_reg_rdy(qdev,
					MAC_ADDR_IDX, MAC_ADDR_MW, 0);
			if (status)
				goto exit;
			break;
		}
	case MAC_ADDR_TYPE_CAM_MAC:
		{
			u32 cam_output;
			u32 upper = (addr[0] << 8) | addr[1];
			u32 lower =
			    (addr[2] << 24) | (addr[3] << 16) | (addr[4] << 8) |
			    (addr[5]);

			netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
				     "Adding %s address %pM at index %d in the CAM.\n",
				     type == MAC_ADDR_TYPE_MULTI_MAC ?
				     "MULTICAST" : "UNICAST",
				     addr, index);

			status =
			    ql_wait_reg_rdy(qdev,
				MAC_ADDR_IDX, MAC_ADDR_MW, 0);
			if (status)
				goto exit;
			ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
				   (index << MAC_ADDR_IDX_SHIFT) | /* index */
				   type);	/* type */
			ql_write32(qdev, MAC_ADDR_DATA, lower);
			status =
			    ql_wait_reg_rdy(qdev,
				MAC_ADDR_IDX, MAC_ADDR_MW, 0);
			if (status)
				goto exit;
			ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
				   (index << MAC_ADDR_IDX_SHIFT) | /* index */
				   type);	/* type */
			ql_write32(qdev, MAC_ADDR_DATA, upper);
			status =
			    ql_wait_reg_rdy(qdev,
				MAC_ADDR_IDX, MAC_ADDR_MW, 0);
			if (status)
				goto exit;
			ql_write32(qdev, MAC_ADDR_IDX, (offset) | /* offset */
				   (index << MAC_ADDR_IDX_SHIFT) | /* index */
				   type);	/* type */
			/* This field should also include the queue id
			   and possibly the function id.  Right now we hardcode
			   the route field to NIC core.
			 */
			cam_output = (CAM_OUT_ROUTE_NIC |
				      (qdev->func << CAM_OUT_FUNC_SHIFT) |
				      (0 << CAM_OUT_CQ_ID_SHIFT));
			if (qdev->vlgrp)
				cam_output |= CAM_OUT_RV;
			/* route to NIC core */
			ql_write32(qdev, MAC_ADDR_DATA, cam_output);
			break;
		}
	case MAC_ADDR_TYPE_VLAN:
		{
			u32 enable_bit = *((u32 *) &addr[0]);
			/* For VLAN, the addr actually holds a bit that
			 * either enables or disables the vlan id we are
			 * addressing. It's either MAC_ADDR_E on or off.
			 * That's bit-27 we're talking about.
			 */
			netif_info(qdev, ifup, qdev->ndev,
				   "%s VLAN ID %d %s the CAM.\n",
				   enable_bit ? "Adding" : "Removing",
				   index,
				   enable_bit ? "to" : "from");

			status =
			    ql_wait_reg_rdy(qdev,
				MAC_ADDR_IDX, MAC_ADDR_MW, 0);
			if (status)
				goto exit;
			ql_write32(qdev, MAC_ADDR_IDX, offset |	/* offset */
				   (index << MAC_ADDR_IDX_SHIFT) |	/* index */
				   type |	/* type */
				   enable_bit);	/* enable/disable */
			break;
		}
	case MAC_ADDR_TYPE_MULTI_FLTR:
	default:
		netif_crit(qdev, ifup, qdev->ndev,
			   "Address type %d not yet supported.\n", type);
		status = -EPERM;
	}
exit:
	return status;
}

/* Set or clear MAC address in hardware. We sometimes
 * have to clear it to prevent wrong frame routing
 * especially in a bonding environment.
 */
static int ql_set_mac_addr(struct ql_adapter *qdev, int set)
{
	int status;
	char zero_mac_addr[ETH_ALEN];
	char *addr;

	if (set) {
		addr = &qdev->ndev->dev_addr[0];
		netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
			     "Set MAC addr %pM\n", addr);
	} else {
		memset(zero_mac_addr, 0, ETH_ALEN);
		addr = &zero_mac_addr[0];
		netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
			     "Clearing MAC address\n");
	}
	status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
	if (status)
		return status;
	status = ql_set_mac_addr_reg(qdev, (u8 *) addr,
			MAC_ADDR_TYPE_CAM_MAC, qdev->func * MAX_CQ);
	ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
	if (status)
		netif_err(qdev, ifup, qdev->ndev,
			  "Failed to init mac address.\n");
	return status;
}

void ql_link_on(struct ql_adapter *qdev)
{
	netif_err(qdev, link, qdev->ndev, "Link is up.\n");
	netif_carrier_on(qdev->ndev);
	ql_set_mac_addr(qdev, 1);
}

void ql_link_off(struct ql_adapter *qdev)
{
	netif_err(qdev, link, qdev->ndev, "Link is down.\n");
	netif_carrier_off(qdev->ndev);
	ql_set_mac_addr(qdev, 0);
}

/* Get a specific frame routing value from the CAM.
 * Used for debug and reg dump.
 */
int ql_get_routing_reg(struct ql_adapter *qdev, u32 index, u32 *value)
{
	int status = 0;

	status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MW, 0);
	if (status)
		goto exit;

	ql_write32(qdev, RT_IDX,
		   RT_IDX_TYPE_NICQ | RT_IDX_RS | (index << RT_IDX_IDX_SHIFT));
	status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MR, 0);
	if (status)
		goto exit;
	*value = ql_read32(qdev, RT_DATA);
exit:
	return status;
}

/* The NIC function for this chip has 16 routing indexes.  Each one can be used
 * to route different frame types to various inbound queues.  We send broadcast/
 * multicast/error frames to the default queue for slow handling,
 * and CAM hit/RSS frames to the fast handling queues.
 */
static int ql_set_routing_reg(struct ql_adapter *qdev, u32 index, u32 mask,
			      int enable)
{
	int status = -EINVAL; /* Return error if no mask match. */
	u32 value = 0;

	netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
		     "%s %s mask %s the routing reg.\n",
		     enable ? "Adding" : "Removing",
		     index == RT_IDX_ALL_ERR_SLOT ? "MAC ERROR/ALL ERROR" :
		     index == RT_IDX_IP_CSUM_ERR_SLOT ? "IP CSUM ERROR" :
		     index == RT_IDX_TCP_UDP_CSUM_ERR_SLOT ? "TCP/UDP CSUM ERROR" :
		     index == RT_IDX_BCAST_SLOT ? "BROADCAST" :
		     index == RT_IDX_MCAST_MATCH_SLOT ? "MULTICAST MATCH" :
		     index == RT_IDX_ALLMULTI_SLOT ? "ALL MULTICAST MATCH" :
		     index == RT_IDX_UNUSED6_SLOT ? "UNUSED6" :
		     index == RT_IDX_UNUSED7_SLOT ? "UNUSED7" :
		     index == RT_IDX_RSS_MATCH_SLOT ? "RSS ALL/IPV4 MATCH" :
		     index == RT_IDX_RSS_IPV6_SLOT ? "RSS IPV6" :
		     index == RT_IDX_RSS_TCP4_SLOT ? "RSS TCP4" :
		     index == RT_IDX_RSS_TCP6_SLOT ? "RSS TCP6" :
		     index == RT_IDX_CAM_HIT_SLOT ? "CAM HIT" :
		     index == RT_IDX_UNUSED013 ? "UNUSED13" :
		     index == RT_IDX_UNUSED014 ? "UNUSED14" :
		     index == RT_IDX_PROMISCUOUS_SLOT ? "PROMISCUOUS" :
		     "(Bad index != RT_IDX)",
		     enable ? "to" : "from");

	switch (mask) {
	case RT_IDX_CAM_HIT:
		{
			value = RT_IDX_DST_CAM_Q |	/* dest */
			    RT_IDX_TYPE_NICQ |	/* type */
			    (RT_IDX_CAM_HIT_SLOT << RT_IDX_IDX_SHIFT);/* index */
			break;
		}
	case RT_IDX_VALID:	/* Promiscuous Mode frames. */
		{
			value = RT_IDX_DST_DFLT_Q |	/* dest */
			    RT_IDX_TYPE_NICQ |	/* type */
			    (RT_IDX_PROMISCUOUS_SLOT << RT_IDX_IDX_SHIFT);/* index */
			break;
		}
	case RT_IDX_ERR:	/* Pass up MAC,IP,TCP/UDP error frames. */
		{
			value = RT_IDX_DST_DFLT_Q |	/* dest */
			    RT_IDX_TYPE_NICQ |	/* type */
			    (RT_IDX_ALL_ERR_SLOT << RT_IDX_IDX_SHIFT);/* index */
			break;
		}
	case RT_IDX_BCAST:	/* Pass up Broadcast frames to default Q. */
		{
			value = RT_IDX_DST_DFLT_Q |	/* dest */
			    RT_IDX_TYPE_NICQ |	/* type */
			    (RT_IDX_BCAST_SLOT << RT_IDX_IDX_SHIFT);/* index */
			break;
		}
	case RT_IDX_MCAST:	/* Pass up All Multicast frames. */
		{
			value = RT_IDX_DST_DFLT_Q |	/* dest */
			    RT_IDX_TYPE_NICQ |	/* type */
			    (RT_IDX_ALLMULTI_SLOT << RT_IDX_IDX_SHIFT);/* index */
			break;
		}
	case RT_IDX_MCAST_MATCH:	/* Pass up matched Multicast frames. */
		{
			value = RT_IDX_DST_DFLT_Q |	/* dest */
			    RT_IDX_TYPE_NICQ |	/* type */
			    (RT_IDX_MCAST_MATCH_SLOT << RT_IDX_IDX_SHIFT);/* index */
			break;
		}
	case RT_IDX_RSS_MATCH:	/* Pass up matched RSS frames. */
		{
			value = RT_IDX_DST_RSS |	/* dest */
			    RT_IDX_TYPE_NICQ |	/* type */
			    (RT_IDX_RSS_MATCH_SLOT << RT_IDX_IDX_SHIFT);/* index */
			break;
		}
	case 0:		/* Clear the E-bit on an entry. */
		{
			value = RT_IDX_DST_DFLT_Q |	/* dest */
			    RT_IDX_TYPE_NICQ |	/* type */
			    (index << RT_IDX_IDX_SHIFT);/* index */
			break;
		}
	default:
		netif_err(qdev, ifup, qdev->ndev,
			  "Mask type %d not yet supported.\n", mask);
		status = -EPERM;
		goto exit;
	}

	if (value) {
		status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MW, 0);
		if (status)
			goto exit;
		value |= (enable ? RT_IDX_E : 0);
		ql_write32(qdev, RT_IDX, value);
		ql_write32(qdev, RT_DATA, enable ? mask : 0);
	}
exit:
	return status;
}
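
/*
 * Usage sketch (illustrative): broadcast frames are steered to the
 * default (slow-path) queue by enabling the broadcast slot:
 *
 *	status = ql_set_routing_reg(qdev, RT_IDX_BCAST_SLOT,
 *				    RT_IDX_BCAST, 1);
 */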

static void ql_enable_interrupts(struct ql_adapter *qdev)
{
	ql_write32(qdev, INTR_EN, (INTR_EN_EI << 16) | INTR_EN_EI);
}

static void ql_disable_interrupts(struct ql_adapter *qdev)
{
	ql_write32(qdev, INTR_EN, (INTR_EN_EI << 16));
}

/* If we're running with multiple MSI-X vectors then we enable on the fly.
 * Otherwise, we may have multiple outstanding workers and don't want to
 * enable until the last one finishes. In this case, the irq_cnt gets
 * incremented every time we queue a worker and decremented every time
 * a worker finishes.  Once it hits zero we enable the interrupt.
 */
u32 ql_enable_completion_interrupt(struct ql_adapter *qdev, u32 intr)
{
	u32 var = 0;
	unsigned long hw_flags = 0;
	struct intr_context *ctx = qdev->intr_context + intr;

	if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags) && intr)) {
		/* Always enable if we're MSIX multi interrupts and
		 * it's not the default (zeroth) interrupt.
		 */
		ql_write32(qdev, INTR_EN,
			   ctx->intr_en_mask);
		var = ql_read32(qdev, STS);
		return var;
	}

	spin_lock_irqsave(&qdev->hw_lock, hw_flags);
	if (atomic_dec_and_test(&ctx->irq_cnt)) {
		ql_write32(qdev, INTR_EN,
			   ctx->intr_en_mask);
		var = ql_read32(qdev, STS);
	}
	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
	return var;
}

static u32 ql_disable_completion_interrupt(struct ql_adapter *qdev, u32 intr)
{
	u32 var = 0;
	struct intr_context *ctx;

	/* HW disables for us if we're MSIX multi interrupts and
	 * it's not the default (zeroth) interrupt.
	 */
	if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags) && intr))
		return 0;

	ctx = qdev->intr_context + intr;
	spin_lock(&qdev->hw_lock);
	if (!atomic_read(&ctx->irq_cnt)) {
		ql_write32(qdev, INTR_EN,
			   ctx->intr_dis_mask);
		var = ql_read32(qdev, STS);
	}
	atomic_inc(&ctx->irq_cnt);
	spin_unlock(&qdev->hw_lock);
	return var;
}

static void ql_enable_all_completion_interrupts(struct ql_adapter *qdev)
{
	int i;
	for (i = 0; i < qdev->intr_count; i++) {
		/* The enable call does an atomic_dec_and_test
		 * and enables only if the result is zero.
		 * So we precharge it here.
		 */
		if (unlikely(!test_bit(QL_MSIX_ENABLED, &qdev->flags) ||
			     i == 0))
			atomic_set(&qdev->intr_context[i].irq_cnt, 1);
		ql_enable_completion_interrupt(qdev, i);
	}
}
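
/*
 * Walk-through of the precharge above (informational): with irq_cnt
 * preset to 1, the first ql_enable_completion_interrupt() call
 * decrements it to 0 and writes the enable mask; paired disable/enable
 * calls from workers keep the count balanced, so the interrupt is only
 * re-armed by the last worker to finish.
 */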

static int ql_validate_flash(struct ql_adapter *qdev, u32 size, const char *str)
{
	int status, i;
	u16 csum = 0;
	__le16 *flash = (__le16 *)&qdev->flash;

	status = strncmp((char *)&qdev->flash, str, 4);
	if (status) {
		netif_err(qdev, ifup, qdev->ndev, "Invalid flash signature.\n");
		return status;
	}

	for (i = 0; i < size; i++)
		csum += le16_to_cpu(*flash++);

	if (csum)
		netif_err(qdev, ifup, qdev->ndev,
			  "Invalid flash checksum, csum = 0x%.04x.\n", csum);

	return csum;
}

static int ql_read_flash_word(struct ql_adapter *qdev, int offset, __le32 *data)
{
	int status = 0;
	/* wait for reg to come ready */
	status = ql_wait_reg_rdy(qdev,
			FLASH_ADDR, FLASH_ADDR_RDY, FLASH_ADDR_ERR);
	if (status)
		goto exit;
	/* set up for reg read */
	ql_write32(qdev, FLASH_ADDR, FLASH_ADDR_R | offset);
	/* wait for reg to come ready */
	status = ql_wait_reg_rdy(qdev,
			FLASH_ADDR, FLASH_ADDR_RDY, FLASH_ADDR_ERR);
	if (status)
		goto exit;
	/* This data is stored on flash as an array of
	 * __le32.  Since ql_read32() returns cpu endian
	 * we need to swap it back.
	 */
	*data = cpu_to_le32(ql_read32(qdev, FLASH_DATA));
exit:
	return status;
}

static int ql_get_8000_flash_params(struct ql_adapter *qdev)
{
	u32 i, size;
	int status;
	__le32 *p = (__le32 *)&qdev->flash;
	u32 offset;
	u8 mac_addr[6];

	/* Get flash offset for function and adjust
	 * for dword access.
	 */
	if (!qdev->port)
		offset = FUNC0_FLASH_OFFSET / sizeof(u32);
	else
		offset = FUNC1_FLASH_OFFSET / sizeof(u32);

	if (ql_sem_spinlock(qdev, SEM_FLASH_MASK))
		return -ETIMEDOUT;

	size = sizeof(struct flash_params_8000) / sizeof(u32);
	for (i = 0; i < size; i++, p++) {
		status = ql_read_flash_word(qdev, i+offset, p);
		if (status) {
			netif_err(qdev, ifup, qdev->ndev,
				  "Error reading flash.\n");
			goto exit;
		}
	}

	status = ql_validate_flash(qdev,
			sizeof(struct flash_params_8000) / sizeof(u16),
			"8000");
	if (status) {
		netif_err(qdev, ifup, qdev->ndev, "Invalid flash.\n");
		status = -EINVAL;
		goto exit;
	}

	/* Extract either manufacturer or BOFM modified
	 * MAC address.
	 */
	if (qdev->flash.flash_params_8000.data_type1 == 2)
		memcpy(mac_addr,
		       qdev->flash.flash_params_8000.mac_addr1,
		       qdev->ndev->addr_len);
	else
		memcpy(mac_addr,
		       qdev->flash.flash_params_8000.mac_addr,
		       qdev->ndev->addr_len);

	if (!is_valid_ether_addr(mac_addr)) {
		netif_err(qdev, ifup, qdev->ndev, "Invalid MAC address.\n");
		status = -EINVAL;
		goto exit;
	}

	memcpy(qdev->ndev->dev_addr,
	       mac_addr,
	       qdev->ndev->addr_len);

exit:
	ql_sem_unlock(qdev, SEM_FLASH_MASK);
	return status;
}

static int ql_get_8012_flash_params(struct ql_adapter *qdev)
{
	int i;
	int status;
	__le32 *p = (__le32 *)&qdev->flash;
	u32 offset = 0;
	u32 size = sizeof(struct flash_params_8012) / sizeof(u32);

	/* Second function's parameters follow the first
	 * function's.
	 */
	if (qdev->port)
		offset = size;

	if (ql_sem_spinlock(qdev, SEM_FLASH_MASK))
		return -ETIMEDOUT;

	for (i = 0; i < size; i++, p++) {
		status = ql_read_flash_word(qdev, i+offset, p);
		if (status) {
			netif_err(qdev, ifup, qdev->ndev,
				  "Error reading flash.\n");
			goto exit;
		}
	}

	status = ql_validate_flash(qdev,
			sizeof(struct flash_params_8012) / sizeof(u16),
			"8012");
	if (status) {
		netif_err(qdev, ifup, qdev->ndev, "Invalid flash.\n");
		status = -EINVAL;
		goto exit;
	}

	if (!is_valid_ether_addr(qdev->flash.flash_params_8012.mac_addr)) {
		status = -EINVAL;
		goto exit;
	}

	memcpy(qdev->ndev->dev_addr,
	       qdev->flash.flash_params_8012.mac_addr,
	       qdev->ndev->addr_len);

exit:
	ql_sem_unlock(qdev, SEM_FLASH_MASK);
	return status;
}

/* xgmac registers are located behind the xgmac_addr and xgmac_data
 * register pair.  Each read/write requires us to wait for the ready
 * bit before reading/writing the data.
 */
static int ql_write_xgmac_reg(struct ql_adapter *qdev, u32 reg, u32 data)
{
	int status;
	/* wait for reg to come ready */
	status = ql_wait_reg_rdy(qdev,
			XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
	if (status)
		return status;
	/* write the data to the data reg */
	ql_write32(qdev, XGMAC_DATA, data);
	/* trigger the write */
	ql_write32(qdev, XGMAC_ADDR, reg);
	return status;
}

/* xgmac registers are located behind the xgmac_addr and xgmac_data
 * register pair.  Each read/write requires us to wait for the ready
 * bit before reading/writing the data.
 */
int ql_read_xgmac_reg(struct ql_adapter *qdev, u32 reg, u32 *data)
{
	int status = 0;
	/* wait for reg to come ready */
	status = ql_wait_reg_rdy(qdev,
			XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
	if (status)
		goto exit;
	/* set up for reg read */
	ql_write32(qdev, XGMAC_ADDR, reg | XGMAC_ADDR_R);
	/* wait for reg to come ready */
	status = ql_wait_reg_rdy(qdev,
			XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
	if (status)
		goto exit;
	/* get the data */
	*data = ql_read32(qdev, XGMAC_DATA);
exit:
	return status;
}

/* This is used for reading the 64-bit statistics regs. */
int ql_read_xgmac_reg64(struct ql_adapter *qdev, u32 reg, u64 *data)
{
	int status = 0;
	u32 hi = 0;
	u32 lo = 0;

	status = ql_read_xgmac_reg(qdev, reg, &lo);
	if (status)
		goto exit;

	status = ql_read_xgmac_reg(qdev, reg + 4, &hi);
	if (status)
		goto exit;

	*data = (u64) lo | ((u64) hi << 32);

exit:
	return status;
}
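
/*
 * Usage sketch (illustrative; the 0x200 offset is hypothetical, real
 * statistics offsets live in qlge.h): a 64-bit MAC statistic is read
 * as two 32-bit halves behind the address/data pair:
 *
 *	u64 tx_pkts;
 *
 *	if (!ql_read_xgmac_reg64(qdev, 0x200, &tx_pkts))
 *		...use tx_pkts...
 */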

static int ql_8000_port_initialize(struct ql_adapter *qdev)
{
	int status;
	/*
	 * Get MPI firmware version for driver banner
	 * and ethtool info.
	 */
	status = ql_mb_about_fw(qdev);
	if (status)
		goto exit;
	status = ql_mb_get_fw_state(qdev);
	if (status)
		goto exit;
	/* Wake up a worker to get/set the TX/RX frame sizes. */
	queue_delayed_work(qdev->workqueue, &qdev->mpi_port_cfg_work, 0);
exit:
	return status;
}

/* Take the MAC Core out of reset.
 * Enable statistics counting.
 * Take the transmitter/receiver out of reset.
 * This functionality may be done in the MPI firmware at a
 * later date.
 */
static int ql_8012_port_initialize(struct ql_adapter *qdev)
{
	int status = 0;
	u32 data;

	if (ql_sem_trylock(qdev, qdev->xg_sem_mask)) {
		/* Another function has the semaphore, so
		 * wait for the port init bit to come ready.
		 */
		netif_info(qdev, link, qdev->ndev,
			   "Another function has the semaphore, so wait for the port init bit to come ready.\n");
		status = ql_wait_reg_rdy(qdev, STS, qdev->port_init, 0);
		if (status) {
			netif_crit(qdev, link, qdev->ndev,
				   "Port initialize timed out.\n");
		}
		return status;
	}

	netif_info(qdev, link, qdev->ndev, "Got xgmac semaphore!\n");
	/* Set the core reset. */
	status = ql_read_xgmac_reg(qdev, GLOBAL_CFG, &data);
	if (status)
		goto end;
	data |= GLOBAL_CFG_RESET;
	status = ql_write_xgmac_reg(qdev, GLOBAL_CFG, data);
	if (status)
		goto end;

	/* Clear the core reset and turn on jumbo for receiver. */
	data &= ~GLOBAL_CFG_RESET;	/* Clear core reset. */
	data |= GLOBAL_CFG_JUMBO;	/* Turn on jumbo. */
	data |= GLOBAL_CFG_TX_STAT_EN;
	data |= GLOBAL_CFG_RX_STAT_EN;
	status = ql_write_xgmac_reg(qdev, GLOBAL_CFG, data);
	if (status)
		goto end;

	/* Enable transmitter, and clear its reset. */
	status = ql_read_xgmac_reg(qdev, TX_CFG, &data);
	if (status)
		goto end;
	data &= ~TX_CFG_RESET;	/* Clear the TX MAC reset. */
	data |= TX_CFG_EN;	/* Enable the transmitter. */
	status = ql_write_xgmac_reg(qdev, TX_CFG, data);
	if (status)
		goto end;

	/* Enable receiver and clear its reset. */
	status = ql_read_xgmac_reg(qdev, RX_CFG, &data);
	if (status)
		goto end;
	data &= ~RX_CFG_RESET;	/* Clear the RX MAC reset. */
	data |= RX_CFG_EN;	/* Enable the receiver. */
	status = ql_write_xgmac_reg(qdev, RX_CFG, data);
	if (status)
		goto end;

	/* Turn on jumbo. */
	status =
	    ql_write_xgmac_reg(qdev, MAC_TX_PARAMS, MAC_TX_PARAMS_JUMBO | (0x2580 << 16));
	if (status)
		goto end;
	status =
	    ql_write_xgmac_reg(qdev, MAC_RX_PARAMS, 0x2580);
	if (status)
		goto end;

	/* Signal to the world that the port is enabled. */
	ql_write32(qdev, STS, ((qdev->port_init << 16) | qdev->port_init));
end:
	ql_sem_unlock(qdev, qdev->xg_sem_mask);
	return status;
}

static inline unsigned int ql_lbq_block_size(struct ql_adapter *qdev)
{
	return PAGE_SIZE << qdev->lbq_buf_order;
}

/* Get the next large buffer. */
static struct bq_desc *ql_get_curr_lbuf(struct rx_ring *rx_ring)
{
	struct bq_desc *lbq_desc = &rx_ring->lbq[rx_ring->lbq_curr_idx];
	rx_ring->lbq_curr_idx++;
	if (rx_ring->lbq_curr_idx == rx_ring->lbq_len)
		rx_ring->lbq_curr_idx = 0;
	rx_ring->lbq_free_cnt++;
	return lbq_desc;
}

static struct bq_desc *ql_get_curr_lchunk(struct ql_adapter *qdev,
					  struct rx_ring *rx_ring)
{
	struct bq_desc *lbq_desc = ql_get_curr_lbuf(rx_ring);

	pci_dma_sync_single_for_cpu(qdev->pdev,
				    pci_unmap_addr(lbq_desc, mapaddr),
				    rx_ring->lbq_buf_size,
				    PCI_DMA_FROMDEVICE);

	/* If it's the last chunk of our master page then
	 * we unmap it.
	 */
	if ((lbq_desc->p.pg_chunk.offset + rx_ring->lbq_buf_size)
					== ql_lbq_block_size(qdev))
		pci_unmap_page(qdev->pdev,
			       lbq_desc->p.pg_chunk.map,
			       ql_lbq_block_size(qdev),
			       PCI_DMA_FROMDEVICE);
	return lbq_desc;
}

/* Get the next small buffer. */
static struct bq_desc *ql_get_curr_sbuf(struct rx_ring *rx_ring)
{
	struct bq_desc *sbq_desc = &rx_ring->sbq[rx_ring->sbq_curr_idx];
	rx_ring->sbq_curr_idx++;
	if (rx_ring->sbq_curr_idx == rx_ring->sbq_len)
		rx_ring->sbq_curr_idx = 0;
	rx_ring->sbq_free_cnt++;
	return sbq_desc;
}

/* Update an rx ring index. */
static void ql_update_cq(struct rx_ring *rx_ring)
{
	rx_ring->cnsmr_idx++;
	rx_ring->curr_entry++;
	if (unlikely(rx_ring->cnsmr_idx == rx_ring->cq_len)) {
		rx_ring->cnsmr_idx = 0;
		rx_ring->curr_entry = rx_ring->cq_base;
	}
}

static void ql_write_cq_idx(struct rx_ring *rx_ring)
{
	ql_write_db_reg(rx_ring->cnsmr_idx, rx_ring->cnsmr_idx_db_reg);
}

static int ql_get_next_chunk(struct ql_adapter *qdev, struct rx_ring *rx_ring,
			     struct bq_desc *lbq_desc)
{
	if (!rx_ring->pg_chunk.page) {
		u64 map;
		rx_ring->pg_chunk.page = alloc_pages(__GFP_COLD | __GFP_COMP |
						     GFP_ATOMIC,
						     qdev->lbq_buf_order);
		if (unlikely(!rx_ring->pg_chunk.page)) {
			netif_err(qdev, drv, qdev->ndev,
				  "page allocation failed.\n");
			return -ENOMEM;
		}
		rx_ring->pg_chunk.offset = 0;
		map = pci_map_page(qdev->pdev, rx_ring->pg_chunk.page,
				   0, ql_lbq_block_size(qdev),
				   PCI_DMA_FROMDEVICE);
		if (pci_dma_mapping_error(qdev->pdev, map)) {
			__free_pages(rx_ring->pg_chunk.page,
				     qdev->lbq_buf_order);
			netif_err(qdev, drv, qdev->ndev,
				  "PCI mapping failed.\n");
			return -ENOMEM;
		}
		rx_ring->pg_chunk.map = map;
		rx_ring->pg_chunk.va = page_address(rx_ring->pg_chunk.page);
	}

	/* Copy the current master pg_chunk info
	 * to the current descriptor.
	 */
	lbq_desc->p.pg_chunk = rx_ring->pg_chunk;

	/* Adjust the master page chunk for next
	 * buffer get.
	 */
	rx_ring->pg_chunk.offset += rx_ring->lbq_buf_size;
	if (rx_ring->pg_chunk.offset == ql_lbq_block_size(qdev)) {
		rx_ring->pg_chunk.page = NULL;
		lbq_desc->p.pg_chunk.last_flag = 1;
	} else {
		rx_ring->pg_chunk.va += rx_ring->lbq_buf_size;
		get_page(rx_ring->pg_chunk.page);
		lbq_desc->p.pg_chunk.last_flag = 0;
	}
	return 0;
}
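
/*
 * Illustrative numbers (assuming PAGE_SIZE = 4096, lbq_buf_order = 1
 * and lbq_buf_size = 2048): ql_lbq_block_size() is 8192, so one master
 * page yields four chunks at offsets 0, 2048, 4096 and 6144. The last
 * chunk sets last_flag, and the master page is unmapped once that
 * chunk is consumed (see ql_get_curr_lchunk() above).
 */
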
/* Process (refill) a large buffer queue. */
static void ql_update_lbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
{
	u32 clean_idx = rx_ring->lbq_clean_idx;
	u32 start_idx = clean_idx;
	struct bq_desc *lbq_desc;
	u64 map;
	int i;

	while (rx_ring->lbq_free_cnt > 32) {
		for (i = 0; i < 16; i++) {
			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
				     "lbq: try cleaning clean_idx = %d.\n",
				     clean_idx);
			lbq_desc = &rx_ring->lbq[clean_idx];
			if (ql_get_next_chunk(qdev, rx_ring, lbq_desc)) {
				netif_err(qdev, ifup, qdev->ndev,
					  "Could not get a page chunk.\n");
				return;
			}

			map = lbq_desc->p.pg_chunk.map +
				lbq_desc->p.pg_chunk.offset;
			pci_unmap_addr_set(lbq_desc, mapaddr, map);
			pci_unmap_len_set(lbq_desc, maplen,
					  rx_ring->lbq_buf_size);
			*lbq_desc->addr = cpu_to_le64(map);

			pci_dma_sync_single_for_device(qdev->pdev, map,
						       rx_ring->lbq_buf_size,
						       PCI_DMA_FROMDEVICE);
			clean_idx++;
			if (clean_idx == rx_ring->lbq_len)
				clean_idx = 0;
		}

		rx_ring->lbq_clean_idx = clean_idx;
		rx_ring->lbq_prod_idx += 16;
		if (rx_ring->lbq_prod_idx == rx_ring->lbq_len)
			rx_ring->lbq_prod_idx = 0;
		rx_ring->lbq_free_cnt -= 16;
	}

	if (start_idx != clean_idx) {
		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
			     "lbq: updating prod idx = %d.\n",
			     rx_ring->lbq_prod_idx);
		ql_write_db_reg(rx_ring->lbq_prod_idx,
				rx_ring->lbq_prod_idx_db_reg);
	}
}

/* Process (refill) a small buffer queue. */
static void ql_update_sbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
{
	u32 clean_idx = rx_ring->sbq_clean_idx;
	u32 start_idx = clean_idx;
	struct bq_desc *sbq_desc;
	u64 map;
	int i;

	while (rx_ring->sbq_free_cnt > 16) {
		for (i = 0; i < 16; i++) {
			sbq_desc = &rx_ring->sbq[clean_idx];
			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
				     "sbq: try cleaning clean_idx = %d.\n",
				     clean_idx);
			if (sbq_desc->p.skb == NULL) {
				netif_printk(qdev, rx_status, KERN_DEBUG,
					     qdev->ndev,
					     "sbq: getting new skb for index %d.\n",
					     sbq_desc->index);
				sbq_desc->p.skb =
				    netdev_alloc_skb(qdev->ndev,
						     SMALL_BUFFER_SIZE);
				if (sbq_desc->p.skb == NULL) {
					netif_err(qdev, probe, qdev->ndev,
						  "Couldn't get an skb.\n");
					rx_ring->sbq_clean_idx = clean_idx;
					return;
				}
				skb_reserve(sbq_desc->p.skb, QLGE_SB_PAD);
				map = pci_map_single(qdev->pdev,
						     sbq_desc->p.skb->data,
						     rx_ring->sbq_buf_size,
						     PCI_DMA_FROMDEVICE);
				if (pci_dma_mapping_error(qdev->pdev, map)) {
					netif_err(qdev, ifup, qdev->ndev,
						  "PCI mapping failed.\n");
					rx_ring->sbq_clean_idx = clean_idx;
					dev_kfree_skb_any(sbq_desc->p.skb);
					sbq_desc->p.skb = NULL;
					return;
				}
				pci_unmap_addr_set(sbq_desc, mapaddr, map);
				pci_unmap_len_set(sbq_desc, maplen,
						  rx_ring->sbq_buf_size);
				*sbq_desc->addr = cpu_to_le64(map);
			}

			clean_idx++;
			if (clean_idx == rx_ring->sbq_len)
				clean_idx = 0;
		}
		rx_ring->sbq_clean_idx = clean_idx;
		rx_ring->sbq_prod_idx += 16;
		if (rx_ring->sbq_prod_idx == rx_ring->sbq_len)
			rx_ring->sbq_prod_idx = 0;
		rx_ring->sbq_free_cnt -= 16;
	}

	if (start_idx != clean_idx) {
		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
			     "sbq: updating prod idx = %d.\n",
			     rx_ring->sbq_prod_idx);
		ql_write_db_reg(rx_ring->sbq_prod_idx,
				rx_ring->sbq_prod_idx_db_reg);
	}
}

static void ql_update_buffer_queues(struct ql_adapter *qdev,
				    struct rx_ring *rx_ring)
{
	ql_update_sbq(qdev, rx_ring);
	ql_update_lbq(qdev, rx_ring);
}
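
/*
 * Refill note (summarizing the two functions above): buffers are
 * replenished 16 descriptors at a time, and the producer index
 * doorbell is rung once per refill pass rather than once per buffer,
 * batching the MMIO writes.
 */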

/* Unmaps tx buffers.  Can be called from send() if a pci mapping
 * fails at some stage, or from the interrupt when a tx completes.
 */
static void ql_unmap_send(struct ql_adapter *qdev,
			  struct tx_ring_desc *tx_ring_desc, int mapped)
{
	int i;
	for (i = 0; i < mapped; i++) {
		if (i == 0 || (i == 7 && mapped > 7)) {
			/*
			 * Unmap the skb->data area, or the
			 * external sglist (AKA the Outbound
			 * Address List (OAL)).
			 * If it's the zeroth element, then it's
			 * the skb->data area. If it's the 7th
			 * element and there are more than 6 frags,
			 * then it's an OAL.
			 */
			if (i == 7) {
				netif_printk(qdev, tx_done, KERN_DEBUG,
					     qdev->ndev,
					     "unmapping OAL area.\n");
			}
			pci_unmap_single(qdev->pdev,
					 pci_unmap_addr(&tx_ring_desc->map[i],
							mapaddr),
					 pci_unmap_len(&tx_ring_desc->map[i],
						       maplen),
					 PCI_DMA_TODEVICE);
		} else {
			netif_printk(qdev, tx_done, KERN_DEBUG, qdev->ndev,
				     "unmapping frag %d.\n", i);
			pci_unmap_page(qdev->pdev,
				       pci_unmap_addr(&tx_ring_desc->map[i],
						      mapaddr),
				       pci_unmap_len(&tx_ring_desc->map[i],
						     maplen), PCI_DMA_TODEVICE);
		}
	}
}

/* Map the buffers for this transmit.  This will return
 * NETDEV_TX_BUSY or NETDEV_TX_OK based on success.
 */
static int ql_map_send(struct ql_adapter *qdev,
		       struct ob_mac_iocb_req *mac_iocb_ptr,
		       struct sk_buff *skb, struct tx_ring_desc *tx_ring_desc)
{
	int len = skb_headlen(skb);
	dma_addr_t map;
	int frag_idx, err, map_idx = 0;
	struct tx_buf_desc *tbd = mac_iocb_ptr->tbd;
	int frag_cnt = skb_shinfo(skb)->nr_frags;

	if (frag_cnt) {
		netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev,
			     "frag_cnt = %d.\n", frag_cnt);
	}
	/*
	 * Map the skb buffer first.
	 */
	map = pci_map_single(qdev->pdev, skb->data, len, PCI_DMA_TODEVICE);

	err = pci_dma_mapping_error(qdev->pdev, map);
	if (err) {
		netif_err(qdev, tx_queued, qdev->ndev,
			  "PCI mapping failed with error: %d\n", err);

		return NETDEV_TX_BUSY;
	}

	tbd->len = cpu_to_le32(len);
	tbd->addr = cpu_to_le64(map);
	pci_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr, map);
	pci_unmap_len_set(&tx_ring_desc->map[map_idx], maplen, len);
	map_idx++;

	/*
	 * This loop fills the remainder of the 8 address descriptors
	 * in the IOCB.  If there are more than 7 fragments, then the
	 * eighth address desc will point to an external list (OAL).
	 * When this happens, the remainder of the frags will be stored
	 * in this list.
	 */
	for (frag_idx = 0; frag_idx < frag_cnt; frag_idx++, map_idx++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[frag_idx];
		tbd++;
		if (frag_idx == 6 && frag_cnt > 7) {
			/* Let's tack on an sglist.
			 * Our control block will now
			 * look like this:
			 * iocb->seg[0] = skb->data
			 * iocb->seg[1] = frag[0]
			 * iocb->seg[2] = frag[1]
			 * iocb->seg[3] = frag[2]
			 * iocb->seg[4] = frag[3]
			 * iocb->seg[5] = frag[4]
			 * iocb->seg[6] = frag[5]
			 * iocb->seg[7] = ptr to OAL (external sglist)
			 * oal->seg[0] = frag[6]
			 * oal->seg[1] = frag[7]
			 * oal->seg[2] = frag[8]
			 * oal->seg[3] = frag[9]
			 * oal->seg[4] = frag[10]
			 *      etc...
			 */
			/* Tack on the OAL in the eighth segment of IOCB. */
			map = pci_map_single(qdev->pdev, &tx_ring_desc->oal,
					     sizeof(struct oal),
					     PCI_DMA_TODEVICE);
			err = pci_dma_mapping_error(qdev->pdev, map);
			if (err) {
				netif_err(qdev, tx_queued, qdev->ndev,
					  "PCI mapping outbound address list with error: %d\n",
					  err);
				goto map_error;
			}

			tbd->addr = cpu_to_le64(map);
			/*
			 * The length is the number of fragments
			 * that remain to be mapped times the length
			 * of our sglist (OAL).
			 */
			tbd->len =
			    cpu_to_le32((sizeof(struct tx_buf_desc) *
					 (frag_cnt - frag_idx)) | TX_DESC_C);
			pci_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr,
					   map);
			pci_unmap_len_set(&tx_ring_desc->map[map_idx], maplen,
					  sizeof(struct oal));
			tbd = (struct tx_buf_desc *)&tx_ring_desc->oal;
			map_idx++;
		}

		map =
		    pci_map_page(qdev->pdev, frag->page,
				 frag->page_offset, frag->size,
				 PCI_DMA_TODEVICE);

		err = pci_dma_mapping_error(qdev->pdev, map);
		if (err) {
			netif_err(qdev, tx_queued, qdev->ndev,
				  "PCI mapping frags failed with error: %d.\n",
				  err);
			goto map_error;
		}

		tbd->addr = cpu_to_le64(map);
		tbd->len = cpu_to_le32(frag->size);
		pci_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr, map);
		pci_unmap_len_set(&tx_ring_desc->map[map_idx], maplen,
				  frag->size);

	}
	/* Save the number of segments we've mapped. */
	tx_ring_desc->map_cnt = map_idx;
	/* Terminate the last segment. */
	tbd->len = cpu_to_le32(le32_to_cpu(tbd->len) | TX_DESC_E);
	return NETDEV_TX_OK;

map_error:
	/*
	 * If the first frag mapping failed, then map_idx will be zero.
	 * This causes the unmap of the skb->data area.  Otherwise
	 * we pass in the number of frags that mapped successfully
	 * so they can be unmapped.
	 */
	ql_unmap_send(qdev, tx_ring_desc, map_idx);
	return NETDEV_TX_BUSY;
}
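
/*
 * Worked example for the OAL layout above (illustrative): an skb with
 * 10 fragments uses iocb->seg[0] for skb->data, seg[1]..seg[6] for
 * frags 0-5, seg[7] for the OAL pointer, and oal->seg[0..3] for frags
 * 6-9, so map_cnt ends up at 12 (11 data mappings plus the OAL).
 */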

/* Process an inbound completion from an rx ring. */
static void ql_process_mac_rx_gro_page(struct ql_adapter *qdev,
				       struct rx_ring *rx_ring,
				       struct ib_mac_iocb_rsp *ib_mac_rsp,
				       u32 length,
				       u16 vlan_id)
{
	struct sk_buff *skb;
	struct bq_desc *lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
	struct skb_frag_struct *rx_frag;
	int nr_frags;
	struct napi_struct *napi = &rx_ring->napi;

	napi->dev = qdev->ndev;

	skb = napi_get_frags(napi);
	if (!skb) {
		netif_err(qdev, drv, qdev->ndev,
			  "Couldn't get an skb, exiting.\n");
		rx_ring->rx_dropped++;
		put_page(lbq_desc->p.pg_chunk.page);
		return;
	}
	prefetch(lbq_desc->p.pg_chunk.va);
	rx_frag = skb_shinfo(skb)->frags;
	nr_frags = skb_shinfo(skb)->nr_frags;
	rx_frag += nr_frags;
	rx_frag->page = lbq_desc->p.pg_chunk.page;
	rx_frag->page_offset = lbq_desc->p.pg_chunk.offset;
	rx_frag->size = length;

	skb->len += length;
	skb->data_len += length;
	skb->truesize += length;
	skb_shinfo(skb)->nr_frags++;

	rx_ring->rx_packets++;
	rx_ring->rx_bytes += length;
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	skb_record_rx_queue(skb, rx_ring->cq_id);
	if (qdev->vlgrp && (vlan_id != 0xffff))
		vlan_gro_frags(&rx_ring->napi, qdev->vlgrp, vlan_id);
	else
		napi_gro_frags(napi);
}

1496/* Process an inbound completion from an rx ring. */
Ron Mercer4f848c02010-01-02 10:37:43 +00001497static void ql_process_mac_rx_page(struct ql_adapter *qdev,
1498 struct rx_ring *rx_ring,
1499 struct ib_mac_iocb_rsp *ib_mac_rsp,
1500 u32 length,
1501 u16 vlan_id)
1502{
1503 struct net_device *ndev = qdev->ndev;
1504 struct sk_buff *skb = NULL;
1505 void *addr;
1506 struct bq_desc *lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
1507 struct napi_struct *napi = &rx_ring->napi;
1508
1509 skb = netdev_alloc_skb(ndev, length);
1510 if (!skb) {
Joe Perchesae9540f2010-02-09 11:49:52 +00001511 netif_err(qdev, drv, qdev->ndev,
1512 "Couldn't get an skb, need to unwind!.\n");
Ron Mercer4f848c02010-01-02 10:37:43 +00001513 rx_ring->rx_dropped++;
1514 put_page(lbq_desc->p.pg_chunk.page);
1515 return;
1516 }
1517
1518 addr = lbq_desc->p.pg_chunk.va;
1519 prefetch(addr);
1520
1521
1522 /* Frame error, so drop the packet. */
1523 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
Joe Perchesae9540f2010-02-09 11:49:52 +00001524 netif_err(qdev, drv, qdev->ndev,
1525 "Receive error, flags2 = 0x%x\n", ib_mac_rsp->flags2);
Ron Mercer4f848c02010-01-02 10:37:43 +00001526 rx_ring->rx_errors++;
1527 goto err_out;
1528 }
1529
1530 /* The max framesize filter on this chip is set higher than
1531 * MTU since FCoE uses 2k frames.
1532 */
1533 if (skb->len > ndev->mtu + ETH_HLEN) {
Joe Perchesae9540f2010-02-09 11:49:52 +00001534 netif_err(qdev, drv, qdev->ndev,
1535 "Segment too small, dropping.\n");
Ron Mercer4f848c02010-01-02 10:37:43 +00001536 rx_ring->rx_dropped++;
1537 goto err_out;
1538 }
1539 memcpy(skb_put(skb, ETH_HLEN), addr, ETH_HLEN);
Joe Perchesae9540f2010-02-09 11:49:52 +00001540 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1541 "%d bytes of headers and data in large. Chain page to new skb and pull tail.\n",
1542 length);
Ron Mercer4f848c02010-01-02 10:37:43 +00001543 skb_fill_page_desc(skb, 0, lbq_desc->p.pg_chunk.page,
1544 lbq_desc->p.pg_chunk.offset+ETH_HLEN,
1545 length-ETH_HLEN);
1546 skb->len += length-ETH_HLEN;
1547 skb->data_len += length-ETH_HLEN;
1548 skb->truesize += length-ETH_HLEN;
1549
1550 rx_ring->rx_packets++;
1551 rx_ring->rx_bytes += skb->len;
1552 skb->protocol = eth_type_trans(skb, ndev);
1553 skb->ip_summed = CHECKSUM_NONE;
1554
1555 if (qdev->rx_csum &&
1556 !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
1557 /* TCP frame. */
1558 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
Joe Perchesae9540f2010-02-09 11:49:52 +00001559 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1560 "TCP checksum done!\n");
Ron Mercer4f848c02010-01-02 10:37:43 +00001561 skb->ip_summed = CHECKSUM_UNNECESSARY;
1562 } else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
1563 (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
1564 /* Unfragmented ipv4 UDP frame. */
1565 struct iphdr *iph = (struct iphdr *) skb->data;
1566 if (!(iph->frag_off &
1567 cpu_to_be16(IP_MF|IP_OFFSET))) {
1568 skb->ip_summed = CHECKSUM_UNNECESSARY;
Joe Perchesae9540f2010-02-09 11:49:52 +00001569 netif_printk(qdev, rx_status, KERN_DEBUG,
1570 qdev->ndev,
1571 "TCP checksum done!\n");
Ron Mercer4f848c02010-01-02 10:37:43 +00001572 }
1573 }
1574 }
1575
1576 skb_record_rx_queue(skb, rx_ring->cq_id);
1577 if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
1578 if (qdev->vlgrp && (vlan_id != 0xffff))
1579 vlan_gro_receive(napi, qdev->vlgrp, vlan_id, skb);
1580 else
1581 napi_gro_receive(napi, skb);
1582 } else {
1583 if (qdev->vlgrp && (vlan_id != 0xffff))
1584 vlan_hwaccel_receive_skb(skb, qdev->vlgrp, vlan_id);
1585 else
1586 netif_receive_skb(skb);
1587 }
1588 return;
1589err_out:
1590 dev_kfree_skb_any(skb);
1591 put_page(lbq_desc->p.pg_chunk.page);
1592}
1593
1594/* Process an inbound completion from an rx ring. */
1595static void ql_process_mac_rx_skb(struct ql_adapter *qdev,
1596 struct rx_ring *rx_ring,
1597 struct ib_mac_iocb_rsp *ib_mac_rsp,
1598 u32 length,
1599 u16 vlan_id)
1600{
1601 struct net_device *ndev = qdev->ndev;
1602 struct sk_buff *skb = NULL;
1603 struct sk_buff *new_skb = NULL;
1604 struct bq_desc *sbq_desc = ql_get_curr_sbuf(rx_ring);
1605
1606 skb = sbq_desc->p.skb;
1607 /* Allocate new_skb and copy */
1608 new_skb = netdev_alloc_skb(qdev->ndev, length + NET_IP_ALIGN);
1609 if (new_skb == NULL) {
Joe Perchesae9540f2010-02-09 11:49:52 +00001610 netif_err(qdev, probe, qdev->ndev,
1611 "No skb available, drop the packet.\n");
Ron Mercer4f848c02010-01-02 10:37:43 +00001612 rx_ring->rx_dropped++;
1613 return;
1614 }
1615 skb_reserve(new_skb, NET_IP_ALIGN);
1616 memcpy(skb_put(new_skb, length), skb->data, length);
1617 skb = new_skb;
1618
1619 /* Frame error, so drop the packet. */
1620 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
Joe Perchesae9540f2010-02-09 11:49:52 +00001621 netif_err(qdev, drv, qdev->ndev,
1622 "Receive error, flags2 = 0x%x\n", ib_mac_rsp->flags2);
Ron Mercer4f848c02010-01-02 10:37:43 +00001623 dev_kfree_skb_any(skb);
1624 rx_ring->rx_errors++;
1625 return;
1626 }
1627
1628 /* loopback self test for ethtool */
1629 if (test_bit(QL_SELFTEST, &qdev->flags)) {
1630 ql_check_lb_frame(qdev, skb);
1631 dev_kfree_skb_any(skb);
1632 return;
1633 }
1634
1635 /* The max framesize filter on this chip is set higher than
1636 * MTU since FCoE uses 2k frames.
1637 */
1638 if (skb->len > ndev->mtu + ETH_HLEN) {
1639 dev_kfree_skb_any(skb);
1640 rx_ring->rx_dropped++;
1641 return;
1642 }
1643
1644 prefetch(skb->data);
1645 skb->dev = ndev;
1646 if (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) {
Joe Perchesae9540f2010-02-09 11:49:52 +00001647 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1648 "%s Multicast.\n",
1649 (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1650 IB_MAC_IOCB_RSP_M_HASH ? "Hash" :
1651 (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1652 IB_MAC_IOCB_RSP_M_REG ? "Registered" :
1653 (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1654 IB_MAC_IOCB_RSP_M_PROM ? "Promiscuous" : "");
Ron Mercer4f848c02010-01-02 10:37:43 +00001655 }
1656 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_P)
Joe Perchesae9540f2010-02-09 11:49:52 +00001657 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1658 "Promiscuous Packet.\n");
Ron Mercer4f848c02010-01-02 10:37:43 +00001659
1660 rx_ring->rx_packets++;
1661 rx_ring->rx_bytes += skb->len;
1662 skb->protocol = eth_type_trans(skb, ndev);
1663 skb->ip_summed = CHECKSUM_NONE;
1664
1665 /* If rx checksum is on, and there are no
1666 * csum or frame errors.
1667 */
1668 if (qdev->rx_csum &&
1669 !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
1670 /* TCP frame. */
1671 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
Joe Perchesae9540f2010-02-09 11:49:52 +00001672 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1673 "TCP checksum done!\n");
Ron Mercer4f848c02010-01-02 10:37:43 +00001674 skb->ip_summed = CHECKSUM_UNNECESSARY;
1675 } else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
1676 (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
1677 /* Unfragmented ipv4 UDP frame. */
1678 struct iphdr *iph = (struct iphdr *) skb->data;
1679 if (!(iph->frag_off &
1680 cpu_to_be16(IP_MF|IP_OFFSET))) {
1681 skb->ip_summed = CHECKSUM_UNNECESSARY;
Joe Perchesae9540f2010-02-09 11:49:52 +00001682 netif_printk(qdev, rx_status, KERN_DEBUG,
1683 qdev->ndev,
1684 "TCP checksum done!\n");
Ron Mercer4f848c02010-01-02 10:37:43 +00001685 }
1686 }
1687 }
1688
1689 skb_record_rx_queue(skb, rx_ring->cq_id);
1690 if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
1691 if (qdev->vlgrp && (vlan_id != 0xffff))
1692 vlan_gro_receive(&rx_ring->napi, qdev->vlgrp,
1693 vlan_id, skb);
1694 else
1695 napi_gro_receive(&rx_ring->napi, skb);
1696 } else {
1697 if (qdev->vlgrp && (vlan_id != 0xffff))
1698 vlan_hwaccel_receive_skb(skb, qdev->vlgrp, vlan_id);
1699 else
1700 netif_receive_skb(skb);
1701 }
1702}
1703
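/* Editor's note: the checksum-validation block above appears verbatim in
 * ql_process_mac_rx_page(), ql_process_mac_rx_skb() and
 * ql_process_mac_split_rx_intr(). A sketch of a shared helper that
 * captures the rule (hypothetical, not part of the driver): hardware TCP
 * checksum results are trusted as-is, UDP results only for unfragmented
 * IPv4 datagrams.
 */
static void ql_rx_csum(struct ql_adapter *qdev, struct sk_buff *skb,
		       struct ib_mac_iocb_rsp *ib_mac_rsp)
{
	skb->ip_summed = CHECKSUM_NONE;
	if (!qdev->rx_csum ||
	    (ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK))
		return;
	if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
		/* TCP frame: the chip verified the checksum. */
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	} else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
		   (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
		struct iphdr *iph = (struct iphdr *) skb->data;

		/* UDP is only trusted when the datagram is not
		 * fragmented; a fragment covers only part of the
		 * checksummed payload.
		 */
		if (!(iph->frag_off & cpu_to_be16(IP_MF | IP_OFFSET)))
			skb->ip_summed = CHECKSUM_UNNECESSARY;
	}
}
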
Stephen Hemminger8668ae92008-11-21 17:29:50 -08001704static void ql_realign_skb(struct sk_buff *skb, int len)
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001705{
1706 void *temp_addr = skb->data;
1707
1708 /* Undo the skb_reserve(skb,32) we did before
1709 * giving to hardware, and realign data on
1710 * a 2-byte boundary.
1711 */
1712 skb->data -= QLGE_SB_PAD - NET_IP_ALIGN;
1713 skb->tail -= QLGE_SB_PAD - NET_IP_ALIGN;
1714 skb_copy_to_linear_data(skb, temp_addr,
1715 (unsigned int)len);
1716}
1717
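/* Editor's note: a worked example of the arithmetic above, assuming the
 * usual values QLGE_SB_PAD == 32 (the skb_reserve(skb, 32) mentioned in
 * the comment) and NET_IP_ALIGN == 2:
 *
 *	skb->data -= 32 - 2;	// data and tail move back 30 bytes
 *
 * The 14-byte Ethernet header then starts 2 bytes into the buffer, so
 * the IP header that follows it lands on a 4-byte boundary.
 */
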
1718/*
1719 * This function builds an skb for the given inbound
1720 * completion. It will be rewritten for readability in the near
1721 * future, but for now it works well.
1722 */
1723static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev,
1724 struct rx_ring *rx_ring,
1725 struct ib_mac_iocb_rsp *ib_mac_rsp)
1726{
1727 struct bq_desc *lbq_desc;
1728 struct bq_desc *sbq_desc;
1729 struct sk_buff *skb = NULL;
1730 u32 length = le32_to_cpu(ib_mac_rsp->data_len);
1731 u32 hdr_len = le32_to_cpu(ib_mac_rsp->hdr_len);
1732
1733 /*
1734 * Handle the header buffer if present.
1735 */
1736 if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV &&
1737 ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
Joe Perchesae9540f2010-02-09 11:49:52 +00001738 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1739 "Header of %d bytes in small buffer.\n", hdr_len);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001740 /*
1741 * Headers fit nicely into a small buffer.
1742 */
1743 sbq_desc = ql_get_curr_sbuf(rx_ring);
1744 pci_unmap_single(qdev->pdev,
1745 pci_unmap_addr(sbq_desc, mapaddr),
1746 pci_unmap_len(sbq_desc, maplen),
1747 PCI_DMA_FROMDEVICE);
1748 skb = sbq_desc->p.skb;
1749 ql_realign_skb(skb, hdr_len);
1750 skb_put(skb, hdr_len);
1751 sbq_desc->p.skb = NULL;
1752 }
1753
1754 /*
1755 * Handle the data buffer(s).
1756 */
1757 if (unlikely(!length)) { /* Is there data too? */
Joe Perchesae9540f2010-02-09 11:49:52 +00001758 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1759 "No Data buffer in this packet.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001760 return skb;
1761 }
1762
1763 if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DS) {
1764 if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
Joe Perchesae9540f2010-02-09 11:49:52 +00001765 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1766 "Headers in small, data of %d bytes in small, combine them.\n",
1767 length);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001768 /*
1769 * Data is less than small buffer size so it's
1770 * stuffed in a small buffer.
1771 * For this case we append the data
1772 * from the "data" small buffer to the "header" small
1773 * buffer.
1774 */
1775 sbq_desc = ql_get_curr_sbuf(rx_ring);
1776 pci_dma_sync_single_for_cpu(qdev->pdev,
1777 pci_unmap_addr
1778 (sbq_desc, mapaddr),
1779 pci_unmap_len
1780 (sbq_desc, maplen),
1781 PCI_DMA_FROMDEVICE);
1782 memcpy(skb_put(skb, length),
1783 sbq_desc->p.skb->data, length);
1784 pci_dma_sync_single_for_device(qdev->pdev,
1785 pci_unmap_addr
1786 (sbq_desc,
1787 mapaddr),
1788 pci_unmap_len
1789 (sbq_desc,
1790 maplen),
1791 PCI_DMA_FROMDEVICE);
1792 } else {
Joe Perchesae9540f2010-02-09 11:49:52 +00001793 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1794 "%d bytes in a single small buffer.\n",
1795 length);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001796 sbq_desc = ql_get_curr_sbuf(rx_ring);
1797 skb = sbq_desc->p.skb;
1798 ql_realign_skb(skb, length);
1799 skb_put(skb, length);
1800 pci_unmap_single(qdev->pdev,
1801 pci_unmap_addr(sbq_desc,
1802 mapaddr),
1803 pci_unmap_len(sbq_desc,
1804 maplen),
1805 PCI_DMA_FROMDEVICE);
1806 sbq_desc->p.skb = NULL;
1807 }
1808 } else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) {
1809 if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
Joe Perchesae9540f2010-02-09 11:49:52 +00001810 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1811 "Header in small, %d bytes in large. Chain large to small!\n",
1812 length);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001813 /*
1814 * The data is in a single large buffer. We
1815 * chain it to the header buffer's skb and let
1816 * it rip.
1817 */
Ron Mercer7c734352009-10-19 03:32:19 +00001818 lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
Joe Perchesae9540f2010-02-09 11:49:52 +00001819 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1820 "Chaining page at offset = %d, for %d bytes to skb.\n",
1821 lbq_desc->p.pg_chunk.offset, length);
Ron Mercer7c734352009-10-19 03:32:19 +00001822 skb_fill_page_desc(skb, 0, lbq_desc->p.pg_chunk.page,
1823 lbq_desc->p.pg_chunk.offset,
1824 length);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001825 skb->len += length;
1826 skb->data_len += length;
1827 skb->truesize += length;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001828 } else {
1829 /*
1830 * The headers and data are in a single large buffer. We
1831 * copy it to a new skb and let it go. This can happen with
1832 * jumbo mtu on a non-TCP/UDP frame.
1833 */
Ron Mercer7c734352009-10-19 03:32:19 +00001834 lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001835 skb = netdev_alloc_skb(qdev->ndev, length);
1836 if (skb == NULL) {
Joe Perchesae9540f2010-02-09 11:49:52 +00001837 netif_printk(qdev, probe, KERN_DEBUG, qdev->ndev,
1838 "No skb available, drop the packet.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001839 return NULL;
1840 }
Ron Mercer4055c7d42009-01-04 17:07:09 -08001841 pci_unmap_page(qdev->pdev,
1842 pci_unmap_addr(lbq_desc,
1843 mapaddr),
1844 pci_unmap_len(lbq_desc, maplen),
1845 PCI_DMA_FROMDEVICE);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001846 skb_reserve(skb, NET_IP_ALIGN);
Joe Perchesae9540f2010-02-09 11:49:52 +00001847 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1848 "%d bytes of headers and data in large. Chain page to new skb and pull tail.\n",
1849 length);
Ron Mercer7c734352009-10-19 03:32:19 +00001850 skb_fill_page_desc(skb, 0,
1851 lbq_desc->p.pg_chunk.page,
1852 lbq_desc->p.pg_chunk.offset,
1853 length);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001854 skb->len += length;
1855 skb->data_len += length;
1856 skb->truesize += length;
1857 length -= length;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001858 __pskb_pull_tail(skb,
1859 (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ?
1860 VLAN_ETH_HLEN : ETH_HLEN);
1861 }
1862 } else {
1863 /*
1864 * The data is in a chain of large buffers
1865 * pointed to by a small buffer. We loop
1866 * through and chain them to our small header
1867 * buffer's skb.
1868 * frags: There are 18 max frags and our small
1869 * buffer will hold 32 of them. The thing is,
1870 * we'll use 3 max for our 9000 byte jumbo
1871 * frames. If the MTU goes up we could
1872 * eventually be in trouble.
1873 */
Ron Mercer7c734352009-10-19 03:32:19 +00001874 int size, i = 0;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001875 sbq_desc = ql_get_curr_sbuf(rx_ring);
1876 pci_unmap_single(qdev->pdev,
1877 pci_unmap_addr(sbq_desc, mapaddr),
1878 pci_unmap_len(sbq_desc, maplen),
1879 PCI_DMA_FROMDEVICE);
1880 if (!(ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS)) {
1881 /*
1882 * This is a non-TCP/UDP IP frame, so
1883 * the headers aren't split into a small
1884 * buffer. We have to use the small buffer
1885 * that contains our sg list as our skb to
1886 * send upstairs. Copy the sg list here to
1887 * a local buffer and use it to find the
1888 * pages to chain.
1889 */
Joe Perchesae9540f2010-02-09 11:49:52 +00001890 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1891 "%d bytes of headers & data in chain of large.\n",
1892 length);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001893 skb = sbq_desc->p.skb;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001894 sbq_desc->p.skb = NULL;
1895 skb_reserve(skb, NET_IP_ALIGN);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001896 }
1897 while (length > 0) {
Ron Mercer7c734352009-10-19 03:32:19 +00001898 lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
1899 size = (length < rx_ring->lbq_buf_size) ? length :
1900 rx_ring->lbq_buf_size;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001901
Joe Perchesae9540f2010-02-09 11:49:52 +00001902 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1903 "Adding page %d to skb for %d bytes.\n",
1904 i, size);
Ron Mercer7c734352009-10-19 03:32:19 +00001905 skb_fill_page_desc(skb, i,
1906 lbq_desc->p.pg_chunk.page,
1907 lbq_desc->p.pg_chunk.offset,
1908 size);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001909 skb->len += size;
1910 skb->data_len += size;
1911 skb->truesize += size;
1912 length -= size;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001913 i++;
1914 }
1915 __pskb_pull_tail(skb, (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ?
1916 VLAN_ETH_HLEN : ETH_HLEN);
1917 }
1918 return skb;
1919}
1920
1921/* Process an inbound completion from an rx ring. */
Ron Mercer4f848c02010-01-02 10:37:43 +00001922static void ql_process_mac_split_rx_intr(struct ql_adapter *qdev,
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001923 struct rx_ring *rx_ring,
Ron Mercer4f848c02010-01-02 10:37:43 +00001924 struct ib_mac_iocb_rsp *ib_mac_rsp,
1925 u16 vlan_id)
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001926{
1927 struct net_device *ndev = qdev->ndev;
1928 struct sk_buff *skb = NULL;
1929
1930 QL_DUMP_IB_MAC_RSP(ib_mac_rsp);
1931
1932 skb = ql_build_rx_skb(qdev, rx_ring, ib_mac_rsp);
1933 if (unlikely(!skb)) {
Joe Perchesae9540f2010-02-09 11:49:52 +00001934 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1935 "No skb available, drop packet.\n");
Ron Mercer885ee392009-11-03 13:49:31 +00001936 rx_ring->rx_dropped++;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001937 return;
1938 }
1939
Ron Mercera32959c2009-06-09 05:39:27 +00001940 /* Frame error, so drop the packet. */
1941 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
Joe Perchesae9540f2010-02-09 11:49:52 +00001942 netif_err(qdev, drv, qdev->ndev,
1943 "Receive error, flags2 = 0x%x\n", ib_mac_rsp->flags2);
Ron Mercera32959c2009-06-09 05:39:27 +00001944 dev_kfree_skb_any(skb);
Ron Mercer885ee392009-11-03 13:49:31 +00001945 rx_ring->rx_errors++;
Ron Mercera32959c2009-06-09 05:39:27 +00001946 return;
1947 }
Ron Mercerec33a492009-06-09 05:39:28 +00001948
1949 /* The max framesize filter on this chip is set higher than
1950 * MTU since FCoE uses 2k frames.
1951 */
1952 if (skb->len > ndev->mtu + ETH_HLEN) {
1953 dev_kfree_skb_any(skb);
Ron Mercer885ee392009-11-03 13:49:31 +00001954 rx_ring->rx_dropped++;
Ron Mercerec33a492009-06-09 05:39:28 +00001955 return;
1956 }
1957
Ron Mercer9dfbbaa2009-10-30 12:13:33 +00001958 /* loopback self test for ethtool */
1959 if (test_bit(QL_SELFTEST, &qdev->flags)) {
1960 ql_check_lb_frame(qdev, skb);
1961 dev_kfree_skb_any(skb);
1962 return;
1963 }
1964
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001965 prefetch(skb->data);
1966 skb->dev = ndev;
1967 if (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) {
Joe Perchesae9540f2010-02-09 11:49:52 +00001968 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, "%s Multicast.\n",
1969 (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1970 IB_MAC_IOCB_RSP_M_HASH ? "Hash" :
1971 (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1972 IB_MAC_IOCB_RSP_M_REG ? "Registered" :
1973 (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1974 IB_MAC_IOCB_RSP_M_PROM ? "Promiscuous" : "");
Ron Mercer885ee392009-11-03 13:49:31 +00001975 rx_ring->rx_multicast++;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001976 }
1977 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_P) {
Joe Perchesae9540f2010-02-09 11:49:52 +00001978 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1979 "Promiscuous Packet.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04001980 }
Ron Mercerd555f592009-03-09 10:59:19 +00001981
Ron Mercerd555f592009-03-09 10:59:19 +00001982 skb->protocol = eth_type_trans(skb, ndev);
1983 skb->ip_summed = CHECKSUM_NONE;
1984
1985 /* If rx checksum is on, and there are no
1986 * csum or frame errors.
1987 */
1988 if (qdev->rx_csum &&
Ron Mercerd555f592009-03-09 10:59:19 +00001989 !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
1990 /* TCP frame. */
1991 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
Joe Perchesae9540f2010-02-09 11:49:52 +00001992 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1993 "TCP checksum done!\n");
Ron Mercerd555f592009-03-09 10:59:19 +00001994 skb->ip_summed = CHECKSUM_UNNECESSARY;
1995 } else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
1996 (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
1997 /* Unfragmented ipv4 UDP frame. */
1998 struct iphdr *iph = (struct iphdr *) skb->data;
1999 if (!(iph->frag_off &
2000 cpu_to_be16(IP_MF|IP_OFFSET))) {
2001 skb->ip_summed = CHECKSUM_UNNECESSARY;
Joe Perchesae9540f2010-02-09 11:49:52 +00002002 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2003 "TCP checksum done!\n");
Ron Mercerd555f592009-03-09 10:59:19 +00002004 }
2005 }
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002006 }
Ron Mercerd555f592009-03-09 10:59:19 +00002007
Ron Mercer885ee392009-11-03 13:49:31 +00002008 rx_ring->rx_packets++;
2009 rx_ring->rx_bytes += skb->len;
Ron Mercerb2014ff2009-08-27 11:02:09 +00002010 skb_record_rx_queue(skb, rx_ring->cq_id);
Ron Mercer22bdd4f2009-03-09 10:59:20 +00002011 if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
2012 if (qdev->vlgrp &&
2013 (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) &&
2014 (vlan_id != 0))
2015 vlan_gro_receive(&rx_ring->napi, qdev->vlgrp,
2016 vlan_id, skb);
2017 else
2018 napi_gro_receive(&rx_ring->napi, skb);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002019 } else {
Ron Mercer22bdd4f2009-03-09 10:59:20 +00002020 if (qdev->vlgrp &&
2021 (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) &&
2022 (vlan_id != 0))
2023 vlan_hwaccel_receive_skb(skb, qdev->vlgrp, vlan_id);
2024 else
2025 netif_receive_skb(skb);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002026 }
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002027}
2028
Ron Mercer4f848c02010-01-02 10:37:43 +00002029/* Process an inbound completion from an rx ring. */
2030static unsigned long ql_process_mac_rx_intr(struct ql_adapter *qdev,
2031 struct rx_ring *rx_ring,
2032 struct ib_mac_iocb_rsp *ib_mac_rsp)
2033{
2034 u32 length = le32_to_cpu(ib_mac_rsp->data_len);
2035 u16 vlan_id = (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ?
2036 ((le16_to_cpu(ib_mac_rsp->vlan_id) &
2037 IB_MAC_IOCB_RSP_VLAN_MASK)) : 0xffff;
2038
2039 QL_DUMP_IB_MAC_RSP(ib_mac_rsp);
2040
2041 if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV) {
2042 /* The data and headers are split into
2043 * separate buffers.
2044 */
2045 ql_process_mac_split_rx_intr(qdev, rx_ring, ib_mac_rsp,
2046 vlan_id);
2047 } else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DS) {
2048 /* The data fit in a single small buffer.
2049 * Allocate a new skb, copy the data and
2050 * return the buffer to the free pool.
2051 */
2052 ql_process_mac_rx_skb(qdev, rx_ring, ib_mac_rsp,
2053 length, vlan_id);
Ron Mercer63526712010-01-02 10:37:44 +00002054 } else if ((ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) &&
2055 !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK) &&
2056 (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T)) {
2057 /* TCP packet in a page chunk that's been checksummed.
2058 * Tack it on to our GRO skb and let it go.
2059 */
2060 ql_process_mac_rx_gro_page(qdev, rx_ring, ib_mac_rsp,
2061 length, vlan_id);
Ron Mercer4f848c02010-01-02 10:37:43 +00002062 } else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) {
2063 /* Non-TCP packet in a page chunk. Allocate an
2064 * skb, tack it on frags, and send it up.
2065 */
2066 ql_process_mac_rx_page(qdev, rx_ring, ib_mac_rsp,
2067 length, vlan_id);
2068 } else {
2069 struct bq_desc *lbq_desc;
2070
2071 /* Free small buffer that holds the IAL */
2072 lbq_desc = ql_get_curr_sbuf(rx_ring);
Joe Perchesae9540f2010-02-09 11:49:52 +00002073 netif_err(qdev, rx_err, qdev->ndev,
2074 "Dropping frame, len %d > mtu %d\n",
2075 length, qdev->ndev->mtu);
Ron Mercer4f848c02010-01-02 10:37:43 +00002076
2077 /* Unwind the large buffers for this frame. */
2078 while (length > 0) {
2079 lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
2080 length -= (length < rx_ring->lbq_buf_size) ?
2081 length : rx_ring->lbq_buf_size;
2082 put_page(lbq_desc->p.pg_chunk.page);
2083 }
2084 }
2085
2086 return (unsigned long)length;
2087}
2088
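/* Editor's note: a summary of the dispatch above, in the order the flags
 * are tested (derived from the code, not from the data sheet):
 *
 *	RSP_HV (header/data split)       -> ql_process_mac_split_rx_intr()
 *	RSP_DS (data in a small buffer)  -> ql_process_mac_rx_skb()
 *	RSP_DL + RSP_T + no csum error   -> ql_process_mac_rx_gro_page()
 *	RSP_DL (any other page chunk)    -> ql_process_mac_rx_page()
 *	anything else                    -> drop and unwind the buffers
 */
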
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002089/* Process an outbound completion from an rx ring. */
2090static void ql_process_mac_tx_intr(struct ql_adapter *qdev,
2091 struct ob_mac_iocb_rsp *mac_rsp)
2092{
2093 struct tx_ring *tx_ring;
2094 struct tx_ring_desc *tx_ring_desc;
2095
2096 QL_DUMP_OB_MAC_RSP(mac_rsp);
2097 tx_ring = &qdev->tx_ring[mac_rsp->txq_idx];
2098 tx_ring_desc = &tx_ring->q[mac_rsp->tid];
2099 ql_unmap_send(qdev, tx_ring_desc, tx_ring_desc->map_cnt);
Ron Mercer885ee392009-11-03 13:49:31 +00002100 tx_ring->tx_bytes += (tx_ring_desc->skb)->len;
2101 tx_ring->tx_packets++;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002102 dev_kfree_skb(tx_ring_desc->skb);
2103 tx_ring_desc->skb = NULL;
2104
2105 if (unlikely(mac_rsp->flags1 & (OB_MAC_IOCB_RSP_E |
2106 OB_MAC_IOCB_RSP_S |
2107 OB_MAC_IOCB_RSP_L |
2108 OB_MAC_IOCB_RSP_P | OB_MAC_IOCB_RSP_B))) {
2109 if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_E) {
Joe Perchesae9540f2010-02-09 11:49:52 +00002110 netif_warn(qdev, tx_done, qdev->ndev,
2111 "Total descriptor length did not match transfer length.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002112 }
2113 if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_S) {
Joe Perchesae9540f2010-02-09 11:49:52 +00002114 netif_warn(qdev, tx_done, qdev->ndev,
2115 "Frame too short to be valid, not sent.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002116 }
2117 if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_L) {
Joe Perchesae9540f2010-02-09 11:49:52 +00002118 netif_warn(qdev, tx_done, qdev->ndev,
2119 "Frame too long, but sent anyway.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002120 }
2121 if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_B) {
Joe Perchesae9540f2010-02-09 11:49:52 +00002122 netif_warn(qdev, tx_done, qdev->ndev,
2123 "PCI backplane error. Frame not sent.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002124 }
2125 }
2126 atomic_inc(&tx_ring->tx_count);
2127}
2128
2129/* Fire up a handler to reset the MPI processor. */
2130void ql_queue_fw_error(struct ql_adapter *qdev)
2131{
Ron Mercer6a473302009-07-02 06:06:12 +00002132 ql_link_off(qdev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002133 queue_delayed_work(qdev->workqueue, &qdev->mpi_reset_work, 0);
2134}
2135
2136void ql_queue_asic_error(struct ql_adapter *qdev)
2137{
Ron Mercer6a473302009-07-02 06:06:12 +00002138 ql_link_off(qdev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002139 ql_disable_interrupts(qdev);
Ron Mercer6497b602009-02-12 16:37:13 -08002140 /* Clear adapter up bit to signal the recovery
2141 * process that it shouldn't kill the reset worker
2142 * thread
2143 */
2144 clear_bit(QL_ADAPTER_UP, &qdev->flags);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002145 queue_delayed_work(qdev->workqueue, &qdev->asic_reset_work, 0);
2146}
2147
2148static void ql_process_chip_ae_intr(struct ql_adapter *qdev,
2149 struct ib_ae_iocb_rsp *ib_ae_rsp)
2150{
2151 switch (ib_ae_rsp->event) {
2152 case MGMT_ERR_EVENT:
Joe Perchesae9540f2010-02-09 11:49:52 +00002153 netif_err(qdev, rx_err, qdev->ndev,
2154 "Management Processor Fatal Error.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002155 ql_queue_fw_error(qdev);
2156 return;
2157
2158 case CAM_LOOKUP_ERR_EVENT:
Joe Perchesae9540f2010-02-09 11:49:52 +00002159 netif_err(qdev, link, qdev->ndev,
2160 "Multiple CAM hits lookup occurred.\n");
2161 netif_err(qdev, drv, qdev->ndev,
2162 "This event shouldn't occur.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002163 ql_queue_asic_error(qdev);
2164 return;
2165
2166 case SOFT_ECC_ERROR_EVENT:
Joe Perchesae9540f2010-02-09 11:49:52 +00002167 netif_err(qdev, rx_err, qdev->ndev,
2168 "Soft ECC error detected.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002169 ql_queue_asic_error(qdev);
2170 break;
2171
2172 case PCI_ERR_ANON_BUF_RD:
Joe Perchesae9540f2010-02-09 11:49:52 +00002173 netif_err(qdev, rx_err, qdev->ndev,
2174 "PCI error occurred when reading anonymous buffers from rx_ring %d.\n",
2175 ib_ae_rsp->q_id);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002176 ql_queue_asic_error(qdev);
2177 break;
2178
2179 default:
Joe Perchesae9540f2010-02-09 11:49:52 +00002180 netif_err(qdev, drv, qdev->ndev, "Unexpected event %d.\n",
2181 ib_ae_rsp->event);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002182 ql_queue_asic_error(qdev);
2183 break;
2184 }
2185}
2186
2187static int ql_clean_outbound_rx_ring(struct rx_ring *rx_ring)
2188{
2189 struct ql_adapter *qdev = rx_ring->qdev;
Ron Mercerba7cd3b2009-01-09 11:31:49 +00002190 u32 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002191 struct ob_mac_iocb_rsp *net_rsp = NULL;
2192 int count = 0;
2193
Ron Mercer1e213302009-03-09 10:59:21 +00002194 struct tx_ring *tx_ring;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002195 /* While there are entries in the completion queue. */
2196 while (prod != rx_ring->cnsmr_idx) {
2197
Joe Perchesae9540f2010-02-09 11:49:52 +00002198 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2199 "cq_id = %d, prod = %d, cnsmr = %d.\n.",
2200 rx_ring->cq_id, prod, rx_ring->cnsmr_idx);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002201
2202 net_rsp = (struct ob_mac_iocb_rsp *)rx_ring->curr_entry;
2203 rmb();
2204 switch (net_rsp->opcode) {
2205
2206 case OPCODE_OB_MAC_TSO_IOCB:
2207 case OPCODE_OB_MAC_IOCB:
2208 ql_process_mac_tx_intr(qdev, net_rsp);
2209 break;
2210 default:
Joe Perchesae9540f2010-02-09 11:49:52 +00002211 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2212 "Hit default case, not handled! dropping the packet, opcode = %x.\n",
2213 net_rsp->opcode);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002214 }
2215 count++;
2216 ql_update_cq(rx_ring);
Ron Mercerba7cd3b2009-01-09 11:31:49 +00002217 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002218 }
2219 ql_write_cq_idx(rx_ring);
Ron Mercer1e213302009-03-09 10:59:21 +00002220 tx_ring = net_rsp ? &qdev->tx_ring[net_rsp->txq_idx] : NULL;
2221 if (net_rsp != NULL &&
2222 __netif_subqueue_stopped(qdev->ndev, tx_ring->wq_id)) {
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002223 if (atomic_read(&tx_ring->queue_stopped) &&
2224 (atomic_read(&tx_ring->tx_count) > (tx_ring->wq_len / 4)))
2225 /*
2226 * The queue got stopped because the tx_ring was full.
2227 * Wake it up, because it's now at least 25% empty.
2228 */
Ron Mercer1e213302009-03-09 10:59:21 +00002229 netif_wake_subqueue(qdev->ndev, tx_ring->wq_id);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002230 }
2231
2232 return count;
2233}
2234
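/* Editor's note: a worked example of the 25% wake threshold above,
 * assuming a transmit ring of wq_len == 256 entries for illustration:
 * the queue is woken only once atomic_read(&tx_ring->tx_count), the
 * number of free descriptors, exceeds 256 / 4 == 64, i.e. once the ring
 * is at least a quarter empty. Waking on every completion instead would
 * make the queue bounce between stopped and awake under load.
 */
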
2235static int ql_clean_inbound_rx_ring(struct rx_ring *rx_ring, int budget)
2236{
2237 struct ql_adapter *qdev = rx_ring->qdev;
Ron Mercerba7cd3b2009-01-09 11:31:49 +00002238 u32 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002239 struct ql_net_rsp_iocb *net_rsp;
2240 int count = 0;
2241
2242 /* While there are entries in the completion queue. */
2243 while (prod != rx_ring->cnsmr_idx) {
2244
Joe Perchesae9540f2010-02-09 11:49:52 +00002245 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2246 "cq_id = %d, prod = %d, cnsmr = %d.\n.",
2247 rx_ring->cq_id, prod, rx_ring->cnsmr_idx);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002248
2249 net_rsp = rx_ring->curr_entry;
2250 rmb();
2251 switch (net_rsp->opcode) {
2252 case OPCODE_IB_MAC_IOCB:
2253 ql_process_mac_rx_intr(qdev, rx_ring,
2254 (struct ib_mac_iocb_rsp *)
2255 net_rsp);
2256 break;
2257
2258 case OPCODE_IB_AE_IOCB:
2259 ql_process_chip_ae_intr(qdev, (struct ib_ae_iocb_rsp *)
2260 net_rsp);
2261 break;
2262 default:
Joe Perchesae9540f2010-02-09 11:49:52 +00002263 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2264 "Hit default case, not handled! dropping the packet, opcode = %x.\n",
2265 net_rsp->opcode);
2266 break;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002267 }
2268 count++;
2269 ql_update_cq(rx_ring);
Ron Mercerba7cd3b2009-01-09 11:31:49 +00002270 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002271 if (count == budget)
2272 break;
2273 }
2274 ql_update_buffer_queues(qdev, rx_ring);
2275 ql_write_cq_idx(rx_ring);
2276 return count;
2277}
2278
2279static int ql_napi_poll_msix(struct napi_struct *napi, int budget)
2280{
2281 struct rx_ring *rx_ring = container_of(napi, struct rx_ring, napi);
2282 struct ql_adapter *qdev = rx_ring->qdev;
Ron Mercer39aa8162009-08-27 11:02:11 +00002283 struct rx_ring *trx_ring;
2284 int i, work_done = 0;
2285 struct intr_context *ctx = &qdev->intr_context[rx_ring->cq_id];
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002286
Joe Perchesae9540f2010-02-09 11:49:52 +00002287 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2288 "Enter, NAPI POLL cq_id = %d.\n", rx_ring->cq_id);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002289
Ron Mercer39aa8162009-08-27 11:02:11 +00002290 /* Service the TX rings first. They start
2291 * right after the RSS rings. */
2292 for (i = qdev->rss_ring_count; i < qdev->rx_ring_count; i++) {
2293 trx_ring = &qdev->rx_ring[i];
2294 /* If this TX completion ring belongs to this vector and
2295 * it's not empty then service it.
2296 */
2297 if ((ctx->irq_mask & (1 << trx_ring->cq_id)) &&
2298 (ql_read_sh_reg(trx_ring->prod_idx_sh_reg) !=
2299 trx_ring->cnsmr_idx)) {
Joe Perchesae9540f2010-02-09 11:49:52 +00002300 netif_printk(qdev, intr, KERN_DEBUG, qdev->ndev,
2301 "%s: Servicing TX completion ring %d.\n",
2302 __func__, trx_ring->cq_id);
Ron Mercer39aa8162009-08-27 11:02:11 +00002303 ql_clean_outbound_rx_ring(trx_ring);
2304 }
2305 }
2306
2307 /*
2308 * Now service the RSS ring if it's active.
2309 */
2310 if (ql_read_sh_reg(rx_ring->prod_idx_sh_reg) !=
2311 rx_ring->cnsmr_idx) {
Joe Perchesae9540f2010-02-09 11:49:52 +00002312 netif_printk(qdev, intr, KERN_DEBUG, qdev->ndev,
2313 "%s: Servicing RX completion ring %d.\n",
2314 __func__, rx_ring->cq_id);
Ron Mercer39aa8162009-08-27 11:02:11 +00002315 work_done = ql_clean_inbound_rx_ring(rx_ring, budget);
2316 }
2317
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002318 if (work_done < budget) {
Ron Mercer22bdd4f2009-03-09 10:59:20 +00002319 napi_complete(napi);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002320 ql_enable_completion_interrupt(qdev, rx_ring->irq);
2321 }
2322 return work_done;
2323}
2324
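/* Editor's note: a worked example of the ctx->irq_mask test above. Each
 * completion queue contributes bit (1 << cq_id), so a vector that
 * services cq_id 0 and a TX completion ring with cq_id 4 would carry
 * (illustrative values, not taken from the register layout):
 *
 *	ctx->irq_mask = (1 << 0) | (1 << 4);	// 0x11
 *
 * and the loop above services exactly those rings that are both set in
 * the mask and non-empty.
 */
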
Ron Mercer01e6b952009-10-30 12:13:34 +00002325static void qlge_vlan_rx_register(struct net_device *ndev, struct vlan_group *grp)
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002326{
2327 struct ql_adapter *qdev = netdev_priv(ndev);
2328
2329 qdev->vlgrp = grp;
2330 if (grp) {
Joe Perchesae9540f2010-02-09 11:49:52 +00002331 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
2332 "Turning on VLAN in NIC_RCV_CFG.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002333 ql_write32(qdev, NIC_RCV_CFG, NIC_RCV_CFG_VLAN_MASK |
2334 NIC_RCV_CFG_VLAN_MATCH_AND_NON);
2335 } else {
Joe Perchesae9540f2010-02-09 11:49:52 +00002336 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
2337 "Turning off VLAN in NIC_RCV_CFG.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002338 ql_write32(qdev, NIC_RCV_CFG, NIC_RCV_CFG_VLAN_MASK);
2339 }
2340}
2341
Ron Mercer01e6b952009-10-30 12:13:34 +00002342static void qlge_vlan_rx_add_vid(struct net_device *ndev, u16 vid)
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002343{
2344 struct ql_adapter *qdev = netdev_priv(ndev);
2345 u32 enable_bit = MAC_ADDR_E;
Ron Mercercc288f52009-02-23 10:42:14 +00002346 int status;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002347
Ron Mercercc288f52009-02-23 10:42:14 +00002348 status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
2349 if (status)
2350 return;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002351 if (ql_set_mac_addr_reg
2352 (qdev, (u8 *) &enable_bit, MAC_ADDR_TYPE_VLAN, vid)) {
Joe Perchesae9540f2010-02-09 11:49:52 +00002353 netif_err(qdev, ifup, qdev->ndev,
2354 "Failed to init vlan address.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002355 }
Ron Mercercc288f52009-02-23 10:42:14 +00002356 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002357}
2358
Ron Mercer01e6b952009-10-30 12:13:34 +00002359static void qlge_vlan_rx_kill_vid(struct net_device *ndev, u16 vid)
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002360{
2361 struct ql_adapter *qdev = netdev_priv(ndev);
2362 u32 enable_bit = 0;
Ron Mercercc288f52009-02-23 10:42:14 +00002363 int status;
2364
2365 status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
2366 if (status)
2367 return;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002368
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002369 if (ql_set_mac_addr_reg
2370 (qdev, (u8 *) &enable_bit, MAC_ADDR_TYPE_VLAN, vid)) {
Joe Perchesae9540f2010-02-09 11:49:52 +00002371 netif_err(qdev, ifup, qdev->ndev,
2372 "Failed to clear vlan address.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002373 }
Ron Mercercc288f52009-02-23 10:42:14 +00002374 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002375
2376}
2377
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002378/* MSI-X Multiple Vector Interrupt Handler for inbound completions. */
2379static irqreturn_t qlge_msix_rx_isr(int irq, void *dev_id)
2380{
2381 struct rx_ring *rx_ring = dev_id;
Ben Hutchings288379f2009-01-19 16:43:59 -08002382 napi_schedule(&rx_ring->napi);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002383 return IRQ_HANDLED;
2384}
2385
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002386/* This handles a fatal error, MPI activity, and the default
2387 * rx_ring in an MSI-X multiple vector environment.
2388 * In MSI/Legacy environment it also processes the rest of
2389 * the rx_rings.
2390 */
2391static irqreturn_t qlge_isr(int irq, void *dev_id)
2392{
2393 struct rx_ring *rx_ring = dev_id;
2394 struct ql_adapter *qdev = rx_ring->qdev;
2395 struct intr_context *intr_context = &qdev->intr_context[0];
2396 u32 var;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002397 int work_done = 0;
2398
Ron Mercerbb0d2152008-10-20 10:30:26 -07002399 spin_lock(&qdev->hw_lock);
2400 if (atomic_read(&qdev->intr_context[0].irq_cnt)) {
Joe Perchesae9540f2010-02-09 11:49:52 +00002401 netif_printk(qdev, intr, KERN_DEBUG, qdev->ndev,
2402 "Shared Interrupt, Not ours!\n");
Ron Mercerbb0d2152008-10-20 10:30:26 -07002403 spin_unlock(&qdev->hw_lock);
2404 return IRQ_NONE;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002405 }
Ron Mercerbb0d2152008-10-20 10:30:26 -07002406 spin_unlock(&qdev->hw_lock);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002407
Ron Mercerbb0d2152008-10-20 10:30:26 -07002408 var = ql_disable_completion_interrupt(qdev, intr_context->intr);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002409
2410 /*
2411 * Check for fatal error.
2412 */
2413 if (var & STS_FE) {
2414 ql_queue_asic_error(qdev);
Joe Perchesae9540f2010-02-09 11:49:52 +00002415 netif_err(qdev, intr, qdev->ndev,
2416 "Got fatal error, STS = %x.\n", var);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002417 var = ql_read32(qdev, ERR_STS);
Joe Perchesae9540f2010-02-09 11:49:52 +00002418 netif_err(qdev, intr, qdev->ndev,
2419 "Resetting chip. Error Status Register = 0x%x\n", var);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002420 return IRQ_HANDLED;
2421 }
2422
2423 /*
2424 * Check MPI processor activity.
2425 */
Ron Mercer5ee22a52009-10-05 11:46:48 +00002426 if ((var & STS_PI) &&
2427 (ql_read32(qdev, INTR_MASK) & INTR_MASK_PI)) {
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002428 /*
2429 * We've got an async event or mailbox completion.
2430 * Handle it and clear the source of the interrupt.
2431 */
Joe Perchesae9540f2010-02-09 11:49:52 +00002432 netif_err(qdev, intr, qdev->ndev,
2433 "Got MPI processor interrupt.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002434 ql_disable_completion_interrupt(qdev, intr_context->intr);
Ron Mercer5ee22a52009-10-05 11:46:48 +00002435 ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16));
2436 queue_delayed_work_on(smp_processor_id(),
2437 qdev->workqueue, &qdev->mpi_work, 0);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002438 work_done++;
2439 }
2440
2441 /*
Ron Mercer39aa8162009-08-27 11:02:11 +00002442 * Get the bit-mask that shows the active queues for this
2443 * pass. Compare it to the queues that this irq services
2444 * and call napi if there's a match.
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002445 */
Ron Mercer39aa8162009-08-27 11:02:11 +00002446 var = ql_read32(qdev, ISR1);
2447 if (var & intr_context->irq_mask) {
Joe Perchesae9540f2010-02-09 11:49:52 +00002448 netif_info(qdev, intr, qdev->ndev,
2449 "Waking handler for rx_ring[0].\n");
Ron Mercer39aa8162009-08-27 11:02:11 +00002450 ql_disable_completion_interrupt(qdev, intr_context->intr);
Ron Mercer32a5b2a2009-11-03 13:49:29 +00002451 napi_schedule(&rx_ring->napi);
2452 work_done++;
2453 }
Ron Mercerbb0d2152008-10-20 10:30:26 -07002454 ql_enable_completion_interrupt(qdev, intr_context->intr);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002455 return work_done ? IRQ_HANDLED : IRQ_NONE;
2456}
2457
2458static int ql_tso(struct sk_buff *skb, struct ob_mac_tso_iocb_req *mac_iocb_ptr)
2459{
2461 if (skb_is_gso(skb)) {
2462 int err;
2463 if (skb_header_cloned(skb)) {
2464 err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
2465 if (err)
2466 return err;
2467 }
2468
2469 mac_iocb_ptr->opcode = OPCODE_OB_MAC_TSO_IOCB;
2470 mac_iocb_ptr->flags3 |= OB_MAC_TSO_IOCB_IC;
2471 mac_iocb_ptr->frame_len = cpu_to_le32((u32) skb->len);
2472 mac_iocb_ptr->total_hdrs_len =
2473 cpu_to_le16(skb_transport_offset(skb) + tcp_hdrlen(skb));
2474 mac_iocb_ptr->net_trans_offset =
2475 cpu_to_le16(skb_network_offset(skb) |
2476 skb_transport_offset(skb)
2477 << OB_MAC_TRANSPORT_HDR_SHIFT);
2478 mac_iocb_ptr->mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
2479 mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_LSO;
2480 if (likely(skb->protocol == htons(ETH_P_IP))) {
2481 struct iphdr *iph = ip_hdr(skb);
2482 iph->check = 0;
2483 mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP4;
2484 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
2485 iph->daddr, 0,
2486 IPPROTO_TCP,
2487 0);
2488 } else if (skb->protocol == htons(ETH_P_IPV6)) {
2489 mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP6;
2490 tcp_hdr(skb)->check =
2491 ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
2492 &ipv6_hdr(skb)->daddr,
2493 0, IPPROTO_TCP, 0);
2494 }
2495 return 1;
2496 }
2497 return 0;
2498}
2499
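/* Editor's note on the checksum seeding above: for TSO the TCP checksum
 * field is preloaded with the one's-complement pseudo-header sum over
 * the addresses and protocol with a length of zero, e.g. for IPv4:
 *
 *	tcp_hdr(skb)->check =
 *		~csum_tcpudp_magic(iph->saddr, iph->daddr, 0,
 *				   IPPROTO_TCP, 0);
 *
 * The hardware then folds the real length and payload of each generated
 * segment into that seed; software cannot supply the length itself
 * because it differs per segment.
 */
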
2500static void ql_hw_csum_setup(struct sk_buff *skb,
2501 struct ob_mac_tso_iocb_req *mac_iocb_ptr)
2502{
2503 int len;
2504 struct iphdr *iph = ip_hdr(skb);
Ron Mercerfd2df4f2009-01-05 18:18:45 -08002505 __sum16 *check;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002506 mac_iocb_ptr->opcode = OPCODE_OB_MAC_TSO_IOCB;
2507 mac_iocb_ptr->frame_len = cpu_to_le32((u32) skb->len);
2508 mac_iocb_ptr->net_trans_offset =
2509 cpu_to_le16(skb_network_offset(skb) |
2510 skb_transport_offset(skb) << OB_MAC_TRANSPORT_HDR_SHIFT);
2511
2512 mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP4;
2513 len = (ntohs(iph->tot_len) - (iph->ihl << 2));
2514 if (likely(iph->protocol == IPPROTO_TCP)) {
2515 check = &(tcp_hdr(skb)->check);
2516 mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_TC;
2517 mac_iocb_ptr->total_hdrs_len =
2518 cpu_to_le16(skb_transport_offset(skb) +
2519 (tcp_hdr(skb)->doff << 2));
2520 } else {
2521 check = &(udp_hdr(skb)->check);
2522 mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_UC;
2523 mac_iocb_ptr->total_hdrs_len =
2524 cpu_to_le16(skb_transport_offset(skb) +
2525 sizeof(struct udphdr));
2526 }
2527 *check = ~csum_tcpudp_magic(iph->saddr,
2528 iph->daddr, len, iph->protocol, 0);
2529}
2530
Stephen Hemminger613573252009-08-31 19:50:58 +00002531static netdev_tx_t qlge_send(struct sk_buff *skb, struct net_device *ndev)
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002532{
2533 struct tx_ring_desc *tx_ring_desc;
2534 struct ob_mac_iocb_req *mac_iocb_ptr;
2535 struct ql_adapter *qdev = netdev_priv(ndev);
2536 int tso;
2537 struct tx_ring *tx_ring;
Ron Mercer1e213302009-03-09 10:59:21 +00002538 u32 tx_ring_idx = (u32) skb->queue_mapping;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002539
2540 tx_ring = &qdev->tx_ring[tx_ring_idx];
2541
Ron Mercer74c50b42009-03-09 10:59:27 +00002542 if (skb_padto(skb, ETH_ZLEN))
2543 return NETDEV_TX_OK;
2544
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002545 if (unlikely(atomic_read(&tx_ring->tx_count) < 2)) {
Joe Perchesae9540f2010-02-09 11:49:52 +00002546 netif_info(qdev, tx_queued, qdev->ndev,
2547 "%s: shutting down tx queue %d du to lack of resources.\n",
2548 __func__, tx_ring_idx);
Ron Mercer1e213302009-03-09 10:59:21 +00002549 netif_stop_subqueue(ndev, tx_ring->wq_id);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002550 atomic_inc(&tx_ring->queue_stopped);
Ron Mercer885ee392009-11-03 13:49:31 +00002551 tx_ring->tx_errors++;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002552 return NETDEV_TX_BUSY;
2553 }
2554 tx_ring_desc = &tx_ring->q[tx_ring->prod_idx];
2555 mac_iocb_ptr = tx_ring_desc->queue_entry;
Ron Mercere3324712009-07-02 06:06:13 +00002556 memset((void *)mac_iocb_ptr, 0, sizeof(*mac_iocb_ptr));
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002557
2558 mac_iocb_ptr->opcode = OPCODE_OB_MAC_IOCB;
2559 mac_iocb_ptr->tid = tx_ring_desc->index;
2560 /* We use the upper 32-bits to store the tx queue for this IO.
2561 * When we get the completion we can use it to establish the context.
2562 */
2563 mac_iocb_ptr->txq_idx = tx_ring_idx;
2564 tx_ring_desc->skb = skb;
2565
2566 mac_iocb_ptr->frame_len = cpu_to_le16((u16) skb->len);
2567
2568 if (qdev->vlgrp && vlan_tx_tag_present(skb)) {
Joe Perchesae9540f2010-02-09 11:49:52 +00002569 netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev,
2570 "Adding a vlan tag %d.\n", vlan_tx_tag_get(skb));
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002571 mac_iocb_ptr->flags3 |= OB_MAC_IOCB_V;
2572 mac_iocb_ptr->vlan_tci = cpu_to_le16(vlan_tx_tag_get(skb));
2573 }
2574 tso = ql_tso(skb, (struct ob_mac_tso_iocb_req *)mac_iocb_ptr);
2575 if (tso < 0) {
2576 dev_kfree_skb_any(skb);
2577 return NETDEV_TX_OK;
2578 } else if (unlikely(!tso) && (skb->ip_summed == CHECKSUM_PARTIAL)) {
2579 ql_hw_csum_setup(skb,
2580 (struct ob_mac_tso_iocb_req *)mac_iocb_ptr);
2581 }
Ron Mercer0d979f72009-02-12 16:38:03 -08002582 if (ql_map_send(qdev, mac_iocb_ptr, skb, tx_ring_desc) !=
2583 NETDEV_TX_OK) {
Joe Perchesae9540f2010-02-09 11:49:52 +00002584 netif_err(qdev, tx_queued, qdev->ndev,
2585 "Could not map the segments.\n");
Ron Mercer885ee392009-11-03 13:49:31 +00002586 tx_ring->tx_errors++;
Ron Mercer0d979f72009-02-12 16:38:03 -08002587 return NETDEV_TX_BUSY;
2588 }
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002589 QL_DUMP_OB_MAC_IOCB(mac_iocb_ptr);
2590 tx_ring->prod_idx++;
2591 if (tx_ring->prod_idx == tx_ring->wq_len)
2592 tx_ring->prod_idx = 0;
2593 wmb();
2594
2595 ql_write_db_reg(tx_ring->prod_idx, tx_ring->prod_idx_db_reg);
Joe Perchesae9540f2010-02-09 11:49:52 +00002596 netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev,
2597 "tx queued, slot %d, len %d\n",
2598 tx_ring->prod_idx, skb->len);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002599
2600 atomic_dec(&tx_ring->tx_count);
2601 return NETDEV_TX_OK;
2602}
2603
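/* Editor's note: the producer-index update in qlge_send() is the usual
 * ring wrap; since prod_idx only ever advances by one, the
 * compare-and-reset above is equivalent to the (slower) modulo form
 * sketched here:
 *
 *	tx_ring->prod_idx = (tx_ring->prod_idx + 1) % tx_ring->wq_len;
 *
 * The wmb() that follows it orders the descriptor writes before the
 * doorbell write that hands the slot to the hardware.
 */
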
Ron Mercer9dfbbaa2009-10-30 12:13:33 +00002604
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002605static void ql_free_shadow_space(struct ql_adapter *qdev)
2606{
2607 if (qdev->rx_ring_shadow_reg_area) {
2608 pci_free_consistent(qdev->pdev,
2609 PAGE_SIZE,
2610 qdev->rx_ring_shadow_reg_area,
2611 qdev->rx_ring_shadow_reg_dma);
2612 qdev->rx_ring_shadow_reg_area = NULL;
2613 }
2614 if (qdev->tx_ring_shadow_reg_area) {
2615 pci_free_consistent(qdev->pdev,
2616 PAGE_SIZE,
2617 qdev->tx_ring_shadow_reg_area,
2618 qdev->tx_ring_shadow_reg_dma);
2619 qdev->tx_ring_shadow_reg_area = NULL;
2620 }
2621}
2622
2623static int ql_alloc_shadow_space(struct ql_adapter *qdev)
2624{
2625 qdev->rx_ring_shadow_reg_area =
2626 pci_alloc_consistent(qdev->pdev,
2627 PAGE_SIZE, &qdev->rx_ring_shadow_reg_dma);
2628 if (qdev->rx_ring_shadow_reg_area == NULL) {
Joe Perchesae9540f2010-02-09 11:49:52 +00002629 netif_err(qdev, ifup, qdev->ndev,
2630 "Allocation of RX shadow space failed.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002631 return -ENOMEM;
2632 }
Ron Mercerb25215d2009-03-09 10:59:24 +00002633 memset(qdev->rx_ring_shadow_reg_area, 0, PAGE_SIZE);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002634 qdev->tx_ring_shadow_reg_area =
2635 pci_alloc_consistent(qdev->pdev, PAGE_SIZE,
2636 &qdev->tx_ring_shadow_reg_dma);
2637 if (qdev->tx_ring_shadow_reg_area == NULL) {
Joe Perchesae9540f2010-02-09 11:49:52 +00002638 netif_err(qdev, ifup, qdev->ndev,
2639 "Allocation of TX shadow space failed.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002640 goto err_wqp_sh_area;
2641 }
Ron Mercerb25215d2009-03-09 10:59:24 +00002642 memset(qdev->tx_ring_shadow_reg_area, 0, PAGE_SIZE);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002643 return 0;
2644
2645err_wqp_sh_area:
2646 pci_free_consistent(qdev->pdev,
2647 PAGE_SIZE,
2648 qdev->rx_ring_shadow_reg_area,
2649 qdev->rx_ring_shadow_reg_dma);
2650 return -ENOMEM;
2651}
2652
2653static void ql_init_tx_ring(struct ql_adapter *qdev, struct tx_ring *tx_ring)
2654{
2655 struct tx_ring_desc *tx_ring_desc;
2656 int i;
2657 struct ob_mac_iocb_req *mac_iocb_ptr;
2658
2659 mac_iocb_ptr = tx_ring->wq_base;
2660 tx_ring_desc = tx_ring->q;
2661 for (i = 0; i < tx_ring->wq_len; i++) {
2662 tx_ring_desc->index = i;
2663 tx_ring_desc->skb = NULL;
2664 tx_ring_desc->queue_entry = mac_iocb_ptr;
2665 mac_iocb_ptr++;
2666 tx_ring_desc++;
2667 }
2668 atomic_set(&tx_ring->tx_count, tx_ring->wq_len);
2669 atomic_set(&tx_ring->queue_stopped, 0);
2670}
2671
2672static void ql_free_tx_resources(struct ql_adapter *qdev,
2673 struct tx_ring *tx_ring)
2674{
2675 if (tx_ring->wq_base) {
2676 pci_free_consistent(qdev->pdev, tx_ring->wq_size,
2677 tx_ring->wq_base, tx_ring->wq_base_dma);
2678 tx_ring->wq_base = NULL;
2679 }
2680 kfree(tx_ring->q);
2681 tx_ring->q = NULL;
2682}
2683
2684static int ql_alloc_tx_resources(struct ql_adapter *qdev,
2685 struct tx_ring *tx_ring)
2686{
2687 tx_ring->wq_base =
2688 pci_alloc_consistent(qdev->pdev, tx_ring->wq_size,
2689 &tx_ring->wq_base_dma);
2690
Joe Perches8e95a202009-12-03 07:58:21 +00002691 if ((tx_ring->wq_base == NULL) ||
2692 tx_ring->wq_base_dma & WQ_ADDR_ALIGN) {
Joe Perchesae9540f2010-02-09 11:49:52 +00002693 netif_err(qdev, ifup, qdev->ndev, "tx_ring alloc failed.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002694 return -ENOMEM;
2695 }
2696 tx_ring->q =
2697 kmalloc(tx_ring->wq_len * sizeof(struct tx_ring_desc), GFP_KERNEL);
2698 if (tx_ring->q == NULL)
2699 goto err;
2700
2701 return 0;
2702err:
2703 pci_free_consistent(qdev->pdev, tx_ring->wq_size,
2704 tx_ring->wq_base, tx_ring->wq_base_dma);
2705 return -ENOMEM;
2706}
2707
Stephen Hemminger8668ae92008-11-21 17:29:50 -08002708static void ql_free_lbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring)
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002709{
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002710 struct bq_desc *lbq_desc;
2711
Ron Mercer7c734352009-10-19 03:32:19 +00002712 uint32_t curr_idx, clean_idx;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002713
Ron Mercer7c734352009-10-19 03:32:19 +00002714 curr_idx = rx_ring->lbq_curr_idx;
2715 clean_idx = rx_ring->lbq_clean_idx;
2716 while (curr_idx != clean_idx) {
2717 lbq_desc = &rx_ring->lbq[curr_idx];
2718
2719 if (lbq_desc->p.pg_chunk.last_flag) {
2720 pci_unmap_page(qdev->pdev,
2721 lbq_desc->p.pg_chunk.map,
2722 ql_lbq_block_size(qdev),
2723 PCI_DMA_FROMDEVICE);
2724 lbq_desc->p.pg_chunk.last_flag = 0;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002725 }
Ron Mercer7c734352009-10-19 03:32:19 +00002726
2727 put_page(lbq_desc->p.pg_chunk.page);
2728 lbq_desc->p.pg_chunk.page = NULL;
2729
2730 if (++curr_idx == rx_ring->lbq_len)
2731 curr_idx = 0;
2732
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002733 }
2734}
2735
Stephen Hemminger8668ae92008-11-21 17:29:50 -08002736static void ql_free_sbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring)
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002737{
2738 int i;
2739 struct bq_desc *sbq_desc;
2740
2741 for (i = 0; i < rx_ring->sbq_len; i++) {
2742 sbq_desc = &rx_ring->sbq[i];
2743 if (sbq_desc == NULL) {
Joe Perchesae9540f2010-02-09 11:49:52 +00002744 netif_err(qdev, ifup, qdev->ndev,
2745 "sbq_desc %d is NULL.\n", i);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002746 return;
2747 }
2748 if (sbq_desc->p.skb) {
2749 pci_unmap_single(qdev->pdev,
2750 pci_unmap_addr(sbq_desc, mapaddr),
2751 pci_unmap_len(sbq_desc, maplen),
2752 PCI_DMA_FROMDEVICE);
2753 dev_kfree_skb(sbq_desc->p.skb);
2754 sbq_desc->p.skb = NULL;
2755 }
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002756 }
2757}
2758
Ron Mercer4545a3f2009-02-23 10:42:17 +00002759/* Free all large and small rx buffers associated
2760 * with the completion queues for this device.
2761 */
2762static void ql_free_rx_buffers(struct ql_adapter *qdev)
2763{
2764 int i;
2765 struct rx_ring *rx_ring;
2766
2767 for (i = 0; i < qdev->rx_ring_count; i++) {
2768 rx_ring = &qdev->rx_ring[i];
2769 if (rx_ring->lbq)
2770 ql_free_lbq_buffers(qdev, rx_ring);
2771 if (rx_ring->sbq)
2772 ql_free_sbq_buffers(qdev, rx_ring);
2773 }
2774}
2775
2776static void ql_alloc_rx_buffers(struct ql_adapter *qdev)
2777{
2778 struct rx_ring *rx_ring;
2779 int i;
2780
2781 for (i = 0; i < qdev->rx_ring_count; i++) {
2782 rx_ring = &qdev->rx_ring[i];
2783 if (rx_ring->type != TX_Q)
2784 ql_update_buffer_queues(qdev, rx_ring);
2785 }
2786}
2787
2788static void ql_init_lbq_ring(struct ql_adapter *qdev,
2789 struct rx_ring *rx_ring)
2790{
2791 int i;
2792 struct bq_desc *lbq_desc;
2793 __le64 *bq = rx_ring->lbq_base;
2794
2795 memset(rx_ring->lbq, 0, rx_ring->lbq_len * sizeof(struct bq_desc));
2796 for (i = 0; i < rx_ring->lbq_len; i++) {
2797 lbq_desc = &rx_ring->lbq[i];
2798 memset(lbq_desc, 0, sizeof(*lbq_desc));
2799 lbq_desc->index = i;
2800 lbq_desc->addr = bq;
2801 bq++;
2802 }
2803}
2804
2805static void ql_init_sbq_ring(struct ql_adapter *qdev,
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002806 struct rx_ring *rx_ring)
2807{
2808 int i;
2809 struct bq_desc *sbq_desc;
Ron Mercer2c9a0d42009-01-05 18:19:20 -08002810 __le64 *bq = rx_ring->sbq_base;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002811
Ron Mercer4545a3f2009-02-23 10:42:17 +00002812 memset(rx_ring->sbq, 0, rx_ring->sbq_len * sizeof(struct bq_desc));
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002813 for (i = 0; i < rx_ring->sbq_len; i++) {
2814 sbq_desc = &rx_ring->sbq[i];
Ron Mercer4545a3f2009-02-23 10:42:17 +00002815 memset(sbq_desc, 0, sizeof(*sbq_desc));
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002816 sbq_desc->index = i;
Ron Mercer2c9a0d42009-01-05 18:19:20 -08002817 sbq_desc->addr = bq;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002818 bq++;
2819 }
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002820}
2821
2822static void ql_free_rx_resources(struct ql_adapter *qdev,
2823 struct rx_ring *rx_ring)
2824{
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002825 /* Free the small buffer queue. */
2826 if (rx_ring->sbq_base) {
2827 pci_free_consistent(qdev->pdev,
2828 rx_ring->sbq_size,
2829 rx_ring->sbq_base, rx_ring->sbq_base_dma);
2830 rx_ring->sbq_base = NULL;
2831 }
2832
2833 /* Free the small buffer queue control blocks. */
2834 kfree(rx_ring->sbq);
2835 rx_ring->sbq = NULL;
2836
2837 /* Free the large buffer queue. */
2838 if (rx_ring->lbq_base) {
2839 pci_free_consistent(qdev->pdev,
2840 rx_ring->lbq_size,
2841 rx_ring->lbq_base, rx_ring->lbq_base_dma);
2842 rx_ring->lbq_base = NULL;
2843 }
2844
2845 /* Free the large buffer queue control blocks. */
2846 kfree(rx_ring->lbq);
2847 rx_ring->lbq = NULL;
2848
2849 /* Free the rx queue. */
2850 if (rx_ring->cq_base) {
2851 pci_free_consistent(qdev->pdev,
2852 rx_ring->cq_size,
2853 rx_ring->cq_base, rx_ring->cq_base_dma);
2854 rx_ring->cq_base = NULL;
2855 }
2856}
2857
2858/* Allocate queues and buffers for this completion queue based
2859 * on the values in the parameter structure. */
2860static int ql_alloc_rx_resources(struct ql_adapter *qdev,
2861 struct rx_ring *rx_ring)
2862{
2863
2864 /*
2865 * Allocate the completion queue for this rx_ring.
2866 */
2867 rx_ring->cq_base =
2868 pci_alloc_consistent(qdev->pdev, rx_ring->cq_size,
2869 &rx_ring->cq_base_dma);
2870
2871 if (rx_ring->cq_base == NULL) {
Joe Perchesae9540f2010-02-09 11:49:52 +00002872 netif_err(qdev, ifup, qdev->ndev, "rx_ring alloc failed.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002873 return -ENOMEM;
2874 }
2875
2876 if (rx_ring->sbq_len) {
2877 /*
2878 * Allocate small buffer queue.
2879 */
2880 rx_ring->sbq_base =
2881 pci_alloc_consistent(qdev->pdev, rx_ring->sbq_size,
2882 &rx_ring->sbq_base_dma);
2883
2884 if (rx_ring->sbq_base == NULL) {
Joe Perchesae9540f2010-02-09 11:49:52 +00002885 netif_err(qdev, ifup, qdev->ndev,
2886 "Small buffer queue allocation failed.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002887 goto err_mem;
2888 }
2889
2890 /*
2891 * Allocate small buffer queue control blocks.
2892 */
2893 rx_ring->sbq =
2894 kmalloc(rx_ring->sbq_len * sizeof(struct bq_desc),
2895 GFP_KERNEL);
2896 if (rx_ring->sbq == NULL) {
Joe Perchesae9540f2010-02-09 11:49:52 +00002897 netif_err(qdev, ifup, qdev->ndev,
2898 "Small buffer queue control block allocation failed.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002899 goto err_mem;
2900 }
2901
Ron Mercer4545a3f2009-02-23 10:42:17 +00002902 ql_init_sbq_ring(qdev, rx_ring);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002903 }
2904
2905 if (rx_ring->lbq_len) {
2906 /*
2907 * Allocate large buffer queue.
2908 */
2909 rx_ring->lbq_base =
2910 pci_alloc_consistent(qdev->pdev, rx_ring->lbq_size,
2911 &rx_ring->lbq_base_dma);
2912
2913 if (rx_ring->lbq_base == NULL) {
Joe Perchesae9540f2010-02-09 11:49:52 +00002914 netif_err(qdev, ifup, qdev->ndev,
2915 "Large buffer queue allocation failed.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002916 goto err_mem;
2917 }
2918 /*
2919 * Allocate large buffer queue control blocks.
2920 */
2921 rx_ring->lbq =
2922 kmalloc(rx_ring->lbq_len * sizeof(struct bq_desc),
2923 GFP_KERNEL);
2924 if (rx_ring->lbq == NULL) {
Joe Perchesae9540f2010-02-09 11:49:52 +00002925 netif_err(qdev, ifup, qdev->ndev,
2926 "Large buffer queue control block allocation failed.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002927 goto err_mem;
2928 }
2929
Ron Mercer4545a3f2009-02-23 10:42:17 +00002930 ql_init_lbq_ring(qdev, rx_ring);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002931 }
2932
2933 return 0;
2934
2935err_mem:
2936 ql_free_rx_resources(qdev, rx_ring);
2937 return -ENOMEM;
2938}
2939
2940static void ql_tx_ring_clean(struct ql_adapter *qdev)
2941{
2942 struct tx_ring *tx_ring;
2943 struct tx_ring_desc *tx_ring_desc;
2944 int i, j;
2945
2946 /*
2947 * Loop through all queues and free
2948 * any resources.
2949 */
2950 for (j = 0; j < qdev->tx_ring_count; j++) {
2951 tx_ring = &qdev->tx_ring[j];
2952 for (i = 0; i < tx_ring->wq_len; i++) {
2953 tx_ring_desc = &tx_ring->q[i];
2954 if (tx_ring_desc && tx_ring_desc->skb) {
Joe Perchesae9540f2010-02-09 11:49:52 +00002955 netif_err(qdev, ifdown, qdev->ndev,
2956 "Freeing lost SKB %p, from queue %d, index %d.\n",
2957 tx_ring_desc->skb, j,
2958 tx_ring_desc->index);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002959 ql_unmap_send(qdev, tx_ring_desc,
2960 tx_ring_desc->map_cnt);
2961 dev_kfree_skb(tx_ring_desc->skb);
2962 tx_ring_desc->skb = NULL;
2963 }
2964 }
2965 }
2966}
2967
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002968static void ql_free_mem_resources(struct ql_adapter *qdev)
2969{
2970 int i;
2971
2972 for (i = 0; i < qdev->tx_ring_count; i++)
2973 ql_free_tx_resources(qdev, &qdev->tx_ring[i]);
2974 for (i = 0; i < qdev->rx_ring_count; i++)
2975 ql_free_rx_resources(qdev, &qdev->rx_ring[i]);
2976 ql_free_shadow_space(qdev);
2977}
2978
2979static int ql_alloc_mem_resources(struct ql_adapter *qdev)
2980{
2981 int i;
2982
2983 /* Allocate space for our shadow registers and such. */
2984 if (ql_alloc_shadow_space(qdev))
2985 return -ENOMEM;
2986
2987 for (i = 0; i < qdev->rx_ring_count; i++) {
2988 if (ql_alloc_rx_resources(qdev, &qdev->rx_ring[i]) != 0) {
Joe Perchesae9540f2010-02-09 11:49:52 +00002989 netif_err(qdev, ifup, qdev->ndev,
2990 "RX resource allocation failed.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002991 goto err_mem;
2992 }
2993 }
2994 /* Allocate tx queue resources */
2995 for (i = 0; i < qdev->tx_ring_count; i++) {
2996 if (ql_alloc_tx_resources(qdev, &qdev->tx_ring[i]) != 0) {
Joe Perchesae9540f2010-02-09 11:49:52 +00002997 netif_err(qdev, ifup, qdev->ndev,
2998 "TX resource allocation failed.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04002999 goto err_mem;
3000 }
3001 }
3002 return 0;
3003
3004err_mem:
3005 ql_free_mem_resources(qdev);
3006 return -ENOMEM;
3007}
3008
3009/* Set up the rx ring control block and pass it to the chip.
3010 * The control block is defined as
3011 * "Completion Queue Initialization Control Block", or cqicb.
3012 */
3013static int ql_start_rx_ring(struct ql_adapter *qdev, struct rx_ring *rx_ring)
3014{
3015 struct cqicb *cqicb = &rx_ring->cqicb;
3016 void *shadow_reg = qdev->rx_ring_shadow_reg_area +
Ron Mercerb8facca2009-06-10 15:49:34 +00003017 (rx_ring->cq_id * RX_RING_SHADOW_SPACE);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003018 u64 shadow_reg_dma = qdev->rx_ring_shadow_reg_dma +
Ron Mercerb8facca2009-06-10 15:49:34 +00003019 (rx_ring->cq_id * RX_RING_SHADOW_SPACE);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003020 void __iomem *doorbell_area =
3021 qdev->doorbell_area + (DB_PAGE_SIZE * (128 + rx_ring->cq_id));
3022 int err = 0;
3023 u16 bq_len;
Ron Mercerd4a4aba2009-03-09 10:59:28 +00003024 u64 tmp;
Ron Mercerb8facca2009-06-10 15:49:34 +00003025 __le64 *base_indirect_ptr;
3026 int page_entries;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003027
3028 /* Set up the shadow registers for this ring. */
3029 rx_ring->prod_idx_sh_reg = shadow_reg;
3030 rx_ring->prod_idx_sh_reg_dma = shadow_reg_dma;
Ron Mercer7c734352009-10-19 03:32:19 +00003031 *rx_ring->prod_idx_sh_reg = 0;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003032 shadow_reg += sizeof(u64);
3033 shadow_reg_dma += sizeof(u64);
3034 rx_ring->lbq_base_indirect = shadow_reg;
3035 rx_ring->lbq_base_indirect_dma = shadow_reg_dma;
Ron Mercerb8facca2009-06-10 15:49:34 +00003036 shadow_reg += (sizeof(u64) * MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len));
3037 shadow_reg_dma += (sizeof(u64) * MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len));
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003038 rx_ring->sbq_base_indirect = shadow_reg;
3039 rx_ring->sbq_base_indirect_dma = shadow_reg_dma;
3040
3041 /* PCI doorbell mem area + 0x00 for consumer index register */
Stephen Hemminger8668ae92008-11-21 17:29:50 -08003042 rx_ring->cnsmr_idx_db_reg = (u32 __iomem *) doorbell_area;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003043 rx_ring->cnsmr_idx = 0;
3044 rx_ring->curr_entry = rx_ring->cq_base;
3045
3046 /* PCI doorbell mem area + 0x04 for valid register */
3047 rx_ring->valid_db_reg = doorbell_area + 0x04;
3048
3049 /* PCI doorbell mem area + 0x18 for large buffer consumer */
Stephen Hemminger8668ae92008-11-21 17:29:50 -08003050 rx_ring->lbq_prod_idx_db_reg = (u32 __iomem *) (doorbell_area + 0x18);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003051
3052 /* PCI doorbell mem area + 0x1c */
Stephen Hemminger8668ae92008-11-21 17:29:50 -08003053 rx_ring->sbq_prod_idx_db_reg = (u32 __iomem *) (doorbell_area + 0x1c);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003054
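	/* Layout note (a sketch inferred from the offsets above, not an
	 * authoritative map): each queue owns one DB_PAGE_SIZE page of
	 * doorbell space. TX work queues map page wq_id directly (see
	 * ql_start_tx_ring() below), while completion queues start at
	 * page 128, hence the 128 + cq_id page index used when mapping
	 * doorbell_area.
	 */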
3055 memset((void *)cqicb, 0, sizeof(struct cqicb));
3056 cqicb->msix_vect = rx_ring->irq;
3057
Ron Mercer459caf52009-01-04 17:08:11 -08003058 bq_len = (rx_ring->cq_len == 65536) ? 0 : (u16) rx_ring->cq_len;
3059 cqicb->len = cpu_to_le16(bq_len | LEN_V | LEN_CPP_CONT);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003060
Ron Mercer97345522009-01-09 11:31:50 +00003061 cqicb->addr = cpu_to_le64(rx_ring->cq_base_dma);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003062
Ron Mercer97345522009-01-09 11:31:50 +00003063 cqicb->prod_idx_addr = cpu_to_le64(rx_ring->prod_idx_sh_reg_dma);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003064
3065 /*
3066 * Set up the control block load flags.
3067 */
3068 cqicb->flags = FLAGS_LC | /* Load queue base address */
3069 FLAGS_LV | /* Load MSI-X vector */
3070 FLAGS_LI; /* Load irq delay values */
3071 if (rx_ring->lbq_len) {
3072 cqicb->flags |= FLAGS_LL; /* Load lbq values */
Joe Perchesa419aef2009-08-18 11:18:35 -07003073 tmp = (u64)rx_ring->lbq_base_dma;
Ron Mercerb8facca2009-06-10 15:49:34 +00003074 base_indirect_ptr = (__le64 *) rx_ring->lbq_base_indirect;
3075 page_entries = 0;
3076 do {
3077 *base_indirect_ptr = cpu_to_le64(tmp);
3078 tmp += DB_PAGE_SIZE;
3079 base_indirect_ptr++;
3080 page_entries++;
3081 } while (page_entries < MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len));
Ron Mercer97345522009-01-09 11:31:50 +00003082 cqicb->lbq_addr =
3083 cpu_to_le64(rx_ring->lbq_base_indirect_dma);
Ron Mercer459caf52009-01-04 17:08:11 -08003084 bq_len = (rx_ring->lbq_buf_size == 65536) ? 0 :
3085 (u16) rx_ring->lbq_buf_size;
3086 cqicb->lbq_buf_size = cpu_to_le16(bq_len);
3087 bq_len = (rx_ring->lbq_len == 65536) ? 0 :
3088 (u16) rx_ring->lbq_len;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003089 cqicb->lbq_len = cpu_to_le16(bq_len);
Ron Mercer4545a3f2009-02-23 10:42:17 +00003090 rx_ring->lbq_prod_idx = 0;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003091 rx_ring->lbq_curr_idx = 0;
Ron Mercer4545a3f2009-02-23 10:42:17 +00003092 rx_ring->lbq_clean_idx = 0;
3093 rx_ring->lbq_free_cnt = rx_ring->lbq_len;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003094 }
3095 if (rx_ring->sbq_len) {
3096 cqicb->flags |= FLAGS_LS; /* Load sbq values */
Joe Perchesa419aef2009-08-18 11:18:35 -07003097 tmp = (u64)rx_ring->sbq_base_dma;
Ron Mercerb8facca2009-06-10 15:49:34 +00003098 base_indirect_ptr = (__le64 *) rx_ring->sbq_base_indirect;
3099 page_entries = 0;
3100 do {
3101 *base_indirect_ptr = cpu_to_le64(tmp);
3102 tmp += DB_PAGE_SIZE;
3103 base_indirect_ptr++;
3104 page_entries++;
3105 } while (page_entries < MAX_DB_PAGES_PER_BQ(rx_ring->sbq_len));
Ron Mercer97345522009-01-09 11:31:50 +00003106 cqicb->sbq_addr =
3107 cpu_to_le64(rx_ring->sbq_base_indirect_dma);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003108 cqicb->sbq_buf_size =
Ron Mercer52e55f32009-10-10 09:35:07 +00003109 cpu_to_le16((u16)(rx_ring->sbq_buf_size));
Ron Mercer459caf52009-01-04 17:08:11 -08003110 bq_len = (rx_ring->sbq_len == 65536) ? 0 :
3111 (u16) rx_ring->sbq_len;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003112 cqicb->sbq_len = cpu_to_le16(bq_len);
Ron Mercer4545a3f2009-02-23 10:42:17 +00003113 rx_ring->sbq_prod_idx = 0;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003114 rx_ring->sbq_curr_idx = 0;
Ron Mercer4545a3f2009-02-23 10:42:17 +00003115 rx_ring->sbq_clean_idx = 0;
3116 rx_ring->sbq_free_cnt = rx_ring->sbq_len;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003117 }
3118 switch (rx_ring->type) {
3119 case TX_Q:
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003120 cqicb->irq_delay = cpu_to_le16(qdev->tx_coalesce_usecs);
3121 cqicb->pkt_delay = cpu_to_le16(qdev->tx_max_coalesced_frames);
3122 break;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003123 case RX_Q:
3124 /* Inbound completion handling rx_rings run in
3125 * separate NAPI contexts.
3126 */
3127 netif_napi_add(qdev->ndev, &rx_ring->napi, ql_napi_poll_msix,
3128 64);
3129 cqicb->irq_delay = cpu_to_le16(qdev->rx_coalesce_usecs);
3130 cqicb->pkt_delay = cpu_to_le16(qdev->rx_max_coalesced_frames);
3131 break;
3132 default:
Joe Perchesae9540f2010-02-09 11:49:52 +00003133 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3134 "Invalid rx_ring->type = %d.\n", rx_ring->type);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003135 }
Joe Perchesae9540f2010-02-09 11:49:52 +00003136 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3137 "Initializing rx work queue.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003138 err = ql_write_cfg(qdev, cqicb, sizeof(struct cqicb),
3139 CFG_LCQ, rx_ring->cq_id);
3140 if (err) {
Joe Perchesae9540f2010-02-09 11:49:52 +00003141 netif_err(qdev, ifup, qdev->ndev, "Failed to load CQICB.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003142 return err;
3143 }
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003144 return err;
3145}
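/*
 * Illustrative sketch of the per-CQ shadow area laid out above (a
 * reading aid, not driver code; it assumes DB_PAGE_SIZE is 4096 and
 * 64-bit buffer queue entries, matching the sizing in
 * ql_configure_rings()):
 *
 *	offset 0:	producer index shadow (one u64)
 *	offset 8:	lbq indirection list, one DB-page address per
 *			MAX_DB_PAGES_PER_BQ(lbq_len) entry
 *	after that:	sbq indirection list, same scheme
 *
 * Example: a 1024-entry large buffer queue needs 1024 * 8 = 8192 bytes,
 * i.e. two 4K DB pages, so the do/while loop above stores two page
 * addresses (lbq_base_dma and lbq_base_dma + 4096) for the chip to walk.
 */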
3146
3147static int ql_start_tx_ring(struct ql_adapter *qdev, struct tx_ring *tx_ring)
3148{
3149 struct wqicb *wqicb = (struct wqicb *)tx_ring;
3150 void __iomem *doorbell_area =
3151 qdev->doorbell_area + (DB_PAGE_SIZE * tx_ring->wq_id);
3152 void *shadow_reg = qdev->tx_ring_shadow_reg_area +
3153 (tx_ring->wq_id * sizeof(u64));
3154 u64 shadow_reg_dma = qdev->tx_ring_shadow_reg_dma +
3155 (tx_ring->wq_id * sizeof(u64));
3156 int err = 0;
3157
3158 /*
3159 * Assign doorbell registers for this tx_ring.
3160 */
3161 /* TX PCI doorbell mem area for tx producer index */
Stephen Hemminger8668ae92008-11-21 17:29:50 -08003162 tx_ring->prod_idx_db_reg = (u32 __iomem *) doorbell_area;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003163 tx_ring->prod_idx = 0;
3164 /* TX PCI doorbell mem area + 0x04 */
3165 tx_ring->valid_db_reg = doorbell_area + 0x04;
3166
3167 /*
3168 * Assign shadow registers for this tx_ring.
3169 */
3170 tx_ring->cnsmr_idx_sh_reg = shadow_reg;
3171 tx_ring->cnsmr_idx_sh_reg_dma = shadow_reg_dma;
3172
3173 wqicb->len = cpu_to_le16(tx_ring->wq_len | Q_LEN_V | Q_LEN_CPP_CONT);
3174 wqicb->flags = cpu_to_le16(Q_FLAGS_LC |
3175 Q_FLAGS_LB | Q_FLAGS_LI | Q_FLAGS_LO);
3176 wqicb->cq_id_rss = cpu_to_le16(tx_ring->cq_id);
3177 wqicb->rid = 0;
Ron Mercer97345522009-01-09 11:31:50 +00003178 wqicb->addr = cpu_to_le64(tx_ring->wq_base_dma);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003179
Ron Mercer97345522009-01-09 11:31:50 +00003180 wqicb->cnsmr_idx_addr = cpu_to_le64(tx_ring->cnsmr_idx_sh_reg_dma);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003181
3182 ql_init_tx_ring(qdev, tx_ring);
3183
Ron Mercere3324712009-07-02 06:06:13 +00003184 err = ql_write_cfg(qdev, wqicb, sizeof(*wqicb), CFG_LRQ,
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003185 (u16) tx_ring->wq_id);
3186 if (err) {
Joe Perchesae9540f2010-02-09 11:49:52 +00003187 netif_err(qdev, ifup, qdev->ndev, "Failed to load tx_ring.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003188 return err;
3189 }
Joe Perchesae9540f2010-02-09 11:49:52 +00003190 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3191 "Successfully loaded WQICB.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003192 return err;
3193}
3194
3195static void ql_disable_msix(struct ql_adapter *qdev)
3196{
3197 if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
3198 pci_disable_msix(qdev->pdev);
3199 clear_bit(QL_MSIX_ENABLED, &qdev->flags);
3200 kfree(qdev->msi_x_entry);
3201 qdev->msi_x_entry = NULL;
3202 } else if (test_bit(QL_MSI_ENABLED, &qdev->flags)) {
3203 pci_disable_msi(qdev->pdev);
3204 clear_bit(QL_MSI_ENABLED, &qdev->flags);
3205 }
3206}
3207
Ron Mercera4ab6132009-08-27 11:02:10 +00003208/* We start by trying to get the number of vectors
3209 * stored in qdev->intr_count. If we don't get that
3210 * many then we reduce the count and try again.
3211 */
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003212static void ql_enable_msix(struct ql_adapter *qdev)
3213{
Ron Mercera4ab6132009-08-27 11:02:10 +00003214 int i, err;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003215
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003216 /* Get the MSIX vectors. */
Ron Mercera5a62a12009-11-11 12:54:05 +00003217 if (qlge_irq_type == MSIX_IRQ) {
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003218 /* Try to alloc space for the msix struct,
3219 * if it fails then go to MSI/legacy.
3220 */
Ron Mercera4ab6132009-08-27 11:02:10 +00003221 qdev->msi_x_entry = kcalloc(qdev->intr_count,
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003222 sizeof(struct msix_entry),
3223 GFP_KERNEL);
3224 if (!qdev->msi_x_entry) {
Ron Mercera5a62a12009-11-11 12:54:05 +00003225 qlge_irq_type = MSI_IRQ;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003226 goto msi;
3227 }
3228
Ron Mercera4ab6132009-08-27 11:02:10 +00003229 for (i = 0; i < qdev->intr_count; i++)
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003230 qdev->msi_x_entry[i].entry = i;
3231
Ron Mercera4ab6132009-08-27 11:02:10 +00003232 /* Loop to get our vectors. We start with
3233 * what we want and settle for what we get.
3234 */
3235 do {
3236 err = pci_enable_msix(qdev->pdev,
3237 qdev->msi_x_entry, qdev->intr_count);
3238 if (err > 0)
3239 qdev->intr_count = err;
3240 } while (err > 0);
3241
3242 if (err < 0) {
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003243 kfree(qdev->msi_x_entry);
3244 qdev->msi_x_entry = NULL;
Joe Perchesae9540f2010-02-09 11:49:52 +00003245 netif_warn(qdev, ifup, qdev->ndev,
3246 "MSI-X Enable failed, trying MSI.\n");
Ron Mercera4ab6132009-08-27 11:02:10 +00003247 qdev->intr_count = 1;
Ron Mercera5a62a12009-11-11 12:54:05 +00003248 qlge_irq_type = MSI_IRQ;
Ron Mercera4ab6132009-08-27 11:02:10 +00003249 } else if (err == 0) {
3250 set_bit(QL_MSIX_ENABLED, &qdev->flags);
Joe Perchesae9540f2010-02-09 11:49:52 +00003251 netif_info(qdev, ifup, qdev->ndev,
3252 "MSI-X Enabled, got %d vectors.\n",
3253 qdev->intr_count);
Ron Mercera4ab6132009-08-27 11:02:10 +00003254 return;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003255 }
3256 }
3257msi:
Ron Mercera4ab6132009-08-27 11:02:10 +00003258 qdev->intr_count = 1;
Ron Mercera5a62a12009-11-11 12:54:05 +00003259 if (qlge_irq_type == MSI_IRQ) {
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003260 if (!pci_enable_msi(qdev->pdev)) {
3261 set_bit(QL_MSI_ENABLED, &qdev->flags);
Joe Perchesae9540f2010-02-09 11:49:52 +00003262 netif_info(qdev, ifup, qdev->ndev,
3263 "Running with MSI interrupts.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003264 return;
3265 }
3266 }
Ron Mercera5a62a12009-11-11 12:54:05 +00003267 qlge_irq_type = LEG_IRQ;
Joe Perchesae9540f2010-02-09 11:49:52 +00003268 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3269 "Running with legacy interrupts.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003270}
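/*
 * Note on the retry loop in ql_enable_msix() above (behavior of the
 * legacy pci_enable_msix() API): a positive return value means "only
 * this many vectors are available", so the loop shrinks intr_count and
 * retries until the call either succeeds (returns 0) or fails outright
 * (returns < 0). Asking for 8 vectors when only 4 are free returns 4,
 * and the next pass then requests 4.
 */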
3271
Ron Mercer39aa8162009-08-27 11:02:11 +00003272/* Each vector services 1 RSS ring and 1 or more
3273 * TX completion rings. This function loops through
3274 * the TX completion rings and assigns the vector that
3275 * will service it. An example would be if there are
3276 * 2 vectors (so 2 RSS rings) and 8 TX completion rings.
3277 * This would mean that vector 0 would service RSS ring 0
3278 * and TX completion rings 0,1,2 and 3. Vector 1 would
3279 * service RSS ring 1 and TX completion rings 4,5,6 and 7.
3280 */
3281static void ql_set_tx_vect(struct ql_adapter *qdev)
3282{
3283 int i, j, vect;
3284 u32 tx_rings_per_vector = qdev->tx_ring_count / qdev->intr_count;
3285
3286 if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
3287 /* Assign irq vectors to TX rx_rings.*/
3288 for (vect = 0, j = 0, i = qdev->rss_ring_count;
3289 i < qdev->rx_ring_count; i++) {
3290 if (j == tx_rings_per_vector) {
3291 vect++;
3292 j = 0;
3293 }
3294 qdev->rx_ring[i].irq = vect;
3295 j++;
3296 }
3297 } else {
3298 /* For single vector all rings have an irq
3299 * of zero.
3300 */
3301 for (i = 0; i < qdev->rx_ring_count; i++)
3302 qdev->rx_ring[i].irq = 0;
3303 }
3304}
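/*
 * Worked example for ql_set_tx_vect() (illustrative numbers only): with
 * intr_count = 2, rss_ring_count = 2 and tx_ring_count = 8, the TX
 * completion rings sit at rx_ring[2..9] and tx_rings_per_vector is
 * 8 / 2 = 4, so the loop above assigns
 *
 *	rx_ring[2..5].irq = 0;	(vector 0, alongside RSS ring 0)
 *	rx_ring[6..9].irq = 1;	(vector 1, alongside RSS ring 1)
 */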
3305
3306/* Set the interrupt mask for this vector. Each vector
3307 * will service 1 RSS ring and 1 or more TX completion
3308 * rings. This function sets up a bit mask per vector
3309 * that indicates which rings it services.
3310 */
3311static void ql_set_irq_mask(struct ql_adapter *qdev, struct intr_context *ctx)
3312{
3313 int j, vect = ctx->intr;
3314 u32 tx_rings_per_vector = qdev->tx_ring_count / qdev->intr_count;
3315
3316 if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
3317 /* Add the RSS ring serviced by this vector
3318 * to the mask.
3319 */
3320 ctx->irq_mask = (1 << qdev->rx_ring[vect].cq_id);
3321 /* Add the TX ring(s) serviced by this vector
3322 * to the mask. */
3323 for (j = 0; j < tx_rings_per_vector; j++) {
3324 ctx->irq_mask |=
3325 (1 << qdev->rx_ring[qdev->rss_ring_count +
3326 (vect * tx_rings_per_vector) + j].cq_id);
3327 }
3328 } else {
3329		/* For a single vector we just shift each queue's
3330 * ID into the mask.
3331 */
3332 for (j = 0; j < qdev->rx_ring_count; j++)
3333 ctx->irq_mask |= (1 << qdev->rx_ring[j].cq_id);
3334 }
3335}
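/*
 * Continuing the example (a sketch; cq_id equals the ring index the way
 * ql_configure_rings() sets the rings up): for vector 1 of 2, with
 * 2 RSS rings and 8 TX completion rings, the code above computes
 *
 *	ctx->irq_mask  = 1 << 1;		bit 1, RSS ring 1
 *	ctx->irq_mask |= 1 << 6 through 1 << 9;	TX completion rings 6..9
 *
 * yielding 0x3c2, while vector 0 ends up with 0x03d (bits 0 and 2..5).
 */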
3336
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003337/*
3338 * Here we build the intr_context structures based on
3339 * our rx_ring count and intr vector count.
3340 * The intr_context structure is used to hook each vector
3341 * to possibly different handlers.
3342 */
3343static void ql_resolve_queues_to_irqs(struct ql_adapter *qdev)
3344{
3345 int i = 0;
3346 struct intr_context *intr_context = &qdev->intr_context[0];
3347
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003348 if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
3349		/* Each rx_ring has its
3350 * own intr_context since we have separate
3351 * vectors for each queue.
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003352 */
3353 for (i = 0; i < qdev->intr_count; i++, intr_context++) {
3354 qdev->rx_ring[i].irq = i;
3355 intr_context->intr = i;
3356 intr_context->qdev = qdev;
Ron Mercer39aa8162009-08-27 11:02:11 +00003357 /* Set up this vector's bit-mask that indicates
3358 * which queues it services.
3359 */
3360 ql_set_irq_mask(qdev, intr_context);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003361 /*
3362			 * We set up each vector's enable/disable/read bits so
3363			 * there are no bit/mask calculations in the critical path.
3364 */
3365 intr_context->intr_en_mask =
3366 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
3367 INTR_EN_TYPE_ENABLE | INTR_EN_IHD_MASK | INTR_EN_IHD
3368 | i;
3369 intr_context->intr_dis_mask =
3370 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
3371 INTR_EN_TYPE_DISABLE | INTR_EN_IHD_MASK |
3372 INTR_EN_IHD | i;
3373 intr_context->intr_read_mask =
3374 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
3375 INTR_EN_TYPE_READ | INTR_EN_IHD_MASK | INTR_EN_IHD |
3376 i;
Ron Mercer39aa8162009-08-27 11:02:11 +00003377 if (i == 0) {
3378 /* The first vector/queue handles
3379 * broadcast/multicast, fatal errors,
3380				 * and firmware events. This is in addition
3381 * to normal inbound NAPI processing.
3382 */
3383 intr_context->handler = qlge_isr;
3384 sprintf(intr_context->name, "%s-rx-%d",
3385 qdev->ndev->name, i);
3386 } else {
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003387 /*
3388 * Inbound queues handle unicast frames only.
3389 */
3390 intr_context->handler = qlge_msix_rx_isr;
Jesper Dangaard Brouerc2249692009-01-09 03:14:47 +00003391 sprintf(intr_context->name, "%s-rx-%d",
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003392 qdev->ndev->name, i);
3393 }
3394 }
3395 } else {
3396 /*
3397 * All rx_rings use the same intr_context since
3398 * there is only one vector.
3399 */
3400 intr_context->intr = 0;
3401 intr_context->qdev = qdev;
3402 /*
3403		 * We set up each vector's enable/disable/read bits so
3404		 * there are no bit/mask calculations in the critical path.
3405 */
3406 intr_context->intr_en_mask =
3407 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK | INTR_EN_TYPE_ENABLE;
3408 intr_context->intr_dis_mask =
3409 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
3410 INTR_EN_TYPE_DISABLE;
3411 intr_context->intr_read_mask =
3412 INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK | INTR_EN_TYPE_READ;
3413 /*
3414 * Single interrupt means one handler for all rings.
3415 */
3416 intr_context->handler = qlge_isr;
3417 sprintf(intr_context->name, "%s-single_irq", qdev->ndev->name);
Ron Mercer39aa8162009-08-27 11:02:11 +00003418 /* Set up this vector's bit-mask that indicates
3419 * which queues it services. In this case there is
3420 * a single vector so it will service all RSS and
3421 * TX completion rings.
3422 */
3423 ql_set_irq_mask(qdev, intr_context);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003424 }
Ron Mercer39aa8162009-08-27 11:02:11 +00003425	/* Tell the TX completion rings which MSI-X vector
3426 * they will be using.
3427 */
3428 ql_set_tx_vect(qdev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003429}
3430
3431static void ql_free_irq(struct ql_adapter *qdev)
3432{
3433 int i;
3434 struct intr_context *intr_context = &qdev->intr_context[0];
3435
3436 for (i = 0; i < qdev->intr_count; i++, intr_context++) {
3437 if (intr_context->hooked) {
3438 if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
3439 free_irq(qdev->msi_x_entry[i].vector,
3440 &qdev->rx_ring[i]);
Joe Perchesae9540f2010-02-09 11:49:52 +00003441 netif_printk(qdev, ifdown, KERN_DEBUG, qdev->ndev,
3442 "freeing msix interrupt %d.\n", i);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003443 } else {
3444 free_irq(qdev->pdev->irq, &qdev->rx_ring[0]);
Joe Perchesae9540f2010-02-09 11:49:52 +00003445 netif_printk(qdev, ifdown, KERN_DEBUG, qdev->ndev,
3446 "freeing msi interrupt %d.\n", i);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003447 }
3448 }
3449 }
3450 ql_disable_msix(qdev);
3451}
3452
3453static int ql_request_irq(struct ql_adapter *qdev)
3454{
3455 int i;
3456 int status = 0;
3457 struct pci_dev *pdev = qdev->pdev;
3458 struct intr_context *intr_context = &qdev->intr_context[0];
3459
3460 ql_resolve_queues_to_irqs(qdev);
3461
3462 for (i = 0; i < qdev->intr_count; i++, intr_context++) {
3463 atomic_set(&intr_context->irq_cnt, 0);
3464 if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
3465 status = request_irq(qdev->msi_x_entry[i].vector,
3466 intr_context->handler,
3467 0,
3468 intr_context->name,
3469 &qdev->rx_ring[i]);
3470 if (status) {
Joe Perchesae9540f2010-02-09 11:49:52 +00003471 netif_err(qdev, ifup, qdev->ndev,
3472 "Failed request for MSIX interrupt %d.\n",
3473 i);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003474 goto err_irq;
3475 } else {
Joe Perchesae9540f2010-02-09 11:49:52 +00003476 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3477 "Hooked intr %d, queue type %s, with name %s.\n",
3478 i,
3479 qdev->rx_ring[i].type == DEFAULT_Q ?
3480 "DEFAULT_Q" :
3481 qdev->rx_ring[i].type == TX_Q ?
3482 "TX_Q" :
3483 qdev->rx_ring[i].type == RX_Q ?
3484 "RX_Q" : "",
3485 intr_context->name);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003486 }
3487 } else {
Joe Perchesae9540f2010-02-09 11:49:52 +00003488 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3489 "trying msi or legacy interrupts.\n");
3490 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3491 "%s: irq = %d.\n", __func__, pdev->irq);
3492 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3493 "%s: context->name = %s.\n", __func__,
3494 intr_context->name);
3495 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3496 "%s: dev_id = 0x%p.\n", __func__,
3497 &qdev->rx_ring[0]);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003498 status =
3499 request_irq(pdev->irq, qlge_isr,
3500 test_bit(QL_MSI_ENABLED,
3501 &qdev->
3502 flags) ? 0 : IRQF_SHARED,
3503 intr_context->name, &qdev->rx_ring[0]);
3504 if (status)
3505 goto err_irq;
3506
Joe Perchesae9540f2010-02-09 11:49:52 +00003507 netif_err(qdev, ifup, qdev->ndev,
3508 "Hooked intr %d, queue type %s, with name %s.\n",
3509 i,
3510 qdev->rx_ring[0].type == DEFAULT_Q ?
3511 "DEFAULT_Q" :
3512 qdev->rx_ring[0].type == TX_Q ? "TX_Q" :
3513 qdev->rx_ring[0].type == RX_Q ? "RX_Q" : "",
3514 intr_context->name);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003515 }
3516 intr_context->hooked = 1;
3517 }
3518 return status;
3519err_irq:
Joe Perchesae9540f2010-02-09 11:49:52 +00003520	netif_err(qdev, ifup, qdev->ndev, "Failed to get the interrupts!!!\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003521 ql_free_irq(qdev);
3522 return status;
3523}
3524
3525static int ql_start_rss(struct ql_adapter *qdev)
3526{
Ron Mercer541ae282009-10-08 09:54:37 +00003527 u8 init_hash_seed[] = {0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2,
3528 0x41, 0x67, 0x25, 0x3d, 0x43, 0xa3, 0x8f,
3529 0xb0, 0xd0, 0xca, 0x2b, 0xcb, 0xae, 0x7b,
3530 0x30, 0xb4, 0x77, 0xcb, 0x2d, 0xa3, 0x80,
3531 0x30, 0xf2, 0x0c, 0x6a, 0x42, 0xb7, 0x3b,
3532 0xbe, 0xac, 0x01, 0xfa};
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003533 struct ricb *ricb = &qdev->ricb;
3534 int status = 0;
3535 int i;
3536 u8 *hash_id = (u8 *) ricb->hash_cq_id;
3537
Ron Mercere3324712009-07-02 06:06:13 +00003538 memset((void *)ricb, 0, sizeof(*ricb));
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003539
Ron Mercerb2014ff2009-08-27 11:02:09 +00003540 ricb->base_cq = RSS_L4K;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003541 ricb->flags =
Ron Mercer541ae282009-10-08 09:54:37 +00003542 (RSS_L6K | RSS_LI | RSS_LB | RSS_LM | RSS_RT4 | RSS_RT6);
3543 ricb->mask = cpu_to_le16((u16)(0x3ff));
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003544
3545 /*
3546 * Fill out the Indirection Table.
3547 */
Ron Mercer541ae282009-10-08 09:54:37 +00003548 for (i = 0; i < 1024; i++)
3549 hash_id[i] = (i & (qdev->rss_ring_count - 1));
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003550
Ron Mercer541ae282009-10-08 09:54:37 +00003551 memcpy((void *)&ricb->ipv6_hash_key[0], init_hash_seed, 40);
3552 memcpy((void *)&ricb->ipv4_hash_key[0], init_hash_seed, 16);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003553
Joe Perchesae9540f2010-02-09 11:49:52 +00003554 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev, "Initializing RSS.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003555
Ron Mercere3324712009-07-02 06:06:13 +00003556 status = ql_write_cfg(qdev, ricb, sizeof(*ricb), CFG_LR, 0);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003557 if (status) {
Joe Perchesae9540f2010-02-09 11:49:52 +00003558 netif_err(qdev, ifup, qdev->ndev, "Failed to load RICB.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003559 return status;
3560 }
Joe Perchesae9540f2010-02-09 11:49:52 +00003561 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3562 "Successfully loaded RICB.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003563 return status;
3564}
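/*
 * Sketch of the indirection fill above (illustrative): for a
 * power-of-two RSS ring count, i & (rss_ring_count - 1) spreads the
 * 1024 hash buckets round-robin across the rings. With four rings the
 * table reads 0,1,2,3,0,1,2,3,... and the 10-bit mask (0x3ff) loaded
 * into ricb->mask lets the hardware hash index all 1024 entries evenly.
 */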
3565
Ron Mercera5f59dc2009-07-02 06:06:07 +00003566static int ql_clear_routing_entries(struct ql_adapter *qdev)
3567{
3568 int i, status = 0;
3569
3570 status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
3571 if (status)
3572 return status;
3573 /* Clear all the entries in the routing table. */
3574 for (i = 0; i < 16; i++) {
3575 status = ql_set_routing_reg(qdev, i, 0, 0);
3576 if (status) {
Joe Perchesae9540f2010-02-09 11:49:52 +00003577 netif_err(qdev, ifup, qdev->ndev,
3578 "Failed to init routing register for CAM packets.\n");
Ron Mercera5f59dc2009-07-02 06:06:07 +00003579 break;
3580 }
3581 }
3582 ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
3583 return status;
3584}
3585
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003586/* Initialize the frame-to-queue routing. */
3587static int ql_route_initialize(struct ql_adapter *qdev)
3588{
3589 int status = 0;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003590
3591 /* Clear all the entries in the routing table. */
Ron Mercera5f59dc2009-07-02 06:06:07 +00003592 status = ql_clear_routing_entries(qdev);
3593 if (status)
Ron Mercerfd21cf52009-09-29 08:39:22 +00003594 return status;
3595
3596 status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
3597 if (status)
3598 return status;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003599
3600 status = ql_set_routing_reg(qdev, RT_IDX_ALL_ERR_SLOT, RT_IDX_ERR, 1);
3601 if (status) {
Joe Perchesae9540f2010-02-09 11:49:52 +00003602 netif_err(qdev, ifup, qdev->ndev,
3603 "Failed to init routing register for error packets.\n");
Ron Mercer8587ea32009-02-23 10:42:15 +00003604 goto exit;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003605 }
3606 status = ql_set_routing_reg(qdev, RT_IDX_BCAST_SLOT, RT_IDX_BCAST, 1);
3607 if (status) {
Joe Perchesae9540f2010-02-09 11:49:52 +00003608 netif_err(qdev, ifup, qdev->ndev,
3609 "Failed to init routing register for broadcast packets.\n");
Ron Mercer8587ea32009-02-23 10:42:15 +00003610 goto exit;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003611 }
3612 /* If we have more than one inbound queue, then turn on RSS in the
3613 * routing block.
3614 */
3615 if (qdev->rss_ring_count > 1) {
3616 status = ql_set_routing_reg(qdev, RT_IDX_RSS_MATCH_SLOT,
3617 RT_IDX_RSS_MATCH, 1);
3618 if (status) {
Joe Perchesae9540f2010-02-09 11:49:52 +00003619 netif_err(qdev, ifup, qdev->ndev,
3620 "Failed to init routing register for MATCH RSS packets.\n");
Ron Mercer8587ea32009-02-23 10:42:15 +00003621 goto exit;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003622 }
3623 }
3624
3625 status = ql_set_routing_reg(qdev, RT_IDX_CAM_HIT_SLOT,
3626 RT_IDX_CAM_HIT, 1);
Ron Mercer8587ea32009-02-23 10:42:15 +00003627 if (status)
Joe Perchesae9540f2010-02-09 11:49:52 +00003628 netif_err(qdev, ifup, qdev->ndev,
3629 "Failed to init routing register for CAM packets.\n");
Ron Mercer8587ea32009-02-23 10:42:15 +00003630exit:
3631 ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003632 return status;
3633}
3634
Ron Mercer2ee1e272009-03-03 12:10:33 +00003635int ql_cam_route_initialize(struct ql_adapter *qdev)
Ron Mercerbb58b5b2009-02-23 10:42:13 +00003636{
Ron Mercer7fab3bf2009-07-02 06:06:11 +00003637 int status, set;
Ron Mercerbb58b5b2009-02-23 10:42:13 +00003638
Ron Mercer7fab3bf2009-07-02 06:06:11 +00003639	/* Check if the link is up and use that to
3640	 * determine whether we are setting or clearing
3641 * the MAC address in the CAM.
3642 */
3643 set = ql_read32(qdev, STS);
3644 set &= qdev->port_link_up;
3645 status = ql_set_mac_addr(qdev, set);
Ron Mercerbb58b5b2009-02-23 10:42:13 +00003646 if (status) {
Joe Perchesae9540f2010-02-09 11:49:52 +00003647 netif_err(qdev, ifup, qdev->ndev, "Failed to init mac address.\n");
Ron Mercerbb58b5b2009-02-23 10:42:13 +00003648 return status;
3649 }
3650
3651 status = ql_route_initialize(qdev);
3652 if (status)
Joe Perchesae9540f2010-02-09 11:49:52 +00003653 netif_err(qdev, ifup, qdev->ndev, "Failed to init routing table.\n");
Ron Mercerbb58b5b2009-02-23 10:42:13 +00003654
3655 return status;
3656}
3657
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003658static int ql_adapter_initialize(struct ql_adapter *qdev)
3659{
3660 u32 value, mask;
3661 int i;
3662 int status = 0;
3663
3664 /*
3665 * Set up the System register to halt on errors.
3666 */
3667 value = SYS_EFE | SYS_FAE;
3668 mask = value << 16;
3669 ql_write32(qdev, SYS, mask | value);
3670
Ron Mercerc9cf0a02009-03-09 10:59:22 +00003671 /* Set the default queue, and VLAN behavior. */
3672 value = NIC_RCV_CFG_DFQ | NIC_RCV_CFG_RV;
3673 mask = NIC_RCV_CFG_DFQ_MASK | (NIC_RCV_CFG_RV << 16);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003674 ql_write32(qdev, NIC_RCV_CFG, (mask | value));
3675
3676 /* Set the MPI interrupt to enabled. */
3677 ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16) | INTR_MASK_PI);
3678
3679 /* Enable the function, set pagesize, enable error checking. */
3680 value = FSC_FE | FSC_EPC_INBOUND | FSC_EPC_OUTBOUND |
Ron Mercer572c5262010-01-02 10:37:42 +00003681 FSC_EC | FSC_VM_PAGE_4K;
3682 value |= SPLT_SETTING;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003683
3684 /* Set/clear header splitting. */
3685 mask = FSC_VM_PAGESIZE_MASK |
3686 FSC_DBL_MASK | FSC_DBRST_MASK | (value << 16);
3687 ql_write32(qdev, FSC, mask | value);
3688
Ron Mercer572c5262010-01-02 10:37:42 +00003689 ql_write32(qdev, SPLT_HDR, SPLT_LEN);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003690
Ron Mercera3b71932009-10-08 09:54:38 +00003691 /* Set RX packet routing to use port/pci function on which the
3692	 * packet arrived, in addition to the usual frame routing.
3693 * This is helpful on bonding where both interfaces can have
3694 * the same MAC address.
3695 */
3696 ql_write32(qdev, RST_FO, RST_FO_RR_MASK | RST_FO_RR_RCV_FUNC_CQ);
Ron Mercerbc083ce2009-10-21 11:07:40 +00003697	/* Reroute all packets to our interface.
3698 * They may have been routed to MPI firmware
3699 * due to WOL.
3700 */
3701 value = ql_read32(qdev, MGMT_RCV_CFG);
3702 value &= ~MGMT_RCV_CFG_RM;
3703 mask = 0xffff0000;
3704
3705 /* Sticky reg needs clearing due to WOL. */
3706 ql_write32(qdev, MGMT_RCV_CFG, mask);
3707 ql_write32(qdev, MGMT_RCV_CFG, mask | value);
3708
3709	/* Default WOL is enabled on Mezz cards */
3710 if (qdev->pdev->subsystem_device == 0x0068 ||
3711 qdev->pdev->subsystem_device == 0x0180)
3712 qdev->wol = WAKE_MAGIC;
Ron Mercera3b71932009-10-08 09:54:38 +00003713
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003714 /* Start up the rx queues. */
3715 for (i = 0; i < qdev->rx_ring_count; i++) {
3716 status = ql_start_rx_ring(qdev, &qdev->rx_ring[i]);
3717 if (status) {
Joe Perchesae9540f2010-02-09 11:49:52 +00003718 netif_err(qdev, ifup, qdev->ndev,
3719 "Failed to start rx ring[%d].\n", i);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003720 return status;
3721 }
3722 }
3723
3724 /* If there is more than one inbound completion queue
3725 * then download a RICB to configure RSS.
3726 */
3727 if (qdev->rss_ring_count > 1) {
3728 status = ql_start_rss(qdev);
3729 if (status) {
Joe Perchesae9540f2010-02-09 11:49:52 +00003730 netif_err(qdev, ifup, qdev->ndev, "Failed to start RSS.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003731 return status;
3732 }
3733 }
3734
3735 /* Start up the tx queues. */
3736 for (i = 0; i < qdev->tx_ring_count; i++) {
3737 status = ql_start_tx_ring(qdev, &qdev->tx_ring[i]);
3738 if (status) {
Joe Perchesae9540f2010-02-09 11:49:52 +00003739 netif_err(qdev, ifup, qdev->ndev,
3740 "Failed to start tx ring[%d].\n", i);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003741 return status;
3742 }
3743 }
3744
Ron Mercerb0c2aad2009-02-26 10:08:35 +00003745 /* Initialize the port and set the max framesize. */
3746 status = qdev->nic_ops->port_initialize(qdev);
Ron Mercer80928862009-10-10 09:35:09 +00003747 if (status)
Joe Perchesae9540f2010-02-09 11:49:52 +00003748 netif_err(qdev, ifup, qdev->ndev, "Failed to start port.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003749
Ron Mercerbb58b5b2009-02-23 10:42:13 +00003750 /* Set up the MAC address and frame routing filter. */
3751 status = ql_cam_route_initialize(qdev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003752 if (status) {
Joe Perchesae9540f2010-02-09 11:49:52 +00003753 netif_err(qdev, ifup, qdev->ndev,
3754 "Failed to init CAM/Routing tables.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003755 return status;
3756 }
3757
3758 /* Start NAPI for the RSS queues. */
Ron Mercerb2014ff2009-08-27 11:02:09 +00003759 for (i = 0; i < qdev->rss_ring_count; i++) {
Joe Perchesae9540f2010-02-09 11:49:52 +00003760 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3761 "Enabling NAPI for rx_ring[%d].\n", i);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003762 napi_enable(&qdev->rx_ring[i].napi);
3763 }
3764
3765 return status;
3766}
3767
3768/* Issue soft reset to chip. */
3769static int ql_adapter_reset(struct ql_adapter *qdev)
3770{
3771 u32 value;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003772 int status = 0;
Ron Mercera5f59dc2009-07-02 06:06:07 +00003773 unsigned long end_jiffies;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003774
Ron Mercera5f59dc2009-07-02 06:06:07 +00003775 /* Clear all the entries in the routing table. */
3776 status = ql_clear_routing_entries(qdev);
3777 if (status) {
Joe Perchesae9540f2010-02-09 11:49:52 +00003778 netif_err(qdev, ifup, qdev->ndev, "Failed to clear routing bits.\n");
Ron Mercera5f59dc2009-07-02 06:06:07 +00003779 return status;
3780 }
3781
3782 end_jiffies = jiffies +
3783 max((unsigned long)1, usecs_to_jiffies(30));
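	/* Note: usecs_to_jiffies(30) works out to a single jiffy at any
	 * common HZ, and the max() guards the degenerate case, so the
	 * poll loop below always waits at least one jiffy (roughly 1-10
	 * ms depending on HZ) before declaring a timeout.
	 */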
Ron Mercer84087f42009-10-08 09:54:41 +00003784
3785 /* Stop management traffic. */
3786 ql_mb_set_mgmnt_traffic_ctl(qdev, MB_SET_MPI_TFK_STOP);
3787
3788 /* Wait for the NIC and MGMNT FIFOs to empty. */
3789 ql_wait_fifo_empty(qdev);
3790
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003791 ql_write32(qdev, RST_FO, (RST_FO_FR << 16) | RST_FO_FR);
Ron Mercera75ee7f2009-03-09 10:59:18 +00003792
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003793 do {
3794 value = ql_read32(qdev, RST_FO);
3795 if ((value & RST_FO_FR) == 0)
3796 break;
Ron Mercera75ee7f2009-03-09 10:59:18 +00003797 cpu_relax();
3798 } while (time_before(jiffies, end_jiffies));
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003799
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003800 if (value & RST_FO_FR) {
Joe Perchesae9540f2010-02-09 11:49:52 +00003801 netif_err(qdev, ifdown, qdev->ndev,
3802 "ETIMEDOUT!!! errored out of resetting the chip!\n");
Ron Mercera75ee7f2009-03-09 10:59:18 +00003803 status = -ETIMEDOUT;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003804 }
3805
Ron Mercer84087f42009-10-08 09:54:41 +00003806 /* Resume management traffic. */
3807 ql_mb_set_mgmnt_traffic_ctl(qdev, MB_SET_MPI_TFK_RESUME);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003808 return status;
3809}
3810
3811static void ql_display_dev_info(struct net_device *ndev)
3812{
3813 struct ql_adapter *qdev = (struct ql_adapter *)netdev_priv(ndev);
3814
Joe Perchesae9540f2010-02-09 11:49:52 +00003815 netif_info(qdev, probe, qdev->ndev,
3816 "Function #%d, Port %d, NIC Roll %d, NIC Rev = %d, "
3817 "XG Roll = %d, XG Rev = %d.\n",
3818 qdev->func,
3819 qdev->port,
3820 qdev->chip_rev_id & 0x0000000f,
3821 qdev->chip_rev_id >> 4 & 0x0000000f,
3822 qdev->chip_rev_id >> 8 & 0x0000000f,
3823 qdev->chip_rev_id >> 12 & 0x0000000f);
3824 netif_info(qdev, probe, qdev->ndev,
3825 "MAC address %pM\n", ndev->dev_addr);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003826}
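/*
 * Decode example for the message above (made-up register value):
 * chip_rev_id packs four nibbles, nibble n being
 * (chip_rev_id >> (4 * n)) & 0xf, so a value of 0x2134 prints as
 * NIC Roll 4, NIC Rev 3, XG Roll 1, XG Rev 2.
 */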
3827
Ron Mercerbc083ce2009-10-21 11:07:40 +00003828int ql_wol(struct ql_adapter *qdev)
3829{
3830 int status = 0;
3831 u32 wol = MB_WOL_DISABLE;
3832
3833 /* The CAM is still intact after a reset, but if we
3834 * are doing WOL, then we may need to program the
3835 * routing regs. We would also need to issue the mailbox
3836 * commands to instruct the MPI what to do per the ethtool
3837 * settings.
3838 */
3839
3840 if (qdev->wol & (WAKE_ARP | WAKE_MAGICSECURE | WAKE_PHY | WAKE_UCAST |
3841 WAKE_MCAST | WAKE_BCAST)) {
Joe Perchesae9540f2010-02-09 11:49:52 +00003842 netif_err(qdev, ifdown, qdev->ndev,
3843 "Unsupported WOL paramter. qdev->wol = 0x%x.\n",
3844 qdev->wol);
Ron Mercerbc083ce2009-10-21 11:07:40 +00003845 return -EINVAL;
3846 }
3847
3848 if (qdev->wol & WAKE_MAGIC) {
3849 status = ql_mb_wol_set_magic(qdev, 1);
3850 if (status) {
Joe Perchesae9540f2010-02-09 11:49:52 +00003851 netif_err(qdev, ifdown, qdev->ndev,
3852 "Failed to set magic packet on %s.\n",
3853 qdev->ndev->name);
Ron Mercerbc083ce2009-10-21 11:07:40 +00003854 return status;
3855 } else
Joe Perchesae9540f2010-02-09 11:49:52 +00003856 netif_info(qdev, drv, qdev->ndev,
3857 "Enabled magic packet successfully on %s.\n",
3858 qdev->ndev->name);
Ron Mercerbc083ce2009-10-21 11:07:40 +00003859
3860 wol |= MB_WOL_MAGIC_PKT;
3861 }
3862
3863 if (qdev->wol) {
Ron Mercerbc083ce2009-10-21 11:07:40 +00003864 wol |= MB_WOL_MODE_ON;
3865 status = ql_mb_wol_mode(qdev, wol);
Joe Perchesae9540f2010-02-09 11:49:52 +00003866 netif_err(qdev, drv, qdev->ndev,
3867 "WOL %s (wol code 0x%x) on %s\n",
3868			  (status == 0) ? "Successfully set" : "Failed",
3869 wol, qdev->ndev->name);
Ron Mercerbc083ce2009-10-21 11:07:40 +00003870 }
3871
3872 return status;
3873}
3874
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003875static int ql_adapter_down(struct ql_adapter *qdev)
3876{
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003877 int i, status = 0;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003878
Ron Mercer6a473302009-07-02 06:06:12 +00003879 ql_link_off(qdev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003880
Ron Mercer6497b602009-02-12 16:37:13 -08003881 /* Don't kill the reset worker thread if we
3882 * are in the process of recovery.
3883 */
3884 if (test_bit(QL_ADAPTER_UP, &qdev->flags))
3885 cancel_delayed_work_sync(&qdev->asic_reset_work);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003886 cancel_delayed_work_sync(&qdev->mpi_reset_work);
3887 cancel_delayed_work_sync(&qdev->mpi_work);
Ron Mercer2ee1e272009-03-03 12:10:33 +00003888 cancel_delayed_work_sync(&qdev->mpi_idc_work);
Ron Mercer8aae2602010-01-15 13:31:28 +00003889 cancel_delayed_work_sync(&qdev->mpi_core_to_log);
Ron Mercerbcc2cb32009-03-02 08:07:32 +00003890 cancel_delayed_work_sync(&qdev->mpi_port_cfg_work);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003891
Ron Mercer39aa8162009-08-27 11:02:11 +00003892 for (i = 0; i < qdev->rss_ring_count; i++)
3893 napi_disable(&qdev->rx_ring[i].napi);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003894
3895 clear_bit(QL_ADAPTER_UP, &qdev->flags);
3896
3897 ql_disable_interrupts(qdev);
3898
3899 ql_tx_ring_clean(qdev);
3900
Ron Mercer6b318cb2009-03-09 10:59:26 +00003901 /* Call netif_napi_del() from common point.
3902 */
Ron Mercerb2014ff2009-08-27 11:02:09 +00003903 for (i = 0; i < qdev->rss_ring_count; i++)
Ron Mercer6b318cb2009-03-09 10:59:26 +00003904 netif_napi_del(&qdev->rx_ring[i].napi);
3905
Ron Mercer4545a3f2009-02-23 10:42:17 +00003906 ql_free_rx_buffers(qdev);
David S. Miller2d6a5e92009-03-17 15:01:30 -07003907
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003908 status = ql_adapter_reset(qdev);
3909 if (status)
Joe Perchesae9540f2010-02-09 11:49:52 +00003910 netif_err(qdev, ifdown, qdev->ndev, "reset(func #%d) FAILED!\n",
3911 qdev->func);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003912 return status;
3913}
3914
3915static int ql_adapter_up(struct ql_adapter *qdev)
3916{
3917 int err = 0;
3918
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003919 err = ql_adapter_initialize(qdev);
3920 if (err) {
Joe Perchesae9540f2010-02-09 11:49:52 +00003921 netif_info(qdev, ifup, qdev->ndev, "Unable to initialize adapter.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003922 goto err_init;
3923 }
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003924 set_bit(QL_ADAPTER_UP, &qdev->flags);
Ron Mercer4545a3f2009-02-23 10:42:17 +00003925 ql_alloc_rx_buffers(qdev);
Ron Mercer8b007de2009-07-02 06:06:08 +00003926 /* If the port is initialized and the
3927	 * link is up then turn on the carrier.
3928 */
3929 if ((ql_read32(qdev, STS) & qdev->port_init) &&
3930 (ql_read32(qdev, STS) & qdev->port_link_up))
Ron Mercer6a473302009-07-02 06:06:12 +00003931 ql_link_on(qdev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003932 ql_enable_interrupts(qdev);
3933 ql_enable_all_completion_interrupts(qdev);
Ron Mercer1e213302009-03-09 10:59:21 +00003934 netif_tx_start_all_queues(qdev->ndev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003935
3936 return 0;
3937err_init:
3938 ql_adapter_reset(qdev);
3939 return err;
3940}
3941
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003942static void ql_release_adapter_resources(struct ql_adapter *qdev)
3943{
3944 ql_free_mem_resources(qdev);
3945 ql_free_irq(qdev);
3946}
3947
3948static int ql_get_adapter_resources(struct ql_adapter *qdev)
3949{
3950 int status = 0;
3951
3952 if (ql_alloc_mem_resources(qdev)) {
Joe Perchesae9540f2010-02-09 11:49:52 +00003953 netif_err(qdev, ifup, qdev->ndev, "Unable to allocate memory.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003954 return -ENOMEM;
3955 }
3956 status = ql_request_irq(qdev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003957 return status;
3958}
3959
3960static int qlge_close(struct net_device *ndev)
3961{
3962 struct ql_adapter *qdev = netdev_priv(ndev);
3963
Ron Mercer4bbd1a12010-02-03 07:24:12 +00003964	/* If we hit the pci_channel_io_perm_failure
3965	 * condition, then we already
3966 * brought the adapter down.
3967 */
3968 if (test_bit(QL_EEH_FATAL, &qdev->flags)) {
Joe Perchesae9540f2010-02-09 11:49:52 +00003969		netif_err(qdev, drv, qdev->ndev, "EEH fatal error; adapter already brought down.\n");
Ron Mercer4bbd1a12010-02-03 07:24:12 +00003970 clear_bit(QL_EEH_FATAL, &qdev->flags);
3971 return 0;
3972 }
3973
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003974 /*
3975 * Wait for device to recover from a reset.
3976 * (Rarely happens, but possible.)
3977 */
3978 while (!test_bit(QL_ADAPTER_UP, &qdev->flags))
3979 msleep(1);
3980 ql_adapter_down(qdev);
3981 ql_release_adapter_resources(qdev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003982 return 0;
3983}
3984
3985static int ql_configure_rings(struct ql_adapter *qdev)
3986{
3987 int i;
3988 struct rx_ring *rx_ring;
3989 struct tx_ring *tx_ring;
Ron Mercera4ab6132009-08-27 11:02:10 +00003990 int cpu_cnt = min(MAX_CPUS, (int)num_online_cpus());
Ron Mercer7c734352009-10-19 03:32:19 +00003991 unsigned int lbq_buf_len = (qdev->ndev->mtu > 1500) ?
3992 LARGE_BUFFER_MAX_SIZE : LARGE_BUFFER_MIN_SIZE;
3993
3994 qdev->lbq_buf_order = get_order(lbq_buf_len);
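	/* Illustrative numbers (the buffer-size constants live in
	 * qlge.h): if LARGE_BUFFER_MAX_SIZE were 8192, a jumbo MTU
	 * would give get_order(8192) == 1 on 4K pages, i.e. order-1,
	 * two-page large buffers, while a 1500 MTU falls back to the
	 * smaller LARGE_BUFFER_MIN_SIZE.
	 */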
Ron Mercerc4e84bd2008-09-18 11:56:28 -04003995
Ron Mercera4ab6132009-08-27 11:02:10 +00003996 /* In a perfect world we have one RSS ring for each CPU
3997	 * and each has its own vector. To do that we ask for
3998 * cpu_cnt vectors. ql_enable_msix() will adjust the
3999 * vector count to what we actually get. We then
4000 * allocate an RSS ring for each.
4001 * Essentially, we are doing min(cpu_count, msix_vector_count).
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004002 */
Ron Mercera4ab6132009-08-27 11:02:10 +00004003 qdev->intr_count = cpu_cnt;
4004 ql_enable_msix(qdev);
4005 /* Adjust the RSS ring count to the actual vector count. */
4006 qdev->rss_ring_count = qdev->intr_count;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004007 qdev->tx_ring_count = cpu_cnt;
Ron Mercerb2014ff2009-08-27 11:02:09 +00004008 qdev->rx_ring_count = qdev->tx_ring_count + qdev->rss_ring_count;
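	/* Example (illustrative): on an 8-CPU box where MSI-X only
	 * grants 4 vectors, ql_enable_msix() trims intr_count to 4, so
	 * rss_ring_count = 4, tx_ring_count = 8 and rx_ring_count = 12;
	 * rx_ring[0..3] become RSS queues and rx_ring[4..11] the TX
	 * completion queues set up in the loops below.
	 */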
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004009
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004010 for (i = 0; i < qdev->tx_ring_count; i++) {
4011 tx_ring = &qdev->tx_ring[i];
Ron Mercere3324712009-07-02 06:06:13 +00004012 memset((void *)tx_ring, 0, sizeof(*tx_ring));
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004013 tx_ring->qdev = qdev;
4014 tx_ring->wq_id = i;
4015 tx_ring->wq_len = qdev->tx_ring_size;
4016 tx_ring->wq_size =
4017 tx_ring->wq_len * sizeof(struct ob_mac_iocb_req);
4018
4019 /*
4020		 * The completion queue IDs for the tx rings start
Ron Mercer39aa8162009-08-27 11:02:11 +00004021 * immediately after the rss rings.
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004022 */
Ron Mercer39aa8162009-08-27 11:02:11 +00004023 tx_ring->cq_id = qdev->rss_ring_count + i;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004024 }
4025
4026 for (i = 0; i < qdev->rx_ring_count; i++) {
4027 rx_ring = &qdev->rx_ring[i];
Ron Mercere3324712009-07-02 06:06:13 +00004028 memset((void *)rx_ring, 0, sizeof(*rx_ring));
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004029 rx_ring->qdev = qdev;
4030 rx_ring->cq_id = i;
4031 rx_ring->cpu = i % cpu_cnt; /* CPU to run handler on. */
Ron Mercerb2014ff2009-08-27 11:02:09 +00004032 if (i < qdev->rss_ring_count) {
Ron Mercer39aa8162009-08-27 11:02:11 +00004033 /*
4034 * Inbound (RSS) queues.
4035 */
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004036 rx_ring->cq_len = qdev->rx_ring_size;
4037 rx_ring->cq_size =
4038 rx_ring->cq_len * sizeof(struct ql_net_rsp_iocb);
4039 rx_ring->lbq_len = NUM_LARGE_BUFFERS;
4040 rx_ring->lbq_size =
Ron Mercer2c9a0d42009-01-05 18:19:20 -08004041 rx_ring->lbq_len * sizeof(__le64);
Ron Mercer7c734352009-10-19 03:32:19 +00004042 rx_ring->lbq_buf_size = (u16)lbq_buf_len;
Joe Perchesae9540f2010-02-09 11:49:52 +00004043 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
4044 "lbq_buf_size %d, order = %d\n",
4045 rx_ring->lbq_buf_size,
4046 qdev->lbq_buf_order);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004047 rx_ring->sbq_len = NUM_SMALL_BUFFERS;
4048 rx_ring->sbq_size =
Ron Mercer2c9a0d42009-01-05 18:19:20 -08004049 rx_ring->sbq_len * sizeof(__le64);
Ron Mercer52e55f32009-10-10 09:35:07 +00004050 rx_ring->sbq_buf_size = SMALL_BUF_MAP_SIZE;
Ron Mercerb2014ff2009-08-27 11:02:09 +00004051 rx_ring->type = RX_Q;
4052 } else {
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004053 /*
4054 * Outbound queue handles outbound completions only.
4055 */
4056 /* outbound cq is same size as tx_ring it services. */
4057 rx_ring->cq_len = qdev->tx_ring_size;
4058 rx_ring->cq_size =
4059 rx_ring->cq_len * sizeof(struct ql_net_rsp_iocb);
4060 rx_ring->lbq_len = 0;
4061 rx_ring->lbq_size = 0;
4062 rx_ring->lbq_buf_size = 0;
4063 rx_ring->sbq_len = 0;
4064 rx_ring->sbq_size = 0;
4065 rx_ring->sbq_buf_size = 0;
4066 rx_ring->type = TX_Q;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004067 }
4068 }
4069 return 0;
4070}
4071
4072static int qlge_open(struct net_device *ndev)
4073{
4074 int err = 0;
4075 struct ql_adapter *qdev = netdev_priv(ndev);
4076
Ron Mercer74e12432009-11-11 12:54:04 +00004077 err = ql_adapter_reset(qdev);
4078 if (err)
4079 return err;
4080
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004081 err = ql_configure_rings(qdev);
4082 if (err)
4083 return err;
4084
4085 err = ql_get_adapter_resources(qdev);
4086 if (err)
4087 goto error_up;
4088
4089 err = ql_adapter_up(qdev);
4090 if (err)
4091 goto error_up;
4092
4093 return err;
4094
4095error_up:
4096 ql_release_adapter_resources(qdev);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004097 return err;
4098}
4099
Ron Mercer7c734352009-10-19 03:32:19 +00004100static int ql_change_rx_buffers(struct ql_adapter *qdev)
4101{
4102 struct rx_ring *rx_ring;
4103 int i, status;
4104 u32 lbq_buf_len;
4105
4106	/* Wait for an outstanding reset to complete. */
4107 if (!test_bit(QL_ADAPTER_UP, &qdev->flags)) {
4108 int i = 3;
4109 while (i-- && !test_bit(QL_ADAPTER_UP, &qdev->flags)) {
Joe Perchesae9540f2010-02-09 11:49:52 +00004110 netif_err(qdev, ifup, qdev->ndev,
4111 "Waiting for adapter UP...\n");
Ron Mercer7c734352009-10-19 03:32:19 +00004112 ssleep(1);
4113 }
4114
4115 if (!i) {
Joe Perchesae9540f2010-02-09 11:49:52 +00004116 netif_err(qdev, ifup, qdev->ndev,
4117 "Timed out waiting for adapter UP\n");
Ron Mercer7c734352009-10-19 03:32:19 +00004118 return -ETIMEDOUT;
4119 }
4120 }
4121
4122 status = ql_adapter_down(qdev);
4123 if (status)
4124 goto error;
4125
4126 /* Get the new rx buffer size. */
4127 lbq_buf_len = (qdev->ndev->mtu > 1500) ?
4128 LARGE_BUFFER_MAX_SIZE : LARGE_BUFFER_MIN_SIZE;
4129 qdev->lbq_buf_order = get_order(lbq_buf_len);
4130
4131 for (i = 0; i < qdev->rss_ring_count; i++) {
4132 rx_ring = &qdev->rx_ring[i];
4133 /* Set the new size. */
4134 rx_ring->lbq_buf_size = lbq_buf_len;
4135 }
4136
4137 status = ql_adapter_up(qdev);
4138 if (status)
4139 goto error;
4140
4141 return status;
4142error:
Joe Perchesae9540f2010-02-09 11:49:52 +00004143 netif_alert(qdev, ifup, qdev->ndev,
4144 "Driver up/down cycle failed, closing device.\n");
Ron Mercer7c734352009-10-19 03:32:19 +00004145 set_bit(QL_ADAPTER_UP, &qdev->flags);
4146 dev_close(qdev->ndev);
4147 return status;
4148}
4149
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004150static int qlge_change_mtu(struct net_device *ndev, int new_mtu)
4151{
4152 struct ql_adapter *qdev = netdev_priv(ndev);
Ron Mercer7c734352009-10-19 03:32:19 +00004153 int status;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004154
4155 if (ndev->mtu == 1500 && new_mtu == 9000) {
Joe Perchesae9540f2010-02-09 11:49:52 +00004156 netif_err(qdev, ifup, qdev->ndev, "Changing to jumbo MTU.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004157 } else if (ndev->mtu == 9000 && new_mtu == 1500) {
Joe Perchesae9540f2010-02-09 11:49:52 +00004158 netif_err(qdev, ifup, qdev->ndev, "Changing to normal MTU.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004159 } else
4160 return -EINVAL;
Ron Mercer7c734352009-10-19 03:32:19 +00004161
4162 queue_delayed_work(qdev->workqueue,
4163 &qdev->mpi_port_cfg_work, 3*HZ);
4164
Breno Leitao746079d2010-02-04 10:11:19 +00004165 ndev->mtu = new_mtu;
4166
Ron Mercer7c734352009-10-19 03:32:19 +00004167 if (!netif_running(qdev->ndev)) {
Ron Mercer7c734352009-10-19 03:32:19 +00004168 return 0;
4169 }
4170
Ron Mercer7c734352009-10-19 03:32:19 +00004171 status = ql_change_rx_buffers(qdev);
4172 if (status) {
Joe Perchesae9540f2010-02-09 11:49:52 +00004173 netif_err(qdev, ifup, qdev->ndev,
4174 "Changing MTU failed.\n");
Ron Mercer7c734352009-10-19 03:32:19 +00004175 }
4176
4177 return status;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004178}
4179
4180static struct net_device_stats *qlge_get_stats(struct net_device
4181 *ndev)
4182{
Ron Mercer885ee392009-11-03 13:49:31 +00004183 struct ql_adapter *qdev = netdev_priv(ndev);
4184 struct rx_ring *rx_ring = &qdev->rx_ring[0];
4185 struct tx_ring *tx_ring = &qdev->tx_ring[0];
4186 unsigned long pkts, mcast, dropped, errors, bytes;
4187 int i;
4188
4189 /* Get RX stats. */
4190 pkts = mcast = dropped = errors = bytes = 0;
4191 for (i = 0; i < qdev->rss_ring_count; i++, rx_ring++) {
4192 pkts += rx_ring->rx_packets;
4193 bytes += rx_ring->rx_bytes;
4194 dropped += rx_ring->rx_dropped;
4195 errors += rx_ring->rx_errors;
4196 mcast += rx_ring->rx_multicast;
4197 }
4198 ndev->stats.rx_packets = pkts;
4199 ndev->stats.rx_bytes = bytes;
4200 ndev->stats.rx_dropped = dropped;
4201 ndev->stats.rx_errors = errors;
4202 ndev->stats.multicast = mcast;
4203
4204 /* Get TX stats. */
4205 pkts = errors = bytes = 0;
4206 for (i = 0; i < qdev->tx_ring_count; i++, tx_ring++) {
4207 pkts += tx_ring->tx_packets;
4208 bytes += tx_ring->tx_bytes;
4209 errors += tx_ring->tx_errors;
4210 }
4211 ndev->stats.tx_packets = pkts;
4212 ndev->stats.tx_bytes = bytes;
4213 ndev->stats.tx_errors = errors;
Ajit Khapardebcc90f52009-10-07 02:46:09 +00004214 return &ndev->stats;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004215}
4216
4217static void qlge_set_multicast_list(struct net_device *ndev)
4218{
4219 struct ql_adapter *qdev = (struct ql_adapter *)netdev_priv(ndev);
4220 struct dev_mc_list *mc_ptr;
Ron Mercercc288f52009-02-23 10:42:14 +00004221 int i, status;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004222
Ron Mercercc288f52009-02-23 10:42:14 +00004223 status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
4224 if (status)
4225 return;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004226 /*
4227 * Set or clear promiscuous mode if a
4228 * transition is taking place.
4229 */
4230 if (ndev->flags & IFF_PROMISC) {
4231 if (!test_bit(QL_PROMISCUOUS, &qdev->flags)) {
4232 if (ql_set_routing_reg
4233 (qdev, RT_IDX_PROMISCUOUS_SLOT, RT_IDX_VALID, 1)) {
Joe Perchesae9540f2010-02-09 11:49:52 +00004234 netif_err(qdev, hw, qdev->ndev,
4235 "Failed to set promiscous mode.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004236 } else {
4237 set_bit(QL_PROMISCUOUS, &qdev->flags);
4238 }
4239 }
4240 } else {
4241 if (test_bit(QL_PROMISCUOUS, &qdev->flags)) {
4242 if (ql_set_routing_reg
4243 (qdev, RT_IDX_PROMISCUOUS_SLOT, RT_IDX_VALID, 0)) {
Joe Perchesae9540f2010-02-09 11:49:52 +00004244 netif_err(qdev, hw, qdev->ndev,
4245 "Failed to clear promiscous mode.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004246 } else {
4247 clear_bit(QL_PROMISCUOUS, &qdev->flags);
4248 }
4249 }
4250 }
4251
4252 /*
4253 * Set or clear all multicast mode if a
4254 * transition is taking place.
4255 */
4256 if ((ndev->flags & IFF_ALLMULTI) ||
Jiri Pirko4cd24ea2010-02-08 04:30:35 +00004257 (netdev_mc_count(ndev) > MAX_MULTICAST_ENTRIES)) {
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004258 if (!test_bit(QL_ALLMULTI, &qdev->flags)) {
4259 if (ql_set_routing_reg
4260 (qdev, RT_IDX_ALLMULTI_SLOT, RT_IDX_MCAST, 1)) {
Joe Perchesae9540f2010-02-09 11:49:52 +00004261 netif_err(qdev, hw, qdev->ndev,
4262 "Failed to set all-multi mode.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004263 } else {
4264 set_bit(QL_ALLMULTI, &qdev->flags);
4265 }
4266 }
4267 } else {
4268 if (test_bit(QL_ALLMULTI, &qdev->flags)) {
4269 if (ql_set_routing_reg
4270 (qdev, RT_IDX_ALLMULTI_SLOT, RT_IDX_MCAST, 0)) {
Joe Perchesae9540f2010-02-09 11:49:52 +00004271 netif_err(qdev, hw, qdev->ndev,
4272 "Failed to clear all-multi mode.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004273 } else {
4274 clear_bit(QL_ALLMULTI, &qdev->flags);
4275 }
4276 }
4277 }
4278
Jiri Pirko4cd24ea2010-02-08 04:30:35 +00004279 if (!netdev_mc_empty(ndev)) {
Ron Mercercc288f52009-02-23 10:42:14 +00004280 status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
4281 if (status)
4282 goto exit;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004283 for (i = 0, mc_ptr = ndev->mc_list; mc_ptr;
4284 i++, mc_ptr = mc_ptr->next)
4285 if (ql_set_mac_addr_reg(qdev, (u8 *) mc_ptr->dmi_addr,
4286 MAC_ADDR_TYPE_MULTI_MAC, i)) {
Joe Perchesae9540f2010-02-09 11:49:52 +00004287 netif_err(qdev, hw, qdev->ndev,
4288 "Failed to loadmulticast address.\n");
Ron Mercercc288f52009-02-23 10:42:14 +00004289 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004290 goto exit;
4291 }
Ron Mercercc288f52009-02-23 10:42:14 +00004292 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004293 if (ql_set_routing_reg
4294 (qdev, RT_IDX_MCAST_MATCH_SLOT, RT_IDX_MCAST_MATCH, 1)) {
Joe Perchesae9540f2010-02-09 11:49:52 +00004295 netif_err(qdev, hw, qdev->ndev,
4296 "Failed to set multicast match mode.\n");
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004297 } else {
4298 set_bit(QL_ALLMULTI, &qdev->flags);
4299 }
4300 }
4301exit:
Ron Mercer8587ea32009-02-23 10:42:15 +00004302 ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004303}
4304
4305static int qlge_set_mac_address(struct net_device *ndev, void *p)
4306{
4307 struct ql_adapter *qdev = (struct ql_adapter *)netdev_priv(ndev);
4308 struct sockaddr *addr = p;
Ron Mercercc288f52009-02-23 10:42:14 +00004309 int status;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004310
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004311 if (!is_valid_ether_addr(addr->sa_data))
4312 return -EADDRNOTAVAIL;
4313 memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len);
4314
Ron Mercercc288f52009-02-23 10:42:14 +00004315 status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
4316 if (status)
4317 return status;
Ron Mercercc288f52009-02-23 10:42:14 +00004318 status = ql_set_mac_addr_reg(qdev, (u8 *) ndev->dev_addr,
4319 MAC_ADDR_TYPE_CAM_MAC, qdev->func * MAX_CQ);
Ron Mercercc288f52009-02-23 10:42:14 +00004320 if (status)
Joe Perchesae9540f2010-02-09 11:49:52 +00004321 netif_err(qdev, hw, qdev->ndev, "Failed to load MAC address.\n");
Ron Mercercc288f52009-02-23 10:42:14 +00004322 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
4323 return status;
Ron Mercerc4e84bd2008-09-18 11:56:28 -04004324}
4325
static void qlge_tx_timeout(struct net_device *ndev)
{
	struct ql_adapter *qdev = netdev_priv(ndev);

	ql_queue_asic_error(qdev);
}

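/* Recover from an asic error by cycling the adapter down and back
 * up under the rtnl lock and then restoring the rx mode.  If the
 * cycle fails, the device is closed.
 */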
static void ql_asic_reset_work(struct work_struct *work)
{
	struct ql_adapter *qdev =
	    container_of(work, struct ql_adapter, asic_reset_work.work);
	int status;

	rtnl_lock();
	status = ql_adapter_down(qdev);
	if (status)
		goto error;

	status = ql_adapter_up(qdev);
	if (status)
		goto error;

	/* Restore rx mode. */
	clear_bit(QL_ALLMULTI, &qdev->flags);
	clear_bit(QL_PROMISCUOUS, &qdev->flags);
	qlge_set_multicast_list(qdev->ndev);

	rtnl_unlock();
	return;
error:
	netif_alert(qdev, ifup, qdev->ndev,
		    "Driver up/down cycle failed, closing device\n");

	set_bit(QL_ADAPTER_UP, &qdev->flags);
	dev_close(qdev->ndev);
	rtnl_unlock();
}

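/* Chip-specific operations; the 8012 and 8000 parts differ in
 * flash layout and port bring-up.
 */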
static struct nic_operations qla8012_nic_ops = {
	.get_flash		= ql_get_8012_flash_params,
	.port_initialize	= ql_8012_port_initialize,
};

static struct nic_operations qla8000_nic_ops = {
	.get_flash		= ql_get_8000_flash_params,
	.port_initialize	= ql_8000_port_initialize,
};

/* Find the PCIe function number for the other NIC
 * on this chip.  Since both NIC functions share a
 * common firmware, we have the lowest enabled function
 * do any common work.  Examples would be resetting
 * after a fatal firmware error, or doing a firmware
 * coredump.
 */
static int ql_get_alt_pcie_func(struct ql_adapter *qdev)
{
	int status = 0;
	u32 temp;
	u32 nic_func1, nic_func2;

	status = ql_read_mpi_reg(qdev, MPI_TEST_FUNC_PORT_CFG,
			&temp);
	if (status)
		return status;

	nic_func1 = ((temp >> MPI_TEST_NIC1_FUNC_SHIFT) &
			MPI_TEST_NIC_FUNC_MASK);
	nic_func2 = ((temp >> MPI_TEST_NIC2_FUNC_SHIFT) &
			MPI_TEST_NIC_FUNC_MASK);

	if (qdev->func == nic_func1)
		qdev->alt_func = nic_func2;
	else if (qdev->func == nic_func2)
		qdev->alt_func = nic_func1;
	else
		status = -EIO;

	return status;
}

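/* Work out which function, port, and chip this instance is driving.
 * The lower of the two enabled NIC functions maps to port 0, and the
 * port selects the XGMAC semaphore, link/init status bits, and MPI
 * mailbox addresses used by the rest of the driver.
 */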
static int ql_get_board_info(struct ql_adapter *qdev)
{
	int status;

	qdev->func =
	    (ql_read32(qdev, STS) & STS_FUNC_ID_MASK) >> STS_FUNC_ID_SHIFT;
	if (qdev->func > 3)
		return -EIO;

	status = ql_get_alt_pcie_func(qdev);
	if (status)
		return status;

	qdev->port = (qdev->func < qdev->alt_func) ? 0 : 1;
	if (qdev->port) {
		qdev->xg_sem_mask = SEM_XGMAC1_MASK;
		qdev->port_link_up = STS_PL1;
		qdev->port_init = STS_PI1;
		qdev->mailbox_in = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC2_MBI;
		qdev->mailbox_out = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC2_MBO;
	} else {
		qdev->xg_sem_mask = SEM_XGMAC0_MASK;
		qdev->port_link_up = STS_PL0;
		qdev->port_init = STS_PI0;
		qdev->mailbox_in = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC0_MBI;
		qdev->mailbox_out = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC0_MBO;
	}
	qdev->chip_rev_id = ql_read32(qdev, REV_ID);
	qdev->device_id = qdev->pdev->device;
	if (qdev->device_id == QLGE_DEVICE_ID_8012)
		qdev->nic_ops = &qla8012_nic_ops;
	else if (qdev->device_id == QLGE_DEVICE_ID_8000)
		qdev->nic_ops = &qla8000_nic_ops;
	return status;
}

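/* Undo what ql_init_device() set up: the workqueue, the register
 * and doorbell mappings, the coredump buffer, and the PCI regions.
 */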
static void ql_release_all(struct pci_dev *pdev)
{
	struct net_device *ndev = pci_get_drvdata(pdev);
	struct ql_adapter *qdev = netdev_priv(ndev);

	if (qdev->workqueue) {
		destroy_workqueue(qdev->workqueue);
		qdev->workqueue = NULL;
	}

	if (qdev->reg_base)
		iounmap(qdev->reg_base);
	if (qdev->doorbell_area)
		iounmap(qdev->doorbell_area);
	vfree(qdev->mpi_coredump);
	pci_release_regions(pdev);
	pci_set_drvdata(pdev, NULL);
}

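/* One-time PCI and software setup done at probe time: enable the
 * device, map BARs 1 and 3, identify the board, and initialize the
 * locks, workers, and default ring/coalescing parameters.
 */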
static int __devinit ql_init_device(struct pci_dev *pdev,
				    struct net_device *ndev, int cards_found)
{
	struct ql_adapter *qdev = netdev_priv(ndev);
	int err = 0;

	memset((void *)qdev, 0, sizeof(*qdev));
	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "PCI device enable failed.\n");
		return err;
	}

	qdev->ndev = ndev;
	qdev->pdev = pdev;
	pci_set_drvdata(pdev, ndev);

	/* Set PCIe read request size */
	err = pcie_set_readrq(pdev, 4096);
	if (err) {
		dev_err(&pdev->dev, "Set readrq failed.\n");
		goto err_out1;
	}

	err = pci_request_regions(pdev, DRV_NAME);
	if (err) {
		dev_err(&pdev->dev, "PCI region request failed.\n");
		goto err_out1;
	}

	pci_set_master(pdev);
	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
		set_bit(QL_DMA64, &qdev->flags);
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
	} else {
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (!err)
			err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
	}

	if (err) {
		dev_err(&pdev->dev, "No usable DMA configuration.\n");
		goto err_out2;
	}

	/* Set PCIe reset type for EEH to fundamental. */
	pdev->needs_freset = 1;
	pci_save_state(pdev);
	qdev->reg_base =
	    ioremap_nocache(pci_resource_start(pdev, 1),
			    pci_resource_len(pdev, 1));
	if (!qdev->reg_base) {
		dev_err(&pdev->dev, "Register mapping failed.\n");
		err = -ENOMEM;
		goto err_out2;
	}

	qdev->doorbell_area_size = pci_resource_len(pdev, 3);
	qdev->doorbell_area =
	    ioremap_nocache(pci_resource_start(pdev, 3),
			    pci_resource_len(pdev, 3));
	if (!qdev->doorbell_area) {
		dev_err(&pdev->dev, "Doorbell register mapping failed.\n");
		err = -ENOMEM;
		goto err_out2;
	}

	err = ql_get_board_info(qdev);
	if (err) {
		dev_err(&pdev->dev, "Register access failed.\n");
		err = -EIO;
		goto err_out2;
	}
	qdev->msg_enable = netif_msg_init(debug, default_msg);
	spin_lock_init(&qdev->hw_lock);
	spin_lock_init(&qdev->stats_lock);

	if (qlge_mpi_coredump) {
		qdev->mpi_coredump =
			vmalloc(sizeof(struct ql_mpi_coredump));
		if (qdev->mpi_coredump == NULL) {
			dev_err(&pdev->dev, "Coredump alloc failed.\n");
			err = -ENOMEM;
			goto err_out2;
		}
		if (qlge_force_coredump)
			set_bit(QL_FRC_COREDUMP, &qdev->flags);
	}
	/* make sure the EEPROM is good */
	err = qdev->nic_ops->get_flash(qdev);
	if (err) {
		dev_err(&pdev->dev, "Invalid FLASH.\n");
		goto err_out2;
	}

	memcpy(ndev->perm_addr, ndev->dev_addr, ndev->addr_len);

	/* Set up the default ring sizes. */
	qdev->tx_ring_size = NUM_TX_RING_ENTRIES;
	qdev->rx_ring_size = NUM_RX_RING_ENTRIES;

	/* Set up the coalescing parameters. */
	qdev->rx_coalesce_usecs = DFLT_COALESCE_WAIT;
	qdev->tx_coalesce_usecs = DFLT_COALESCE_WAIT;
	qdev->rx_max_coalesced_frames = DFLT_INTER_FRAME_WAIT;
	qdev->tx_max_coalesced_frames = DFLT_INTER_FRAME_WAIT;

	/*
	 * Set up the operating parameters.
	 */
	qdev->rx_csum = 1;
	qdev->workqueue = create_singlethread_workqueue(ndev->name);
	INIT_DELAYED_WORK(&qdev->asic_reset_work, ql_asic_reset_work);
	INIT_DELAYED_WORK(&qdev->mpi_reset_work, ql_mpi_reset_work);
	INIT_DELAYED_WORK(&qdev->mpi_work, ql_mpi_work);
	INIT_DELAYED_WORK(&qdev->mpi_port_cfg_work, ql_mpi_port_cfg_work);
	INIT_DELAYED_WORK(&qdev->mpi_idc_work, ql_mpi_idc_work);
	INIT_DELAYED_WORK(&qdev->mpi_core_to_log, ql_mpi_core_to_log);
	init_completion(&qdev->ide_completion);

	if (!cards_found) {
		dev_info(&pdev->dev, "%s\n", DRV_STRING);
		dev_info(&pdev->dev, "Driver name: %s, Version: %s.\n",
			 DRV_NAME, DRV_VERSION);
	}
	return 0;
err_out2:
	ql_release_all(pdev);
err_out1:
	pci_disable_device(pdev);
	return err;
}

static const struct net_device_ops qlge_netdev_ops = {
	.ndo_open		= qlge_open,
	.ndo_stop		= qlge_close,
	.ndo_start_xmit		= qlge_send,
	.ndo_change_mtu		= qlge_change_mtu,
	.ndo_get_stats		= qlge_get_stats,
	.ndo_set_multicast_list	= qlge_set_multicast_list,
	.ndo_set_mac_address	= qlge_set_mac_address,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_tx_timeout		= qlge_tx_timeout,
	.ndo_vlan_rx_register	= qlge_vlan_rx_register,
	.ndo_vlan_rx_add_vid	= qlge_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= qlge_vlan_rx_kill_vid,
};

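/* Deferrable timer that reads the STS register every five seconds
 * so a dead bus is noticed (and EEH kicked off) even while the
 * device is otherwise idle.
 */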
static void ql_timer(unsigned long data)
{
	struct ql_adapter *qdev = (struct ql_adapter *)data;
	u32 var = 0;

	var = ql_read32(qdev, STS);
	if (pci_channel_offline(qdev->pdev)) {
		netif_err(qdev, ifup, qdev->ndev, "EEH STS = 0x%.08x.\n", var);
		return;
	}

	qdev->timer.expires = jiffies + (5*HZ);
	add_timer(&qdev->timer);
}

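/* Probe one adapter: allocate a multiqueue net_device, run
 * ql_init_device(), advertise the offload features, register with
 * the net core, and start the EEH poll timer.
 */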
static int __devinit qlge_probe(struct pci_dev *pdev,
				const struct pci_device_id *pci_entry)
{
	struct net_device *ndev = NULL;
	struct ql_adapter *qdev = NULL;
	static int cards_found;
	int err = 0;

	ndev = alloc_etherdev_mq(sizeof(struct ql_adapter),
			min(MAX_CPUS, (int)num_online_cpus()));
	if (!ndev)
		return -ENOMEM;

	err = ql_init_device(pdev, ndev, cards_found);
	if (err < 0) {
		free_netdev(ndev);
		return err;
	}

	qdev = netdev_priv(ndev);
	SET_NETDEV_DEV(ndev, &pdev->dev);
	ndev->features = (0
			  | NETIF_F_IP_CSUM
			  | NETIF_F_SG
			  | NETIF_F_TSO
			  | NETIF_F_TSO6
			  | NETIF_F_TSO_ECN
			  | NETIF_F_HW_VLAN_TX
			  | NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER);
	ndev->features |= NETIF_F_GRO;

	if (test_bit(QL_DMA64, &qdev->flags))
		ndev->features |= NETIF_F_HIGHDMA;

	/*
	 * Set up net_device structure.
	 */
	ndev->tx_queue_len = qdev->tx_ring_size;
	ndev->irq = pdev->irq;

	ndev->netdev_ops = &qlge_netdev_ops;
	SET_ETHTOOL_OPS(ndev, &qlge_ethtool_ops);
	ndev->watchdog_timeo = 10 * HZ;

	err = register_netdev(ndev);
	if (err) {
		dev_err(&pdev->dev, "net device registration failed.\n");
		ql_release_all(pdev);
		pci_disable_device(pdev);
		return err;
	}
	/* Start up the timer to trigger EEH if
	 * the bus goes dead.
	 */
	init_timer_deferrable(&qdev->timer);
	qdev->timer.data = (unsigned long)qdev;
	qdev->timer.function = ql_timer;
	qdev->timer.expires = jiffies + (5*HZ);
	add_timer(&qdev->timer);
	ql_link_off(qdev);
	ql_display_dev_info(ndev);
	atomic_set(&qdev->lb_count, 0);
	cards_found++;
	return 0;
}

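/* Non-static entry points used by the ethtool self-test code to
 * send a loopback frame and to reap it from the rx ring.
 */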
netdev_tx_t ql_lb_send(struct sk_buff *skb, struct net_device *ndev)
{
	return qlge_send(skb, ndev);
}

int ql_clean_lb_rx_ring(struct rx_ring *rx_ring, int budget)
{
	return ql_clean_inbound_rx_ring(rx_ring, budget);
}

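/* Tear down one adapter: stop the EEH timer, unregister the
 * net_device, and release all PCI and driver resources.
 */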
static void __devexit qlge_remove(struct pci_dev *pdev)
{
	struct net_device *ndev = pci_get_drvdata(pdev);
	struct ql_adapter *qdev = netdev_priv(ndev);

	del_timer_sync(&qdev->timer);
	unregister_netdev(ndev);
	ql_release_all(pdev);
	pci_disable_device(pdev);
	free_netdev(ndev);
}

/* Clean up resources without touching hardware. */
static void ql_eeh_close(struct net_device *ndev)
{
	int i;
	struct ql_adapter *qdev = netdev_priv(ndev);

	if (netif_carrier_ok(ndev)) {
		netif_carrier_off(ndev);
		netif_stop_queue(ndev);
	}

	if (test_bit(QL_ADAPTER_UP, &qdev->flags))
		cancel_delayed_work_sync(&qdev->asic_reset_work);
	cancel_delayed_work_sync(&qdev->mpi_reset_work);
	cancel_delayed_work_sync(&qdev->mpi_work);
	cancel_delayed_work_sync(&qdev->mpi_idc_work);
	cancel_delayed_work_sync(&qdev->mpi_core_to_log);
	cancel_delayed_work_sync(&qdev->mpi_port_cfg_work);

	for (i = 0; i < qdev->rss_ring_count; i++)
		netif_napi_del(&qdev->rx_ring[i].napi);

	clear_bit(QL_ADAPTER_UP, &qdev->flags);
	ql_tx_ring_clean(qdev);
	ql_free_rx_buffers(qdev);
	ql_release_adapter_resources(qdev);
}

/*
 * This callback is called by the PCI subsystem whenever
 * a PCI bus error is detected.
 */
static pci_ers_result_t qlge_io_error_detected(struct pci_dev *pdev,
					       enum pci_channel_state state)
{
	struct net_device *ndev = pci_get_drvdata(pdev);
	struct ql_adapter *qdev = netdev_priv(ndev);

	switch (state) {
	case pci_channel_io_normal:
		return PCI_ERS_RESULT_CAN_RECOVER;
	case pci_channel_io_frozen:
		netif_device_detach(ndev);
		if (netif_running(ndev))
			ql_eeh_close(ndev);
		pci_disable_device(pdev);
		return PCI_ERS_RESULT_NEED_RESET;
	case pci_channel_io_perm_failure:
		dev_err(&pdev->dev,
			"%s: pci_channel_io_perm_failure.\n", __func__);
		ql_eeh_close(ndev);
		set_bit(QL_EEH_FATAL, &qdev->flags);
		return PCI_ERS_RESULT_DISCONNECT;
	}

	/* Request a slot reset. */
	return PCI_ERS_RESULT_NEED_RESET;
}

/*
 * This callback is called after the PCI bus has been reset.
 * Basically, this tries to restart the card from scratch.
 * This is a shortened version of the device probe/discovery code;
 * it resembles the first half of the qlge_probe() routine.
 */
static pci_ers_result_t qlge_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *ndev = pci_get_drvdata(pdev);
	struct ql_adapter *qdev = netdev_priv(ndev);

	pdev->error_state = pci_channel_io_normal;

	pci_restore_state(pdev);
	if (pci_enable_device(pdev)) {
		netif_err(qdev, ifup, qdev->ndev,
			  "Cannot re-enable PCI device after reset.\n");
		return PCI_ERS_RESULT_DISCONNECT;
	}
	pci_set_master(pdev);

	if (ql_adapter_reset(qdev)) {
		netif_err(qdev, drv, qdev->ndev, "reset FAILED!\n");
		set_bit(QL_EEH_FATAL, &qdev->flags);
		return PCI_ERS_RESULT_DISCONNECT;
	}

	return PCI_ERS_RESULT_RECOVERED;
}

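/* Final step of EEH recovery: reopen the interface if it was
 * running before the error, restart the poll timer, and reattach
 * the device.
 */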
static void qlge_io_resume(struct pci_dev *pdev)
{
	struct net_device *ndev = pci_get_drvdata(pdev);
	struct ql_adapter *qdev = netdev_priv(ndev);
	int err = 0;

	if (netif_running(ndev)) {
		err = qlge_open(ndev);
		if (err) {
			netif_err(qdev, ifup, qdev->ndev,
				  "Device initialization failed after reset.\n");
			return;
		}
	} else {
		netif_err(qdev, ifup, qdev->ndev,
			  "Device was not running prior to EEH.\n");
	}
	qdev->timer.expires = jiffies + (5*HZ);
	add_timer(&qdev->timer);
	netif_device_attach(ndev);
}

static struct pci_error_handlers qlge_err_handler = {
	.error_detected = qlge_io_error_detected,
	.slot_reset = qlge_io_slot_reset,
	.resume = qlge_io_resume,
};

static int qlge_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *ndev = pci_get_drvdata(pdev);
	struct ql_adapter *qdev = netdev_priv(ndev);
	int err;

	netif_device_detach(ndev);
	del_timer_sync(&qdev->timer);

	if (netif_running(ndev)) {
		err = ql_adapter_down(qdev);
		if (err)
			return err;
	}

	ql_wol(qdev);
	err = pci_save_state(pdev);
	if (err)
		return err;

	pci_disable_device(pdev);

	pci_set_power_state(pdev, pci_choose_state(pdev, state));

	return 0;
}

#ifdef CONFIG_PM
static int qlge_resume(struct pci_dev *pdev)
{
	struct net_device *ndev = pci_get_drvdata(pdev);
	struct ql_adapter *qdev = netdev_priv(ndev);
	int err;

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	err = pci_enable_device(pdev);
	if (err) {
		netif_err(qdev, ifup, qdev->ndev,
			  "Cannot enable PCI device from suspend\n");
		return err;
	}
	pci_set_master(pdev);

	pci_enable_wake(pdev, PCI_D3hot, 0);
	pci_enable_wake(pdev, PCI_D3cold, 0);

	if (netif_running(ndev)) {
		err = ql_adapter_up(qdev);
		if (err)
			return err;
	}

	qdev->timer.expires = jiffies + (5*HZ);
	add_timer(&qdev->timer);
	netif_device_attach(ndev);

	return 0;
}
#endif /* CONFIG_PM */

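/* Shutdown is just a suspend without resume. */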
static void qlge_shutdown(struct pci_dev *pdev)
{
	qlge_suspend(pdev, PMSG_SUSPEND);
}

static struct pci_driver qlge_driver = {
	.name = DRV_NAME,
	.id_table = qlge_pci_tbl,
	.probe = qlge_probe,
	.remove = __devexit_p(qlge_remove),
#ifdef CONFIG_PM
	.suspend = qlge_suspend,
	.resume = qlge_resume,
#endif
	.shutdown = qlge_shutdown,
	.err_handler = &qlge_err_handler
};

static int __init qlge_init_module(void)
{
	return pci_register_driver(&qlge_driver);
}

static void __exit qlge_exit(void)
{
	pci_unregister_driver(&qlge_driver);
}

module_init(qlge_init_module);
module_exit(qlge_exit);