/*******************************************************************************

  Intel 10 Gigabit PCI Express Linux driver
  Copyright(c) 1999 - 2008 Intel Corporation.

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  You should have received a copy of the GNU General Public License along with
  this program; if not, write to the Free Software Foundation, Inc.,
  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Contact Information:
  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497

*******************************************************************************/

#include <linux/types.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/vmalloc.h>
#include <linux/string.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/ipv6.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>

#include "ixgbe.h"
#include "ixgbe_common.h"

char ixgbe_driver_name[] = "ixgbe";
static const char ixgbe_driver_string[] =
        "Intel(R) 10 Gigabit PCI Express Network Driver";

#define DRV_VERSION "1.3.30-k2"
const char ixgbe_driver_version[] = DRV_VERSION;
static char ixgbe_copyright[] = "Copyright (c) 1999-2007 Intel Corporation.";

static const struct ixgbe_info *ixgbe_info_tbl[] = {
        [board_82598] = &ixgbe_82598_info,
};

/* ixgbe_pci_tbl - PCI Device ID Table
 *
 * Wildcard entries (PCI_ANY_ID) should come last
 * Last entry must be all 0s
 *
 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
 *   Class, Class Mask, private data (not used) }
 */
static struct pci_device_id ixgbe_pci_tbl[] = {
        {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AF_DUAL_PORT),
         board_82598 },
        {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AF_SINGLE_PORT),
         board_82598 },
        {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AT),
         board_82598 },
        {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598EB_CX4),
         board_82598 },
        {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_CX4_DUAL_PORT),
         board_82598 },
        {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_DA_DUAL_PORT),
         board_82598 },
        {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM),
         board_82598 },
        {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598EB_XF_LR),
         board_82598 },
        {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598EB_SFP_LOM),
         board_82598 },

        /* required last entry */
        {0, }
};
MODULE_DEVICE_TABLE(pci, ixgbe_pci_tbl);

#ifdef CONFIG_IXGBE_DCA
static int ixgbe_notify_dca(struct notifier_block *, unsigned long event,
                            void *p);
static struct notifier_block dca_notifier = {
        .notifier_call = ixgbe_notify_dca,
        .next          = NULL,
        .priority      = 0
};
#endif

MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
MODULE_DESCRIPTION("Intel(R) 10 Gigabit PCI Express Network Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

#define DEFAULT_DEBUG_LEVEL_SHIFT 3

static void ixgbe_release_hw_control(struct ixgbe_adapter *adapter)
{
        u32 ctrl_ext;

        /* Let firmware take over control of h/w */
        ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
        IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT,
                        ctrl_ext & ~IXGBE_CTRL_EXT_DRV_LOAD);
}

static void ixgbe_get_hw_control(struct ixgbe_adapter *adapter)
{
        u32 ctrl_ext;

        /* Let firmware know the driver has taken over */
        ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
        IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT,
                        ctrl_ext | IXGBE_CTRL_EXT_DRV_LOAD);
}

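/*
 * ixgbe_set_ivar - program one entry of the interrupt vector allocation
 * (IVAR) table.  Each 32-bit IVAR register holds four 8-bit entries, so the
 * register index is int_alloc_entry / 4 and the byte within it is
 * int_alloc_entry % 4; the valid bit (IXGBE_IVAR_ALLOC_VAL) is OR'd into the
 * MSI-X vector number before it is written.
 */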
static void ixgbe_set_ivar(struct ixgbe_adapter *adapter, u16 int_alloc_entry,
                           u8 msix_vector)
{
        u32 ivar, index;

        msix_vector |= IXGBE_IVAR_ALLOC_VAL;
        index = (int_alloc_entry >> 2) & 0x1F;
        ivar = IXGBE_READ_REG(&adapter->hw, IXGBE_IVAR(index));
        ivar &= ~(0xFF << (8 * (int_alloc_entry & 0x3)));
        ivar |= (msix_vector << (8 * (int_alloc_entry & 0x3)));
        IXGBE_WRITE_REG(&adapter->hw, IXGBE_IVAR(index), ivar);
}

static void ixgbe_unmap_and_free_tx_resource(struct ixgbe_adapter *adapter,
                                             struct ixgbe_tx_buffer
                                             *tx_buffer_info)
{
        if (tx_buffer_info->dma) {
                pci_unmap_page(adapter->pdev, tx_buffer_info->dma,
                               tx_buffer_info->length, PCI_DMA_TODEVICE);
                tx_buffer_info->dma = 0;
        }
        if (tx_buffer_info->skb) {
                dev_kfree_skb_any(tx_buffer_info->skb);
                tx_buffer_info->skb = NULL;
        }
        /* tx_buffer_info must be completely set up in the transmit path */
}

static inline bool ixgbe_check_tx_hang(struct ixgbe_adapter *adapter,
                                       struct ixgbe_ring *tx_ring,
                                       unsigned int eop)
{
        struct ixgbe_hw *hw = &adapter->hw;
        u32 head, tail;

        /* Detect a transmit hang in hardware, this serializes the
         * check with the clearing of time_stamp and movement of eop */
        head = IXGBE_READ_REG(hw, tx_ring->head);
        tail = IXGBE_READ_REG(hw, tx_ring->tail);
        adapter->detect_tx_hung = false;
        if ((head != tail) &&
            tx_ring->tx_buffer_info[eop].time_stamp &&
            time_after(jiffies, tx_ring->tx_buffer_info[eop].time_stamp + HZ) &&
            !(IXGBE_READ_REG(&adapter->hw, IXGBE_TFCS) & IXGBE_TFCS_TXOFF)) {
                /* detected Tx unit hang */
                union ixgbe_adv_tx_desc *tx_desc;
                tx_desc = IXGBE_TX_DESC_ADV(*tx_ring, eop);
                DPRINTK(DRV, ERR, "Detected Tx Unit Hang\n"
                        "  Tx Queue             <%d>\n"
                        "  TDH, TDT             <%x>, <%x>\n"
                        "  next_to_use          <%x>\n"
                        "  next_to_clean        <%x>\n"
                        "tx_buffer_info[next_to_clean]\n"
                        "  time_stamp           <%lx>\n"
                        "  jiffies              <%lx>\n",
                        tx_ring->queue_index,
                        head, tail,
                        tx_ring->next_to_use, eop,
                        tx_ring->tx_buffer_info[eop].time_stamp, jiffies);
                return true;
        }

        return false;
}

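/* A single Tx descriptor can carry at most 2^IXGBE_MAX_TXD_PWR (16KB) of data */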
#define IXGBE_MAX_TXD_PWR 14
#define IXGBE_MAX_DATA_PER_TXD (1 << IXGBE_MAX_TXD_PWR)

/* Tx Descriptors needed, worst case */
#define TXD_USE_COUNT(S) (((S) >> IXGBE_MAX_TXD_PWR) + \
                          (((S) & (IXGBE_MAX_DATA_PER_TXD - 1)) ? 1 : 0))
#define DESC_NEEDED (TXD_USE_COUNT(IXGBE_MAX_DATA_PER_TXD) /* skb->data */ + \
        MAX_SKB_FRAGS * TXD_USE_COUNT(PAGE_SIZE) + 1) /* for context */

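/*
 * Tx head write-back: the adapter copies its current head index into the
 * memory word immediately following the last descriptor in the ring; this
 * macro reads that word (the caller converts it from little endian).
 */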
#define GET_TX_HEAD_FROM_RING(ring) (\
        *(volatile u32 *) \
        ((union ixgbe_adv_tx_desc *)(ring)->desc + (ring)->count))
static void ixgbe_tx_timeout(struct net_device *netdev);

/**
 * ixgbe_clean_tx_irq - Reclaim resources after transmit completes
 * @adapter: board private structure
 * @tx_ring: tx ring to clean
 **/
static bool ixgbe_clean_tx_irq(struct ixgbe_adapter *adapter,
                               struct ixgbe_ring *tx_ring)
{
        union ixgbe_adv_tx_desc *tx_desc;
        struct ixgbe_tx_buffer *tx_buffer_info;
        struct net_device *netdev = adapter->netdev;
        struct sk_buff *skb;
        unsigned int i;
        u32 head, oldhead;
        unsigned int count = 0;
        unsigned int total_bytes = 0, total_packets = 0;

        rmb();
        head = GET_TX_HEAD_FROM_RING(tx_ring);
        head = le32_to_cpu(head);
        i = tx_ring->next_to_clean;
        while (1) {
                while (i != head) {
                        tx_desc = IXGBE_TX_DESC_ADV(*tx_ring, i);
                        tx_buffer_info = &tx_ring->tx_buffer_info[i];
                        skb = tx_buffer_info->skb;

                        if (skb) {
                                unsigned int segs, bytecount;

                                /* gso_segs is currently only valid for tcp */
                                segs = skb_shinfo(skb)->gso_segs ?: 1;
                                /* multiply data chunks by size of headers */
                                bytecount = ((segs - 1) * skb_headlen(skb)) +
                                            skb->len;
                                total_packets += segs;
                                total_bytes += bytecount;
                        }

                        ixgbe_unmap_and_free_tx_resource(adapter,
                                                         tx_buffer_info);

                        i++;
                        if (i == tx_ring->count)
                                i = 0;

                        count++;
                        if (count == tx_ring->count)
                                goto done_cleaning;
                }
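                /*
                 * The hardware may have advanced the head index while we were
                 * cleaning; re-read it and only exit once it stops moving.
                 */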
                oldhead = head;
                rmb();
                head = GET_TX_HEAD_FROM_RING(tx_ring);
                head = le32_to_cpu(head);
                if (head == oldhead)
                        goto done_cleaning;
        } /* while (1) */

done_cleaning:
        tx_ring->next_to_clean = i;

#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
        if (unlikely(count && netif_carrier_ok(netdev) &&
                     (IXGBE_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) {
                /* Make sure that anybody stopping the queue after this
                 * sees the new next_to_clean.
                 */
                smp_mb();
                if (__netif_subqueue_stopped(netdev, tx_ring->queue_index) &&
                    !test_bit(__IXGBE_DOWN, &adapter->state)) {
                        netif_wake_subqueue(netdev, tx_ring->queue_index);
                        ++adapter->restart_queue;
                }
        }

        if (adapter->detect_tx_hung) {
                if (ixgbe_check_tx_hang(adapter, tx_ring, i)) {
                        /* schedule immediate reset if we believe we hung */
                        DPRINTK(PROBE, INFO,
                                "tx hang %d detected, resetting adapter\n",
                                adapter->tx_timeout_count + 1);
                        ixgbe_tx_timeout(adapter->netdev);
                }
        }

        /* re-arm the interrupt */
        if ((total_packets >= tx_ring->work_limit) ||
            (count == tx_ring->count))
                IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, tx_ring->v_idx);

        tx_ring->total_bytes += total_bytes;
        tx_ring->total_packets += total_packets;
        tx_ring->stats.bytes += total_bytes;
        tx_ring->stats.packets += total_packets;
        adapter->net_stats.tx_bytes += total_bytes;
        adapter->net_stats.tx_packets += total_packets;
        return (total_packets ? true : false);
}

#ifdef CONFIG_IXGBE_DCA
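/*
 * Direct Cache Access (DCA) support: whenever a ring is serviced on a
 * different CPU than last time, reprogram the per-queue DCA_RXCTRL/DCA_TXCTRL
 * register with the new CPU's DCA tag so descriptor (and, for Rx, header)
 * write-backs are steered toward that CPU's cache.
 */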
static void ixgbe_update_rx_dca(struct ixgbe_adapter *adapter,
                                struct ixgbe_ring *rx_ring)
{
        u32 rxctrl;
        int cpu = get_cpu();
        int q = rx_ring - adapter->rx_ring;

        if (rx_ring->cpu != cpu) {
                rxctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_DCA_RXCTRL(q));
                rxctrl &= ~IXGBE_DCA_RXCTRL_CPUID_MASK;
                rxctrl |= dca3_get_tag(&adapter->pdev->dev, cpu);
                rxctrl |= IXGBE_DCA_RXCTRL_DESC_DCA_EN;
                rxctrl |= IXGBE_DCA_RXCTRL_HEAD_DCA_EN;
                IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_RXCTRL(q), rxctrl);
                rx_ring->cpu = cpu;
        }
        put_cpu();
}

static void ixgbe_update_tx_dca(struct ixgbe_adapter *adapter,
                                struct ixgbe_ring *tx_ring)
{
        u32 txctrl;
        int cpu = get_cpu();
        int q = tx_ring - adapter->tx_ring;

        if (tx_ring->cpu != cpu) {
                txctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_DCA_TXCTRL(q));
                txctrl &= ~IXGBE_DCA_TXCTRL_CPUID_MASK;
                txctrl |= dca3_get_tag(&adapter->pdev->dev, cpu);
                txctrl |= IXGBE_DCA_TXCTRL_DESC_DCA_EN;
                IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_TXCTRL(q), txctrl);
                tx_ring->cpu = cpu;
        }
        put_cpu();
}

static void ixgbe_setup_dca(struct ixgbe_adapter *adapter)
{
        int i;

        if (!(adapter->flags & IXGBE_FLAG_DCA_ENABLED))
                return;

        for (i = 0; i < adapter->num_tx_queues; i++) {
                adapter->tx_ring[i].cpu = -1;
                ixgbe_update_tx_dca(adapter, &adapter->tx_ring[i]);
        }
        for (i = 0; i < adapter->num_rx_queues; i++) {
                adapter->rx_ring[i].cpu = -1;
                ixgbe_update_rx_dca(adapter, &adapter->rx_ring[i]);
        }
}

static int __ixgbe_notify_dca(struct device *dev, void *data)
{
        struct net_device *netdev = dev_get_drvdata(dev);
        struct ixgbe_adapter *adapter = netdev_priv(netdev);
        unsigned long event = *(unsigned long *)data;

        switch (event) {
        case DCA_PROVIDER_ADD:
                /* if we're already enabled, don't do it again */
                if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
                        break;
                /* Always use CB2 mode, difference is masked
                 * in the CB driver. */
                IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL, 2);
                if (dca_add_requester(dev) == 0) {
                        adapter->flags |= IXGBE_FLAG_DCA_ENABLED;
                        ixgbe_setup_dca(adapter);
                        break;
                }
                /* Fall Through since DCA is disabled. */
        case DCA_PROVIDER_REMOVE:
                if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) {
                        dca_remove_requester(dev);
                        adapter->flags &= ~IXGBE_FLAG_DCA_ENABLED;
                        IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL, 1);
                }
                break;
        }

        return 0;
}

#endif /* CONFIG_IXGBE_DCA */
/**
 * ixgbe_receive_skb - Send a completed packet up the stack
 * @adapter: board private structure
 * @skb: packet to send up
 * @status: hardware indication of status of receive
 * @rx_ring: rx descriptor ring (for a specific queue) to setup
 * @rx_desc: rx descriptor
 **/
static void ixgbe_receive_skb(struct ixgbe_adapter *adapter,
                              struct sk_buff *skb, u8 status,
                              struct ixgbe_ring *ring,
                              union ixgbe_adv_rx_desc *rx_desc)
{
        bool is_vlan = (status & IXGBE_RXD_STAT_VP);
        u16 tag = le16_to_cpu(rx_desc->wb.upper.vlan);

        if (adapter->netdev->features & NETIF_F_LRO &&
            skb->ip_summed == CHECKSUM_UNNECESSARY) {
                if (adapter->vlgrp && is_vlan && (tag != 0))
                        lro_vlan_hwaccel_receive_skb(&ring->lro_mgr, skb,
                                                     adapter->vlgrp, tag,
                                                     rx_desc);
                else
                        lro_receive_skb(&ring->lro_mgr, skb, rx_desc);
                ring->lro_used = true;
        } else {
                if (!(adapter->flags & IXGBE_FLAG_IN_NETPOLL)) {
                        if (adapter->vlgrp && is_vlan && (tag != 0))
                                vlan_hwaccel_receive_skb(skb, adapter->vlgrp, tag);
                        else
                                netif_receive_skb(skb);
                } else {
                        if (adapter->vlgrp && is_vlan && (tag != 0))
                                vlan_hwaccel_rx(skb, adapter->vlgrp, tag);
                        else
                                netif_rx(skb);
                }
        }
}

/**
 * ixgbe_rx_checksum - indicate in skb if hw indicated a good cksum
 * @adapter: address of board private structure
 * @status_err: hardware indication of status of receive
 * @skb: skb currently being received and modified
 **/
static inline void ixgbe_rx_checksum(struct ixgbe_adapter *adapter,
                                     u32 status_err, struct sk_buff *skb)
{
        skb->ip_summed = CHECKSUM_NONE;

        /* Rx csum disabled */
        if (!(adapter->flags & IXGBE_FLAG_RX_CSUM_ENABLED))
                return;

        /* if IP and error */
        if ((status_err & IXGBE_RXD_STAT_IPCS) &&
            (status_err & IXGBE_RXDADV_ERR_IPE)) {
                adapter->hw_csum_rx_error++;
                return;
        }

        if (!(status_err & IXGBE_RXD_STAT_L4CS))
                return;

        if (status_err & IXGBE_RXDADV_ERR_TCPE) {
                adapter->hw_csum_rx_error++;
                return;
        }

        /* It must be a TCP or UDP packet with a valid checksum */
        skb->ip_summed = CHECKSUM_UNNECESSARY;
        adapter->hw_csum_rx_good++;
}

/**
 * ixgbe_alloc_rx_buffers - Replace used receive buffers; packet split
 * @adapter: address of board private structure
 **/
static void ixgbe_alloc_rx_buffers(struct ixgbe_adapter *adapter,
                                   struct ixgbe_ring *rx_ring,
                                   int cleaned_count)
{
        struct pci_dev *pdev = adapter->pdev;
        union ixgbe_adv_rx_desc *rx_desc;
        struct ixgbe_rx_buffer *bi;
        unsigned int i;
        unsigned int bufsz = rx_ring->rx_buf_len + NET_IP_ALIGN;

        i = rx_ring->next_to_use;
        bi = &rx_ring->rx_buffer_info[i];

        while (cleaned_count--) {
                rx_desc = IXGBE_RX_DESC_ADV(*rx_ring, i);

                if (!bi->page_dma &&
                    (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED)) {
                        if (!bi->page) {
                                bi->page = alloc_page(GFP_ATOMIC);
                                if (!bi->page) {
                                        adapter->alloc_rx_page_failed++;
                                        goto no_buffers;
                                }
                                bi->page_offset = 0;
                        } else {
                                /* use a half page if we're re-using */
                                bi->page_offset ^= (PAGE_SIZE / 2);
                        }

                        bi->page_dma = pci_map_page(pdev, bi->page,
                                                    bi->page_offset,
                                                    (PAGE_SIZE / 2),
                                                    PCI_DMA_FROMDEVICE);
                }

                if (!bi->skb) {
                        struct sk_buff *skb = netdev_alloc_skb(adapter->netdev,
                                                               bufsz);

                        if (!skb) {
                                adapter->alloc_rx_buff_failed++;
                                goto no_buffers;
                        }

                        /*
                         * Make buffer alignment 2 beyond a 16 byte boundary
                         * this will result in a 16 byte aligned IP header after
                         * the 14 byte MAC header is removed
                         */
                        skb_reserve(skb, NET_IP_ALIGN);

                        bi->skb = skb;
                        bi->dma = pci_map_single(pdev, skb->data, bufsz,
                                                 PCI_DMA_FROMDEVICE);
                }
                /* Refresh the desc even if buffer_addrs didn't change because
                 * each write-back erases this info. */
                if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
                        rx_desc->read.pkt_addr = cpu_to_le64(bi->page_dma);
                        rx_desc->read.hdr_addr = cpu_to_le64(bi->dma);
                } else {
                        rx_desc->read.pkt_addr = cpu_to_le64(bi->dma);
                }

                i++;
                if (i == rx_ring->count)
                        i = 0;
                bi = &rx_ring->rx_buffer_info[i];
        }

no_buffers:
        if (rx_ring->next_to_use != i) {
                rx_ring->next_to_use = i;
                if (i-- == 0)
                        i = (rx_ring->count - 1);

                /*
                 * Force memory writes to complete before letting h/w
                 * know there are new descriptors to fetch.  (Only
                 * applicable for weak-ordered memory model archs,
                 * such as IA-64).
                 */
                wmb();
                writel(i, adapter->hw.hw_addr + rx_ring->tail);
        }
}

static inline u16 ixgbe_get_hdr_info(union ixgbe_adv_rx_desc *rx_desc)
{
        return rx_desc->wb.lower.lo_dword.hs_rss.hdr_info;
}

static inline u16 ixgbe_get_pkt_info(union ixgbe_adv_rx_desc *rx_desc)
{
        return rx_desc->wb.lower.lo_dword.hs_rss.pkt_info;
}

static bool ixgbe_clean_rx_irq(struct ixgbe_adapter *adapter,
                               struct ixgbe_ring *rx_ring,
                               int *work_done, int work_to_do)
{
        struct pci_dev *pdev = adapter->pdev;
        union ixgbe_adv_rx_desc *rx_desc, *next_rxd;
        struct ixgbe_rx_buffer *rx_buffer_info, *next_buffer;
        struct sk_buff *skb;
        unsigned int i;
        u32 len, staterr;
        u16 hdr_info;
        bool cleaned = false;
        int cleaned_count = 0;
        unsigned int total_rx_bytes = 0, total_rx_packets = 0;

        i = rx_ring->next_to_clean;
        rx_desc = IXGBE_RX_DESC_ADV(*rx_ring, i);
        staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
        rx_buffer_info = &rx_ring->rx_buffer_info[i];

        while (staterr & IXGBE_RXD_STAT_DD) {
                u32 upper_len = 0;
                if (*work_done >= work_to_do)
                        break;
                (*work_done)++;

                if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
                        hdr_info = le16_to_cpu(ixgbe_get_hdr_info(rx_desc));
                        len = (hdr_info & IXGBE_RXDADV_HDRBUFLEN_MASK) >>
                              IXGBE_RXDADV_HDRBUFLEN_SHIFT;
                        if (hdr_info & IXGBE_RXDADV_SPH)
                                adapter->rx_hdr_split++;
                        if (len > IXGBE_RX_HDR_SIZE)
                                len = IXGBE_RX_HDR_SIZE;
                        upper_len = le16_to_cpu(rx_desc->wb.upper.length);
                } else {
                        len = le16_to_cpu(rx_desc->wb.upper.length);
                }

                cleaned = true;
                skb = rx_buffer_info->skb;
                prefetch(skb->data - NET_IP_ALIGN);
                rx_buffer_info->skb = NULL;

                if (len && !skb_shinfo(skb)->nr_frags) {
                        pci_unmap_single(pdev, rx_buffer_info->dma,
                                         rx_ring->rx_buf_len + NET_IP_ALIGN,
                                         PCI_DMA_FROMDEVICE);
                        skb_put(skb, len);
                }

                if (upper_len) {
                        pci_unmap_page(pdev, rx_buffer_info->page_dma,
                                       PAGE_SIZE / 2, PCI_DMA_FROMDEVICE);
                        rx_buffer_info->page_dma = 0;
                        skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
                                           rx_buffer_info->page,
                                           rx_buffer_info->page_offset,
                                           upper_len);

                        if ((rx_ring->rx_buf_len > (PAGE_SIZE / 2)) ||
                            (page_count(rx_buffer_info->page) != 1))
                                rx_buffer_info->page = NULL;
                        else
                                get_page(rx_buffer_info->page);

                        skb->len += upper_len;
                        skb->data_len += upper_len;
                        skb->truesize += upper_len;
                }

                i++;
                if (i == rx_ring->count)
                        i = 0;
                next_buffer = &rx_ring->rx_buffer_info[i];

                next_rxd = IXGBE_RX_DESC_ADV(*rx_ring, i);
                prefetch(next_rxd);

                cleaned_count++;
                if (staterr & IXGBE_RXD_STAT_EOP) {
                        rx_ring->stats.packets++;
                        rx_ring->stats.bytes += skb->len;
                } else {
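                        /* not end-of-packet: the frame continues in the next
                         * descriptor, so carry the skb over to that buffer */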
                        rx_buffer_info->skb = next_buffer->skb;
                        rx_buffer_info->dma = next_buffer->dma;
                        next_buffer->skb = skb;
                        next_buffer->dma = 0;
                        adapter->non_eop_descs++;
                        goto next_desc;
                }

                if (staterr & IXGBE_RXDADV_ERR_FRAME_ERR_MASK) {
                        dev_kfree_skb_irq(skb);
                        goto next_desc;
                }

                ixgbe_rx_checksum(adapter, staterr, skb);

                /* probably a little skewed due to removing CRC */
                total_rx_bytes += skb->len;
                total_rx_packets++;

                skb->protocol = eth_type_trans(skb, adapter->netdev);
                ixgbe_receive_skb(adapter, skb, staterr, rx_ring, rx_desc);

next_desc:
                rx_desc->wb.upper.status_error = 0;

                /* return some buffers to hardware, one at a time is too slow */
                if (cleaned_count >= IXGBE_RX_BUFFER_WRITE) {
                        ixgbe_alloc_rx_buffers(adapter, rx_ring, cleaned_count);
                        cleaned_count = 0;
                }

                /* use prefetched values */
                rx_desc = next_rxd;
                rx_buffer_info = next_buffer;

                staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
        }

        if (rx_ring->lro_used) {
                lro_flush_all(&rx_ring->lro_mgr);
                rx_ring->lro_used = false;
        }

        rx_ring->next_to_clean = i;
        cleaned_count = IXGBE_DESC_UNUSED(rx_ring);

        if (cleaned_count)
                ixgbe_alloc_rx_buffers(adapter, rx_ring, cleaned_count);

        rx_ring->total_packets += total_rx_packets;
        rx_ring->total_bytes += total_rx_bytes;
        adapter->net_stats.rx_bytes += total_rx_bytes;
        adapter->net_stats.rx_packets += total_rx_packets;

        return cleaned;
}

static int ixgbe_clean_rxonly(struct napi_struct *, int);
/**
 * ixgbe_configure_msix - Configure MSI-X hardware
 * @adapter: board private structure
 *
 * ixgbe_configure_msix sets up the hardware to properly generate MSI-X
 * interrupts.
 **/
static void ixgbe_configure_msix(struct ixgbe_adapter *adapter)
{
        struct ixgbe_q_vector *q_vector;
        int i, j, q_vectors, v_idx, r_idx;
        u32 mask;

        q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;

        /* Populate the IVAR table and set the ITR values to the
         * corresponding register.
         */
        for (v_idx = 0; v_idx < q_vectors; v_idx++) {
                q_vector = &adapter->q_vector[v_idx];
                /* XXX for_each_bit(...) */
                r_idx = find_first_bit(q_vector->rxr_idx,
                                       adapter->num_rx_queues);

                for (i = 0; i < q_vector->rxr_count; i++) {
                        j = adapter->rx_ring[r_idx].reg_idx;
                        ixgbe_set_ivar(adapter, IXGBE_IVAR_RX_QUEUE(j), v_idx);
                        r_idx = find_next_bit(q_vector->rxr_idx,
                                              adapter->num_rx_queues,
                                              r_idx + 1);
                }
                r_idx = find_first_bit(q_vector->txr_idx,
                                       adapter->num_tx_queues);

                for (i = 0; i < q_vector->txr_count; i++) {
                        j = adapter->tx_ring[r_idx].reg_idx;
                        ixgbe_set_ivar(adapter, IXGBE_IVAR_TX_QUEUE(j), v_idx);
                        r_idx = find_next_bit(q_vector->txr_idx,
                                              adapter->num_tx_queues,
                                              r_idx + 1);
                }

                /* if this is a tx only vector halve the interrupt rate */
                if (q_vector->txr_count && !q_vector->rxr_count)
                        q_vector->eitr = (adapter->eitr_param >> 1);
                else
                        /* rx only */
                        q_vector->eitr = adapter->eitr_param;

                IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITR(v_idx),
                                EITR_INTS_PER_SEC_TO_REG(q_vector->eitr));
        }

        ixgbe_set_ivar(adapter, IXGBE_IVAR_OTHER_CAUSES_INDEX, v_idx);
        IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITR(v_idx), 1950);

        /* set up to autoclear timer, and the vectors */
        mask = IXGBE_EIMS_ENABLE_MASK;
        mask &= ~(IXGBE_EIMS_OTHER | IXGBE_EIMS_LSC);
        IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIAC, mask);
}

enum latency_range {
        lowest_latency = 0,
        low_latency = 1,
        bulk_latency = 2,
        latency_invalid = 255
};

/**
 * ixgbe_update_itr - update the dynamic ITR value based on statistics
 * @adapter: pointer to adapter
 * @eitr: eitr setting (ints per sec) to give last timeslice
 * @itr_setting: current throttle rate in ints/second
 * @packets: the number of packets during this measurement interval
 * @bytes: the number of bytes during this measurement interval
 *
 * Stores a new ITR value based on packets and byte
 * counts during the last interrupt.  The advantage of per interrupt
 * computation is faster updates and more accurate ITR for the current
 * traffic pattern.  Constants in this function were computed
 * based on theoretical maximum wire speed and thresholds were set based
 * on testing data as well as attempting to minimize response time
 * while increasing bulk throughput.
 * this functionality is controlled by the InterruptThrottleRate module
 * parameter (see ixgbe_param.c)
 **/
static u8 ixgbe_update_itr(struct ixgbe_adapter *adapter,
                           u32 eitr, u8 itr_setting,
                           int packets, int bytes)
{
        unsigned int retval = itr_setting;
        u32 timepassed_us;
        u64 bytes_perint;

        if (packets == 0)
                goto update_itr_done;


        /* simple throttlerate management
         *    0-20MB/s lowest (100000 ints/s)
         *   20-100MB/s low   (20000 ints/s)
         *  100-1249MB/s bulk (8000 ints/s)
         */
        /* what was last interrupt timeslice? */
        timepassed_us = 1000000/eitr;
        bytes_perint = bytes / timepassed_us; /* bytes/usec */

        switch (itr_setting) {
        case lowest_latency:
                if (bytes_perint > adapter->eitr_low)
                        retval = low_latency;
                break;
        case low_latency:
                if (bytes_perint > adapter->eitr_high)
                        retval = bulk_latency;
                else if (bytes_perint <= adapter->eitr_low)
                        retval = lowest_latency;
                break;
        case bulk_latency:
                if (bytes_perint <= adapter->eitr_high)
                        retval = low_latency;
                break;
        }

update_itr_done:
        return retval;
}

static void ixgbe_set_itr_msix(struct ixgbe_q_vector *q_vector)
{
        struct ixgbe_adapter *adapter = q_vector->adapter;
        struct ixgbe_hw *hw = &adapter->hw;
        u32 new_itr;
        u8 current_itr, ret_itr;
        int i, r_idx, v_idx = ((void *)q_vector - (void *)(adapter->q_vector)) /
                              sizeof(struct ixgbe_q_vector);
        struct ixgbe_ring *rx_ring, *tx_ring;

        r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
        for (i = 0; i < q_vector->txr_count; i++) {
                tx_ring = &(adapter->tx_ring[r_idx]);
                ret_itr = ixgbe_update_itr(adapter, q_vector->eitr,
                                           q_vector->tx_itr,
                                           tx_ring->total_packets,
                                           tx_ring->total_bytes);
                /* if the result for this queue would decrease interrupt
                 * rate for this vector then use that result */
                q_vector->tx_itr = ((q_vector->tx_itr > ret_itr) ?
                                    q_vector->tx_itr - 1 : ret_itr);
                r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues,
                                      r_idx + 1);
        }

        r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
        for (i = 0; i < q_vector->rxr_count; i++) {
                rx_ring = &(adapter->rx_ring[r_idx]);
                ret_itr = ixgbe_update_itr(adapter, q_vector->eitr,
                                           q_vector->rx_itr,
                                           rx_ring->total_packets,
                                           rx_ring->total_bytes);
                /* if the result for this queue would decrease interrupt
                 * rate for this vector then use that result */
                q_vector->rx_itr = ((q_vector->rx_itr > ret_itr) ?
                                    q_vector->rx_itr - 1 : ret_itr);
                r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues,
                                      r_idx + 1);
        }

        current_itr = max(q_vector->rx_itr, q_vector->tx_itr);

        switch (current_itr) {
        /* counts and packets in update_itr are dependent on these numbers */
        case lowest_latency:
                new_itr = 100000;
                break;
        case low_latency:
                new_itr = 20000; /* aka hwitr = ~200 */
                break;
        case bulk_latency:
        default:
                new_itr = 8000;
                break;
        }

        if (new_itr != q_vector->eitr) {
                u32 itr_reg;
                /* do an exponential smoothing */
                new_itr = ((q_vector->eitr * 90)/100) + ((new_itr * 10)/100);
                q_vector->eitr = new_itr;
                itr_reg = EITR_INTS_PER_SEC_TO_REG(new_itr);
                /* must write high and low 16 bits to reset counter */
                DPRINTK(TX_ERR, DEBUG, "writing eitr(%d): %08X\n", v_idx,
                        itr_reg);
                IXGBE_WRITE_REG(hw, IXGBE_EITR(v_idx), itr_reg | (itr_reg)<<16);
        }

        return;
}

static void ixgbe_check_fan_failure(struct ixgbe_adapter *adapter, u32 eicr)
{
        struct ixgbe_hw *hw = &adapter->hw;

        if ((adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) &&
            (eicr & IXGBE_EICR_GPI_SDP1)) {
                DPRINTK(PROBE, CRIT, "Fan has stopped, replace the adapter\n");
                /* write to clear the interrupt */
                IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1);
        }
}

static void ixgbe_check_lsc(struct ixgbe_adapter *adapter)
{
        struct ixgbe_hw *hw = &adapter->hw;

        adapter->lsc_int++;
        adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
        adapter->link_check_timeout = jiffies;
        if (!test_bit(__IXGBE_DOWN, &adapter->state)) {
                IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_LSC);
                schedule_work(&adapter->watchdog_task);
        }
}

static irqreturn_t ixgbe_msix_lsc(int irq, void *data)
{
        struct net_device *netdev = data;
        struct ixgbe_adapter *adapter = netdev_priv(netdev);
        struct ixgbe_hw *hw = &adapter->hw;
        u32 eicr = IXGBE_READ_REG(hw, IXGBE_EICR);

        if (eicr & IXGBE_EICR_LSC)
                ixgbe_check_lsc(adapter);

        ixgbe_check_fan_failure(adapter, eicr);

        if (!test_bit(__IXGBE_DOWN, &adapter->state))
                IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_OTHER);

        return IRQ_HANDLED;
}

static irqreturn_t ixgbe_msix_clean_tx(int irq, void *data)
{
        struct ixgbe_q_vector *q_vector = data;
        struct ixgbe_adapter *adapter = q_vector->adapter;
        struct ixgbe_ring *tx_ring;
        int i, r_idx;

        if (!q_vector->txr_count)
                return IRQ_HANDLED;

        r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
        for (i = 0; i < q_vector->txr_count; i++) {
                tx_ring = &(adapter->tx_ring[r_idx]);
#ifdef CONFIG_IXGBE_DCA
                if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
                        ixgbe_update_tx_dca(adapter, tx_ring);
#endif
                tx_ring->total_bytes = 0;
                tx_ring->total_packets = 0;
                ixgbe_clean_tx_irq(adapter, tx_ring);
                r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues,
                                      r_idx + 1);
        }

        return IRQ_HANDLED;
}

/**
 * ixgbe_msix_clean_rx - single unshared vector rx clean (all queues)
 * @irq: unused
 * @data: pointer to our q_vector struct for this interrupt vector
 **/
static irqreturn_t ixgbe_msix_clean_rx(int irq, void *data)
{
        struct ixgbe_q_vector *q_vector = data;
        struct ixgbe_adapter *adapter = q_vector->adapter;
        struct ixgbe_ring *rx_ring;
        int r_idx;
        int i;

        r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
        for (i = 0; i < q_vector->rxr_count; i++) {
                rx_ring = &(adapter->rx_ring[r_idx]);
                rx_ring->total_bytes = 0;
                rx_ring->total_packets = 0;
                r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues,
                                      r_idx + 1);
        }

        if (!q_vector->rxr_count)
                return IRQ_HANDLED;

        r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
        rx_ring = &(adapter->rx_ring[r_idx]);
        /* disable interrupts on this vector only */
        IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, rx_ring->v_idx);
        netif_rx_schedule(adapter->netdev, &q_vector->napi);

        return IRQ_HANDLED;
}

static irqreturn_t ixgbe_msix_clean_many(int irq, void *data)
{
        ixgbe_msix_clean_rx(irq, data);
        ixgbe_msix_clean_tx(irq, data);

        return IRQ_HANDLED;
}

/**
 * ixgbe_clean_rxonly - msix (aka one shot) rx clean routine
 * @napi: napi struct with our devices info in it
 * @budget: amount of work driver is allowed to do this pass, in packets
 *
 * This function is optimized for cleaning one queue only on a single
 * q_vector!!!
 **/
static int ixgbe_clean_rxonly(struct napi_struct *napi, int budget)
{
        struct ixgbe_q_vector *q_vector =
                               container_of(napi, struct ixgbe_q_vector, napi);
        struct ixgbe_adapter *adapter = q_vector->adapter;
        struct ixgbe_ring *rx_ring = NULL;
        int work_done = 0;
        long r_idx;

        r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
        rx_ring = &(adapter->rx_ring[r_idx]);
#ifdef CONFIG_IXGBE_DCA
        if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
                ixgbe_update_rx_dca(adapter, rx_ring);
#endif

        ixgbe_clean_rx_irq(adapter, rx_ring, &work_done, budget);

        /* If all Rx work done, exit the polling mode */
        if (work_done < budget) {
                netif_rx_complete(adapter->netdev, napi);
                if (adapter->itr_setting & 3)
                        ixgbe_set_itr_msix(q_vector);
                if (!test_bit(__IXGBE_DOWN, &adapter->state))
                        IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, rx_ring->v_idx);
        }

        return work_done;
}

/**
 * ixgbe_clean_rxonly_many - msix (aka one shot) rx clean routine
 * @napi: napi struct with our devices info in it
 * @budget: amount of work driver is allowed to do this pass, in packets
 *
 * This function will clean more than one rx queue associated with a
 * q_vector.
 **/
static int ixgbe_clean_rxonly_many(struct napi_struct *napi, int budget)
{
        struct ixgbe_q_vector *q_vector =
                               container_of(napi, struct ixgbe_q_vector, napi);
        struct ixgbe_adapter *adapter = q_vector->adapter;
        struct ixgbe_ring *rx_ring = NULL;
        int work_done = 0, i;
        long r_idx;
        u16 enable_mask = 0;

        /* attempt to distribute budget to each queue fairly, but don't allow
         * the budget to go below 1 because we'll exit polling */
        budget /= (q_vector->rxr_count ?: 1);
        budget = max(budget, 1);
        r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
        for (i = 0; i < q_vector->rxr_count; i++) {
                rx_ring = &(adapter->rx_ring[r_idx]);
#ifdef CONFIG_IXGBE_DCA
                if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
                        ixgbe_update_rx_dca(adapter, rx_ring);
#endif
                ixgbe_clean_rx_irq(adapter, rx_ring, &work_done, budget);
                enable_mask |= rx_ring->v_idx;
                r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues,
                                      r_idx + 1);
        }

        r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
        rx_ring = &(adapter->rx_ring[r_idx]);
        /* If all Rx work done, exit the polling mode */
        if (work_done < budget) {
                netif_rx_complete(adapter->netdev, napi);
                if (adapter->itr_setting & 3)
                        ixgbe_set_itr_msix(q_vector);
                if (!test_bit(__IXGBE_DOWN, &adapter->state))
                        IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, enable_mask);
                return 0;
        }

        return work_done;
}
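
/*
 * Record, in both the q_vector and the ring, which MSI-X vector services a
 * given Rx/Tx ring.  The per-ring v_idx bit mask is what the interrupt and
 * clean-up paths later write to EIMS/EIMC/EICS to enable, disable or trigger
 * just that ring's vector.
 */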
static inline void map_vector_to_rxq(struct ixgbe_adapter *a, int v_idx,
                                     int r_idx)
{
        a->q_vector[v_idx].adapter = a;
        set_bit(r_idx, a->q_vector[v_idx].rxr_idx);
        a->q_vector[v_idx].rxr_count++;
        a->rx_ring[r_idx].v_idx = 1 << v_idx;
}

static inline void map_vector_to_txq(struct ixgbe_adapter *a, int v_idx,
                                     int r_idx)
{
        a->q_vector[v_idx].adapter = a;
        set_bit(r_idx, a->q_vector[v_idx].txr_idx);
        a->q_vector[v_idx].txr_count++;
        a->tx_ring[r_idx].v_idx = 1 << v_idx;
}

/**
 * ixgbe_map_rings_to_vectors - Maps descriptor rings to vectors
 * @adapter: board private structure to initialize
 * @vectors: allotted vector count for descriptor rings
 *
 * This function maps descriptor rings to the queue-specific vectors
 * we were allotted through the MSI-X enabling code.  Ideally, we'd have
 * one vector per ring/queue, but on a constrained vector budget, we
 * group the rings as "efficiently" as possible.  You would add new
 * mapping configurations in here.
 **/
static int ixgbe_map_rings_to_vectors(struct ixgbe_adapter *adapter,
                                      int vectors)
{
        int v_start = 0;
        int rxr_idx = 0, txr_idx = 0;
        int rxr_remaining = adapter->num_rx_queues;
        int txr_remaining = adapter->num_tx_queues;
        int i, j;
        int rqpv, tqpv;
        int err = 0;

        /* No mapping required if MSI-X is disabled. */
        if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED))
                goto out;

        /*
         * The ideal configuration...
         * We have enough vectors to map one per queue.
         */
        if (vectors == adapter->num_rx_queues + adapter->num_tx_queues) {
                for (; rxr_idx < rxr_remaining; v_start++, rxr_idx++)
                        map_vector_to_rxq(adapter, v_start, rxr_idx);

                for (; txr_idx < txr_remaining; v_start++, txr_idx++)
                        map_vector_to_txq(adapter, v_start, txr_idx);

                goto out;
        }

        /*
         * If we don't have enough vectors for a 1-to-1
         * mapping, we'll have to group them so there are
         * multiple queues per vector.
         */
        /* Re-adjusting *qpv takes care of the remainder. */
        for (i = v_start; i < vectors; i++) {
                rqpv = DIV_ROUND_UP(rxr_remaining, vectors - i);
                for (j = 0; j < rqpv; j++) {
                        map_vector_to_rxq(adapter, i, rxr_idx);
                        rxr_idx++;
                        rxr_remaining--;
                }
        }
        for (i = v_start; i < vectors; i++) {
                tqpv = DIV_ROUND_UP(txr_remaining, vectors - i);
                for (j = 0; j < tqpv; j++) {
                        map_vector_to_txq(adapter, i, txr_idx);
                        txr_idx++;
                        txr_remaining--;
                }
        }

out:
        return err;
}

1198/**
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08001199 * ixgbe_request_msix_irqs - Initialize MSI-X interrupts
1200 * @adapter: board private structure
1201 *
1202 * ixgbe_request_msix_irqs allocates MSI-X vectors and requests
1203 * interrupts from the kernel.
1204 **/
1205static int ixgbe_request_msix_irqs(struct ixgbe_adapter *adapter)
1206{
1207 struct net_device *netdev = adapter->netdev;
1208 irqreturn_t (*handler)(int, void *);
1209 int i, vector, q_vectors, err;
Robert Olssoncb13fc22008-11-25 16:43:52 -08001210 int ri=0, ti=0;
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08001211
1212 /* Decrement for Other and TCP Timer vectors */
1213 q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
1214
1215 /* Map the Tx/Rx rings to the vectors we were allotted. */
1216 err = ixgbe_map_rings_to_vectors(adapter, q_vectors);
1217 if (err)
1218 goto out;
1219
1220#define SET_HANDLER(_v) ((!(_v)->rxr_count) ? &ixgbe_msix_clean_tx : \
Peter P Waskiewiczb4617242008-09-11 20:04:46 -07001221 (!(_v)->txr_count) ? &ixgbe_msix_clean_rx : \
1222 &ixgbe_msix_clean_many)
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08001223 for (vector = 0; vector < q_vectors; vector++) {
1224 handler = SET_HANDLER(&adapter->q_vector[vector]);
Robert Olssoncb13fc22008-11-25 16:43:52 -08001225
1226 if(handler == &ixgbe_msix_clean_rx) {
1227 sprintf(adapter->name[vector], "%s-%s-%d",
1228 netdev->name, "rx", ri++);
1229 }
1230 else if(handler == &ixgbe_msix_clean_tx) {
1231 sprintf(adapter->name[vector], "%s-%s-%d",
1232 netdev->name, "tx", ti++);
1233 }
1234 else
1235 sprintf(adapter->name[vector], "%s-%s-%d",
1236 netdev->name, "TxRx", vector);
1237
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08001238 err = request_irq(adapter->msix_entries[vector].vector,
Peter P Waskiewiczb4617242008-09-11 20:04:46 -07001239 handler, 0, adapter->name[vector],
1240 &(adapter->q_vector[vector]));
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08001241 if (err) {
1242 DPRINTK(PROBE, ERR,
Peter P Waskiewiczb4617242008-09-11 20:04:46 -07001243 "request_irq failed for MSIX interrupt "
1244 "Error: %d\n", err);
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08001245 goto free_queue_irqs;
1246 }
1247 }
1248
1249 sprintf(adapter->name[vector], "%s:lsc", netdev->name);
1250 err = request_irq(adapter->msix_entries[vector].vector,
Peter P Waskiewiczb4617242008-09-11 20:04:46 -07001251 &ixgbe_msix_lsc, 0, adapter->name[vector], netdev);
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08001252 if (err) {
1253 DPRINTK(PROBE, ERR,
1254 "request_irq for msix_lsc failed: %d\n", err);
1255 goto free_queue_irqs;
1256 }
1257
1258 return 0;
1259
1260free_queue_irqs:
	for (i = vector - 1; i >= 0; i--)
		free_irq(adapter->msix_entries[i].vector,
		         &(adapter->q_vector[i]));
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08001264 adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED;
1265 pci_disable_msix(adapter->pdev);
1266 kfree(adapter->msix_entries);
1267 adapter->msix_entries = NULL;
1268out:
1269 return err;
1270}
1271
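/*
 * ixgbe_set_itr - update the dynamic ITR value for the legacy/MSI vector
 * @adapter: board private structure
 *
 * Recalculates the rx and tx ITR classes from the byte/packet counts of
 * ring 0, takes the larger of the two classes, maps it to an interrupt
 * rate, smooths that against the previous value and writes the result to
 * EITR register 0.
 */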
Ayyappan Veeraiyanf494e8f2008-03-03 15:03:57 -08001272static void ixgbe_set_itr(struct ixgbe_adapter *adapter)
1273{
1274 struct ixgbe_hw *hw = &adapter->hw;
1275 struct ixgbe_q_vector *q_vector = adapter->q_vector;
1276 u8 current_itr;
1277 u32 new_itr = q_vector->eitr;
1278 struct ixgbe_ring *rx_ring = &adapter->rx_ring[0];
1279 struct ixgbe_ring *tx_ring = &adapter->tx_ring[0];
1280
Jesse Brandeburg30efa5a2008-09-11 19:58:14 -07001281 q_vector->tx_itr = ixgbe_update_itr(adapter, new_itr,
Peter P Waskiewiczb4617242008-09-11 20:04:46 -07001282 q_vector->tx_itr,
1283 tx_ring->total_packets,
1284 tx_ring->total_bytes);
Jesse Brandeburg30efa5a2008-09-11 19:58:14 -07001285 q_vector->rx_itr = ixgbe_update_itr(adapter, new_itr,
Peter P Waskiewiczb4617242008-09-11 20:04:46 -07001286 q_vector->rx_itr,
1287 rx_ring->total_packets,
1288 rx_ring->total_bytes);
Ayyappan Veeraiyanf494e8f2008-03-03 15:03:57 -08001289
Jesse Brandeburg30efa5a2008-09-11 19:58:14 -07001290 current_itr = max(q_vector->rx_itr, q_vector->tx_itr);
Ayyappan Veeraiyanf494e8f2008-03-03 15:03:57 -08001291
1292 switch (current_itr) {
1293 /* counts and packets in update_itr are dependent on these numbers */
1294 case lowest_latency:
1295 new_itr = 100000;
1296 break;
1297 case low_latency:
1298 new_itr = 20000; /* aka hwitr = ~200 */
1299 break;
1300 case bulk_latency:
1301 new_itr = 8000;
1302 break;
1303 default:
1304 break;
1305 }
1306
1307 if (new_itr != q_vector->eitr) {
1308 u32 itr_reg;
1309 /* do an exponential smoothing */
1310 new_itr = ((q_vector->eitr * 90)/100) + ((new_itr * 10)/100);
1311 q_vector->eitr = new_itr;
1312 itr_reg = EITR_INTS_PER_SEC_TO_REG(new_itr);
1313 /* must write high and low 16 bits to reset counter */
1314 IXGBE_WRITE_REG(hw, IXGBE_EITR(0), itr_reg | (itr_reg)<<16);
1315 }
1316
1317 return;
1318}
1319
Alexey Dobriyan79aefa42008-11-19 14:17:02 -08001320/**
1321 * ixgbe_irq_disable - Mask off interrupt generation on the NIC
1322 * @adapter: board private structure
1323 **/
1324static inline void ixgbe_irq_disable(struct ixgbe_adapter *adapter)
1325{
1326 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, ~0);
1327 IXGBE_WRITE_FLUSH(&adapter->hw);
1328 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
1329 int i;
1330 for (i = 0; i < adapter->num_msix_vectors; i++)
1331 synchronize_irq(adapter->msix_entries[i].vector);
1332 } else {
1333 synchronize_irq(adapter->pdev->irq);
1334 }
1335}
1336
1337/**
1338 * ixgbe_irq_enable - Enable default interrupt generation settings
1339 * @adapter: board private structure
1340 **/
1341static inline void ixgbe_irq_enable(struct ixgbe_adapter *adapter)
1342{
1343 u32 mask;
1344 mask = IXGBE_EIMS_ENABLE_MASK;
David S. Miller6ab33d52008-11-20 16:44:00 -08001345 if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE)
1346 mask |= IXGBE_EIMS_GPI_SDP1;
Alexey Dobriyan79aefa42008-11-19 14:17:02 -08001347 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, mask);
1348 IXGBE_WRITE_FLUSH(&adapter->hw);
1349}
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08001350
1351/**
1352 * ixgbe_intr - legacy mode Interrupt Handler
Auke Kok9a799d72007-09-15 14:07:45 -07001353 * @irq: interrupt number
1354 * @data: pointer to a network interface device structure
Auke Kok9a799d72007-09-15 14:07:45 -07001355 **/
1356static irqreturn_t ixgbe_intr(int irq, void *data)
1357{
1358 struct net_device *netdev = data;
1359 struct ixgbe_adapter *adapter = netdev_priv(netdev);
1360 struct ixgbe_hw *hw = &adapter->hw;
1361 u32 eicr;
1362
	/* for NAPI, using EIAM to auto-mask tx/rx interrupt bits on read,
	 * therefore no explicit interrupt disable is necessary */
1365 eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
Jesse Brandeburgf47cf662008-09-11 19:56:14 -07001366 if (!eicr) {
1367 /* shared interrupt alert!
1368 * make sure interrupts are enabled because the read will
1369 * have disabled interrupts due to EIAM */
1370 ixgbe_irq_enable(adapter);
Auke Kok9a799d72007-09-15 14:07:45 -07001371 return IRQ_NONE; /* Not our interrupt */
Jesse Brandeburgf47cf662008-09-11 19:56:14 -07001372 }
Auke Kok9a799d72007-09-15 14:07:45 -07001373
Jesse Brandeburgcf8280e2008-09-11 19:55:32 -07001374 if (eicr & IXGBE_EICR_LSC)
1375 ixgbe_check_lsc(adapter);
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08001376
Jesse Brandeburg0befdb32008-10-31 00:46:40 -07001377 ixgbe_check_fan_failure(adapter, eicr);
1378
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08001379 if (netif_rx_schedule_prep(netdev, &adapter->q_vector[0].napi)) {
Ayyappan Veeraiyanf494e8f2008-03-03 15:03:57 -08001380 adapter->tx_ring[0].total_packets = 0;
1381 adapter->tx_ring[0].total_bytes = 0;
1382 adapter->rx_ring[0].total_packets = 0;
1383 adapter->rx_ring[0].total_bytes = 0;
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08001384 /* would disable interrupts here but EIAM disabled it */
1385 __netif_rx_schedule(netdev, &adapter->q_vector[0].napi);
Auke Kok9a799d72007-09-15 14:07:45 -07001386 }
1387
1388 return IRQ_HANDLED;
1389}
1390
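/*
 * ixgbe_reset_q_vectors - clear the ring-to-vector mappings
 * @adapter: board private structure
 *
 * Zeroes the rx/tx ring index bitmaps and counts of every queue vector so
 * that the mapping can be rebuilt the next time interrupts are requested.
 */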
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08001391static inline void ixgbe_reset_q_vectors(struct ixgbe_adapter *adapter)
1392{
1393 int i, q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
1394
1395 for (i = 0; i < q_vectors; i++) {
1396 struct ixgbe_q_vector *q_vector = &adapter->q_vector[i];
1397 bitmap_zero(q_vector->rxr_idx, MAX_RX_QUEUES);
1398 bitmap_zero(q_vector->txr_idx, MAX_TX_QUEUES);
1399 q_vector->rxr_count = 0;
1400 q_vector->txr_count = 0;
1401 }
1402}
1403
Auke Kok9a799d72007-09-15 14:07:45 -07001404/**
1405 * ixgbe_request_irq - initialize interrupts
1406 * @adapter: board private structure
1407 *
1408 * Attempts to configure interrupts using the best available
1409 * capabilities of the hardware and kernel.
1410 **/
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08001411static int ixgbe_request_irq(struct ixgbe_adapter *adapter)
Auke Kok9a799d72007-09-15 14:07:45 -07001412{
1413 struct net_device *netdev = adapter->netdev;
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08001414 int err;
Auke Kok9a799d72007-09-15 14:07:45 -07001415
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08001416 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
1417 err = ixgbe_request_msix_irqs(adapter);
1418 } else if (adapter->flags & IXGBE_FLAG_MSI_ENABLED) {
1419 err = request_irq(adapter->pdev->irq, &ixgbe_intr, 0,
Peter P Waskiewiczb4617242008-09-11 20:04:46 -07001420 netdev->name, netdev);
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08001421 } else {
1422 err = request_irq(adapter->pdev->irq, &ixgbe_intr, IRQF_SHARED,
Peter P Waskiewiczb4617242008-09-11 20:04:46 -07001423 netdev->name, netdev);
Auke Kok9a799d72007-09-15 14:07:45 -07001424 }
1425
Auke Kok9a799d72007-09-15 14:07:45 -07001426 if (err)
1427 DPRINTK(PROBE, ERR, "request_irq failed, Error %d\n", err);
1428
Auke Kok9a799d72007-09-15 14:07:45 -07001429 return err;
1430}
1431
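/*
 * ixgbe_free_irq - release the interrupts owned by the driver
 * @adapter: board private structure
 *
 * In MSI-X mode this frees the link (last) vector first, then each queue
 * vector, and finally clears the ring-to-vector mappings; in MSI/legacy
 * mode only the single line interrupt is freed.
 */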
1432static void ixgbe_free_irq(struct ixgbe_adapter *adapter)
1433{
1434 struct net_device *netdev = adapter->netdev;
1435
1436 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08001437 int i, q_vectors;
Auke Kok9a799d72007-09-15 14:07:45 -07001438
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08001439 q_vectors = adapter->num_msix_vectors;
1440
1441 i = q_vectors - 1;
Auke Kok9a799d72007-09-15 14:07:45 -07001442 free_irq(adapter->msix_entries[i].vector, netdev);
Auke Kok9a799d72007-09-15 14:07:45 -07001443
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08001444 i--;
1445 for (; i >= 0; i--) {
1446 free_irq(adapter->msix_entries[i].vector,
Peter P Waskiewiczb4617242008-09-11 20:04:46 -07001447 &(adapter->q_vector[i]));
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08001448 }
1449
1450 ixgbe_reset_q_vectors(adapter);
1451 } else {
1452 free_irq(adapter->pdev->irq, netdev);
Auke Kok9a799d72007-09-15 14:07:45 -07001453 }
1454}
1455
1456/**
Auke Kok9a799d72007-09-15 14:07:45 -07001457 * ixgbe_configure_msi_and_legacy - Initialize PIN (INTA...) and MSI interrupts
1458 *
1459 **/
1460static void ixgbe_configure_msi_and_legacy(struct ixgbe_adapter *adapter)
1461{
Auke Kok9a799d72007-09-15 14:07:45 -07001462 struct ixgbe_hw *hw = &adapter->hw;
1463
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08001464 IXGBE_WRITE_REG(hw, IXGBE_EITR(0),
Jesse Brandeburg30efa5a2008-09-11 19:58:14 -07001465 EITR_INTS_PER_SEC_TO_REG(adapter->eitr_param));
Auke Kok9a799d72007-09-15 14:07:45 -07001466
1467 ixgbe_set_ivar(adapter, IXGBE_IVAR_RX_QUEUE(0), 0);
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08001468 ixgbe_set_ivar(adapter, IXGBE_IVAR_TX_QUEUE(0), 0);
1469
1470 map_vector_to_rxq(adapter, 0, 0);
1471 map_vector_to_txq(adapter, 0, 0);
1472
1473 DPRINTK(HW, INFO, "Legacy interrupt IVAR setup done\n");
Auke Kok9a799d72007-09-15 14:07:45 -07001474}
1475
1476/**
Jesse Brandeburg3a581072008-08-26 04:27:08 -07001477 * ixgbe_configure_tx - Configure 8259x Transmit Unit after Reset
Auke Kok9a799d72007-09-15 14:07:45 -07001478 * @adapter: board private structure
1479 *
1480 * Configure the Tx unit of the MAC after a reset.
1481 **/
1482static void ixgbe_configure_tx(struct ixgbe_adapter *adapter)
1483{
Jesse Brandeburge01c31a2008-08-26 04:27:13 -07001484 u64 tdba, tdwba;
Auke Kok9a799d72007-09-15 14:07:45 -07001485 struct ixgbe_hw *hw = &adapter->hw;
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08001486 u32 i, j, tdlen, txctrl;
Auke Kok9a799d72007-09-15 14:07:45 -07001487
1488 /* Setup the HW Tx Head and Tail descriptor pointers */
1489 for (i = 0; i < adapter->num_tx_queues; i++) {
Jesse Brandeburge01c31a2008-08-26 04:27:13 -07001490 struct ixgbe_ring *ring = &adapter->tx_ring[i];
1491 j = ring->reg_idx;
1492 tdba = ring->dma;
1493 tdlen = ring->count * sizeof(union ixgbe_adv_tx_desc);
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08001494 IXGBE_WRITE_REG(hw, IXGBE_TDBAL(j),
Jesse Brandeburge01c31a2008-08-26 04:27:13 -07001495 (tdba & DMA_32BIT_MASK));
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08001496 IXGBE_WRITE_REG(hw, IXGBE_TDBAH(j), (tdba >> 32));
Jesse Brandeburge01c31a2008-08-26 04:27:13 -07001497 tdwba = ring->dma +
1498 (ring->count * sizeof(union ixgbe_adv_tx_desc));
1499 tdwba |= IXGBE_TDWBAL_HEAD_WB_ENABLE;
1500 IXGBE_WRITE_REG(hw, IXGBE_TDWBAL(j), tdwba & DMA_32BIT_MASK);
1501 IXGBE_WRITE_REG(hw, IXGBE_TDWBAH(j), (tdwba >> 32));
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08001502 IXGBE_WRITE_REG(hw, IXGBE_TDLEN(j), tdlen);
1503 IXGBE_WRITE_REG(hw, IXGBE_TDH(j), 0);
1504 IXGBE_WRITE_REG(hw, IXGBE_TDT(j), 0);
1505 adapter->tx_ring[i].head = IXGBE_TDH(j);
1506 adapter->tx_ring[i].tail = IXGBE_TDT(j);
1507 /* Disable Tx Head Writeback RO bit, since this hoses
1508 * bookkeeping if things aren't delivered in order.
1509 */
Jesse Brandeburge01c31a2008-08-26 04:27:13 -07001510 txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(j));
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08001511 txctrl &= ~IXGBE_DCA_TXCTRL_TX_WB_RO_EN;
Jesse Brandeburge01c31a2008-08-26 04:27:13 -07001512 IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(j), txctrl);
Auke Kok9a799d72007-09-15 14:07:45 -07001513 }
Auke Kok9a799d72007-09-15 14:07:45 -07001514}
1515
Jesse Brandeburgcc41ac72008-08-26 04:27:27 -07001516#define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT 2
Auke Kok9a799d72007-09-15 14:07:45 -07001517
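/*
 * ixgbe_configure_srrctl - program one split receive control register
 * @adapter: board private structure
 * @index: SRRCTL register index
 *
 * Picks the ring whose buffer length applies to this register, then sets
 * the descriptor type and buffer sizes: header split with a 2K packet
 * buffer when packet split is enabled, otherwise a single receive buffer.
 */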
Jesse Brandeburgcc41ac72008-08-26 04:27:27 -07001518static void ixgbe_configure_srrctl(struct ixgbe_adapter *adapter, int index)
1519{
1520 struct ixgbe_ring *rx_ring;
1521 u32 srrctl;
1522 int queue0;
Alexander Duyck3be1adf2008-08-30 00:29:10 -07001523 unsigned long mask;
Jesse Brandeburgcc41ac72008-08-26 04:27:27 -07001524
Alexander Duyck3be1adf2008-08-30 00:29:10 -07001525 /* program one srrctl register per VMDq index */
1526 if (adapter->flags & IXGBE_FLAG_VMDQ_ENABLED) {
1527 long shift, len;
1528 mask = (unsigned long) adapter->ring_feature[RING_F_RSS].mask;
1529 len = sizeof(adapter->ring_feature[RING_F_VMDQ].mask) * 8;
1530 shift = find_first_bit(&mask, len);
1531 queue0 = index & mask;
1532 index = (index & mask) >> shift;
1533 /* program one srrctl per RSS queue since RDRXCTL.MVMEN is enabled */
Jesse Brandeburgcc41ac72008-08-26 04:27:27 -07001534 } else {
Alexander Duyck3be1adf2008-08-30 00:29:10 -07001535 mask = (unsigned long) adapter->ring_feature[RING_F_RSS].mask;
1536 queue0 = index & mask;
1537 index = index & mask;
Jesse Brandeburgcc41ac72008-08-26 04:27:27 -07001538 }
Alexander Duyck3be1adf2008-08-30 00:29:10 -07001539
Jesse Brandeburgcc41ac72008-08-26 04:27:27 -07001540 rx_ring = &adapter->rx_ring[queue0];
1541
1542 srrctl = IXGBE_READ_REG(&adapter->hw, IXGBE_SRRCTL(index));
1543
1544 srrctl &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
1545 srrctl &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
1546
1547 if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
1548 srrctl |= IXGBE_RXBUFFER_2048 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
1549 srrctl |= IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
1550 srrctl |= ((IXGBE_RX_HDR_SIZE <<
Peter P Waskiewiczb4617242008-09-11 20:04:46 -07001551 IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT) &
1552 IXGBE_SRRCTL_BSIZEHDR_MASK);
Jesse Brandeburgcc41ac72008-08-26 04:27:27 -07001553 } else {
1554 srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
1555
1556 if (rx_ring->rx_buf_len == MAXIMUM_ETHERNET_VLAN_SIZE)
1557 srrctl |= IXGBE_RXBUFFER_2048 >>
1558 IXGBE_SRRCTL_BSIZEPKT_SHIFT;
1559 else
1560 srrctl |= rx_ring->rx_buf_len >>
1561 IXGBE_SRRCTL_BSIZEPKT_SHIFT;
1562 }
1563 IXGBE_WRITE_REG(&adapter->hw, IXGBE_SRRCTL(index), srrctl);
1564}
1565
Auke Kok9a799d72007-09-15 14:07:45 -07001566/**
Mallikarjuna R Chilakala177db6f2008-06-18 15:32:19 -07001567 * ixgbe_get_skb_hdr - helper function for LRO header processing
1568 * @skb: pointer to sk_buff to be added to LRO packet
Peter P Waskiewiczb4617242008-09-11 20:04:46 -07001569 * @iphdr: pointer to ip header structure
Mallikarjuna R Chilakala177db6f2008-06-18 15:32:19 -07001570 * @tcph: pointer to tcp header structure
1571 * @hdr_flags: pointer to header flags
1572 * @priv: private data
1573 **/
1574static int ixgbe_get_skb_hdr(struct sk_buff *skb, void **iphdr, void **tcph,
1575 u64 *hdr_flags, void *priv)
1576{
1577 union ixgbe_adv_rx_desc *rx_desc = priv;
1578
1579 /* Verify that this is a valid IPv4 TCP packet */
Jesse Brandeburge9990a92008-08-26 04:27:24 -07001580 if (!((ixgbe_get_pkt_info(rx_desc) & IXGBE_RXDADV_PKTTYPE_IPV4) &&
1581 (ixgbe_get_pkt_info(rx_desc) & IXGBE_RXDADV_PKTTYPE_TCP)))
Mallikarjuna R Chilakala177db6f2008-06-18 15:32:19 -07001582 return -1;
1583
1584 /* Set network headers */
1585 skb_reset_network_header(skb);
1586 skb_set_transport_header(skb, ip_hdrlen(skb));
1587 *iphdr = ip_hdr(skb);
1588 *tcph = tcp_hdr(skb);
1589 *hdr_flags = LRO_IPV4 | LRO_TCP;
1590 return 0;
1591}
1592
Jesse Brandeburgcc41ac72008-08-26 04:27:27 -07001593#define PAGE_USE_COUNT(S) (((S) >> PAGE_SHIFT) + \
Peter P Waskiewiczb4617242008-09-11 20:04:46 -07001594 (((S) & (PAGE_SIZE - 1)) ? 1 : 0))
Jesse Brandeburgcc41ac72008-08-26 04:27:27 -07001595
Mallikarjuna R Chilakala177db6f2008-06-18 15:32:19 -07001596/**
Jesse Brandeburg3a581072008-08-26 04:27:08 -07001597 * ixgbe_configure_rx - Configure 8259x Receive Unit after Reset
Auke Kok9a799d72007-09-15 14:07:45 -07001598 * @adapter: board private structure
1599 *
1600 * Configure the Rx unit of the MAC after a reset.
1601 **/
1602static void ixgbe_configure_rx(struct ixgbe_adapter *adapter)
1603{
1604 u64 rdba;
1605 struct ixgbe_hw *hw = &adapter->hw;
1606 struct net_device *netdev = adapter->netdev;
1607 int max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08001608 int i, j;
Auke Kok9a799d72007-09-15 14:07:45 -07001609 u32 rdlen, rxctrl, rxcsum;
Jesse Brandeburg7c6e0a42008-08-26 04:27:16 -07001610 static const u32 seed[10] = { 0xE291D73D, 0x1805EC6C, 0x2A94B30D,
1611 0xA54F2BEC, 0xEA49AF7C, 0xE214AD3D, 0xB855AABE,
1612 0x6A3E67EA, 0x14364D17, 0x3BED200D};
Auke Kok9a799d72007-09-15 14:07:45 -07001613 u32 fctrl, hlreg0;
Auke Kok9a799d72007-09-15 14:07:45 -07001614 u32 pages;
Jesse Brandeburgcc41ac72008-08-26 04:27:27 -07001615 u32 reta = 0, mrqc;
1616 u32 rdrxctl;
Jesse Brandeburg7c6e0a42008-08-26 04:27:16 -07001617 int rx_buf_len;
Auke Kok9a799d72007-09-15 14:07:45 -07001618
1619 /* Decide whether to use packet split mode or not */
Jesse Brandeburg762f4c52008-09-11 19:58:43 -07001620 adapter->flags |= IXGBE_FLAG_RX_PS_ENABLED;
Auke Kok9a799d72007-09-15 14:07:45 -07001621
1622 /* Set the RX buffer length according to the mode */
1623 if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
Jesse Brandeburg7c6e0a42008-08-26 04:27:16 -07001624 rx_buf_len = IXGBE_RX_HDR_SIZE;
Auke Kok9a799d72007-09-15 14:07:45 -07001625 } else {
1626 if (netdev->mtu <= ETH_DATA_LEN)
Jesse Brandeburg7c6e0a42008-08-26 04:27:16 -07001627 rx_buf_len = MAXIMUM_ETHERNET_VLAN_SIZE;
Auke Kok9a799d72007-09-15 14:07:45 -07001628 else
Jesse Brandeburg7c6e0a42008-08-26 04:27:16 -07001629 rx_buf_len = ALIGN(max_frame, 1024);
Auke Kok9a799d72007-09-15 14:07:45 -07001630 }
1631
1632 fctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
1633 fctrl |= IXGBE_FCTRL_BAM;
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08001634 fctrl |= IXGBE_FCTRL_DPF; /* discard pause frames when FC enabled */
Auke Kok9a799d72007-09-15 14:07:45 -07001635 IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, fctrl);
1636
1637 hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
1638 if (adapter->netdev->mtu <= ETH_DATA_LEN)
1639 hlreg0 &= ~IXGBE_HLREG0_JUMBOEN;
1640 else
1641 hlreg0 |= IXGBE_HLREG0_JUMBOEN;
1642 IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);
1643
1644 pages = PAGE_USE_COUNT(adapter->netdev->mtu);
1645
Auke Kok9a799d72007-09-15 14:07:45 -07001646 rdlen = adapter->rx_ring[0].count * sizeof(union ixgbe_adv_rx_desc);
1647 /* disable receives while setting up the descriptors */
1648 rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
1649 IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl & ~IXGBE_RXCTRL_RXEN);
1650
1651 /* Setup the HW Rx Head and Tail Descriptor Pointers and
1652 * the Base and Length of the Rx Descriptor Ring */
1653 for (i = 0; i < adapter->num_rx_queues; i++) {
1654 rdba = adapter->rx_ring[i].dma;
Jesse Brandeburg7c6e0a42008-08-26 04:27:16 -07001655 j = adapter->rx_ring[i].reg_idx;
1656 IXGBE_WRITE_REG(hw, IXGBE_RDBAL(j), (rdba & DMA_32BIT_MASK));
1657 IXGBE_WRITE_REG(hw, IXGBE_RDBAH(j), (rdba >> 32));
1658 IXGBE_WRITE_REG(hw, IXGBE_RDLEN(j), rdlen);
1659 IXGBE_WRITE_REG(hw, IXGBE_RDH(j), 0);
1660 IXGBE_WRITE_REG(hw, IXGBE_RDT(j), 0);
1661 adapter->rx_ring[i].head = IXGBE_RDH(j);
1662 adapter->rx_ring[i].tail = IXGBE_RDT(j);
1663 adapter->rx_ring[i].rx_buf_len = rx_buf_len;
		/* Initial LRO settings */
1665 adapter->rx_ring[i].lro_mgr.max_aggr = IXGBE_MAX_LRO_AGGREGATE;
1666 adapter->rx_ring[i].lro_mgr.max_desc = IXGBE_MAX_LRO_DESCRIPTORS;
1667 adapter->rx_ring[i].lro_mgr.get_skb_header = ixgbe_get_skb_hdr;
1668 adapter->rx_ring[i].lro_mgr.features = LRO_F_EXTRACT_VLAN_ID;
1669 if (!(adapter->flags & IXGBE_FLAG_IN_NETPOLL))
1670 adapter->rx_ring[i].lro_mgr.features |= LRO_F_NAPI;
1671 adapter->rx_ring[i].lro_mgr.dev = adapter->netdev;
1672 adapter->rx_ring[i].lro_mgr.ip_summed = CHECKSUM_UNNECESSARY;
1673 adapter->rx_ring[i].lro_mgr.ip_summed_aggr = CHECKSUM_UNNECESSARY;
Jesse Brandeburgcc41ac72008-08-26 04:27:27 -07001674
1675 ixgbe_configure_srrctl(adapter, j);
Auke Kok9a799d72007-09-15 14:07:45 -07001676 }
1677
Jesse Brandeburgcc41ac72008-08-26 04:27:27 -07001678 /*
1679 * For VMDq support of different descriptor types or
1680 * buffer sizes through the use of multiple SRRCTL
1681 * registers, RDRXCTL.MVMEN must be set to 1
1682 *
1683 * also, the manual doesn't mention it clearly but DCA hints
1684 * will only use queue 0's tags unless this bit is set. Side
1685 * effects of setting this bit are only that SRRCTL must be
1686 * fully programmed [0..15]
1687 */
Alexander Duyck2f90b862008-11-20 20:52:10 -08001688 if (adapter->flags &
1689 (IXGBE_FLAG_RSS_ENABLED | IXGBE_FLAG_VMDQ_ENABLED)) {
1690 rdrxctl = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
1691 rdrxctl |= IXGBE_RDRXCTL_MVMEN;
1692 IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, rdrxctl);
1693 }
Mallikarjuna R Chilakala177db6f2008-06-18 15:32:19 -07001694
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08001695 if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) {
Auke Kok9a799d72007-09-15 14:07:45 -07001696 /* Fill out redirection table */
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08001697 for (i = 0, j = 0; i < 128; i++, j++) {
1698 if (j == adapter->ring_feature[RING_F_RSS].indices)
1699 j = 0;
1700 /* reta = 4-byte sliding window of
1701 * 0x00..(indices-1)(indices-1)00..etc. */
1702 reta = (reta << 8) | (j * 0x11);
1703 if ((i & 3) == 3)
1704 IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta);
Auke Kok9a799d72007-09-15 14:07:45 -07001705 }
1706
1707 /* Fill out hash function seeds */
1708 for (i = 0; i < 10; i++)
Jesse Brandeburg7c6e0a42008-08-26 04:27:16 -07001709 IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), seed[i]);
Auke Kok9a799d72007-09-15 14:07:45 -07001710
1711 mrqc = IXGBE_MRQC_RSSEN
1712 /* Perform hash on these packet types */
Jesse Brandeburg7c6e0a42008-08-26 04:27:16 -07001713 | IXGBE_MRQC_RSS_FIELD_IPV4
1714 | IXGBE_MRQC_RSS_FIELD_IPV4_TCP
1715 | IXGBE_MRQC_RSS_FIELD_IPV4_UDP
1716 | IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP
1717 | IXGBE_MRQC_RSS_FIELD_IPV6_EX
1718 | IXGBE_MRQC_RSS_FIELD_IPV6
1719 | IXGBE_MRQC_RSS_FIELD_IPV6_TCP
1720 | IXGBE_MRQC_RSS_FIELD_IPV6_UDP
1721 | IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP;
Auke Kok9a799d72007-09-15 14:07:45 -07001722 IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
Auke Kok9a799d72007-09-15 14:07:45 -07001723 }
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08001724
1725 rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
1726
1727 if (adapter->flags & IXGBE_FLAG_RSS_ENABLED ||
1728 adapter->flags & IXGBE_FLAG_RX_CSUM_ENABLED) {
		/* Disable indicating checksum in the descriptor;
		 * this enables RSS hash */
1731 rxcsum |= IXGBE_RXCSUM_PCSD;
1732 }
1733 if (!(rxcsum & IXGBE_RXCSUM_PCSD)) {
1734 /* Enable IPv4 payload checksum for UDP fragments
1735 * if PCSD is not set */
1736 rxcsum |= IXGBE_RXCSUM_IPPCSE;
1737 }
1738
1739 IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
Auke Kok9a799d72007-09-15 14:07:45 -07001740}
1741
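/*
 * ixgbe_vlan_rx_register - store the VLAN group and enable tag stripping
 * @netdev: network interface device structure
 * @grp: VLAN group from the stack
 *
 * Interrupts are masked while VLNCTRL is reprogrammed; VLAN tag stripping
 * is always left enabled so traffic from a DCB-tagged host is still
 * received.
 */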
1742static void ixgbe_vlan_rx_register(struct net_device *netdev,
Peter P Waskiewiczb4617242008-09-11 20:04:46 -07001743 struct vlan_group *grp)
Auke Kok9a799d72007-09-15 14:07:45 -07001744{
1745 struct ixgbe_adapter *adapter = netdev_priv(netdev);
1746 u32 ctrl;
1747
Ayyappan Veeraiyand4f80882008-02-01 15:58:41 -08001748 if (!test_bit(__IXGBE_DOWN, &adapter->state))
1749 ixgbe_irq_disable(adapter);
Auke Kok9a799d72007-09-15 14:07:45 -07001750 adapter->vlgrp = grp;
1751
Alexander Duyck2f90b862008-11-20 20:52:10 -08001752 /*
1753 * For a DCB driver, always enable VLAN tag stripping so we can
1754 * still receive traffic from a DCB-enabled host even if we're
1755 * not in DCB mode.
1756 */
1757 ctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_VLNCTRL);
1758 ctrl |= IXGBE_VLNCTRL_VME;
1759 ctrl &= ~IXGBE_VLNCTRL_CFIEN;
1760 IXGBE_WRITE_REG(&adapter->hw, IXGBE_VLNCTRL, ctrl);
1761
Auke Kok9a799d72007-09-15 14:07:45 -07001762 if (grp) {
1763 /* enable VLAN tag insert/strip */
1764 ctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_VLNCTRL);
Patrick McHardy746b9f02008-07-16 20:15:45 -07001765 ctrl |= IXGBE_VLNCTRL_VME;
Auke Kok9a799d72007-09-15 14:07:45 -07001766 ctrl &= ~IXGBE_VLNCTRL_CFIEN;
1767 IXGBE_WRITE_REG(&adapter->hw, IXGBE_VLNCTRL, ctrl);
1768 }
1769
Ayyappan Veeraiyand4f80882008-02-01 15:58:41 -08001770 if (!test_bit(__IXGBE_DOWN, &adapter->state))
1771 ixgbe_irq_enable(adapter);
Auke Kok9a799d72007-09-15 14:07:45 -07001772}
1773
1774static void ixgbe_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
1775{
1776 struct ixgbe_adapter *adapter = netdev_priv(netdev);
Jesse Brandeburgc44ade92008-09-11 19:59:59 -07001777 struct ixgbe_hw *hw = &adapter->hw;
Auke Kok9a799d72007-09-15 14:07:45 -07001778
1779 /* add VID to filter table */
Jesse Brandeburgc44ade92008-09-11 19:59:59 -07001780 hw->mac.ops.set_vfta(&adapter->hw, vid, 0, true);
Auke Kok9a799d72007-09-15 14:07:45 -07001781}
1782
1783static void ixgbe_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
1784{
1785 struct ixgbe_adapter *adapter = netdev_priv(netdev);
Jesse Brandeburgc44ade92008-09-11 19:59:59 -07001786 struct ixgbe_hw *hw = &adapter->hw;
Auke Kok9a799d72007-09-15 14:07:45 -07001787
Ayyappan Veeraiyand4f80882008-02-01 15:58:41 -08001788 if (!test_bit(__IXGBE_DOWN, &adapter->state))
1789 ixgbe_irq_disable(adapter);
1790
Auke Kok9a799d72007-09-15 14:07:45 -07001791 vlan_group_set_device(adapter->vlgrp, vid, NULL);
Ayyappan Veeraiyand4f80882008-02-01 15:58:41 -08001792
1793 if (!test_bit(__IXGBE_DOWN, &adapter->state))
1794 ixgbe_irq_enable(adapter);
Auke Kok9a799d72007-09-15 14:07:45 -07001795
1796 /* remove VID from filter table */
Jesse Brandeburgc44ade92008-09-11 19:59:59 -07001797 hw->mac.ops.set_vfta(&adapter->hw, vid, 0, false);
Auke Kok9a799d72007-09-15 14:07:45 -07001798}
1799
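/*
 * ixgbe_restore_vlan - replay VLAN registration and filters after a reset
 * @adapter: board private structure
 *
 * Re-registers the cached vlan_group and adds every VID that is still
 * present in it back into the hardware VLAN filter table.
 */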
1800static void ixgbe_restore_vlan(struct ixgbe_adapter *adapter)
1801{
1802 ixgbe_vlan_rx_register(adapter->netdev, adapter->vlgrp);
1803
1804 if (adapter->vlgrp) {
1805 u16 vid;
1806 for (vid = 0; vid < VLAN_GROUP_ARRAY_LEN; vid++) {
1807 if (!vlan_group_get_device(adapter->vlgrp, vid))
1808 continue;
1809 ixgbe_vlan_rx_add_vid(adapter->netdev, vid);
1810 }
1811 }
1812}
1813
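/*
 * ixgbe_addr_list_itr - iterator helper for walking a multicast list
 *
 * Returns the current address and advances *mc_addr_ptr to the next
 * dev_mc_list entry (or NULL at the end of the list); *vmdq is always 0.
 */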
Christopher Leech2c5645c2008-08-26 04:27:02 -07001814static u8 *ixgbe_addr_list_itr(struct ixgbe_hw *hw, u8 **mc_addr_ptr, u32 *vmdq)
1815{
1816 struct dev_mc_list *mc_ptr;
1817 u8 *addr = *mc_addr_ptr;
1818 *vmdq = 0;
1819
1820 mc_ptr = container_of(addr, struct dev_mc_list, dmi_addr[0]);
1821 if (mc_ptr->next)
1822 *mc_addr_ptr = mc_ptr->next->dmi_addr;
1823 else
1824 *mc_addr_ptr = NULL;
1825
1826 return addr;
1827}
1828
Auke Kok9a799d72007-09-15 14:07:45 -07001829/**
Christopher Leech2c5645c2008-08-26 04:27:02 -07001830 * ixgbe_set_rx_mode - Unicast, Multicast and Promiscuous mode set
Auke Kok9a799d72007-09-15 14:07:45 -07001831 * @netdev: network interface device structure
1832 *
 * The set_rx_mode entry point is called whenever the unicast/multicast
1834 * address list or the network interface flags are updated. This routine is
1835 * responsible for configuring the hardware for proper unicast, multicast and
1836 * promiscuous mode.
Auke Kok9a799d72007-09-15 14:07:45 -07001837 **/
Christopher Leech2c5645c2008-08-26 04:27:02 -07001838static void ixgbe_set_rx_mode(struct net_device *netdev)
Auke Kok9a799d72007-09-15 14:07:45 -07001839{
1840 struct ixgbe_adapter *adapter = netdev_priv(netdev);
1841 struct ixgbe_hw *hw = &adapter->hw;
Alexander Duyck3d016252008-08-26 18:30:04 -07001842 u32 fctrl, vlnctrl;
Christopher Leech2c5645c2008-08-26 04:27:02 -07001843 u8 *addr_list = NULL;
1844 int addr_count = 0;
Auke Kok9a799d72007-09-15 14:07:45 -07001845
1846 /* Check for Promiscuous and All Multicast modes */
1847
1848 fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
Alexander Duyck3d016252008-08-26 18:30:04 -07001849 vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
Auke Kok9a799d72007-09-15 14:07:45 -07001850
1851 if (netdev->flags & IFF_PROMISC) {
Christopher Leech2c5645c2008-08-26 04:27:02 -07001852 hw->addr_ctrl.user_set_promisc = 1;
Auke Kok9a799d72007-09-15 14:07:45 -07001853 fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
Alexander Duyck3d016252008-08-26 18:30:04 -07001854 vlnctrl &= ~IXGBE_VLNCTRL_VFE;
Auke Kok9a799d72007-09-15 14:07:45 -07001855 } else {
Patrick McHardy746b9f02008-07-16 20:15:45 -07001856 if (netdev->flags & IFF_ALLMULTI) {
1857 fctrl |= IXGBE_FCTRL_MPE;
1858 fctrl &= ~IXGBE_FCTRL_UPE;
1859 } else {
1860 fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
1861 }
Alexander Duyck3d016252008-08-26 18:30:04 -07001862 vlnctrl |= IXGBE_VLNCTRL_VFE;
Christopher Leech2c5645c2008-08-26 04:27:02 -07001863 hw->addr_ctrl.user_set_promisc = 0;
Auke Kok9a799d72007-09-15 14:07:45 -07001864 }
1865
1866 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
Alexander Duyck3d016252008-08-26 18:30:04 -07001867 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
Auke Kok9a799d72007-09-15 14:07:45 -07001868
Christopher Leech2c5645c2008-08-26 04:27:02 -07001869 /* reprogram secondary unicast list */
1870 addr_count = netdev->uc_count;
1871 if (addr_count)
1872 addr_list = netdev->uc_list->dmi_addr;
Jesse Brandeburgc44ade92008-09-11 19:59:59 -07001873 hw->mac.ops.update_uc_addr_list(hw, addr_list, addr_count,
1874 ixgbe_addr_list_itr);
Auke Kok9a799d72007-09-15 14:07:45 -07001875
Christopher Leech2c5645c2008-08-26 04:27:02 -07001876 /* reprogram multicast list */
1877 addr_count = netdev->mc_count;
1878 if (addr_count)
1879 addr_list = netdev->mc_list->dmi_addr;
Jesse Brandeburgc44ade92008-09-11 19:59:59 -07001880 hw->mac.ops.update_mc_addr_list(hw, addr_list, addr_count,
1881 ixgbe_addr_list_itr);
Auke Kok9a799d72007-09-15 14:07:45 -07001882}
1883
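/*
 * ixgbe_napi_enable_all - enable NAPI on every rx-owning queue vector
 * @adapter: board private structure
 *
 * Vectors that service more than one rx ring are switched to the
 * clean_rxonly_many poll routine first; MSI/legacy modes use one vector.
 */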
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08001884static void ixgbe_napi_enable_all(struct ixgbe_adapter *adapter)
1885{
1886 int q_idx;
1887 struct ixgbe_q_vector *q_vector;
1888 int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
1889
1890 /* legacy and MSI only use one vector */
1891 if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED))
1892 q_vectors = 1;
1893
1894 for (q_idx = 0; q_idx < q_vectors; q_idx++) {
Jesse Brandeburgf0848272008-09-11 19:59:42 -07001895 struct napi_struct *napi;
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08001896 q_vector = &adapter->q_vector[q_idx];
1897 if (!q_vector->rxr_count)
1898 continue;
Jesse Brandeburgf0848272008-09-11 19:59:42 -07001899 napi = &q_vector->napi;
1900 if ((adapter->flags & IXGBE_FLAG_MSIX_ENABLED) &&
1901 (q_vector->rxr_count > 1))
1902 napi->poll = &ixgbe_clean_rxonly_many;
1903
1904 napi_enable(napi);
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08001905 }
1906}
1907
1908static void ixgbe_napi_disable_all(struct ixgbe_adapter *adapter)
1909{
1910 int q_idx;
1911 struct ixgbe_q_vector *q_vector;
1912 int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
1913
1914 /* legacy and MSI only use one vector */
1915 if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED))
1916 q_vectors = 1;
1917
1918 for (q_idx = 0; q_idx < q_vectors; q_idx++) {
1919 q_vector = &adapter->q_vector[q_idx];
1920 if (!q_vector->rxr_count)
1921 continue;
1922 napi_disable(&q_vector->napi);
1923 }
1924}
1925
Jeff Kirsher7a6b6f52008-11-25 01:02:08 -08001926#ifdef CONFIG_IXGBE_DCB
Alexander Duyck2f90b862008-11-20 20:52:10 -08001927/*
1928 * ixgbe_configure_dcb - Configure DCB hardware
1929 * @adapter: ixgbe adapter struct
1930 *
1931 * This is called by the driver on open to configure the DCB hardware.
1932 * This is also called by the gennetlink interface when reconfiguring
1933 * the DCB state.
1934 */
1935static void ixgbe_configure_dcb(struct ixgbe_adapter *adapter)
1936{
1937 struct ixgbe_hw *hw = &adapter->hw;
1938 u32 txdctl, vlnctrl;
1939 int i, j;
1940
1941 ixgbe_dcb_check_config(&adapter->dcb_cfg);
1942 ixgbe_dcb_calculate_tc_credits(&adapter->dcb_cfg, DCB_TX_CONFIG);
1943 ixgbe_dcb_calculate_tc_credits(&adapter->dcb_cfg, DCB_RX_CONFIG);
1944
1945 /* reconfigure the hardware */
1946 ixgbe_dcb_hw_config(&adapter->hw, &adapter->dcb_cfg);
1947
1948 for (i = 0; i < adapter->num_tx_queues; i++) {
1949 j = adapter->tx_ring[i].reg_idx;
1950 txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(j));
1951 /* PThresh workaround for Tx hang with DFP enabled. */
1952 txdctl |= 32;
1953 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(j), txdctl);
1954 }
1955 /* Enable VLAN tag insert/strip */
1956 vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
1957 vlnctrl |= IXGBE_VLNCTRL_VME | IXGBE_VLNCTRL_VFE;
1958 vlnctrl &= ~IXGBE_VLNCTRL_CFIEN;
1959 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
1960 hw->mac.ops.set_vfta(&adapter->hw, 0, 0, true);
1961}
1962
1963#endif
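/*
 * ixgbe_configure - program the hardware with the current software state
 * @adapter: board private structure
 *
 * Applies the rx filtering mode, VLAN settings, optional DCB configuration
 * and the tx/rx ring setup, then refills every rx ring with buffers.
 */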
Auke Kok9a799d72007-09-15 14:07:45 -07001964static void ixgbe_configure(struct ixgbe_adapter *adapter)
1965{
1966 struct net_device *netdev = adapter->netdev;
1967 int i;
1968
Christopher Leech2c5645c2008-08-26 04:27:02 -07001969 ixgbe_set_rx_mode(netdev);
Auke Kok9a799d72007-09-15 14:07:45 -07001970
1971 ixgbe_restore_vlan(adapter);
Jeff Kirsher7a6b6f52008-11-25 01:02:08 -08001972#ifdef CONFIG_IXGBE_DCB
Alexander Duyck2f90b862008-11-20 20:52:10 -08001973 if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
1974 netif_set_gso_max_size(netdev, 32768);
1975 ixgbe_configure_dcb(adapter);
1976 } else {
1977 netif_set_gso_max_size(netdev, 65536);
1978 }
1979#else
1980 netif_set_gso_max_size(netdev, 65536);
1981#endif
Auke Kok9a799d72007-09-15 14:07:45 -07001982
1983 ixgbe_configure_tx(adapter);
1984 ixgbe_configure_rx(adapter);
1985 for (i = 0; i < adapter->num_rx_queues; i++)
1986 ixgbe_alloc_rx_buffers(adapter, &adapter->rx_ring[i],
Peter P Waskiewiczb4617242008-09-11 20:04:46 -07001987 (adapter->rx_ring[i].count - 1));
Auke Kok9a799d72007-09-15 14:07:45 -07001988}
1989
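/*
 * ixgbe_up_complete - finish bringing the interface up
 * @adapter: board private structure
 *
 * Programs GPIE/EIAM for the chosen interrupt mode, enables each tx and rx
 * queue, sets up the interrupt vectors, enables NAPI and interrupts, and
 * starts the watchdog so link state gets reported.
 */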
1990static int ixgbe_up_complete(struct ixgbe_adapter *adapter)
1991{
1992 struct net_device *netdev = adapter->netdev;
Auke Kok9a799d72007-09-15 14:07:45 -07001993 struct ixgbe_hw *hw = &adapter->hw;
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08001994 int i, j = 0;
Auke Kok9a799d72007-09-15 14:07:45 -07001995 int max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08001996 u32 txdctl, rxdctl, mhadd;
1997 u32 gpie;
Auke Kok9a799d72007-09-15 14:07:45 -07001998
Ayyappan Veeraiyan5eba3692008-02-01 15:59:04 -08001999 ixgbe_get_hw_control(adapter);
2000
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08002001 if ((adapter->flags & IXGBE_FLAG_MSIX_ENABLED) ||
2002 (adapter->flags & IXGBE_FLAG_MSI_ENABLED)) {
Auke Kok9a799d72007-09-15 14:07:45 -07002003 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
2004 gpie = (IXGBE_GPIE_MSIX_MODE | IXGBE_GPIE_EIAME |
Peter P Waskiewiczb4617242008-09-11 20:04:46 -07002005 IXGBE_GPIE_PBA_SUPPORT | IXGBE_GPIE_OCD);
Auke Kok9a799d72007-09-15 14:07:45 -07002006 } else {
2007 /* MSI only */
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08002008 gpie = 0;
Auke Kok9a799d72007-09-15 14:07:45 -07002009 }
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08002010 /* XXX: to interrupt immediately for EICS writes, enable this */
2011 /* gpie |= IXGBE_GPIE_EIMEN; */
2012 IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
2013 }
2014
2015 if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) {
2016 /* legacy interrupts, use EIAM to auto-mask when reading EICR,
2017 * specifically only auto mask tx and rx interrupts */
2018 IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
Auke Kok9a799d72007-09-15 14:07:45 -07002019 }
2020
Jesse Brandeburg0befdb32008-10-31 00:46:40 -07002021 /* Enable fan failure interrupt if media type is copper */
2022 if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) {
2023 gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
2024 gpie |= IXGBE_SDP1_GPIEN;
2025 IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
2026 }
2027
Auke Kok9a799d72007-09-15 14:07:45 -07002028 mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD);
Auke Kok9a799d72007-09-15 14:07:45 -07002029 if (max_frame != (mhadd >> IXGBE_MHADD_MFS_SHIFT)) {
2030 mhadd &= ~IXGBE_MHADD_MFS_MASK;
2031 mhadd |= max_frame << IXGBE_MHADD_MFS_SHIFT;
2032
2033 IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd);
2034 }
2035
2036 for (i = 0; i < adapter->num_tx_queues; i++) {
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08002037 j = adapter->tx_ring[i].reg_idx;
2038 txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(j));
Jesse Brandeburge01c31a2008-08-26 04:27:13 -07002039 /* enable WTHRESH=8 descriptors, to encourage burst writeback */
2040 txdctl |= (8 << 16);
Auke Kok9a799d72007-09-15 14:07:45 -07002041 txdctl |= IXGBE_TXDCTL_ENABLE;
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08002042 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(j), txdctl);
Auke Kok9a799d72007-09-15 14:07:45 -07002043 }
2044
2045 for (i = 0; i < adapter->num_rx_queues; i++) {
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08002046 j = adapter->rx_ring[i].reg_idx;
2047 rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(j));
2048 /* enable PTHRESH=32 descriptors (half the internal cache)
2049 * and HTHRESH=0 descriptors (to minimize latency on fetch),
2050 * this also removes a pesky rx_no_buffer_count increment */
2051 rxdctl |= 0x0020;
Auke Kok9a799d72007-09-15 14:07:45 -07002052 rxdctl |= IXGBE_RXDCTL_ENABLE;
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08002053 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(j), rxdctl);
Auke Kok9a799d72007-09-15 14:07:45 -07002054 }
2055 /* enable all receives */
2056 rxdctl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
2057 rxdctl |= (IXGBE_RXCTRL_DMBYPS | IXGBE_RXCTRL_RXEN);
2058 IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxdctl);
2059
2060 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED)
2061 ixgbe_configure_msix(adapter);
2062 else
2063 ixgbe_configure_msi_and_legacy(adapter);
2064
2065 clear_bit(__IXGBE_DOWN, &adapter->state);
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08002066 ixgbe_napi_enable_all(adapter);
2067
2068 /* clear any pending interrupts, may auto mask */
2069 IXGBE_READ_REG(hw, IXGBE_EICR);
2070
Auke Kok9a799d72007-09-15 14:07:45 -07002071 ixgbe_irq_enable(adapter);
2072
2073 /* bring the link up in the watchdog, this could race with our first
2074 * link up interrupt but shouldn't be a problem */
Jesse Brandeburgcf8280e2008-09-11 19:55:32 -07002075 adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
2076 adapter->link_check_timeout = jiffies;
Auke Kok9a799d72007-09-15 14:07:45 -07002077 mod_timer(&adapter->watchdog_timer, jiffies);
2078 return 0;
2079}
2080
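/*
 * ixgbe_reinit_locked - restart the interface under the RESETTING flag
 * @adapter: board private structure
 *
 * Waits for any reset already in progress to finish, then does a full
 * down/up cycle.
 */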
Ayyappan Veeraiyand4f80882008-02-01 15:58:41 -08002081void ixgbe_reinit_locked(struct ixgbe_adapter *adapter)
2082{
2083 WARN_ON(in_interrupt());
2084 while (test_and_set_bit(__IXGBE_RESETTING, &adapter->state))
2085 msleep(1);
2086 ixgbe_down(adapter);
2087 ixgbe_up(adapter);
2088 clear_bit(__IXGBE_RESETTING, &adapter->state);
2089}
2090
Auke Kok9a799d72007-09-15 14:07:45 -07002091int ixgbe_up(struct ixgbe_adapter *adapter)
2092{
2093 /* hardware has been reset, we need to reload some things */
2094 ixgbe_configure(adapter);
2095
2096 return ixgbe_up_complete(adapter);
2097}
2098
2099void ixgbe_reset(struct ixgbe_adapter *adapter)
2100{
Jesse Brandeburgc44ade92008-09-11 19:59:59 -07002101 struct ixgbe_hw *hw = &adapter->hw;
2102 if (hw->mac.ops.init_hw(hw))
2103 dev_err(&adapter->pdev->dev, "Hardware Error\n");
Auke Kok9a799d72007-09-15 14:07:45 -07002104
2105 /* reprogram the RAR[0] in case user changed it. */
Jesse Brandeburgc44ade92008-09-11 19:59:59 -07002106 hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
Auke Kok9a799d72007-09-15 14:07:45 -07002107
2108}
2109
Auke Kok9a799d72007-09-15 14:07:45 -07002110/**
2111 * ixgbe_clean_rx_ring - Free Rx Buffers per Queue
2112 * @adapter: board private structure
2113 * @rx_ring: ring to free buffers from
2114 **/
2115static void ixgbe_clean_rx_ring(struct ixgbe_adapter *adapter,
Peter P Waskiewiczb4617242008-09-11 20:04:46 -07002116 struct ixgbe_ring *rx_ring)
Auke Kok9a799d72007-09-15 14:07:45 -07002117{
2118 struct pci_dev *pdev = adapter->pdev;
2119 unsigned long size;
2120 unsigned int i;
2121
2122 /* Free all the Rx ring sk_buffs */
2123
2124 for (i = 0; i < rx_ring->count; i++) {
2125 struct ixgbe_rx_buffer *rx_buffer_info;
2126
2127 rx_buffer_info = &rx_ring->rx_buffer_info[i];
2128 if (rx_buffer_info->dma) {
2129 pci_unmap_single(pdev, rx_buffer_info->dma,
Peter P Waskiewiczb4617242008-09-11 20:04:46 -07002130 rx_ring->rx_buf_len,
2131 PCI_DMA_FROMDEVICE);
Auke Kok9a799d72007-09-15 14:07:45 -07002132 rx_buffer_info->dma = 0;
2133 }
2134 if (rx_buffer_info->skb) {
2135 dev_kfree_skb(rx_buffer_info->skb);
2136 rx_buffer_info->skb = NULL;
2137 }
2138 if (!rx_buffer_info->page)
2139 continue;
Jesse Brandeburg762f4c52008-09-11 19:58:43 -07002140 pci_unmap_page(pdev, rx_buffer_info->page_dma, PAGE_SIZE / 2,
2141 PCI_DMA_FROMDEVICE);
Auke Kok9a799d72007-09-15 14:07:45 -07002142 rx_buffer_info->page_dma = 0;
Auke Kok9a799d72007-09-15 14:07:45 -07002143 put_page(rx_buffer_info->page);
2144 rx_buffer_info->page = NULL;
Jesse Brandeburg762f4c52008-09-11 19:58:43 -07002145 rx_buffer_info->page_offset = 0;
Auke Kok9a799d72007-09-15 14:07:45 -07002146 }
2147
2148 size = sizeof(struct ixgbe_rx_buffer) * rx_ring->count;
2149 memset(rx_ring->rx_buffer_info, 0, size);
2150
2151 /* Zero out the descriptor ring */
2152 memset(rx_ring->desc, 0, rx_ring->size);
2153
2154 rx_ring->next_to_clean = 0;
2155 rx_ring->next_to_use = 0;
2156
2157 writel(0, adapter->hw.hw_addr + rx_ring->head);
2158 writel(0, adapter->hw.hw_addr + rx_ring->tail);
2159}
2160
2161/**
2162 * ixgbe_clean_tx_ring - Free Tx Buffers
2163 * @adapter: board private structure
2164 * @tx_ring: ring to be cleaned
2165 **/
2166static void ixgbe_clean_tx_ring(struct ixgbe_adapter *adapter,
Peter P Waskiewiczb4617242008-09-11 20:04:46 -07002167 struct ixgbe_ring *tx_ring)
Auke Kok9a799d72007-09-15 14:07:45 -07002168{
2169 struct ixgbe_tx_buffer *tx_buffer_info;
2170 unsigned long size;
2171 unsigned int i;
2172
2173 /* Free all the Tx ring sk_buffs */
2174
2175 for (i = 0; i < tx_ring->count; i++) {
2176 tx_buffer_info = &tx_ring->tx_buffer_info[i];
2177 ixgbe_unmap_and_free_tx_resource(adapter, tx_buffer_info);
2178 }
2179
2180 size = sizeof(struct ixgbe_tx_buffer) * tx_ring->count;
2181 memset(tx_ring->tx_buffer_info, 0, size);
2182
2183 /* Zero out the descriptor ring */
2184 memset(tx_ring->desc, 0, tx_ring->size);
2185
2186 tx_ring->next_to_use = 0;
2187 tx_ring->next_to_clean = 0;
2188
2189 writel(0, adapter->hw.hw_addr + tx_ring->head);
2190 writel(0, adapter->hw.hw_addr + tx_ring->tail);
2191}
2192
2193/**
Auke Kok9a799d72007-09-15 14:07:45 -07002194 * ixgbe_clean_all_rx_rings - Free Rx Buffers for all queues
2195 * @adapter: board private structure
2196 **/
2197static void ixgbe_clean_all_rx_rings(struct ixgbe_adapter *adapter)
2198{
2199 int i;
2200
2201 for (i = 0; i < adapter->num_rx_queues; i++)
2202 ixgbe_clean_rx_ring(adapter, &adapter->rx_ring[i]);
2203}
2204
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08002205/**
2206 * ixgbe_clean_all_tx_rings - Free Tx Buffers for all queues
2207 * @adapter: board private structure
2208 **/
2209static void ixgbe_clean_all_tx_rings(struct ixgbe_adapter *adapter)
2210{
2211 int i;
2212
2213 for (i = 0; i < adapter->num_tx_queues; i++)
2214 ixgbe_clean_tx_ring(adapter, &adapter->tx_ring[i]);
2215}
2216
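/*
 * ixgbe_down - quiesce the hardware and software state
 * @adapter: board private structure
 *
 * Stops receives and transmits, disables interrupts and NAPI, stops the
 * watchdog, resets the hardware and cleans every ring; DCA is removed
 * before the reset and re-registered afterwards.
 */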
Auke Kok9a799d72007-09-15 14:07:45 -07002217void ixgbe_down(struct ixgbe_adapter *adapter)
2218{
2219 struct net_device *netdev = adapter->netdev;
Jesse Brandeburg7f821872008-09-11 20:00:16 -07002220 struct ixgbe_hw *hw = &adapter->hw;
Auke Kok9a799d72007-09-15 14:07:45 -07002221 u32 rxctrl;
Jesse Brandeburg7f821872008-09-11 20:00:16 -07002222 u32 txdctl;
2223 int i, j;
Auke Kok9a799d72007-09-15 14:07:45 -07002224
2225 /* signal that we are down to the interrupt handler */
2226 set_bit(__IXGBE_DOWN, &adapter->state);
2227
2228 /* disable receives */
Jesse Brandeburg7f821872008-09-11 20:00:16 -07002229 rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
2230 IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl & ~IXGBE_RXCTRL_RXEN);
Auke Kok9a799d72007-09-15 14:07:45 -07002231
2232 netif_tx_disable(netdev);
2233
Jesse Brandeburg7f821872008-09-11 20:00:16 -07002234 IXGBE_WRITE_FLUSH(hw);
Auke Kok9a799d72007-09-15 14:07:45 -07002235 msleep(10);
2236
Jesse Brandeburg7f821872008-09-11 20:00:16 -07002237 netif_tx_stop_all_queues(netdev);
2238
Auke Kok9a799d72007-09-15 14:07:45 -07002239 ixgbe_irq_disable(adapter);
2240
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08002241 ixgbe_napi_disable_all(adapter);
Jesse Brandeburg7f821872008-09-11 20:00:16 -07002242
Auke Kok9a799d72007-09-15 14:07:45 -07002243 del_timer_sync(&adapter->watchdog_timer);
Jesse Brandeburgcf8280e2008-09-11 19:55:32 -07002244 cancel_work_sync(&adapter->watchdog_task);
Auke Kok9a799d72007-09-15 14:07:45 -07002245
Jesse Brandeburg7f821872008-09-11 20:00:16 -07002246 /* disable transmits in the hardware now that interrupts are off */
2247 for (i = 0; i < adapter->num_tx_queues; i++) {
2248 j = adapter->tx_ring[i].reg_idx;
2249 txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(j));
2250 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(j),
2251 (txdctl & ~IXGBE_TXDCTL_ENABLE));
2252 }
2253
Auke Kok9a799d72007-09-15 14:07:45 -07002254 netif_carrier_off(netdev);
Auke Kok9a799d72007-09-15 14:07:45 -07002255
Jeff Garzik5dd2d332008-10-16 05:09:31 -04002256#ifdef CONFIG_IXGBE_DCA
Jesse Brandeburg96b0e0f2008-08-26 04:27:21 -07002257 if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) {
2258 adapter->flags &= ~IXGBE_FLAG_DCA_ENABLED;
2259 dca_remove_requester(&adapter->pdev->dev);
2260 }
2261
2262#endif
Paul Larson6f4a0e42008-06-24 17:00:56 -07002263 if (!pci_channel_offline(adapter->pdev))
2264 ixgbe_reset(adapter);
Auke Kok9a799d72007-09-15 14:07:45 -07002265 ixgbe_clean_all_tx_rings(adapter);
2266 ixgbe_clean_all_rx_rings(adapter);
2267
Jeff Garzik5dd2d332008-10-16 05:09:31 -04002268#ifdef CONFIG_IXGBE_DCA
Jesse Brandeburg96b0e0f2008-08-26 04:27:21 -07002269 /* since we reset the hardware DCA settings were cleared */
2270 if (dca_add_requester(&adapter->pdev->dev) == 0) {
2271 adapter->flags |= IXGBE_FLAG_DCA_ENABLED;
2272 /* always use CB2 mode, difference is masked
2273 * in the CB driver */
Peter P Waskiewiczb4617242008-09-11 20:04:46 -07002274 IXGBE_WRITE_REG(hw, IXGBE_DCA_CTRL, 2);
Jesse Brandeburg96b0e0f2008-08-26 04:27:21 -07002275 ixgbe_setup_dca(adapter);
2276 }
2277#endif
Auke Kok9a799d72007-09-15 14:07:45 -07002278}
2279
Auke Kok9a799d72007-09-15 14:07:45 -07002280/**
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08002281 * ixgbe_poll - NAPI Rx polling callback
2282 * @napi: structure for representing this polling device
2283 * @budget: how many packets driver is allowed to clean
2284 *
2285 * This function is used for legacy and MSI, NAPI mode
Auke Kok9a799d72007-09-15 14:07:45 -07002286 **/
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08002287static int ixgbe_poll(struct napi_struct *napi, int budget)
Auke Kok9a799d72007-09-15 14:07:45 -07002288{
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08002289 struct ixgbe_q_vector *q_vector = container_of(napi,
Peter P Waskiewiczb4617242008-09-11 20:04:46 -07002290 struct ixgbe_q_vector, napi);
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08002291 struct ixgbe_adapter *adapter = q_vector->adapter;
Jesse Brandeburg74ce8dd2008-09-11 20:03:23 -07002292 int tx_cleaned, work_done = 0;
Auke Kok9a799d72007-09-15 14:07:45 -07002293
Jeff Garzik5dd2d332008-10-16 05:09:31 -04002294#ifdef CONFIG_IXGBE_DCA
Jeb Cramerbd0362d2008-03-03 15:04:02 -08002295 if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) {
2296 ixgbe_update_tx_dca(adapter, adapter->tx_ring);
2297 ixgbe_update_rx_dca(adapter, adapter->rx_ring);
2298 }
2299#endif
2300
David S. Millerd2c7ddd2008-01-15 22:43:24 -08002301 tx_cleaned = ixgbe_clean_tx_irq(adapter, adapter->tx_ring);
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08002302 ixgbe_clean_rx_irq(adapter, adapter->rx_ring, &work_done, budget);
Auke Kok9a799d72007-09-15 14:07:45 -07002303
David S. Millerd2c7ddd2008-01-15 22:43:24 -08002304 if (tx_cleaned)
2305 work_done = budget;
2306
David S. Miller53e52c72008-01-07 21:06:12 -08002307 /* If budget not fully consumed, exit the polling mode */
2308 if (work_done < budget) {
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08002309 netif_rx_complete(adapter->netdev, napi);
Jesse Brandeburg30efa5a2008-09-11 19:58:14 -07002310 if (adapter->itr_setting & 3)
Ayyappan Veeraiyanf494e8f2008-03-03 15:03:57 -08002311 ixgbe_set_itr(adapter);
Ayyappan Veeraiyand4f80882008-02-01 15:58:41 -08002312 if (!test_bit(__IXGBE_DOWN, &adapter->state))
2313 ixgbe_irq_enable(adapter);
Auke Kok9a799d72007-09-15 14:07:45 -07002314 }
Auke Kok9a799d72007-09-15 14:07:45 -07002315 return work_done;
2316}
2317
2318/**
2319 * ixgbe_tx_timeout - Respond to a Tx Hang
2320 * @netdev: network interface device structure
2321 **/
2322static void ixgbe_tx_timeout(struct net_device *netdev)
2323{
2324 struct ixgbe_adapter *adapter = netdev_priv(netdev);
2325
2326 /* Do the reset outside of interrupt context */
2327 schedule_work(&adapter->reset_task);
2328}
2329
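/*
 * ixgbe_reset_task - deferred reset scheduled from ixgbe_tx_timeout
 * @work: work_struct embedded in the adapter
 *
 * Runs in process context so the reinit can sleep; bails out early if the
 * adapter is already going down or resetting.
 */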
2330static void ixgbe_reset_task(struct work_struct *work)
2331{
2332 struct ixgbe_adapter *adapter;
2333 adapter = container_of(work, struct ixgbe_adapter, reset_task);
2334
Alexander Duyck2f90b862008-11-20 20:52:10 -08002335 /* If we're already down or resetting, just bail */
2336 if (test_bit(__IXGBE_DOWN, &adapter->state) ||
2337 test_bit(__IXGBE_RESETTING, &adapter->state))
2338 return;
2339
Auke Kok9a799d72007-09-15 14:07:45 -07002340 adapter->tx_timeout_count++;
2341
Ayyappan Veeraiyand4f80882008-02-01 15:58:41 -08002342 ixgbe_reinit_locked(adapter);
Auke Kok9a799d72007-09-15 14:07:45 -07002343}
2344
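/*
 * ixgbe_set_num_queues - choose rx/tx queue counts for the enabled features
 * @adapter: board private structure
 *
 * Derives the queue counts and ring index masks from the DCB and RSS
 * feature settings (82598 only) and stores them in the adapter.
 */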
Jesse Brandeburgb9804972008-09-11 20:00:29 -07002345static void ixgbe_set_num_queues(struct ixgbe_adapter *adapter)
2346{
2347 int nrq = 1, ntq = 1;
2348 int feature_mask = 0, rss_i, rss_m;
Alexander Duyck2f90b862008-11-20 20:52:10 -08002349 int dcb_i, dcb_m;
Jesse Brandeburgb9804972008-09-11 20:00:29 -07002350
2351 /* Number of supported queues */
2352 switch (adapter->hw.mac.type) {
2353 case ixgbe_mac_82598EB:
Alexander Duyck2f90b862008-11-20 20:52:10 -08002354 dcb_i = adapter->ring_feature[RING_F_DCB].indices;
2355 dcb_m = 0;
Jesse Brandeburgb9804972008-09-11 20:00:29 -07002356 rss_i = adapter->ring_feature[RING_F_RSS].indices;
2357 rss_m = 0;
2358 feature_mask |= IXGBE_FLAG_RSS_ENABLED;
Alexander Duyck2f90b862008-11-20 20:52:10 -08002359 feature_mask |= IXGBE_FLAG_DCB_ENABLED;
Jesse Brandeburgb9804972008-09-11 20:00:29 -07002360
2361 switch (adapter->flags & feature_mask) {
Alexander Duyck2f90b862008-11-20 20:52:10 -08002362 case (IXGBE_FLAG_RSS_ENABLED | IXGBE_FLAG_DCB_ENABLED):
2363 dcb_m = 0x7 << 3;
2364 rss_i = min(8, rss_i);
2365 rss_m = 0x7;
2366 nrq = dcb_i * rss_i;
2367 ntq = min(MAX_TX_QUEUES, dcb_i * rss_i);
2368 break;
2369 case (IXGBE_FLAG_DCB_ENABLED):
2370 dcb_m = 0x7 << 3;
2371 nrq = dcb_i;
2372 ntq = dcb_i;
2373 break;
Jesse Brandeburgb9804972008-09-11 20:00:29 -07002374 case (IXGBE_FLAG_RSS_ENABLED):
2375 rss_m = 0xF;
2376 nrq = rss_i;
2377 ntq = rss_i;
2378 break;
2379 case 0:
2380 default:
Alexander Duyck2f90b862008-11-20 20:52:10 -08002381 dcb_i = 0;
2382 dcb_m = 0;
Jesse Brandeburgb9804972008-09-11 20:00:29 -07002383 rss_i = 0;
2384 rss_m = 0;
2385 nrq = 1;
2386 ntq = 1;
2387 break;
2388 }
2389
Alexander Duyck2f90b862008-11-20 20:52:10 -08002390 /* Sanity check, we should never have zero queues */
		nrq = (nrq ?: 1);
		ntq = (ntq ?: 1);
2393
2394 adapter->ring_feature[RING_F_DCB].indices = dcb_i;
2395 adapter->ring_feature[RING_F_DCB].mask = dcb_m;
Jesse Brandeburgb9804972008-09-11 20:00:29 -07002396 adapter->ring_feature[RING_F_RSS].indices = rss_i;
2397 adapter->ring_feature[RING_F_RSS].mask = rss_m;
2398 break;
2399 default:
2400 nrq = 1;
2401 ntq = 1;
2402 break;
2403 }
2404
2405 adapter->num_rx_queues = nrq;
2406 adapter->num_tx_queues = ntq;
2407}
2408
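/*
 * ixgbe_acquire_msix_vectors - try to allocate MSI-X vectors
 * @adapter: board private structure
 * @vectors: number of vectors to request
 *
 * Retries pci_enable_msix() with the count the kernel reports as available;
 * if fewer than MIN_MSIX_COUNT can be had, MSI-X (and with it DCB/RSS
 * multiqueue) is abandoned and the driver falls back to MSI or legacy.
 */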
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08002409static void ixgbe_acquire_msix_vectors(struct ixgbe_adapter *adapter,
Peter P Waskiewiczb4617242008-09-11 20:04:46 -07002410 int vectors)
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08002411{
2412 int err, vector_threshold;
2413
2414 /* We'll want at least 3 (vector_threshold):
2415 * 1) TxQ[0] Cleanup
2416 * 2) RxQ[0] Cleanup
2417 * 3) Other (Link Status Change, etc.)
2418 * 4) TCP Timer (optional)
2419 */
2420 vector_threshold = MIN_MSIX_COUNT;
2421
2422 /* The more we get, the more we will assign to Tx/Rx Cleanup
2423 * for the separate queues...where Rx Cleanup >= Tx Cleanup.
2424 * Right now, we simply care about how many we'll get; we'll
2425 * set them up later while requesting irq's.
2426 */
2427 while (vectors >= vector_threshold) {
2428 err = pci_enable_msix(adapter->pdev, adapter->msix_entries,
Peter P Waskiewiczb4617242008-09-11 20:04:46 -07002429 vectors);
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08002430 if (!err) /* Success in acquiring all requested vectors. */
2431 break;
2432 else if (err < 0)
2433 vectors = 0; /* Nasty failure, quit now */
2434 else /* err == number of vectors we should try again with */
2435 vectors = err;
2436 }
2437
2438 if (vectors < vector_threshold) {
2439 /* Can't allocate enough MSI-X interrupts? Oh well.
2440 * This just means we'll go with either a single MSI
2441 * vector or fall back to legacy interrupts.
2442 */
2443 DPRINTK(HW, DEBUG, "Unable to allocate MSI-X interrupts\n");
2444 adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED;
2445 kfree(adapter->msix_entries);
2446 adapter->msix_entries = NULL;
Alexander Duyck2f90b862008-11-20 20:52:10 -08002447 adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED;
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08002448 adapter->flags &= ~IXGBE_FLAG_RSS_ENABLED;
Jesse Brandeburgb9804972008-09-11 20:00:29 -07002449 ixgbe_set_num_queues(adapter);
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08002450 } else {
2451 adapter->flags |= IXGBE_FLAG_MSIX_ENABLED; /* Woot! */
2452 adapter->num_msix_vectors = vectors;
2453 }
2454}
2455
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08002456/**
2457 * ixgbe_cache_ring_register - Descriptor ring to register mapping
2458 * @adapter: board private structure to initialize
2459 *
2460 * Once we know the feature-set enabled for the device, we'll cache
2461 * the register offset the descriptor ring is assigned to.
2462 **/
Al Virofeea6a52008-11-27 15:34:07 -08002463static void ixgbe_cache_ring_register(struct ixgbe_adapter *adapter)
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08002464{
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08002465 int feature_mask = 0, rss_i;
2466 int i, txr_idx, rxr_idx;
Alexander Duyck2f90b862008-11-20 20:52:10 -08002467 int dcb_i;
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08002468
2469 /* Number of supported queues */
2470 switch (adapter->hw.mac.type) {
2471 case ixgbe_mac_82598EB:
Alexander Duyck2f90b862008-11-20 20:52:10 -08002472 dcb_i = adapter->ring_feature[RING_F_DCB].indices;
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08002473 rss_i = adapter->ring_feature[RING_F_RSS].indices;
2474 txr_idx = 0;
2475 rxr_idx = 0;
Alexander Duyck2f90b862008-11-20 20:52:10 -08002476 feature_mask |= IXGBE_FLAG_DCB_ENABLED;
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08002477 feature_mask |= IXGBE_FLAG_RSS_ENABLED;
2478 switch (adapter->flags & feature_mask) {
Alexander Duyck2f90b862008-11-20 20:52:10 -08002479 case (IXGBE_FLAG_RSS_ENABLED | IXGBE_FLAG_DCB_ENABLED):
2480 for (i = 0; i < dcb_i; i++) {
2481 int j;
2482 /* Rx first */
2483 for (j = 0; j < adapter->num_rx_queues; j++) {
2484 adapter->rx_ring[rxr_idx].reg_idx =
2485 i << 3 | j;
2486 rxr_idx++;
2487 }
2488 /* Tx now */
2489 for (j = 0; j < adapter->num_tx_queues; j++) {
2490 adapter->tx_ring[txr_idx].reg_idx =
2491 i << 2 | (j >> 1);
2492 if (j & 1)
2493 txr_idx++;
2494 }
			}
			break;
2496 case (IXGBE_FLAG_DCB_ENABLED):
2497 /* the number of queues is assumed to be symmetric */
2498 for (i = 0; i < dcb_i; i++) {
2499 adapter->rx_ring[i].reg_idx = i << 3;
2500 adapter->tx_ring[i].reg_idx = i << 2;
2501 }
2502 break;
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08002503 case (IXGBE_FLAG_RSS_ENABLED):
2504 for (i = 0; i < adapter->num_rx_queues; i++)
2505 adapter->rx_ring[i].reg_idx = i;
2506 for (i = 0; i < adapter->num_tx_queues; i++)
2507 adapter->tx_ring[i].reg_idx = i;
2508 break;
2509 case 0:
2510 default:
2511 break;
2512 }
2513 break;
2514 default:
2515 break;
2516 }
2517}
2518
Auke Kok9a799d72007-09-15 14:07:45 -07002519/**
2520 * ixgbe_alloc_queues - Allocate memory for all rings
2521 * @adapter: board private structure to initialize
2522 *
2523 * We allocate one ring per queue at run-time since we don't know the
Wang Chena4d2f342008-12-03 22:05:58 -08002524 * number of queues at compile-time.
Auke Kok9a799d72007-09-15 14:07:45 -07002525 **/
Alexander Duyck2f90b862008-11-20 20:52:10 -08002526static int ixgbe_alloc_queues(struct ixgbe_adapter *adapter)
Auke Kok9a799d72007-09-15 14:07:45 -07002527{
2528 int i;
2529
2530 adapter->tx_ring = kcalloc(adapter->num_tx_queues,
Peter P Waskiewiczb4617242008-09-11 20:04:46 -07002531 sizeof(struct ixgbe_ring), GFP_KERNEL);
Auke Kok9a799d72007-09-15 14:07:45 -07002532 if (!adapter->tx_ring)
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08002533 goto err_tx_ring_allocation;
Auke Kok9a799d72007-09-15 14:07:45 -07002534
2535 adapter->rx_ring = kcalloc(adapter->num_rx_queues,
Peter P Waskiewiczb4617242008-09-11 20:04:46 -07002536 sizeof(struct ixgbe_ring), GFP_KERNEL);
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08002537 if (!adapter->rx_ring)
2538 goto err_rx_ring_allocation;
2539
2540 for (i = 0; i < adapter->num_tx_queues; i++) {
Jesse Brandeburgb9804972008-09-11 20:00:29 -07002541 adapter->tx_ring[i].count = adapter->tx_ring_count;
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08002542 adapter->tx_ring[i].queue_index = i;
2543 }
Jesse Brandeburgb9804972008-09-11 20:00:29 -07002544
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08002545 for (i = 0; i < adapter->num_rx_queues; i++) {
Jesse Brandeburgb9804972008-09-11 20:00:29 -07002546 adapter->rx_ring[i].count = adapter->rx_ring_count;
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08002547 adapter->rx_ring[i].queue_index = i;
Auke Kok9a799d72007-09-15 14:07:45 -07002548 }
2549
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08002550 ixgbe_cache_ring_register(adapter);
Auke Kok9a799d72007-09-15 14:07:45 -07002551
2552 return 0;
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08002553
2554err_rx_ring_allocation:
2555 kfree(adapter->tx_ring);
2556err_tx_ring_allocation:
2557 return -ENOMEM;
2558}
2559
2560/**
2561 * ixgbe_set_interrupt_capability - set MSI-X or MSI if supported
2562 * @adapter: board private structure to initialize
2563 *
2564 * Attempt to configure the interrupts using the best available
2565 * capabilities of the hardware and the kernel.
2566 **/
Al Virofeea6a52008-11-27 15:34:07 -08002567static int ixgbe_set_interrupt_capability(struct ixgbe_adapter *adapter)
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08002568{
2569 int err = 0;
2570 int vector, v_budget;
2571
2572 /*
2573 * It's easy to be greedy for MSI-X vectors, but it really
2574 * doesn't do us much good if we have a lot more vectors
 2575 	 * than CPUs. So let's be conservative and only ask for
 2576 	 * (roughly) twice the number of vectors as there are CPUs.
2577 */
2578 v_budget = min(adapter->num_rx_queues + adapter->num_tx_queues,
Peter P Waskiewiczb4617242008-09-11 20:04:46 -07002579 (int)(num_online_cpus() * 2)) + NON_Q_VECTORS;
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08002580
2581 /*
2582 * At the same time, hardware can only support a maximum of
2583 * MAX_MSIX_COUNT vectors. With features such as RSS and VMDq,
2584 * we can easily reach upwards of 64 Rx descriptor queues and
2585 * 32 Tx queues. Thus, we cap it off in those rare cases where
2586 * the cpu count also exceeds our vector limit.
2587 */
2588 v_budget = min(v_budget, MAX_MSIX_COUNT);
2589
2590 /* A failure in MSI-X entry allocation isn't fatal, but it does
2591 * mean we disable MSI-X capabilities of the adapter. */
2592 adapter->msix_entries = kcalloc(v_budget,
Peter P Waskiewiczb4617242008-09-11 20:04:46 -07002593 sizeof(struct msix_entry), GFP_KERNEL);
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08002594 if (!adapter->msix_entries) {
Alexander Duyck2f90b862008-11-20 20:52:10 -08002595 adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED;
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08002596 adapter->flags &= ~IXGBE_FLAG_RSS_ENABLED;
2597 ixgbe_set_num_queues(adapter);
2598 kfree(adapter->tx_ring);
2599 kfree(adapter->rx_ring);
2600 err = ixgbe_alloc_queues(adapter);
2601 if (err) {
2602 DPRINTK(PROBE, ERR, "Unable to allocate memory "
Peter P Waskiewiczb4617242008-09-11 20:04:46 -07002603 "for queues\n");
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08002604 goto out;
2605 }
2606
2607 goto try_msi;
2608 }
2609
2610 for (vector = 0; vector < v_budget; vector++)
2611 adapter->msix_entries[vector].entry = vector;
2612
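	/*
	 * Try MSI-X first: on success ixgbe_acquire_msix_vectors() sets
	 * IXGBE_FLAG_MSIX_ENABLED and we are done; otherwise we fall back
	 * to MSI below, and finally to legacy (INTx) interrupts.
	 */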
2613 ixgbe_acquire_msix_vectors(adapter, v_budget);
2614
2615 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED)
2616 goto out;
2617
2618try_msi:
2619 err = pci_enable_msi(adapter->pdev);
2620 if (!err) {
2621 adapter->flags |= IXGBE_FLAG_MSI_ENABLED;
2622 } else {
2623 DPRINTK(HW, DEBUG, "Unable to allocate MSI interrupt, "
Peter P Waskiewiczb4617242008-09-11 20:04:46 -07002624 "falling back to legacy. Error: %d\n", err);
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08002625 /* reset err */
2626 err = 0;
2627 }
2628
2629out:
Ayyappan Veeraiyan30eba972008-03-03 15:03:52 -08002630 /* Notify the stack of the (possibly) reduced Tx Queue count. */
David S. Millerfd2ea0a2008-07-17 01:56:23 -07002631 adapter->netdev->real_num_tx_queues = adapter->num_tx_queues;
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08002632
2633 return err;
2634}
2635
Alexander Duyck2f90b862008-11-20 20:52:10 -08002636void ixgbe_reset_interrupt_capability(struct ixgbe_adapter *adapter)
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08002637{
2638 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
2639 adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED;
2640 pci_disable_msix(adapter->pdev);
2641 kfree(adapter->msix_entries);
2642 adapter->msix_entries = NULL;
2643 } else if (adapter->flags & IXGBE_FLAG_MSI_ENABLED) {
2644 adapter->flags &= ~IXGBE_FLAG_MSI_ENABLED;
2645 pci_disable_msi(adapter->pdev);
2646 }
2647 return;
2648}
2649
2650/**
2651 * ixgbe_init_interrupt_scheme - Determine proper interrupt scheme
2652 * @adapter: board private structure to initialize
2653 *
2654 * We determine which interrupt scheme to use based on...
2655 * - Kernel support (MSI, MSI-X)
2656 * - which can be user-defined (via MODULE_PARAM)
2657 * - Hardware queue count (num_*_queues)
2658 * - defined by miscellaneous hardware support/features (RSS, etc.)
2659 **/
Alexander Duyck2f90b862008-11-20 20:52:10 -08002660int ixgbe_init_interrupt_scheme(struct ixgbe_adapter *adapter)
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08002661{
2662 int err;
2663
2664 /* Number of supported queues */
2665 ixgbe_set_num_queues(adapter);
2666
2667 err = ixgbe_alloc_queues(adapter);
2668 if (err) {
2669 DPRINTK(PROBE, ERR, "Unable to allocate memory for queues\n");
2670 goto err_alloc_queues;
2671 }
2672
2673 err = ixgbe_set_interrupt_capability(adapter);
2674 if (err) {
2675 DPRINTK(PROBE, ERR, "Unable to setup interrupt capabilities\n");
2676 goto err_set_interrupt;
2677 }
2678
2679 DPRINTK(DRV, INFO, "Multiqueue %s: Rx Queue count = %u, "
Peter P Waskiewiczb4617242008-09-11 20:04:46 -07002680 "Tx Queue count = %u\n",
2681 (adapter->num_rx_queues > 1) ? "Enabled" :
2682 "Disabled", adapter->num_rx_queues, adapter->num_tx_queues);
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08002683
2684 set_bit(__IXGBE_DOWN, &adapter->state);
2685
2686 return 0;
2687
2688err_set_interrupt:
2689 kfree(adapter->tx_ring);
2690 kfree(adapter->rx_ring);
2691err_alloc_queues:
2692 return err;
Auke Kok9a799d72007-09-15 14:07:45 -07002693}
2694
2695/**
Donald Skidmorec4900be2008-11-20 21:11:42 -08002696 * ixgbe_sfp_timer - timer that schedules the worker to find a missing SFP+ module
2697 * @data: pointer to our adapter struct
2698 **/
2699static void ixgbe_sfp_timer(unsigned long data)
2700{
2701 struct ixgbe_adapter *adapter = (struct ixgbe_adapter *)data;
2702
2703 /* Do the sfp_timer outside of interrupt context due to the
2704 * delays that sfp+ detection requires
2705 */
2706 schedule_work(&adapter->sfp_task);
2707}
2708
2709/**
2710 * ixgbe_sfp_task - worker thread to find a missing module
2711 * @work: pointer to work_struct containing our data
2712 **/
2713static void ixgbe_sfp_task(struct work_struct *work)
2714{
2715 struct ixgbe_adapter *adapter = container_of(work,
2716 struct ixgbe_adapter,
2717 sfp_task);
2718 struct ixgbe_hw *hw = &adapter->hw;
2719
2720 if ((hw->phy.type == ixgbe_phy_nl) &&
2721 (hw->phy.sfp_type == ixgbe_sfp_type_not_present)) {
2722 s32 ret = hw->phy.ops.identify_sfp(hw);
2723 if (ret)
2724 goto reschedule;
2725 ret = hw->phy.ops.reset(hw);
2726 if (ret == IXGBE_ERR_SFP_NOT_SUPPORTED) {
2727 DPRINTK(PROBE, ERR, "failed to initialize because an "
2728 "unsupported SFP+ module type was detected.\n"
2729 "Reload the driver after installing a "
2730 "supported module.\n");
2731 unregister_netdev(adapter->netdev);
2732 } else {
2733 DPRINTK(PROBE, INFO, "detected SFP+: %d\n",
2734 hw->phy.sfp_type);
2735 }
2736 /* don't need this routine any more */
2737 clear_bit(__IXGBE_SFP_MODULE_NOT_FOUND, &adapter->state);
2738 }
2739 return;
2740reschedule:
2741 if (test_bit(__IXGBE_SFP_MODULE_NOT_FOUND, &adapter->state))
2742 mod_timer(&adapter->sfp_timer,
2743 round_jiffies(jiffies + (2 * HZ)));
2744}
2745
2746/**
Auke Kok9a799d72007-09-15 14:07:45 -07002747 * ixgbe_sw_init - Initialize general software structures (struct ixgbe_adapter)
2748 * @adapter: board private structure to initialize
2749 *
2750 * ixgbe_sw_init initializes the Adapter private data structure.
2751 * Fields are initialized based on PCI device information and
2752 * OS network device settings (MTU size).
2753 **/
2754static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter)
2755{
2756 struct ixgbe_hw *hw = &adapter->hw;
2757 struct pci_dev *pdev = adapter->pdev;
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08002758 unsigned int rss;
Jeff Kirsher7a6b6f52008-11-25 01:02:08 -08002759#ifdef CONFIG_IXGBE_DCB
Alexander Duyck2f90b862008-11-20 20:52:10 -08002760 int j;
2761 struct tc_configuration *tc;
2762#endif
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08002763
Jesse Brandeburgc44ade92008-09-11 19:59:59 -07002764 /* PCI config space info */
2765
2766 hw->vendor_id = pdev->vendor;
2767 hw->device_id = pdev->device;
2768 hw->revision_id = pdev->revision;
2769 hw->subsystem_vendor_id = pdev->subsystem_vendor;
2770 hw->subsystem_device_id = pdev->subsystem_device;
2771
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08002772 /* Set capability flags */
2773 rss = min(IXGBE_MAX_RSS_INDICES, (int)num_online_cpus());
2774 adapter->ring_feature[RING_F_RSS].indices = rss;
2775 adapter->flags |= IXGBE_FLAG_RSS_ENABLED;
Alexander Duyck2f90b862008-11-20 20:52:10 -08002776 adapter->ring_feature[RING_F_DCB].indices = IXGBE_MAX_DCB_INDICES;
2777
Jeff Kirsher7a6b6f52008-11-25 01:02:08 -08002778#ifdef CONFIG_IXGBE_DCB
Alexander Duyck2f90b862008-11-20 20:52:10 -08002779 /* Configure DCB traffic classes */
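	/*
	 * Each class gets 12% or 13% of its bandwidth group; with the usual
	 * eight traffic classes the alternating 12/13 split adds up to 100%.
	 */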
2780 for (j = 0; j < MAX_TRAFFIC_CLASS; j++) {
2781 tc = &adapter->dcb_cfg.tc_config[j];
2782 tc->path[DCB_TX_CONFIG].bwg_id = 0;
2783 tc->path[DCB_TX_CONFIG].bwg_percent = 12 + (j & 1);
2784 tc->path[DCB_RX_CONFIG].bwg_id = 0;
2785 tc->path[DCB_RX_CONFIG].bwg_percent = 12 + (j & 1);
2786 tc->dcb_pfc = pfc_disabled;
2787 }
2788 adapter->dcb_cfg.bw_percentage[DCB_TX_CONFIG][0] = 100;
2789 adapter->dcb_cfg.bw_percentage[DCB_RX_CONFIG][0] = 100;
2790 adapter->dcb_cfg.rx_pba_cfg = pba_equal;
2791 adapter->dcb_cfg.round_robin_enable = false;
2792 adapter->dcb_set_bitmap = 0x00;
2793 ixgbe_copy_dcb_cfg(&adapter->dcb_cfg, &adapter->temp_dcb_cfg,
2794 adapter->ring_feature[RING_F_DCB].indices);
2795
2796#endif
Jesse Brandeburg0befdb32008-10-31 00:46:40 -07002797 if (hw->mac.ops.get_media_type &&
2798 (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_copper))
2799 adapter->flags |= IXGBE_FLAG_FAN_FAIL_CAPABLE;
Auke Kok9a799d72007-09-15 14:07:45 -07002800
2801 /* default flow control settings */
Jesse Brandeburg2b9ade92008-08-26 04:27:10 -07002802 hw->fc.original_type = ixgbe_fc_none;
2803 hw->fc.type = ixgbe_fc_none;
2804 hw->fc.high_water = IXGBE_DEFAULT_FCRTH;
2805 hw->fc.low_water = IXGBE_DEFAULT_FCRTL;
2806 hw->fc.pause_time = IXGBE_DEFAULT_FCPAUSE;
2807 hw->fc.send_xon = true;
Auke Kok9a799d72007-09-15 14:07:45 -07002808
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08002809 /* select 10G link by default */
Auke Kok9a799d72007-09-15 14:07:45 -07002810 hw->mac.link_mode_select = IXGBE_AUTOC_LMS_10G_LINK_NO_AN;
Auke Kok9a799d72007-09-15 14:07:45 -07002811
Jesse Brandeburg30efa5a2008-09-11 19:58:14 -07002812 /* enable itr by default in dynamic mode */
2813 adapter->itr_setting = 1;
2814 adapter->eitr_param = 20000;
2815
 2816 	/* default thresholds for the dynamic EITR algorithm (in MB/s of traffic) */
2817 adapter->eitr_low = 10;
2818 adapter->eitr_high = 20;
2819
2820 /* set default ring sizes */
2821 adapter->tx_ring_count = IXGBE_DEFAULT_TXD;
2822 adapter->rx_ring_count = IXGBE_DEFAULT_RXD;
2823
Auke Kok9a799d72007-09-15 14:07:45 -07002824 /* initialize eeprom parameters */
Jesse Brandeburgc44ade92008-09-11 19:59:59 -07002825 if (ixgbe_init_eeprom_params_generic(hw)) {
Auke Kok9a799d72007-09-15 14:07:45 -07002826 dev_err(&pdev->dev, "EEPROM initialization failed\n");
2827 return -EIO;
2828 }
2829
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08002830 /* enable rx csum by default */
Auke Kok9a799d72007-09-15 14:07:45 -07002831 adapter->flags |= IXGBE_FLAG_RX_CSUM_ENABLED;
2832
Auke Kok9a799d72007-09-15 14:07:45 -07002833 set_bit(__IXGBE_DOWN, &adapter->state);
2834
2835 return 0;
2836}
2837
2838/**
2839 * ixgbe_setup_tx_resources - allocate Tx resources (Descriptors)
2840 * @adapter: board private structure
Jesse Brandeburg3a581072008-08-26 04:27:08 -07002841 * @tx_ring: tx descriptor ring (for a specific queue) to setup
Auke Kok9a799d72007-09-15 14:07:45 -07002842 *
2843 * Return 0 on success, negative on failure
2844 **/
2845int ixgbe_setup_tx_resources(struct ixgbe_adapter *adapter,
Jesse Brandeburge01c31a2008-08-26 04:27:13 -07002846 struct ixgbe_ring *tx_ring)
Auke Kok9a799d72007-09-15 14:07:45 -07002847{
2848 struct pci_dev *pdev = adapter->pdev;
2849 int size;
2850
Jesse Brandeburg3a581072008-08-26 04:27:08 -07002851 size = sizeof(struct ixgbe_tx_buffer) * tx_ring->count;
2852 tx_ring->tx_buffer_info = vmalloc(size);
Jesse Brandeburge01c31a2008-08-26 04:27:13 -07002853 if (!tx_ring->tx_buffer_info)
2854 goto err;
Jesse Brandeburg3a581072008-08-26 04:27:08 -07002855 memset(tx_ring->tx_buffer_info, 0, size);
Auke Kok9a799d72007-09-15 14:07:45 -07002856
2857 /* round up to nearest 4K */
Jesse Brandeburge01c31a2008-08-26 04:27:13 -07002858 tx_ring->size = tx_ring->count * sizeof(union ixgbe_adv_tx_desc) +
2859 sizeof(u32);
Jesse Brandeburg3a581072008-08-26 04:27:08 -07002860 tx_ring->size = ALIGN(tx_ring->size, 4096);
Auke Kok9a799d72007-09-15 14:07:45 -07002861
Jesse Brandeburg3a581072008-08-26 04:27:08 -07002862 tx_ring->desc = pci_alloc_consistent(pdev, tx_ring->size,
2863 &tx_ring->dma);
Jesse Brandeburge01c31a2008-08-26 04:27:13 -07002864 if (!tx_ring->desc)
2865 goto err;
Auke Kok9a799d72007-09-15 14:07:45 -07002866
Jesse Brandeburg3a581072008-08-26 04:27:08 -07002867 tx_ring->next_to_use = 0;
2868 tx_ring->next_to_clean = 0;
2869 tx_ring->work_limit = tx_ring->count;
Auke Kok9a799d72007-09-15 14:07:45 -07002870 return 0;
Jesse Brandeburge01c31a2008-08-26 04:27:13 -07002871
2872err:
2873 vfree(tx_ring->tx_buffer_info);
2874 tx_ring->tx_buffer_info = NULL;
2875 DPRINTK(PROBE, ERR, "Unable to allocate memory for the transmit "
2876 "descriptor ring\n");
2877 return -ENOMEM;
Auke Kok9a799d72007-09-15 14:07:45 -07002878}
2879
2880/**
Alexander Duyck69888672008-09-11 20:05:39 -07002881 * ixgbe_setup_all_tx_resources - allocate all queues Tx resources
2882 * @adapter: board private structure
2883 *
2884 * If this function returns with an error, then it's possible one or
 2885  * more of the rings are populated (while the rest are not). It is the
 2886  * caller's duty to clean those orphaned rings.
2887 *
2888 * Return 0 on success, negative on failure
2889 **/
2890static int ixgbe_setup_all_tx_resources(struct ixgbe_adapter *adapter)
2891{
2892 int i, err = 0;
2893
2894 for (i = 0; i < adapter->num_tx_queues; i++) {
2895 err = ixgbe_setup_tx_resources(adapter, &adapter->tx_ring[i]);
2896 if (!err)
2897 continue;
2898 DPRINTK(PROBE, ERR, "Allocation for Tx Queue %u failed\n", i);
2899 break;
2900 }
2901
2902 return err;
2903}
2904
2905/**
Auke Kok9a799d72007-09-15 14:07:45 -07002906 * ixgbe_setup_rx_resources - allocate Rx resources (Descriptors)
2907 * @adapter: board private structure
Jesse Brandeburg3a581072008-08-26 04:27:08 -07002908 * @rx_ring: rx descriptor ring (for a specific queue) to setup
Auke Kok9a799d72007-09-15 14:07:45 -07002909 *
2910 * Returns 0 on success, negative on failure
2911 **/
2912int ixgbe_setup_rx_resources(struct ixgbe_adapter *adapter,
Peter P Waskiewiczb4617242008-09-11 20:04:46 -07002913 struct ixgbe_ring *rx_ring)
Auke Kok9a799d72007-09-15 14:07:45 -07002914{
2915 struct pci_dev *pdev = adapter->pdev;
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08002916 int size;
Auke Kok9a799d72007-09-15 14:07:45 -07002917
Mallikarjuna R Chilakala177db6f2008-06-18 15:32:19 -07002918 size = sizeof(struct net_lro_desc) * IXGBE_MAX_LRO_DESCRIPTORS;
Jesse Brandeburg3a581072008-08-26 04:27:08 -07002919 rx_ring->lro_mgr.lro_arr = vmalloc(size);
2920 if (!rx_ring->lro_mgr.lro_arr)
Mallikarjuna R Chilakala177db6f2008-06-18 15:32:19 -07002921 return -ENOMEM;
Jesse Brandeburg3a581072008-08-26 04:27:08 -07002922 memset(rx_ring->lro_mgr.lro_arr, 0, size);
Mallikarjuna R Chilakala177db6f2008-06-18 15:32:19 -07002923
Jesse Brandeburg3a581072008-08-26 04:27:08 -07002924 size = sizeof(struct ixgbe_rx_buffer) * rx_ring->count;
2925 rx_ring->rx_buffer_info = vmalloc(size);
2926 if (!rx_ring->rx_buffer_info) {
Auke Kok9a799d72007-09-15 14:07:45 -07002927 DPRINTK(PROBE, ERR,
Peter P Waskiewiczb4617242008-09-11 20:04:46 -07002928 "vmalloc allocation failed for the rx desc ring\n");
Mallikarjuna R Chilakala177db6f2008-06-18 15:32:19 -07002929 goto alloc_failed;
Auke Kok9a799d72007-09-15 14:07:45 -07002930 }
Jesse Brandeburg3a581072008-08-26 04:27:08 -07002931 memset(rx_ring->rx_buffer_info, 0, size);
Auke Kok9a799d72007-09-15 14:07:45 -07002932
Auke Kok9a799d72007-09-15 14:07:45 -07002933 /* Round up to nearest 4K */
Jesse Brandeburg3a581072008-08-26 04:27:08 -07002934 rx_ring->size = rx_ring->count * sizeof(union ixgbe_adv_rx_desc);
2935 rx_ring->size = ALIGN(rx_ring->size, 4096);
Auke Kok9a799d72007-09-15 14:07:45 -07002936
Jesse Brandeburg3a581072008-08-26 04:27:08 -07002937 rx_ring->desc = pci_alloc_consistent(pdev, rx_ring->size, &rx_ring->dma);
Auke Kok9a799d72007-09-15 14:07:45 -07002938
Jesse Brandeburg3a581072008-08-26 04:27:08 -07002939 if (!rx_ring->desc) {
Auke Kok9a799d72007-09-15 14:07:45 -07002940 DPRINTK(PROBE, ERR,
Peter P Waskiewiczb4617242008-09-11 20:04:46 -07002941 "Memory allocation failed for the rx desc ring\n");
Jesse Brandeburg3a581072008-08-26 04:27:08 -07002942 vfree(rx_ring->rx_buffer_info);
Mallikarjuna R Chilakala177db6f2008-06-18 15:32:19 -07002943 goto alloc_failed;
Auke Kok9a799d72007-09-15 14:07:45 -07002944 }
2945
Jesse Brandeburg3a581072008-08-26 04:27:08 -07002946 rx_ring->next_to_clean = 0;
2947 rx_ring->next_to_use = 0;
Auke Kok9a799d72007-09-15 14:07:45 -07002948
2949 return 0;
Mallikarjuna R Chilakala177db6f2008-06-18 15:32:19 -07002950
2951alloc_failed:
Jesse Brandeburg3a581072008-08-26 04:27:08 -07002952 vfree(rx_ring->lro_mgr.lro_arr);
2953 rx_ring->lro_mgr.lro_arr = NULL;
Mallikarjuna R Chilakala177db6f2008-06-18 15:32:19 -07002954 return -ENOMEM;
Auke Kok9a799d72007-09-15 14:07:45 -07002955}
2956
2957/**
Alexander Duyck69888672008-09-11 20:05:39 -07002958 * ixgbe_setup_all_rx_resources - allocate all queues Rx resources
2959 * @adapter: board private structure
2960 *
2961 * If this function returns with an error, then it's possible one or
 2962  * more of the rings are populated (while the rest are not). It is the
 2963  * caller's duty to clean those orphaned rings.
2964 *
2965 * Return 0 on success, negative on failure
2966 **/
2968static int ixgbe_setup_all_rx_resources(struct ixgbe_adapter *adapter)
2969{
2970 int i, err = 0;
2971
2972 for (i = 0; i < adapter->num_rx_queues; i++) {
2973 err = ixgbe_setup_rx_resources(adapter, &adapter->rx_ring[i]);
2974 if (!err)
2975 continue;
2976 DPRINTK(PROBE, ERR, "Allocation for Rx Queue %u failed\n", i);
2977 break;
2978 }
2979
2980 return err;
2981}
2982
2983/**
Auke Kok9a799d72007-09-15 14:07:45 -07002984 * ixgbe_free_tx_resources - Free Tx Resources per Queue
2985 * @adapter: board private structure
2986 * @tx_ring: Tx descriptor ring for a specific queue
2987 *
2988 * Free all transmit software resources
2989 **/
Jesse Brandeburgc431f972008-09-11 19:59:16 -07002990void ixgbe_free_tx_resources(struct ixgbe_adapter *adapter,
2991 struct ixgbe_ring *tx_ring)
Auke Kok9a799d72007-09-15 14:07:45 -07002992{
2993 struct pci_dev *pdev = adapter->pdev;
2994
2995 ixgbe_clean_tx_ring(adapter, tx_ring);
2996
2997 vfree(tx_ring->tx_buffer_info);
2998 tx_ring->tx_buffer_info = NULL;
2999
3000 pci_free_consistent(pdev, tx_ring->size, tx_ring->desc, tx_ring->dma);
3001
3002 tx_ring->desc = NULL;
3003}
3004
3005/**
3006 * ixgbe_free_all_tx_resources - Free Tx Resources for All Queues
3007 * @adapter: board private structure
3008 *
3009 * Free all transmit software resources
3010 **/
3011static void ixgbe_free_all_tx_resources(struct ixgbe_adapter *adapter)
3012{
3013 int i;
3014
3015 for (i = 0; i < adapter->num_tx_queues; i++)
3016 ixgbe_free_tx_resources(adapter, &adapter->tx_ring[i]);
3017}
3018
3019/**
Peter P Waskiewiczb4617242008-09-11 20:04:46 -07003020 * ixgbe_free_rx_resources - Free Rx Resources
Auke Kok9a799d72007-09-15 14:07:45 -07003021 * @adapter: board private structure
3022 * @rx_ring: ring to clean the resources from
3023 *
3024 * Free all receive software resources
3025 **/
Jesse Brandeburgc431f972008-09-11 19:59:16 -07003026void ixgbe_free_rx_resources(struct ixgbe_adapter *adapter,
3027 struct ixgbe_ring *rx_ring)
Auke Kok9a799d72007-09-15 14:07:45 -07003028{
3029 struct pci_dev *pdev = adapter->pdev;
3030
Mallikarjuna R Chilakala177db6f2008-06-18 15:32:19 -07003031 vfree(rx_ring->lro_mgr.lro_arr);
3032 rx_ring->lro_mgr.lro_arr = NULL;
3033
Auke Kok9a799d72007-09-15 14:07:45 -07003034 ixgbe_clean_rx_ring(adapter, rx_ring);
3035
3036 vfree(rx_ring->rx_buffer_info);
3037 rx_ring->rx_buffer_info = NULL;
3038
3039 pci_free_consistent(pdev, rx_ring->size, rx_ring->desc, rx_ring->dma);
3040
3041 rx_ring->desc = NULL;
3042}
3043
3044/**
3045 * ixgbe_free_all_rx_resources - Free Rx Resources for All Queues
3046 * @adapter: board private structure
3047 *
3048 * Free all receive software resources
3049 **/
3050static void ixgbe_free_all_rx_resources(struct ixgbe_adapter *adapter)
3051{
3052 int i;
3053
3054 for (i = 0; i < adapter->num_rx_queues; i++)
3055 ixgbe_free_rx_resources(adapter, &adapter->rx_ring[i]);
3056}
3057
3058/**
Auke Kok9a799d72007-09-15 14:07:45 -07003059 * ixgbe_change_mtu - Change the Maximum Transfer Unit
3060 * @netdev: network interface device structure
3061 * @new_mtu: new value for maximum frame size
3062 *
3063 * Returns 0 on success, negative on failure
3064 **/
3065static int ixgbe_change_mtu(struct net_device *netdev, int new_mtu)
3066{
3067 struct ixgbe_adapter *adapter = netdev_priv(netdev);
3068 int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
3069
Jesse Brandeburg42c783c2008-09-11 19:56:28 -07003070 /* MTU < 68 is an error and causes problems on some kernels */
3071 if ((new_mtu < 68) || (max_frame > IXGBE_MAX_JUMBO_FRAME_SIZE))
Auke Kok9a799d72007-09-15 14:07:45 -07003072 return -EINVAL;
3073
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08003074 DPRINTK(PROBE, INFO, "changing MTU from %d to %d\n",
Peter P Waskiewiczb4617242008-09-11 20:04:46 -07003075 netdev->mtu, new_mtu);
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08003076 /* must set new MTU before calling down or up */
Auke Kok9a799d72007-09-15 14:07:45 -07003077 netdev->mtu = new_mtu;
3078
Ayyappan Veeraiyand4f80882008-02-01 15:58:41 -08003079 if (netif_running(netdev))
3080 ixgbe_reinit_locked(adapter);
Auke Kok9a799d72007-09-15 14:07:45 -07003081
3082 return 0;
3083}
3084
3085/**
3086 * ixgbe_open - Called when a network interface is made active
3087 * @netdev: network interface device structure
3088 *
3089 * Returns 0 on success, negative value on failure
3090 *
3091 * The open entry point is called when a network interface is made
3092 * active by the system (IFF_UP). At this point all resources needed
3093 * for transmit and receive operations are allocated, the interrupt
3094 * handler is registered with the OS, the watchdog timer is started,
3095 * and the stack is notified that the interface is ready.
3096 **/
3097static int ixgbe_open(struct net_device *netdev)
3098{
3099 struct ixgbe_adapter *adapter = netdev_priv(netdev);
3100 int err;
Auke Kok9a799d72007-09-15 14:07:45 -07003101
Auke Kok4bebfaa2008-02-11 09:26:01 -08003102 /* disallow open during test */
3103 if (test_bit(__IXGBE_TESTING, &adapter->state))
3104 return -EBUSY;
3105
Auke Kok9a799d72007-09-15 14:07:45 -07003106 /* allocate transmit descriptors */
3107 err = ixgbe_setup_all_tx_resources(adapter);
3108 if (err)
3109 goto err_setup_tx;
3110
Auke Kok9a799d72007-09-15 14:07:45 -07003111 /* allocate receive descriptors */
3112 err = ixgbe_setup_all_rx_resources(adapter);
3113 if (err)
3114 goto err_setup_rx;
3115
3116 ixgbe_configure(adapter);
3117
Ayyappan Veeraiyan021230d2008-03-03 15:03:45 -08003118 err = ixgbe_request_irq(adapter);
Auke Kok9a799d72007-09-15 14:07:45 -07003119 if (err)
3120 goto err_req_irq;
3121
Auke Kok9a799d72007-09-15 14:07:45 -07003122 err = ixgbe_up_complete(adapter);
3123 if (err)
3124 goto err_up;
3125
Jeff Kirsherd55b53f2008-07-18 04:33:03 -07003126 netif_tx_start_all_queues(netdev);
3127
Auke Kok9a799d72007-09-15 14:07:45 -07003128 return 0;
3129
3130err_up:
Ayyappan Veeraiyan5eba3692008-02-01 15:59:04 -08003131 ixgbe_release_hw_control(adapter);
Auke Kok9a799d72007-09-15 14:07:45 -07003132 ixgbe_free_irq(adapter);
3133err_req_irq:
3134 ixgbe_free_all_rx_resources(adapter);
3135err_setup_rx:
3136 ixgbe_free_all_tx_resources(adapter);
3137err_setup_tx:
3138 ixgbe_reset(adapter);
3139
3140 return err;
3141}
3142
3143/**
3144 * ixgbe_close - Disables a network interface
3145 * @netdev: network interface device structure
3146 *
3147 * Returns 0, this is not allowed to fail
3148 *
3149 * The close entry point is called when an interface is de-activated
3150 * by the OS. The hardware is still under the drivers control, but
3151 * needs to be disabled. A global MAC reset is issued to stop the
3152 * hardware, and all transmit and receive resources are freed.
3153 **/
3154static int ixgbe_close(struct net_device *netdev)
3155{
3156 struct ixgbe_adapter *adapter = netdev_priv(netdev);
Auke Kok9a799d72007-09-15 14:07:45 -07003157
3158 ixgbe_down(adapter);
3159 ixgbe_free_irq(adapter);
3160
3161 ixgbe_free_all_tx_resources(adapter);
3162 ixgbe_free_all_rx_resources(adapter);
3163
Ayyappan Veeraiyan5eba3692008-02-01 15:59:04 -08003164 ixgbe_release_hw_control(adapter);
Auke Kok9a799d72007-09-15 14:07:45 -07003165
3166 return 0;
3167}
3168
3169/**
Alexander Duyckb3c8b4b2008-09-11 20:04:56 -07003170 * ixgbe_napi_add_all - prep napi structs for use
3171 * @adapter: private struct
3172 * helper function to napi_add each possible q_vector->napi
3173 */
Alexander Duyck2f90b862008-11-20 20:52:10 -08003174void ixgbe_napi_add_all(struct ixgbe_adapter *adapter)
Alexander Duyckb3c8b4b2008-09-11 20:04:56 -07003175{
3176 int q_idx, q_vectors;
3177 int (*poll)(struct napi_struct *, int);
3178
3179 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
3180 poll = &ixgbe_clean_rxonly;
3181 /* Only enable as many vectors as we have rx queues. */
3182 q_vectors = adapter->num_rx_queues;
3183 } else {
3184 poll = &ixgbe_poll;
3185 /* only one q_vector for legacy modes */
3186 q_vectors = 1;
3187 }
3188
3189 for (q_idx = 0; q_idx < q_vectors; q_idx++) {
3190 struct ixgbe_q_vector *q_vector = &adapter->q_vector[q_idx];
3191 netif_napi_add(adapter->netdev, &q_vector->napi, (*poll), 64);
3192 }
3193}
3194
Alexander Duyck2f90b862008-11-20 20:52:10 -08003195void ixgbe_napi_del_all(struct ixgbe_adapter *adapter)
Alexander Duyckb3c8b4b2008-09-11 20:04:56 -07003196{
3197 int q_idx;
3198 int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
3199
3200 /* legacy and MSI only use one vector */
3201 if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED))
3202 q_vectors = 1;
3203
3204 for (q_idx = 0; q_idx < q_vectors; q_idx++) {
3205 struct ixgbe_q_vector *q_vector = &adapter->q_vector[q_idx];
3206 if (!q_vector->rxr_count)
3207 continue;
3208 netif_napi_del(&q_vector->napi);
3209 }
3210}
3211
3212#ifdef CONFIG_PM
3213static int ixgbe_resume(struct pci_dev *pdev)
3214{
3215 struct net_device *netdev = pci_get_drvdata(pdev);
3216 struct ixgbe_adapter *adapter = netdev_priv(netdev);
3217 u32 err;
3218
3219 pci_set_power_state(pdev, PCI_D0);
3220 pci_restore_state(pdev);
3221 err = pci_enable_device(pdev);
3222 if (err) {
Alexander Duyck69888672008-09-11 20:05:39 -07003223 printk(KERN_ERR "ixgbe: Cannot enable PCI device from "
Alexander Duyckb3c8b4b2008-09-11 20:04:56 -07003224 "suspend\n");
3225 return err;
3226 }
3227 pci_set_master(pdev);
3228
3229 pci_enable_wake(pdev, PCI_D3hot, 0);
3230 pci_enable_wake(pdev, PCI_D3cold, 0);
3231
3232 err = ixgbe_init_interrupt_scheme(adapter);
3233 if (err) {
3234 printk(KERN_ERR "ixgbe: Cannot initialize interrupts for "
3235 "device\n");
3236 return err;
3237 }
3238
3239 ixgbe_napi_add_all(adapter);
3240 ixgbe_reset(adapter);
3241
3242 if (netif_running(netdev)) {
3243 err = ixgbe_open(adapter->netdev);
3244 if (err)
3245 return err;
3246 }
3247
3248 netif_device_attach(netdev);
3249
3250 return 0;
3251}
3252
3253#endif /* CONFIG_PM */
3254static int ixgbe_suspend(struct pci_dev *pdev, pm_message_t state)
3255{
3256 struct net_device *netdev = pci_get_drvdata(pdev);
3257 struct ixgbe_adapter *adapter = netdev_priv(netdev);
3258#ifdef CONFIG_PM
3259 int retval = 0;
3260#endif
3261
3262 netif_device_detach(netdev);
3263
3264 if (netif_running(netdev)) {
3265 ixgbe_down(adapter);
3266 ixgbe_free_irq(adapter);
3267 ixgbe_free_all_tx_resources(adapter);
3268 ixgbe_free_all_rx_resources(adapter);
3269 }
3270 ixgbe_reset_interrupt_capability(adapter);
3271 ixgbe_napi_del_all(adapter);
3272 kfree(adapter->tx_ring);
3273 kfree(adapter->rx_ring);
3274
3275#ifdef CONFIG_PM
3276 retval = pci_save_state(pdev);
3277 if (retval)
3278 return retval;
3279#endif
3280
3281 pci_enable_wake(pdev, PCI_D3hot, 0);
3282 pci_enable_wake(pdev, PCI_D3cold, 0);
3283
3284 ixgbe_release_hw_control(adapter);
3285
3286 pci_disable_device(pdev);
3287
3288 pci_set_power_state(pdev, pci_choose_state(pdev, state));
3289
3290 return 0;
3291}
3292
3293static void ixgbe_shutdown(struct pci_dev *pdev)
3294{
3295 ixgbe_suspend(pdev, PMSG_SUSPEND);
3296}
3297
3298/**
Auke Kok9a799d72007-09-15 14:07:45 -07003299 * ixgbe_update_stats - Update the board statistics counters.
3300 * @adapter: board private structure
3301 **/
3302void ixgbe_update_stats(struct ixgbe_adapter *adapter)
3303{
3304 struct ixgbe_hw *hw = &adapter->hw;
Ayyappan Veeraiyan6f11eef2008-02-01 15:59:14 -08003305 u64 total_mpc = 0;
3306 u32 i, missed_rx = 0, mpc, bprc, lxon, lxoff, xon_off_tot;
Auke Kok9a799d72007-09-15 14:07:45 -07003307
3308 adapter->stats.crcerrs += IXGBE_READ_REG(hw, IXGBE_CRCERRS);
Ayyappan Veeraiyan6f11eef2008-02-01 15:59:14 -08003309 for (i = 0; i < 8; i++) {
3310 /* for packet buffers not used, the register should read 0 */
3311 mpc = IXGBE_READ_REG(hw, IXGBE_MPC(i));
3312 missed_rx += mpc;
3313 adapter->stats.mpc[i] += mpc;
3314 total_mpc += adapter->stats.mpc[i];
3315 adapter->stats.rnbc[i] += IXGBE_READ_REG(hw, IXGBE_RNBC(i));
Alexander Duyck2f90b862008-11-20 20:52:10 -08003316 adapter->stats.qptc[i] += IXGBE_READ_REG(hw, IXGBE_QPTC(i));
3317 adapter->stats.qbtc[i] += IXGBE_READ_REG(hw, IXGBE_QBTC(i));
3318 adapter->stats.qprc[i] += IXGBE_READ_REG(hw, IXGBE_QPRC(i));
3319 adapter->stats.qbrc[i] += IXGBE_READ_REG(hw, IXGBE_QBRC(i));
3320 adapter->stats.pxonrxc[i] += IXGBE_READ_REG(hw,
3321 IXGBE_PXONRXC(i));
3322 adapter->stats.pxontxc[i] += IXGBE_READ_REG(hw,
3323 IXGBE_PXONTXC(i));
3324 adapter->stats.pxoffrxc[i] += IXGBE_READ_REG(hw,
3325 IXGBE_PXOFFRXC(i));
3326 adapter->stats.pxofftxc[i] += IXGBE_READ_REG(hw,
3327 IXGBE_PXOFFTXC(i));
Ayyappan Veeraiyan6f11eef2008-02-01 15:59:14 -08003328 }
3329 adapter->stats.gprc += IXGBE_READ_REG(hw, IXGBE_GPRC);
3330 /* work around hardware counting issue */
3331 adapter->stats.gprc -= missed_rx;
Auke Kok9a799d72007-09-15 14:07:45 -07003332
Ayyappan Veeraiyan6f11eef2008-02-01 15:59:14 -08003333 /* 82598 hardware only has a 32 bit counter in the high register */
Auke Kok9a799d72007-09-15 14:07:45 -07003334 adapter->stats.gorc += IXGBE_READ_REG(hw, IXGBE_GORCH);
Ayyappan Veeraiyan6f11eef2008-02-01 15:59:14 -08003335 adapter->stats.gotc += IXGBE_READ_REG(hw, IXGBE_GOTCH);
3336 adapter->stats.tor += IXGBE_READ_REG(hw, IXGBE_TORH);
Auke Kok9a799d72007-09-15 14:07:45 -07003337 bprc = IXGBE_READ_REG(hw, IXGBE_BPRC);
3338 adapter->stats.bprc += bprc;
3339 adapter->stats.mprc += IXGBE_READ_REG(hw, IXGBE_MPRC);
3340 adapter->stats.mprc -= bprc;
3341 adapter->stats.roc += IXGBE_READ_REG(hw, IXGBE_ROC);
3342 adapter->stats.prc64 += IXGBE_READ_REG(hw, IXGBE_PRC64);
3343 adapter->stats.prc127 += IXGBE_READ_REG(hw, IXGBE_PRC127);
3344 adapter->stats.prc255 += IXGBE_READ_REG(hw, IXGBE_PRC255);
3345 adapter->stats.prc511 += IXGBE_READ_REG(hw, IXGBE_PRC511);
3346 adapter->stats.prc1023 += IXGBE_READ_REG(hw, IXGBE_PRC1023);
3347 adapter->stats.prc1522 += IXGBE_READ_REG(hw, IXGBE_PRC1522);
Auke Kok9a799d72007-09-15 14:07:45 -07003348 adapter->stats.rlec += IXGBE_READ_REG(hw, IXGBE_RLEC);
3349 adapter->stats.lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXC);
Auke Kok9a799d72007-09-15 14:07:45 -07003350 adapter->stats.lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
Ayyappan Veeraiyan6f11eef2008-02-01 15:59:14 -08003351 lxon = IXGBE_READ_REG(hw, IXGBE_LXONTXC);
3352 adapter->stats.lxontxc += lxon;
3353 lxoff = IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
3354 adapter->stats.lxofftxc += lxoff;
Auke Kok9a799d72007-09-15 14:07:45 -07003355 adapter->stats.ruc += IXGBE_READ_REG(hw, IXGBE_RUC);
3356 adapter->stats.gptc += IXGBE_READ_REG(hw, IXGBE_GPTC);
Ayyappan Veeraiyan6f11eef2008-02-01 15:59:14 -08003357 adapter->stats.mptc += IXGBE_READ_REG(hw, IXGBE_MPTC);
3358 /*
3359 * 82598 errata - tx of flow control packets is included in tx counters
3360 */
3361 xon_off_tot = lxon + lxoff;
3362 adapter->stats.gptc -= xon_off_tot;
3363 adapter->stats.mptc -= xon_off_tot;
3364 adapter->stats.gotc -= (xon_off_tot * (ETH_ZLEN + ETH_FCS_LEN));
Auke Kok9a799d72007-09-15 14:07:45 -07003365 adapter->stats.ruc += IXGBE_READ_REG(hw, IXGBE_RUC);
3366 adapter->stats.rfc += IXGBE_READ_REG(hw, IXGBE_RFC);
3367 adapter->stats.rjc += IXGBE_READ_REG(hw, IXGBE_RJC);
Auke Kok9a799d72007-09-15 14:07:45 -07003368 adapter->stats.tpr += IXGBE_READ_REG(hw, IXGBE_TPR);
3369 adapter->stats.ptc64 += IXGBE_READ_REG(hw, IXGBE_PTC64);
Ayyappan Veeraiyan6f11eef2008-02-01 15:59:14 -08003370 adapter->stats.ptc64 -= xon_off_tot;
Auke Kok9a799d72007-09-15 14:07:45 -07003371 adapter->stats.ptc127 += IXGBE_READ_REG(hw, IXGBE_PTC127);
3372 adapter->stats.ptc255 += IXGBE_READ_REG(hw, IXGBE_PTC255);
3373 adapter->stats.ptc511 += IXGBE_READ_REG(hw, IXGBE_PTC511);
3374 adapter->stats.ptc1023 += IXGBE_READ_REG(hw, IXGBE_PTC1023);
3375 adapter->stats.ptc1522 += IXGBE_READ_REG(hw, IXGBE_PTC1522);
Auke Kok9a799d72007-09-15 14:07:45 -07003376 adapter->stats.bptc += IXGBE_READ_REG(hw, IXGBE_BPTC);
3377
3378 /* Fill out the OS statistics structure */
Auke Kok9a799d72007-09-15 14:07:45 -07003379 adapter->net_stats.multicast = adapter->stats.mprc;
3380
3381 /* Rx Errors */
3382 adapter->net_stats.rx_errors = adapter->stats.crcerrs +
Peter P Waskiewiczb4617242008-09-11 20:04:46 -07003383 adapter->stats.rlec;
Auke Kok9a799d72007-09-15 14:07:45 -07003384 adapter->net_stats.rx_dropped = 0;
3385 adapter->net_stats.rx_length_errors = adapter->stats.rlec;
3386 adapter->net_stats.rx_crc_errors = adapter->stats.crcerrs;
Ayyappan Veeraiyan6f11eef2008-02-01 15:59:14 -08003387 adapter->net_stats.rx_missed_errors = total_mpc;
Auke Kok9a799d72007-09-15 14:07:45 -07003388}
3389
3390/**
3391 * ixgbe_watchdog - Timer Call-back
3392 * @data: pointer to adapter cast into an unsigned long
3393 **/
3394static void ixgbe_watchdog(unsigned long data)
3395{
3396 struct ixgbe_adapter *adapter = (struct ixgbe_adapter *)data;
Jesse Brandeburgcf8280e2008-09-11 19:55:32 -07003397 struct ixgbe_hw *hw = &adapter->hw;
Auke Kok9a799d72007-09-15 14:07:45 -07003398
Jesse Brandeburgcf8280e2008-09-11 19:55:32 -07003399 /* Do the watchdog outside of interrupt context due to the lovely
3400 * delays that some of the newer hardware requires */
3401 if (!test_bit(__IXGBE_DOWN, &adapter->state)) {
3402 /* Cause software interrupt to ensure rx rings are cleaned */
3403 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
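			/* one bit per MSI-X queue vector; writing the mask to
			 * EICS raises a software interrupt on every ring's
			 * vector so each ring gets serviced */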
3404 u32 eics =
3405 (1 << (adapter->num_msix_vectors - NON_Q_VECTORS)) - 1;
3406 IXGBE_WRITE_REG(hw, IXGBE_EICS, eics);
3407 } else {
3408 /* For legacy and MSI interrupts don't set any bits that
3409 * are enabled for EIAM, because this operation would
3410 * set *both* EIMS and EICS for any bit in EIAM */
3411 IXGBE_WRITE_REG(hw, IXGBE_EICS,
3412 (IXGBE_EICS_TCP_TIMER | IXGBE_EICS_OTHER));
3413 }
3414 /* Reset the timer */
3415 mod_timer(&adapter->watchdog_timer,
3416 round_jiffies(jiffies + 2 * HZ));
3417 }
3418
3419 schedule_work(&adapter->watchdog_task);
3420}
3421
3422/**
Alexander Duyck69888672008-09-11 20:05:39 -07003423 * ixgbe_watchdog_task - worker thread that checks link state and updates statistics
3424 * @work: pointer to work_struct containing our data
Jesse Brandeburgcf8280e2008-09-11 19:55:32 -07003425 **/
3426static void ixgbe_watchdog_task(struct work_struct *work)
3427{
3428 struct ixgbe_adapter *adapter = container_of(work,
3429 struct ixgbe_adapter,
3430 watchdog_task);
3431 struct net_device *netdev = adapter->netdev;
3432 struct ixgbe_hw *hw = &adapter->hw;
3433 u32 link_speed = adapter->link_speed;
3434 bool link_up = adapter->link_up;
3435
3436 adapter->flags |= IXGBE_FLAG_IN_WATCHDOG_TASK;
3437
3438 if (adapter->flags & IXGBE_FLAG_NEED_LINK_UPDATE) {
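		/* a link change is pending: poll check_link() on each watchdog
		 * tick until link comes up or the retry window expires, then
		 * re-arm the LSC interrupt and stop polling */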
3439 hw->mac.ops.check_link(hw, &link_speed, &link_up, false);
3440 if (link_up ||
3441 time_after(jiffies, (adapter->link_check_timeout +
3442 IXGBE_TRY_LINK_TIMEOUT))) {
3443 IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMC_LSC);
3444 adapter->flags &= ~IXGBE_FLAG_NEED_LINK_UPDATE;
3445 }
3446 adapter->link_up = link_up;
3447 adapter->link_speed = link_speed;
3448 }
Auke Kok9a799d72007-09-15 14:07:45 -07003449
3450 if (link_up) {
3451 if (!netif_carrier_ok(netdev)) {
Jesse Brandeburgcf8280e2008-09-11 19:55:32 -07003452 u32 frctl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
3453 u32 rmcs = IXGBE_READ_REG(hw, IXGBE_RMCS);
Auke Kok9a799d72007-09-15 14:07:45 -07003454#define FLOW_RX (frctl & IXGBE_FCTRL_RFCE)
3455#define FLOW_TX (rmcs & IXGBE_RMCS_TFCE_802_3X)
Jeff Kirshera46e5342008-11-27 00:22:21 -08003456 printk(KERN_INFO "ixgbe: %s NIC Link is Up %s, "
3457 "Flow Control: %s\n",
3458 netdev->name,
3459 (link_speed == IXGBE_LINK_SPEED_10GB_FULL ?
3460 "10 Gbps" :
3461 (link_speed == IXGBE_LINK_SPEED_1GB_FULL ?
3462 "1 Gbps" : "unknown speed")),
3463 ((FLOW_RX && FLOW_TX) ? "RX/TX" :
3464 (FLOW_RX ? "RX" :
3465 (FLOW_TX ? "TX" : "None"))));
Auke Kok9a799d72007-09-15 14:07:45 -07003466
3467 netif_carrier_on(netdev);
Alexander Duyck2f90b862008-11-20 20:52:10 -08003468 netif_tx_wake_all_queues(netdev);
Auke Kok9a799d72007-09-15 14:07:45 -07003469 } else {
3470 /* Force detection of hung controller */
3471 adapter->detect_tx_hung = true;
3472 }
3473 } else {
Jesse Brandeburgcf8280e2008-09-11 19:55:32 -07003474 adapter->link_up = false;
3475 adapter->link_speed = 0;
Auke Kok9a799d72007-09-15 14:07:45 -07003476 if (netif_carrier_ok(netdev)) {
Jeff Kirshera46e5342008-11-27 00:22:21 -08003477 printk(KERN_INFO "ixgbe: %s NIC Link is Down\n",
3478 netdev->name);
Auke Kok9a799d72007-09-15 14:07:45 -07003479 netif_carrier_off(netdev);
Alexander Duyck2f90b862008-11-20 20:52:10 -08003480 netif_tx_stop_all_queues(netdev);
Auke Kok9a799d72007-09-15 14:07:45 -07003481 }
3482 }
3483
3484 ixgbe_update_stats(adapter);
Jesse Brandeburgcf8280e2008-09-11 19:55:32 -07003485 adapter->flags &= ~IXGBE_FLAG_IN_WATCHDOG_TASK;
Auke Kok9a799d72007-09-15 14:07:45 -07003486}
3487
Auke Kok9a799d72007-09-15 14:07:45 -07003488static int ixgbe_tso(struct ixgbe_adapter *adapter,
Peter P Waskiewiczb4617242008-09-11 20:04:46 -07003489 struct ixgbe_ring *tx_ring, struct sk_buff *skb,
3490 u32 tx_flags, u8 *hdr_len)
Auke Kok9a799d72007-09-15 14:07:45 -07003491{
3492 struct ixgbe_adv_tx_context_desc *context_desc;
3493 unsigned int i;
3494 int err;
3495 struct ixgbe_tx_buffer *tx_buffer_info;
Jesse Brandeburg9f8cdf42008-09-11 20:03:35 -07003496 u32 vlan_macip_lens = 0, type_tucmd_mlhl;
3497 u32 mss_l4len_idx, l4len;
Auke Kok9a799d72007-09-15 14:07:45 -07003498
3499 if (skb_is_gso(skb)) {
3500 if (skb_header_cloned(skb)) {
3501 err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
3502 if (err)
3503 return err;
3504 }
3505 l4len = tcp_hdrlen(skb);
3506 *hdr_len += l4len;
3507
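		/* for TSO the hardware recomputes the length and checksum
		 * fields per segment, so zero the IP total length/checksum
		 * and seed the TCP checksum with the pseudo-header only
		 * (addresses + protocol, no length) */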
Al Viro8327d002007-12-10 18:54:12 +00003508 if (skb->protocol == htons(ETH_P_IP)) {
Auke Kok9a799d72007-09-15 14:07:45 -07003509 struct iphdr *iph = ip_hdr(skb);
3510 iph->tot_len = 0;
3511 iph->check = 0;
3512 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
Peter P Waskiewiczb4617242008-09-11 20:04:46 -07003513 iph->daddr, 0,
3514 IPPROTO_TCP,
3515 0);
Auke Kok9a799d72007-09-15 14:07:45 -07003516 adapter->hw_tso_ctxt++;
3517 } else if (skb_shinfo(skb)->gso_type == SKB_GSO_TCPV6) {
3518 ipv6_hdr(skb)->payload_len = 0;
3519 tcp_hdr(skb)->check =
3520 ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
Peter P Waskiewiczb4617242008-09-11 20:04:46 -07003521 &ipv6_hdr(skb)->daddr,
3522 0, IPPROTO_TCP, 0);
Auke Kok9a799d72007-09-15 14:07:45 -07003523 adapter->hw_tso6_ctxt++;
3524 }
3525
3526 i = tx_ring->next_to_use;
3527
3528 tx_buffer_info = &tx_ring->tx_buffer_info[i];
3529 context_desc = IXGBE_TX_CTXTDESC_ADV(*tx_ring, i);
3530
3531 /* VLAN MACLEN IPLEN */
3532 if (tx_flags & IXGBE_TX_FLAGS_VLAN)
3533 vlan_macip_lens |=
3534 (tx_flags & IXGBE_TX_FLAGS_VLAN_MASK);
3535 vlan_macip_lens |= ((skb_network_offset(skb)) <<
Peter P Waskiewiczb4617242008-09-11 20:04:46 -07003536 IXGBE_ADVTXD_MACLEN_SHIFT);
Auke Kok9a799d72007-09-15 14:07:45 -07003537 *hdr_len += skb_network_offset(skb);
3538 vlan_macip_lens |=
3539 (skb_transport_header(skb) - skb_network_header(skb));
3540 *hdr_len +=
3541 (skb_transport_header(skb) - skb_network_header(skb));
3542 context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens);
3543 context_desc->seqnum_seed = 0;
3544
3545 /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
Jesse Brandeburg9f8cdf42008-09-11 20:03:35 -07003546 type_tucmd_mlhl = (IXGBE_TXD_CMD_DEXT |
Peter P Waskiewiczb4617242008-09-11 20:04:46 -07003547 IXGBE_ADVTXD_DTYP_CTXT);
Auke Kok9a799d72007-09-15 14:07:45 -07003548
Al Viro8327d002007-12-10 18:54:12 +00003549 if (skb->protocol == htons(ETH_P_IP))
Auke Kok9a799d72007-09-15 14:07:45 -07003550 type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
3551 type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
3552 context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd_mlhl);
3553
3554 /* MSS L4LEN IDX */
Jesse Brandeburg9f8cdf42008-09-11 20:03:35 -07003555 mss_l4len_idx =
Auke Kok9a799d72007-09-15 14:07:45 -07003556 (skb_shinfo(skb)->gso_size << IXGBE_ADVTXD_MSS_SHIFT);
3557 mss_l4len_idx |= (l4len << IXGBE_ADVTXD_L4LEN_SHIFT);
PJ Waskiewicz4eeae6f2008-08-26 04:27:30 -07003558 /* use index 1 for TSO */
3559 mss_l4len_idx |= (1 << IXGBE_ADVTXD_IDX_SHIFT);
Auke Kok9a799d72007-09-15 14:07:45 -07003560 context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx);
3561
3562 tx_buffer_info->time_stamp = jiffies;
3563 tx_buffer_info->next_to_watch = i;
3564
3565 i++;
3566 if (i == tx_ring->count)
3567 i = 0;
3568 tx_ring->next_to_use = i;
3569
3570 return true;
3571 }
3572 return false;
3573}
3574
3575static bool ixgbe_tx_csum(struct ixgbe_adapter *adapter,
Peter P Waskiewiczb4617242008-09-11 20:04:46 -07003576 struct ixgbe_ring *tx_ring,
3577 struct sk_buff *skb, u32 tx_flags)
Auke Kok9a799d72007-09-15 14:07:45 -07003578{
3579 struct ixgbe_adv_tx_context_desc *context_desc;
3580 unsigned int i;
3581 struct ixgbe_tx_buffer *tx_buffer_info;
3582 u32 vlan_macip_lens = 0, type_tucmd_mlhl = 0;
3583
3584 if (skb->ip_summed == CHECKSUM_PARTIAL ||
3585 (tx_flags & IXGBE_TX_FLAGS_VLAN)) {
3586 i = tx_ring->next_to_use;
3587 tx_buffer_info = &tx_ring->tx_buffer_info[i];
3588 context_desc = IXGBE_TX_CTXTDESC_ADV(*tx_ring, i);
3589
3590 if (tx_flags & IXGBE_TX_FLAGS_VLAN)
3591 vlan_macip_lens |=
3592 (tx_flags & IXGBE_TX_FLAGS_VLAN_MASK);
3593 vlan_macip_lens |= (skb_network_offset(skb) <<
Peter P Waskiewiczb4617242008-09-11 20:04:46 -07003594 IXGBE_ADVTXD_MACLEN_SHIFT);
Auke Kok9a799d72007-09-15 14:07:45 -07003595 if (skb->ip_summed == CHECKSUM_PARTIAL)
3596 vlan_macip_lens |= (skb_transport_header(skb) -
Peter P Waskiewiczb4617242008-09-11 20:04:46 -07003597 skb_network_header(skb));
Auke Kok9a799d72007-09-15 14:07:45 -07003598
3599 context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens);
3600 context_desc->seqnum_seed = 0;
3601
3602 type_tucmd_mlhl |= (IXGBE_TXD_CMD_DEXT |
Peter P Waskiewiczb4617242008-09-11 20:04:46 -07003603 IXGBE_ADVTXD_DTYP_CTXT);
Auke Kok9a799d72007-09-15 14:07:45 -07003604
3605 if (skb->ip_summed == CHECKSUM_PARTIAL) {
Auke Kok41825d72008-02-12 15:20:33 -08003606 switch (skb->protocol) {
3607 case __constant_htons(ETH_P_IP):
Auke Kok9a799d72007-09-15 14:07:45 -07003608 type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
Auke Kok41825d72008-02-12 15:20:33 -08003609 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
3610 type_tucmd_mlhl |=
Peter P Waskiewiczb4617242008-09-11 20:04:46 -07003611 IXGBE_ADVTXD_TUCMD_L4T_TCP;
Auke Kok41825d72008-02-12 15:20:33 -08003612 break;
Auke Kok41825d72008-02-12 15:20:33 -08003613 case __constant_htons(ETH_P_IPV6):
3614 /* XXX what about other V6 headers?? */
3615 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
3616 type_tucmd_mlhl |=
Peter P Waskiewiczb4617242008-09-11 20:04:46 -07003617 IXGBE_ADVTXD_TUCMD_L4T_TCP;
Auke Kok41825d72008-02-12 15:20:33 -08003618 break;
Auke Kok41825d72008-02-12 15:20:33 -08003619 default:
3620 if (unlikely(net_ratelimit())) {
3621 DPRINTK(PROBE, WARNING,
3622 "partial checksum but proto=%x!\n",
3623 skb->protocol);
3624 }
3625 break;
3626 }
Auke Kok9a799d72007-09-15 14:07:45 -07003627 }
3628
3629 context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd_mlhl);
PJ Waskiewicz4eeae6f2008-08-26 04:27:30 -07003630 /* use index zero for tx checksum offload */
Auke Kok9a799d72007-09-15 14:07:45 -07003631 context_desc->mss_l4len_idx = 0;
3632
3633 tx_buffer_info->time_stamp = jiffies;
3634 tx_buffer_info->next_to_watch = i;
Jesse Brandeburg9f8cdf42008-09-11 20:03:35 -07003635
Auke Kok9a799d72007-09-15 14:07:45 -07003636 adapter->hw_csum_tx_good++;
3637 i++;
3638 if (i == tx_ring->count)
3639 i = 0;
3640 tx_ring->next_to_use = i;
3641
3642 return true;
3643 }
Jesse Brandeburg9f8cdf42008-09-11 20:03:35 -07003644
Auke Kok9a799d72007-09-15 14:07:45 -07003645 return false;
3646}
3647
3648static int ixgbe_tx_map(struct ixgbe_adapter *adapter,
Peter P Waskiewiczb4617242008-09-11 20:04:46 -07003649 struct ixgbe_ring *tx_ring,
3650 struct sk_buff *skb, unsigned int first)
Auke Kok9a799d72007-09-15 14:07:45 -07003651{
3652 struct ixgbe_tx_buffer *tx_buffer_info;
3653 unsigned int len = skb->len;
3654 unsigned int offset = 0, size, count = 0, i;
3655 unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
3656 unsigned int f;
3657
3658 len -= skb->data_len;
3659
3660 i = tx_ring->next_to_use;
3661
3662 while (len) {
3663 tx_buffer_info = &tx_ring->tx_buffer_info[i];
3664 size = min(len, (uint)IXGBE_MAX_DATA_PER_TXD);
3665
3666 tx_buffer_info->length = size;
3667 tx_buffer_info->dma = pci_map_single(adapter->pdev,
Peter P Waskiewiczb4617242008-09-11 20:04:46 -07003668 skb->data + offset,
3669 size, PCI_DMA_TODEVICE);
Auke Kok9a799d72007-09-15 14:07:45 -07003670 tx_buffer_info->time_stamp = jiffies;
3671 tx_buffer_info->next_to_watch = i;
3672
3673 len -= size;
3674 offset += size;
3675 count++;
3676 i++;
3677 if (i == tx_ring->count)
3678 i = 0;
3679 }
3680
3681 for (f = 0; f < nr_frags; f++) {
3682 struct skb_frag_struct *frag;
3683
3684 frag = &skb_shinfo(skb)->frags[f];
3685 len = frag->size;
3686 offset = frag->page_offset;
3687
3688 while (len) {
3689 tx_buffer_info = &tx_ring->tx_buffer_info[i];
3690 size = min(len, (uint)IXGBE_MAX_DATA_PER_TXD);
3691
3692 tx_buffer_info->length = size;
3693 tx_buffer_info->dma = pci_map_page(adapter->pdev,
Peter P Waskiewiczb4617242008-09-11 20:04:46 -07003694 frag->page,
3695 offset,
3696 size,
3697 PCI_DMA_TODEVICE);
Auke Kok9a799d72007-09-15 14:07:45 -07003698 tx_buffer_info->time_stamp = jiffies;
3699 tx_buffer_info->next_to_watch = i;
3700
3701 len -= size;
3702 offset += size;
3703 count++;
3704 i++;
3705 if (i == tx_ring->count)
3706 i = 0;
3707 }
3708 }
3709 if (i == 0)
3710 i = tx_ring->count - 1;
3711 else
3712 i = i - 1;
3713 tx_ring->tx_buffer_info[i].skb = skb;
3714 tx_ring->tx_buffer_info[first].next_to_watch = i;
3715
3716 return count;
3717}
3718
3719static void ixgbe_tx_queue(struct ixgbe_adapter *adapter,
Peter P Waskiewiczb4617242008-09-11 20:04:46 -07003720 struct ixgbe_ring *tx_ring,
3721 int tx_flags, int count, u32 paylen, u8 hdr_len)
Auke Kok9a799d72007-09-15 14:07:45 -07003722{
3723 union ixgbe_adv_tx_desc *tx_desc = NULL;
3724 struct ixgbe_tx_buffer *tx_buffer_info;
3725 u32 olinfo_status = 0, cmd_type_len = 0;
3726 unsigned int i;
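	/* flags OR'd into the last descriptor of the packet: EOP (end of
	 * packet), RS (request descriptor write-back) and IFCS (insert the
	 * Ethernet FCS) */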
3727 u32 txd_cmd = IXGBE_TXD_CMD_EOP | IXGBE_TXD_CMD_RS | IXGBE_TXD_CMD_IFCS;
3728
3729 cmd_type_len |= IXGBE_ADVTXD_DTYP_DATA;
3730
3731 cmd_type_len |= IXGBE_ADVTXD_DCMD_IFCS | IXGBE_ADVTXD_DCMD_DEXT;
3732
3733 if (tx_flags & IXGBE_TX_FLAGS_VLAN)
3734 cmd_type_len |= IXGBE_ADVTXD_DCMD_VLE;
3735
3736 if (tx_flags & IXGBE_TX_FLAGS_TSO) {
3737 cmd_type_len |= IXGBE_ADVTXD_DCMD_TSE;
3738
3739 olinfo_status |= IXGBE_TXD_POPTS_TXSM <<
Peter P Waskiewiczb4617242008-09-11 20:04:46 -07003740 IXGBE_ADVTXD_POPTS_SHIFT;
Auke Kok9a799d72007-09-15 14:07:45 -07003741
PJ Waskiewicz4eeae6f2008-08-26 04:27:30 -07003742 /* use index 1 context for tso */
3743 olinfo_status |= (1 << IXGBE_ADVTXD_IDX_SHIFT);
Auke Kok9a799d72007-09-15 14:07:45 -07003744 if (tx_flags & IXGBE_TX_FLAGS_IPV4)
3745 olinfo_status |= IXGBE_TXD_POPTS_IXSM <<
Peter P Waskiewiczb4617242008-09-11 20:04:46 -07003746 IXGBE_ADVTXD_POPTS_SHIFT;
Auke Kok9a799d72007-09-15 14:07:45 -07003747
3748 } else if (tx_flags & IXGBE_TX_FLAGS_CSUM)
3749 olinfo_status |= IXGBE_TXD_POPTS_TXSM <<
Peter P Waskiewiczb4617242008-09-11 20:04:46 -07003750 IXGBE_ADVTXD_POPTS_SHIFT;
Auke Kok9a799d72007-09-15 14:07:45 -07003751
3752 olinfo_status |= ((paylen - hdr_len) << IXGBE_ADVTXD_PAYLEN_SHIFT);
3753
3754 i = tx_ring->next_to_use;
3755 while (count--) {
3756 tx_buffer_info = &tx_ring->tx_buffer_info[i];
3757 tx_desc = IXGBE_TX_DESC_ADV(*tx_ring, i);
3758 tx_desc->read.buffer_addr = cpu_to_le64(tx_buffer_info->dma);
3759 tx_desc->read.cmd_type_len =
Peter P Waskiewiczb4617242008-09-11 20:04:46 -07003760 cpu_to_le32(cmd_type_len | tx_buffer_info->length);
Auke Kok9a799d72007-09-15 14:07:45 -07003761 tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
Auke Kok9a799d72007-09-15 14:07:45 -07003762 i++;
3763 if (i == tx_ring->count)
3764 i = 0;
3765 }
3766
3767 tx_desc->read.cmd_type_len |= cpu_to_le32(txd_cmd);
3768
3769 /*
3770 * Force memory writes to complete before letting h/w
3771 * know there are new descriptors to fetch. (Only
3772 * applicable for weak-ordered memory model archs,
3773 * such as IA-64).
3774 */
3775 wmb();
3776
3777 tx_ring->next_to_use = i;
3778 writel(i, adapter->hw.hw_addr + tx_ring->tail);
3779}
3780
Ayyappan Veeraiyane092be62008-02-01 15:58:49 -08003781static int __ixgbe_maybe_stop_tx(struct net_device *netdev,
Peter P Waskiewiczb4617242008-09-11 20:04:46 -07003782 struct ixgbe_ring *tx_ring, int size)
Ayyappan Veeraiyane092be62008-02-01 15:58:49 -08003783{
3784 struct ixgbe_adapter *adapter = netdev_priv(netdev);
3785
Ayyappan Veeraiyan30eba972008-03-03 15:03:52 -08003786 netif_stop_subqueue(netdev, tx_ring->queue_index);
Ayyappan Veeraiyane092be62008-02-01 15:58:49 -08003787 /* Herbert's original patch had:
3788 * smp_mb__after_netif_stop_queue();
3789 * but since that doesn't exist yet, just open code it. */
3790 smp_mb();
3791
3792 /* We need to check again in a case another CPU has just
3793 * made room available. */
3794 if (likely(IXGBE_DESC_UNUSED(tx_ring) < size))
3795 return -EBUSY;
3796
3797 /* A reprieve! - use start_queue because it doesn't call schedule */
Jesse Brandeburgaf721662008-09-11 19:54:23 -07003798 netif_start_subqueue(netdev, tx_ring->queue_index);
Ayyappan Veeraiyane092be62008-02-01 15:58:49 -08003799 ++adapter->restart_queue;
3800 return 0;
3801}
3802
3803static int ixgbe_maybe_stop_tx(struct net_device *netdev,
Peter P Waskiewiczb4617242008-09-11 20:04:46 -07003804 struct ixgbe_ring *tx_ring, int size)
Ayyappan Veeraiyane092be62008-02-01 15:58:49 -08003805{
3806 if (likely(IXGBE_DESC_UNUSED(tx_ring) >= size))
3807 return 0;
3808 return __ixgbe_maybe_stop_tx(netdev, tx_ring, size);
3809}
3810
Auke Kok9a799d72007-09-15 14:07:45 -07003811static int ixgbe_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
3812{
3813 struct ixgbe_adapter *adapter = netdev_priv(netdev);
3814 struct ixgbe_ring *tx_ring;
Auke Kok9a799d72007-09-15 14:07:45 -07003815 unsigned int first;
3816 unsigned int tx_flags = 0;
Ayyappan Veeraiyan30eba972008-03-03 15:03:52 -08003817 u8 hdr_len = 0;
3818 int r_idx = 0, tso;
Auke Kok9a799d72007-09-15 14:07:45 -07003819 int count = 0;
3820 unsigned int f;
Jesse Brandeburg9f8cdf42008-09-11 20:03:35 -07003821
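	/* pick the Tx ring from the skb's queue mapping; the mask keeps the
	 * index in range and is a clean modulo only when num_tx_queues is a
	 * power of two */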
Ayyappan Veeraiyan30eba972008-03-03 15:03:52 -08003822 r_idx = (adapter->num_tx_queues - 1) & skb->queue_mapping;
Ayyappan Veeraiyan30eba972008-03-03 15:03:52 -08003823 tx_ring = &adapter->tx_ring[r_idx];
Auke Kok9a799d72007-09-15 14:07:45 -07003824
Jesse Brandeburg9f8cdf42008-09-11 20:03:35 -07003825 if (adapter->vlgrp && vlan_tx_tag_present(skb)) {
3826 tx_flags |= vlan_tx_tag_get(skb);
Alexander Duyck2f90b862008-11-20 20:52:10 -08003827 if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
3828 tx_flags &= ~IXGBE_TX_FLAGS_VLAN_PRIO_MASK;
3829 tx_flags |= (skb->queue_mapping << 13);
3830 }
3831 tx_flags <<= IXGBE_TX_FLAGS_VLAN_SHIFT;
3832 tx_flags |= IXGBE_TX_FLAGS_VLAN;
3833 } else if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
3834 tx_flags |= (skb->queue_mapping << 13);
Jesse Brandeburg9f8cdf42008-09-11 20:03:35 -07003835 tx_flags <<= IXGBE_TX_FLAGS_VLAN_SHIFT;
3836 tx_flags |= IXGBE_TX_FLAGS_VLAN;
Auke Kok9a799d72007-09-15 14:07:45 -07003837 }
Jesse Brandeburg9f8cdf42008-09-11 20:03:35 -07003838 /* three things can cause us to need a context descriptor */
3839 if (skb_is_gso(skb) ||
3840 (skb->ip_summed == CHECKSUM_PARTIAL) ||
3841 (tx_flags & IXGBE_TX_FLAGS_VLAN))
Auke Kok9a799d72007-09-15 14:07:45 -07003842 count++;
3843
Jesse Brandeburg9f8cdf42008-09-11 20:03:35 -07003844 count += TXD_USE_COUNT(skb_headlen(skb));
3845 for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
Auke Kok9a799d72007-09-15 14:07:45 -07003846 count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);
3847
Ayyappan Veeraiyane092be62008-02-01 15:58:49 -08003848 if (ixgbe_maybe_stop_tx(netdev, tx_ring, count)) {
Auke Kok9a799d72007-09-15 14:07:45 -07003849 adapter->tx_busy++;
Auke Kok9a799d72007-09-15 14:07:45 -07003850 return NETDEV_TX_BUSY;
3851 }
Auke Kok9a799d72007-09-15 14:07:45 -07003852
        if (skb->protocol == htons(ETH_P_IP))
                tx_flags |= IXGBE_TX_FLAGS_IPV4;
        first = tx_ring->next_to_use;
        tso = ixgbe_tso(adapter, tx_ring, skb, tx_flags, &hdr_len);
        if (tso < 0) {
                dev_kfree_skb_any(skb);
                return NETDEV_TX_OK;
        }

        if (tso)
                tx_flags |= IXGBE_TX_FLAGS_TSO;
        else if (ixgbe_tx_csum(adapter, tx_ring, skb, tx_flags) &&
                 (skb->ip_summed == CHECKSUM_PARTIAL))
                tx_flags |= IXGBE_TX_FLAGS_CSUM;

        ixgbe_tx_queue(adapter, tx_ring, tx_flags,
                       ixgbe_tx_map(adapter, tx_ring, skb, first),
                       skb->len, hdr_len);

        netdev->trans_start = jiffies;

        ixgbe_maybe_stop_tx(netdev, tx_ring, DESC_NEEDED);

        return NETDEV_TX_OK;
}

/**
 * ixgbe_get_stats - Get System Network Statistics
 * @netdev: network interface device structure
 *
 * Returns the address of the device statistics structure.
 * The statistics are actually updated from the timer callback.
 **/
static struct net_device_stats *ixgbe_get_stats(struct net_device *netdev)
{
        struct ixgbe_adapter *adapter = netdev_priv(netdev);

        /* only return the current stats */
        return &adapter->net_stats;
}

/**
 * ixgbe_set_mac - Change the Ethernet Address of the NIC
 * @netdev: network interface device structure
 * @p: pointer to an address structure
 *
 * Returns 0 on success, negative on failure
 **/
static int ixgbe_set_mac(struct net_device *netdev, void *p)
{
        struct ixgbe_adapter *adapter = netdev_priv(netdev);
        struct ixgbe_hw *hw = &adapter->hw;
        struct sockaddr *addr = p;

        if (!is_valid_ether_addr(addr->sa_data))
                return -EADDRNOTAVAIL;

        memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
        memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len);

        hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);

        return 0;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
/*
 * Polling 'interrupt' - used by things like netconsole to send skbs
 * without having to re-enable interrupts.  It's not called while
 * the interrupt routine is executing.
 */
static void ixgbe_netpoll(struct net_device *netdev)
{
        struct ixgbe_adapter *adapter = netdev_priv(netdev);

        disable_irq(adapter->pdev->irq);
        adapter->flags |= IXGBE_FLAG_IN_NETPOLL;
        ixgbe_intr(adapter->pdev->irq, netdev);
        adapter->flags &= ~IXGBE_FLAG_IN_NETPOLL;
        enable_irq(adapter->pdev->irq);
}
#endif

/**
 * ixgbe_link_config - set up initial link with default speed and duplex
 * @hw: pointer to private hardware struct
 *
 * Returns 0 on success, negative on failure
 **/
static int ixgbe_link_config(struct ixgbe_hw *hw)
{
        u32 autoneg = IXGBE_LINK_SPEED_10GB_FULL;

        /* must always autoneg for both 1G and 10G link */
        hw->mac.autoneg = true;

        if ((hw->mac.type == ixgbe_mac_82598EB) &&
            (hw->phy.media_type == ixgbe_media_type_copper))
                autoneg = IXGBE_LINK_SPEED_82598_AUTONEG;

        return hw->mac.ops.setup_link_speed(hw, autoneg, true, true);
}

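/* net_device callback table; attached to the netdev in ixgbe_probe() */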
static const struct net_device_ops ixgbe_netdev_ops = {
        .ndo_open               = ixgbe_open,
        .ndo_stop               = ixgbe_close,
        .ndo_start_xmit         = ixgbe_xmit_frame,
        .ndo_get_stats          = ixgbe_get_stats,
        .ndo_set_multicast_list = ixgbe_set_rx_mode,
        .ndo_validate_addr      = eth_validate_addr,
        .ndo_set_mac_address    = ixgbe_set_mac,
        .ndo_change_mtu         = ixgbe_change_mtu,
        .ndo_tx_timeout         = ixgbe_tx_timeout,
        .ndo_vlan_rx_register   = ixgbe_vlan_rx_register,
        .ndo_vlan_rx_add_vid    = ixgbe_vlan_rx_add_vid,
        .ndo_vlan_rx_kill_vid   = ixgbe_vlan_rx_kill_vid,
#ifdef CONFIG_NET_POLL_CONTROLLER
        .ndo_poll_controller    = ixgbe_netpoll,
#endif
};

/**
 * ixgbe_probe - Device Initialization Routine
 * @pdev: PCI device information struct
 * @ent: entry in ixgbe_pci_tbl
 *
 * Returns 0 on success, negative on failure
 *
 * ixgbe_probe initializes an adapter identified by a pci_dev structure.
 * The OS initialization, configuring of the adapter private structure,
 * and a hardware reset occur.
 **/
static int __devinit ixgbe_probe(struct pci_dev *pdev,
                                 const struct pci_device_id *ent)
{
        struct net_device *netdev;
        struct ixgbe_adapter *adapter = NULL;
        struct ixgbe_hw *hw;
        const struct ixgbe_info *ii = ixgbe_info_tbl[ent->driver_data];
        static int cards_found;
        int i, err, pci_using_dac;
        u16 link_status, link_speed, link_width;
        u32 part_num, eec;

        err = pci_enable_device(pdev);
        if (err)
                return err;

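        /* prefer 64-bit DMA; fall back to a 32-bit mask if that fails */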
        if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK) &&
            !pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK)) {
                pci_using_dac = 1;
        } else {
                err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
                if (err) {
                        err = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
                        if (err) {
                                dev_err(&pdev->dev, "No usable DMA "
                                        "configuration, aborting\n");
                                goto err_dma;
                        }
                }
                pci_using_dac = 0;
        }

        err = pci_request_regions(pdev, ixgbe_driver_name);
        if (err) {
                dev_err(&pdev->dev, "pci_request_regions failed 0x%x\n", err);
                goto err_pci_reg;
        }

        err = pci_enable_pcie_error_reporting(pdev);
        if (err) {
                dev_err(&pdev->dev, "pci_enable_pcie_error_reporting failed "
                        "0x%x\n", err);
                /* non-fatal, continue */
        }

        pci_set_master(pdev);
        pci_save_state(pdev);

        netdev = alloc_etherdev_mq(sizeof(struct ixgbe_adapter), MAX_TX_QUEUES);
        if (!netdev) {
                err = -ENOMEM;
                goto err_alloc_etherdev;
        }

        SET_NETDEV_DEV(netdev, &pdev->dev);

        pci_set_drvdata(pdev, netdev);
        adapter = netdev_priv(netdev);

        adapter->netdev = netdev;
        adapter->pdev = pdev;
        hw = &adapter->hw;
        hw->back = adapter;
        adapter->msg_enable = (1 << DEFAULT_DEBUG_LEVEL_SHIFT) - 1;

        hw->hw_addr = ioremap(pci_resource_start(pdev, 0),
                              pci_resource_len(pdev, 0));
        if (!hw->hw_addr) {
                err = -EIO;
                goto err_ioremap;
        }

        for (i = 1; i <= 5; i++) {
                if (pci_resource_len(pdev, i) == 0)
                        continue;
        }

        netdev->netdev_ops = &ixgbe_netdev_ops;
        ixgbe_set_ethtool_ops(netdev);
        netdev->watchdog_timeo = 5 * HZ;
        strcpy(netdev->name, pci_name(pdev));

        adapter->bd_number = cards_found;

        /* Setup hw api */
        memcpy(&hw->mac.ops, ii->mac_ops, sizeof(hw->mac.ops));
        hw->mac.type = ii->mac;

        /* EEPROM */
        memcpy(&hw->eeprom.ops, ii->eeprom_ops, sizeof(hw->eeprom.ops));
        eec = IXGBE_READ_REG(hw, IXGBE_EEC);
        /* If EEPROM is valid (bit 8 = 1), use default, otherwise use bit bang */
        if (!(eec & (1 << 8)))
                hw->eeprom.ops.read = &ixgbe_read_eeprom_bit_bang_generic;

        /* PHY */
        memcpy(&hw->phy.ops, ii->phy_ops, sizeof(hw->phy.ops));
        hw->phy.sfp_type = ixgbe_sfp_type_unknown;

        /* set up this timer and work struct before calling get_invariants
         * which might start the timer
         */
        init_timer(&adapter->sfp_timer);
        adapter->sfp_timer.function = &ixgbe_sfp_timer;
        adapter->sfp_timer.data = (unsigned long) adapter;

        INIT_WORK(&adapter->sfp_task, ixgbe_sfp_task);

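        /*
         * get_invariants() identifies the MAC and PHY; on fiber boards a
         * missing SFP+ module is not fatal here - the sfp_timer set up
         * above keeps polling until one is inserted.
         */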
        err = ii->get_invariants(hw);
        if (err == IXGBE_ERR_SFP_NOT_PRESENT) {
                /* start a kernel thread to watch for a module to arrive */
                set_bit(__IXGBE_SFP_MODULE_NOT_FOUND, &adapter->state);
                mod_timer(&adapter->sfp_timer,
                          round_jiffies(jiffies + (2 * HZ)));
                err = 0;
        } else if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
                DPRINTK(PROBE, ERR, "failed to load because an "
                        "unsupported SFP+ module type was detected.\n");
                goto err_hw_init;
        } else if (err) {
                goto err_hw_init;
        }

        /* setup the private structure */
        err = ixgbe_sw_init(adapter);
        if (err)
                goto err_sw_init;

        /* reset_hw fills in the perm_addr as well */
        err = hw->mac.ops.reset_hw(hw);
        if (err) {
                dev_err(&adapter->pdev->dev, "HW Init failed: %d\n", err);
                goto err_sw_init;
        }

        netdev->features = NETIF_F_SG |
                           NETIF_F_IP_CSUM |
                           NETIF_F_HW_VLAN_TX |
                           NETIF_F_HW_VLAN_RX |
                           NETIF_F_HW_VLAN_FILTER;

        netdev->features |= NETIF_F_IPV6_CSUM;
        netdev->features |= NETIF_F_TSO;
        netdev->features |= NETIF_F_TSO6;
        netdev->features |= NETIF_F_LRO;

        netdev->vlan_features |= NETIF_F_TSO;
        netdev->vlan_features |= NETIF_F_TSO6;
        netdev->vlan_features |= NETIF_F_IP_CSUM;
        netdev->vlan_features |= NETIF_F_SG;

        if (adapter->flags & IXGBE_FLAG_DCB_ENABLED)
                adapter->flags &= ~IXGBE_FLAG_RSS_ENABLED;

#ifdef CONFIG_IXGBE_DCB
        netdev->dcbnl_ops = &dcbnl_ops;
#endif

        if (pci_using_dac)
                netdev->features |= NETIF_F_HIGHDMA;

        /* make sure the EEPROM is good */
        if (hw->eeprom.ops.validate_checksum(hw, NULL) < 0) {
                dev_err(&pdev->dev, "The EEPROM Checksum Is Not Valid\n");
                err = -EIO;
                goto err_eeprom;
        }

        memcpy(netdev->dev_addr, hw->mac.perm_addr, netdev->addr_len);
        memcpy(netdev->perm_addr, hw->mac.perm_addr, netdev->addr_len);

        if (ixgbe_validate_mac_addr(netdev->perm_addr)) {
                dev_err(&pdev->dev, "invalid MAC address\n");
                err = -EIO;
                goto err_eeprom;
        }

        init_timer(&adapter->watchdog_timer);
        adapter->watchdog_timer.function = &ixgbe_watchdog;
        adapter->watchdog_timer.data = (unsigned long)adapter;

        INIT_WORK(&adapter->reset_task, ixgbe_reset_task);
        INIT_WORK(&adapter->watchdog_task, ixgbe_watchdog_task);

        err = ixgbe_init_interrupt_scheme(adapter);
        if (err)
                goto err_sw_init;

        /* print bus type/speed/width info */
        pci_read_config_word(pdev, IXGBE_PCI_LINK_STATUS, &link_status);
        link_speed = link_status & IXGBE_PCI_LINK_SPEED;
        link_width = link_status & IXGBE_PCI_LINK_WIDTH;
        dev_info(&pdev->dev, "(PCI Express:%s:%s) %pM\n",
                 ((link_speed == IXGBE_PCI_LINK_SPEED_5000) ? "5.0Gb/s" :
                  (link_speed == IXGBE_PCI_LINK_SPEED_2500) ? "2.5Gb/s" :
                  "Unknown"),
                 ((link_width == IXGBE_PCI_LINK_WIDTH_8) ? "Width x8" :
                  (link_width == IXGBE_PCI_LINK_WIDTH_4) ? "Width x4" :
                  (link_width == IXGBE_PCI_LINK_WIDTH_2) ? "Width x2" :
                  (link_width == IXGBE_PCI_LINK_WIDTH_1) ? "Width x1" :
                  "Unknown"),
                 netdev->dev_addr);
        ixgbe_read_pba_num_generic(hw, &part_num);
        dev_info(&pdev->dev, "MAC: %d, PHY: %d, PBA No: %06x-%03x\n",
                 hw->mac.type, hw->phy.type,
                 (part_num >> 8), (part_num & 0xff));

        if (link_width <= IXGBE_PCI_LINK_WIDTH_4) {
                dev_warn(&pdev->dev, "PCI-Express bandwidth available for "
                         "this card is not sufficient for optimal "
                         "performance.\n");
                dev_warn(&pdev->dev, "For optimal performance a x8 "
                         "PCI-Express slot is required.\n");
        }

        /* reset the hardware with the new settings */
        hw->mac.ops.start_hw(hw);

        /* link_config depends on start_hw being called at least once */
        err = ixgbe_link_config(hw);
        if (err) {
                dev_err(&pdev->dev, "setup_link_speed FAILED %d\n", err);
                goto err_register;
        }

        netif_carrier_off(netdev);
        netif_tx_stop_all_queues(netdev);

        ixgbe_napi_add_all(adapter);

        strcpy(netdev->name, "eth%d");
        err = register_netdev(netdev);
        if (err)
                goto err_register;

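        /*
         * Direct Cache Access: if a DCA provider is present, register so
         * descriptor writes can be steered into the cache of the CPU
         * that will process them.
         */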
#ifdef CONFIG_IXGBE_DCA
        if (dca_add_requester(&pdev->dev) == 0) {
                adapter->flags |= IXGBE_FLAG_DCA_ENABLED;
                /* always use CB2 mode, difference is masked
                 * in the CB driver */
                IXGBE_WRITE_REG(hw, IXGBE_DCA_CTRL, 2);
                ixgbe_setup_dca(adapter);
        }
#endif

        dev_info(&pdev->dev, "Intel(R) 10 Gigabit Network Connection\n");
        cards_found++;
        return 0;

err_register:
        ixgbe_release_hw_control(adapter);
err_hw_init:
err_sw_init:
        ixgbe_reset_interrupt_capability(adapter);
err_eeprom:
        clear_bit(__IXGBE_SFP_MODULE_NOT_FOUND, &adapter->state);
        del_timer_sync(&adapter->sfp_timer);
        cancel_work_sync(&adapter->sfp_task);
        iounmap(hw->hw_addr);
err_ioremap:
        free_netdev(netdev);
err_alloc_etherdev:
        pci_release_regions(pdev);
err_pci_reg:
err_dma:
        pci_disable_device(pdev);
        return err;
}

/**
 * ixgbe_remove - Device Removal Routine
 * @pdev: PCI device information struct
 *
 * ixgbe_remove is called by the PCI subsystem to alert the driver
 * that it should release a PCI device.  This could be caused by a
 * Hot-Plug event, or because the driver is going to be removed from
 * memory.
 **/
static void __devexit ixgbe_remove(struct pci_dev *pdev)
{
        struct net_device *netdev = pci_get_drvdata(pdev);
        struct ixgbe_adapter *adapter = netdev_priv(netdev);
        int err;

        set_bit(__IXGBE_DOWN, &adapter->state);
        /* clear the module not found bit to make sure the worker won't
         * reschedule
         */
        clear_bit(__IXGBE_SFP_MODULE_NOT_FOUND, &adapter->state);
        del_timer_sync(&adapter->watchdog_timer);

        del_timer_sync(&adapter->sfp_timer);
        cancel_work_sync(&adapter->watchdog_task);
        cancel_work_sync(&adapter->sfp_task);
        flush_scheduled_work();

#ifdef CONFIG_IXGBE_DCA
        if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) {
                adapter->flags &= ~IXGBE_FLAG_DCA_ENABLED;
                dca_remove_requester(&pdev->dev);
                IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL, 1);
        }

#endif
        if (netdev->reg_state == NETREG_REGISTERED)
                unregister_netdev(netdev);

        ixgbe_reset_interrupt_capability(adapter);

        ixgbe_release_hw_control(adapter);

        iounmap(adapter->hw.hw_addr);
        pci_release_regions(pdev);

        DPRINTK(PROBE, INFO, "complete\n");
        ixgbe_napi_del_all(adapter);
        kfree(adapter->tx_ring);
        kfree(adapter->rx_ring);

        free_netdev(netdev);

        err = pci_disable_pcie_error_reporting(pdev);
        if (err)
                dev_err(&pdev->dev,
                        "pci_disable_pcie_error_reporting failed 0x%x\n", err);

        pci_disable_device(pdev);
}

/**
 * ixgbe_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t ixgbe_io_error_detected(struct pci_dev *pdev,
                                                pci_channel_state_t state)
{
        struct net_device *netdev = pci_get_drvdata(pdev);
        struct ixgbe_adapter *adapter = netdev_priv(netdev);

        netif_device_detach(netdev);

        if (netif_running(netdev))
                ixgbe_down(adapter);
        pci_disable_device(pdev);

        /* Request a slot reset. */
        return PCI_ERS_RESULT_NEED_RESET;
}

/**
 * ixgbe_io_slot_reset - called after the pci bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.
 */
static pci_ers_result_t ixgbe_io_slot_reset(struct pci_dev *pdev)
{
        struct net_device *netdev = pci_get_drvdata(pdev);
        struct ixgbe_adapter *adapter = netdev_priv(netdev);
        pci_ers_result_t result;
        int err;

        if (pci_enable_device(pdev)) {
                DPRINTK(PROBE, ERR,
                        "Cannot re-enable PCI device after reset.\n");
                result = PCI_ERS_RESULT_DISCONNECT;
        } else {
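                /* device is back: restore config space and disable wake
                 * sources before resetting the MAC */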
                pci_set_master(pdev);
                pci_restore_state(pdev);

                pci_enable_wake(pdev, PCI_D3hot, 0);
                pci_enable_wake(pdev, PCI_D3cold, 0);

                ixgbe_reset(adapter);

                result = PCI_ERS_RESULT_RECOVERED;
        }

        err = pci_cleanup_aer_uncorrect_error_status(pdev);
        if (err) {
                dev_err(&pdev->dev,
                        "pci_cleanup_aer_uncorrect_error_status failed 0x%0x\n", err);
                /* non-fatal, continue */
        }

        return result;
}

/**
 * ixgbe_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * it's OK to resume normal operation.
 */
static void ixgbe_io_resume(struct pci_dev *pdev)
{
        struct net_device *netdev = pci_get_drvdata(pdev);
        struct ixgbe_adapter *adapter = netdev_priv(netdev);

        if (netif_running(netdev)) {
                if (ixgbe_up(adapter)) {
                        DPRINTK(PROBE, INFO, "ixgbe_up failed after reset\n");
                        return;
                }
        }

        netif_device_attach(netdev);
}

static struct pci_error_handlers ixgbe_err_handler = {
        .error_detected = ixgbe_io_error_detected,
        .slot_reset     = ixgbe_io_slot_reset,
        .resume         = ixgbe_io_resume,
};

static struct pci_driver ixgbe_driver = {
        .name     = ixgbe_driver_name,
        .id_table = ixgbe_pci_tbl,
        .probe    = ixgbe_probe,
        .remove   = __devexit_p(ixgbe_remove),
#ifdef CONFIG_PM
        .suspend  = ixgbe_suspend,
        .resume   = ixgbe_resume,
#endif
        .shutdown = ixgbe_shutdown,
        .err_handler = &ixgbe_err_handler
};

/**
 * ixgbe_init_module - Driver Registration Routine
 *
 * ixgbe_init_module is the first routine called when the driver is
 * loaded. All it does is register with the PCI subsystem.
 **/
static int __init ixgbe_init_module(void)
{
        int ret;
        printk(KERN_INFO "%s: %s - version %s\n", ixgbe_driver_name,
               ixgbe_driver_string, ixgbe_driver_version);

        printk(KERN_INFO "%s: %s\n", ixgbe_driver_name, ixgbe_copyright);

#ifdef CONFIG_IXGBE_DCA
        dca_register_notify(&dca_notifier);
#endif

        ret = pci_register_driver(&ixgbe_driver);
        return ret;
}

module_init(ixgbe_init_module);

/**
 * ixgbe_exit_module - Driver Exit Cleanup Routine
 *
 * ixgbe_exit_module is called just before the driver is removed
 * from memory.
 **/
static void __exit ixgbe_exit_module(void)
{
#ifdef CONFIG_IXGBE_DCA
        dca_unregister_notify(&dca_notifier);
#endif
        pci_unregister_driver(&ixgbe_driver);
}

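/*
 * DCA notifier callback: forward provider add/remove events to every
 * ixgbe device bound to this driver.
 */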
#ifdef CONFIG_IXGBE_DCA
static int ixgbe_notify_dca(struct notifier_block *nb, unsigned long event,
                            void *p)
{
        int ret_val;

        ret_val = driver_for_each_device(&ixgbe_driver.driver, NULL, &event,
                                         __ixgbe_notify_dca);

        return ret_val ? NOTIFY_BAD : NOTIFY_DONE;
}
#endif /* CONFIG_IXGBE_DCA */

module_exit(ixgbe_exit_module);

/* ixgbe_main.c */