/******************************************************************************
* This software may be used and distributed according to the terms of
* the GNU General Public License (GPL), incorporated herein by reference.
* Drivers based on or derived from this code fall under the GPL and must
* retain the authorship, copyright and license notice. This file is not
* a complete program and may only be used when the entire operating
* system is licensed under the GPL.
* See the file COPYING in this distribution for more information.
*
* vxge-main.c: Driver for Exar Corp's X3100 Series 10GbE PCIe I/O
*              Virtualized Server Adapter.
* Copyright(c) 2002-2010 Exar Corp.
*
* The module loadable parameters that are supported by the driver and a brief
* explanation of all the variables:
* vlan_tag_strip:
*	Strip VLAN Tag enable/disable. Instructs the device to remove
*	the VLAN tag from all received tagged frames that are not
*	replicated at the internal L2 switch.
*	0 - Do not strip the VLAN tag.
*	1 - Strip the VLAN tag.
*
* addr_learn_en:
*	Enable learning the MAC address of the guest OS interface in
*	a virtualization environment.
*	0 - DISABLE
*	1 - ENABLE
*
* max_config_port:
*	Maximum number of ports to be supported.
*	MIN - 1 and MAX - 2
*
* max_config_vpath:
*	Maximum number of virtual paths (VPATHs) configured for each
*	device function.
*	MIN - 1 and MAX - 17
*
* max_config_dev:
*	Maximum number of device functions to be enabled.
*	MIN - 1 and MAX - 17
*
******************************************************************************/
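/*
 * For illustration only, a hypothetical load of the module with a few of the
 * parameters above set explicitly (the values here are examples, not
 * recommended defaults):
 *
 *	modprobe vxge vlan_tag_strip=1 addr_learn_en=0 max_config_vpath=4
 */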

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/if_vlan.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/tcp.h>
#include <net/ip.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/firmware.h>
#include <linux/net_tstamp.h>
#include <linux/prefetch.h>
#include "vxge-main.h"
#include "vxge-reg.h"

MODULE_LICENSE("Dual BSD/GPL");
MODULE_DESCRIPTION("Neterion's X3100 Series 10GbE PCIe I/O "
	"Virtualized Server Adapter");

static DEFINE_PCI_DEVICE_TABLE(vxge_id_table) = {
	{PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_TITAN_WIN, PCI_ANY_ID,
	PCI_ANY_ID},
	{PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_TITAN_UNI, PCI_ANY_ID,
	PCI_ANY_ID},
	{0}
};

MODULE_DEVICE_TABLE(pci, vxge_id_table);

VXGE_MODULE_PARAM_INT(vlan_tag_strip, VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_ENABLE);
VXGE_MODULE_PARAM_INT(addr_learn_en, VXGE_HW_MAC_ADDR_LEARN_DEFAULT);
VXGE_MODULE_PARAM_INT(max_config_port, VXGE_MAX_CONFIG_PORT);
VXGE_MODULE_PARAM_INT(max_config_vpath, VXGE_USE_DEFAULT);
VXGE_MODULE_PARAM_INT(max_mac_vpath, VXGE_MAX_MAC_ADDR_COUNT);
VXGE_MODULE_PARAM_INT(max_config_dev, VXGE_MAX_CONFIG_DEV);

static u16 vpath_selector[VXGE_HW_MAX_VIRTUAL_PATHS] =
	{0, 1, 3, 3, 7, 7, 7, 7, 15, 15, 15, 15, 15, 15, 15, 15, 31};
static unsigned int bw_percentage[VXGE_HW_MAX_VIRTUAL_PATHS] =
	{[0 ...(VXGE_HW_MAX_VIRTUAL_PATHS - 1)] = 0xFF};
module_param_array(bw_percentage, uint, NULL, 0);

static struct vxge_drv_config *driver_config;

static inline int is_vxge_card_up(struct vxgedev *vdev)
{
	return test_bit(__VXGE_STATE_CARD_UP, &vdev->state);
}

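/*
 * Drain completed TxDs on a fifo in batches of NR_SKB_COMPLETED. The fifo
 * queue lock is only tried, never spun on, so completion work is skipped
 * when the transmit path currently owns the queue; freed skbs are collected
 * first and released outside the locked region.
 */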
static inline void VXGE_COMPLETE_VPATH_TX(struct vxge_fifo *fifo)
{
	struct sk_buff **skb_ptr = NULL;
	struct sk_buff **temp;
#define NR_SKB_COMPLETED 128
	struct sk_buff *completed[NR_SKB_COMPLETED];
	int more;

	do {
		more = 0;
		skb_ptr = completed;

		if (__netif_tx_trylock(fifo->txq)) {
			vxge_hw_vpath_poll_tx(fifo->handle, &skb_ptr,
					NR_SKB_COMPLETED, &more);
			__netif_tx_unlock(fifo->txq);
		}

		/* free SKBs */
		for (temp = completed; temp != skb_ptr; temp++)
			dev_kfree_skb_irq(*temp);
	} while (more);
}

static inline void VXGE_COMPLETE_ALL_TX(struct vxgedev *vdev)
{
	int i;

	/* Complete all transmits */
	for (i = 0; i < vdev->no_of_vpath; i++)
		VXGE_COMPLETE_VPATH_TX(&vdev->vpaths[i].fifo);
}

static inline void VXGE_COMPLETE_ALL_RX(struct vxgedev *vdev)
{
	int i;
	struct vxge_ring *ring;

	/* Complete all receives */
	for (i = 0; i < vdev->no_of_vpath; i++) {
		ring = &vdev->vpaths[i].ring;
		vxge_hw_vpath_poll_rx(ring->handle);
	}
}

/*
 * vxge_callback_link_up
 *
 * This function is called during interrupt context to notify link up state
 * change.
 */
static void vxge_callback_link_up(struct __vxge_hw_device *hldev)
{
	struct net_device *dev = hldev->ndev;
	struct vxgedev *vdev = netdev_priv(dev);

	vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d",
		vdev->ndev->name, __func__, __LINE__);
	netdev_notice(vdev->ndev, "Link Up\n");
	vdev->stats.link_up++;

	netif_carrier_on(vdev->ndev);
	netif_tx_wake_all_queues(vdev->ndev);

	vxge_debug_entryexit(VXGE_TRACE,
		"%s: %s:%d Exiting...", vdev->ndev->name, __func__, __LINE__);
}

/*
 * vxge_callback_link_down
 *
 * This function is called during interrupt context to notify link down state
 * change.
 */
static void vxge_callback_link_down(struct __vxge_hw_device *hldev)
{
	struct net_device *dev = hldev->ndev;
	struct vxgedev *vdev = netdev_priv(dev);

	vxge_debug_entryexit(VXGE_TRACE,
		"%s: %s:%d", vdev->ndev->name, __func__, __LINE__);
	netdev_notice(vdev->ndev, "Link Down\n");

	vdev->stats.link_down++;
	netif_carrier_off(vdev->ndev);
	netif_tx_stop_all_queues(vdev->ndev);

	vxge_debug_entryexit(VXGE_TRACE,
		"%s: %s:%d Exiting...", vdev->ndev->name, __func__, __LINE__);
}

/*
 * vxge_rx_alloc
 *
 * Allocate SKB.
 */
static struct sk_buff *
vxge_rx_alloc(void *dtrh, struct vxge_ring *ring, const int skb_size)
{
	struct net_device *dev;
	struct sk_buff *skb;
	struct vxge_rx_priv *rx_priv;

	dev = ring->ndev;
	vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d",
		ring->ndev->name, __func__, __LINE__);

	rx_priv = vxge_hw_ring_rxd_private_get(dtrh);

	/* try to allocate skb first. this one may fail */
	skb = netdev_alloc_skb(dev, skb_size +
		VXGE_HW_HEADER_ETHERNET_II_802_3_ALIGN);
	if (skb == NULL) {
		vxge_debug_mem(VXGE_ERR,
			"%s: out of memory to allocate SKB", dev->name);
		ring->stats.skb_alloc_fail++;
		return NULL;
	}

	vxge_debug_mem(VXGE_TRACE,
		"%s: %s:%d Skb : 0x%p", ring->ndev->name,
		__func__, __LINE__, skb);

	skb_reserve(skb, VXGE_HW_HEADER_ETHERNET_II_802_3_ALIGN);

	rx_priv->skb = skb;
	rx_priv->skb_data = NULL;
	rx_priv->data_size = skb_size;
	vxge_debug_entryexit(VXGE_TRACE,
		"%s: %s:%d Exiting...", ring->ndev->name, __func__, __LINE__);

	return skb;
}

/*
 * vxge_rx_map
 *
 * Map the skb data area of a freshly allocated RxD for DMA and record the
 * mapping in the descriptor's private area.
 */
static int vxge_rx_map(void *dtrh, struct vxge_ring *ring)
{
	struct vxge_rx_priv *rx_priv;
	dma_addr_t dma_addr;

	vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d",
		ring->ndev->name, __func__, __LINE__);
	rx_priv = vxge_hw_ring_rxd_private_get(dtrh);

	rx_priv->skb_data = rx_priv->skb->data;
	dma_addr = pci_map_single(ring->pdev, rx_priv->skb_data,
				rx_priv->data_size, PCI_DMA_FROMDEVICE);

	if (unlikely(pci_dma_mapping_error(ring->pdev, dma_addr))) {
		ring->stats.pci_map_fail++;
		return -EIO;
	}
	vxge_debug_mem(VXGE_TRACE,
		"%s: %s:%d 1 buffer mode dma_addr = 0x%llx",
		ring->ndev->name, __func__, __LINE__,
		(unsigned long long)dma_addr);
	vxge_hw_ring_rxd_1b_set(dtrh, dma_addr, rx_priv->data_size);

	rx_priv->data_dma = dma_addr;
	vxge_debug_entryexit(VXGE_TRACE,
		"%s: %s:%d Exiting...", ring->ndev->name, __func__, __LINE__);

	return 0;
}

/*
 * vxge_rx_initial_replenish
 * Allocation of RxD as an initial replenish procedure.
 */
static enum vxge_hw_status
vxge_rx_initial_replenish(void *dtrh, void *userdata)
{
	struct vxge_ring *ring = (struct vxge_ring *)userdata;
	struct vxge_rx_priv *rx_priv;

	vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d",
		ring->ndev->name, __func__, __LINE__);
	if (vxge_rx_alloc(dtrh, ring,
			VXGE_LL_MAX_FRAME_SIZE(ring->ndev)) == NULL)
		return VXGE_HW_FAIL;

	if (vxge_rx_map(dtrh, ring)) {
		rx_priv = vxge_hw_ring_rxd_private_get(dtrh);
		dev_kfree_skb(rx_priv->skb);

		return VXGE_HW_FAIL;
	}
	vxge_debug_entryexit(VXGE_TRACE,
		"%s: %s:%d Exiting...", ring->ndev->name, __func__, __LINE__);

	return VXGE_HW_OK;
}

static inline void
vxge_rx_complete(struct vxge_ring *ring, struct sk_buff *skb, u16 vlan,
		 int pkt_length, struct vxge_hw_ring_rxd_info *ext_info)
{

	vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d",
		ring->ndev->name, __func__, __LINE__);
	skb_record_rx_queue(skb, ring->driver_id);
	skb->protocol = eth_type_trans(skb, ring->ndev);

	u64_stats_update_begin(&ring->stats.syncp);
	ring->stats.rx_frms++;
	ring->stats.rx_bytes += pkt_length;

	if (skb->pkt_type == PACKET_MULTICAST)
		ring->stats.rx_mcast++;
	u64_stats_update_end(&ring->stats.syncp);

	vxge_debug_rx(VXGE_TRACE,
		"%s: %s:%d skb protocol = %d",
		ring->ndev->name, __func__, __LINE__, skb->protocol);

	if (ring->vlgrp && ext_info->vlan &&
		(ring->vlan_tag_strip ==
			VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_ENABLE))
		vlan_gro_receive(ring->napi_p, ring->vlgrp,
				ext_info->vlan, skb);
	else
		napi_gro_receive(ring->napi_p, skb);

	vxge_debug_entryexit(VXGE_TRACE,
		"%s: %s:%d Exiting...", ring->ndev->name, __func__, __LINE__);
}

static inline void vxge_re_pre_post(void *dtr, struct vxge_ring *ring,
				    struct vxge_rx_priv *rx_priv)
{
	pci_dma_sync_single_for_device(ring->pdev,
		rx_priv->data_dma, rx_priv->data_size, PCI_DMA_FROMDEVICE);

	vxge_hw_ring_rxd_1b_set(dtr, rx_priv->data_dma, rx_priv->data_size);
	vxge_hw_ring_rxd_pre_post(ring->handle, dtr);
}

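/*
 * Batched RxD reposting: descriptors are made visible to the device with a
 * write-memory-barrier doorbell only once per VXGE_HW_RXSYNC_FREQ_CNT
 * descriptors; the ones in between are posted without the barrier. This
 * keeps the number of ordered writes on the hot receive path low.
 */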
static inline void vxge_post(int *dtr_cnt, void **first_dtr,
			     void *post_dtr, struct __vxge_hw_ring *ringh)
{
	int dtr_count = *dtr_cnt;
	if ((*dtr_cnt % VXGE_HW_RXSYNC_FREQ_CNT) == 0) {
		if (*first_dtr)
			vxge_hw_ring_rxd_post_post_wmb(ringh, *first_dtr);
		*first_dtr = post_dtr;
	} else
		vxge_hw_ring_rxd_post_post(ringh, post_dtr);
	dtr_count++;
	*dtr_cnt = dtr_count;
}

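/*
 * Receive completion follows a copy-break scheme: frames longer than
 * VXGE_LL_RX_COPY_THRESHOLD are handed up in the original buffer and a new
 * one is allocated for the ring, while short frames are copied into a small
 * fresh skb so the large ring buffer can be recycled in place.
 */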
/*
 * vxge_rx_1b_compl
 *
 * If the interrupt is because of a received frame or if the receive ring
 * contains fresh, as yet unprocessed frames, this function is called.
 */
static enum vxge_hw_status
vxge_rx_1b_compl(struct __vxge_hw_ring *ringh, void *dtr,
		 u8 t_code, void *userdata)
{
	struct vxge_ring *ring = (struct vxge_ring *)userdata;
	struct net_device *dev = ring->ndev;
	unsigned int dma_sizes;
	void *first_dtr = NULL;
	int dtr_cnt = 0;
	int data_size;
	dma_addr_t data_dma;
	int pkt_length;
	struct sk_buff *skb;
	struct vxge_rx_priv *rx_priv;
	struct vxge_hw_ring_rxd_info ext_info;
	vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d",
		ring->ndev->name, __func__, __LINE__);

	do {
		prefetch((char *)dtr + L1_CACHE_BYTES);
		rx_priv = vxge_hw_ring_rxd_private_get(dtr);
		skb = rx_priv->skb;
		data_size = rx_priv->data_size;
		data_dma = rx_priv->data_dma;
		prefetch(rx_priv->skb_data);

		vxge_debug_rx(VXGE_TRACE,
			"%s: %s:%d skb = 0x%p",
			ring->ndev->name, __func__, __LINE__, skb);

		vxge_hw_ring_rxd_1b_get(ringh, dtr, &dma_sizes);
		pkt_length = dma_sizes;

		pkt_length -= ETH_FCS_LEN;

		vxge_debug_rx(VXGE_TRACE,
			"%s: %s:%d Packet Length = %d",
			ring->ndev->name, __func__, __LINE__, pkt_length);

		vxge_hw_ring_rxd_1b_info_get(ringh, dtr, &ext_info);

		/* check skb validity */
		vxge_assert(skb);

		prefetch((char *)skb + L1_CACHE_BYTES);
		if (unlikely(t_code)) {
			if (vxge_hw_ring_handle_tcode(ringh, dtr, t_code) !=
				VXGE_HW_OK) {

				ring->stats.rx_errors++;
				vxge_debug_rx(VXGE_TRACE,
					"%s: %s:%d Rx T_code is %d",
					ring->ndev->name, __func__,
					__LINE__, t_code);

				/* If the t_code is not supported and if the
				 * t_code is other than 0x5 (unparseable packet
				 * such as unknown IPv6 header), Drop it !!!
				 */
				vxge_re_pre_post(dtr, ring, rx_priv);

				vxge_post(&dtr_cnt, &first_dtr, dtr, ringh);
				ring->stats.rx_dropped++;
				continue;
			}
		}

		if (pkt_length > VXGE_LL_RX_COPY_THRESHOLD) {
			if (vxge_rx_alloc(dtr, ring, data_size) != NULL) {
				if (!vxge_rx_map(dtr, ring)) {
					skb_put(skb, pkt_length);

					pci_unmap_single(ring->pdev, data_dma,
						data_size, PCI_DMA_FROMDEVICE);

					vxge_hw_ring_rxd_pre_post(ringh, dtr);
					vxge_post(&dtr_cnt, &first_dtr, dtr,
						ringh);
				} else {
					dev_kfree_skb(rx_priv->skb);
					rx_priv->skb = skb;
					rx_priv->data_size = data_size;
					vxge_re_pre_post(dtr, ring, rx_priv);

					vxge_post(&dtr_cnt, &first_dtr, dtr,
						ringh);
					ring->stats.rx_dropped++;
					break;
				}
			} else {
				vxge_re_pre_post(dtr, ring, rx_priv);

				vxge_post(&dtr_cnt, &first_dtr, dtr, ringh);
				ring->stats.rx_dropped++;
				break;
			}
		} else {
			struct sk_buff *skb_up;

			skb_up = netdev_alloc_skb(dev, pkt_length +
				VXGE_HW_HEADER_ETHERNET_II_802_3_ALIGN);
			if (skb_up != NULL) {
				skb_reserve(skb_up,
					VXGE_HW_HEADER_ETHERNET_II_802_3_ALIGN);

				pci_dma_sync_single_for_cpu(ring->pdev,
					data_dma, data_size,
					PCI_DMA_FROMDEVICE);

				vxge_debug_mem(VXGE_TRACE,
					"%s: %s:%d skb_up = %p",
					ring->ndev->name, __func__,
					__LINE__, skb);
				memcpy(skb_up->data, skb->data, pkt_length);

				vxge_re_pre_post(dtr, ring, rx_priv);

				vxge_post(&dtr_cnt, &first_dtr, dtr,
					ringh);
				/* will netif_rx small SKB instead */
				skb = skb_up;
				skb_put(skb, pkt_length);
			} else {
				vxge_re_pre_post(dtr, ring, rx_priv);

				vxge_post(&dtr_cnt, &first_dtr, dtr, ringh);
				vxge_debug_rx(VXGE_ERR,
					"%s: vxge_rx_1b_compl: out of "
					"memory", dev->name);
				ring->stats.skb_alloc_fail++;
				break;
			}
		}

		if ((ext_info.proto & VXGE_HW_FRAME_PROTO_TCP_OR_UDP) &&
		    !(ext_info.proto & VXGE_HW_FRAME_PROTO_IP_FRAG) &&
		    (dev->features & NETIF_F_RXCSUM) && /* Offload Rx side CSUM */
		    ext_info.l3_cksum == VXGE_HW_L3_CKSUM_OK &&
		    ext_info.l4_cksum == VXGE_HW_L4_CKSUM_OK)
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		else
			skb_checksum_none_assert(skb);


		if (ring->rx_hwts) {
			struct skb_shared_hwtstamps *skb_hwts;
			u32 ns = *(u32 *)(skb->head + pkt_length);

			skb_hwts = skb_hwtstamps(skb);
			skb_hwts->hwtstamp = ns_to_ktime(ns);
			skb_hwts->syststamp.tv64 = 0;
		}

		/* rth_hash_type and rth_it_hit are non-zero regardless of
		 * whether rss is enabled.  Only the rth_value is zero/non-zero
		 * if rss is disabled/enabled, so key off of that.
		 */
		if (ext_info.rth_value)
			skb->rxhash = ext_info.rth_value;

		vxge_rx_complete(ring, skb, ext_info.vlan,
			pkt_length, &ext_info);

		ring->budget--;
		ring->pkts_processed++;
		if (!ring->budget)
			break;

	} while (vxge_hw_ring_rxd_next_completed(ringh, &dtr,
		&t_code) == VXGE_HW_OK);

	if (first_dtr)
		vxge_hw_ring_rxd_post_post_wmb(ringh, first_dtr);

	vxge_debug_entryexit(VXGE_TRACE,
		"%s:%d Exiting...",
		__func__, __LINE__);
	return VXGE_HW_OK;
}

/*
 * vxge_xmit_compl
 *
 * If an interrupt was raised to indicate DMA complete of the Tx packet,
 * this function is called. It identifies the last TxD whose buffer was
 * freed and frees all skbs whose data has already been DMA'ed into the
 * NIC's internal memory.
 */
static enum vxge_hw_status
vxge_xmit_compl(struct __vxge_hw_fifo *fifo_hw, void *dtr,
		enum vxge_hw_fifo_tcode t_code, void *userdata,
		struct sk_buff ***skb_ptr, int nr_skb, int *more)
{
	struct vxge_fifo *fifo = (struct vxge_fifo *)userdata;
	struct sk_buff *skb, **done_skb = *skb_ptr;
	int pkt_cnt = 0;

	vxge_debug_entryexit(VXGE_TRACE,
		"%s:%d Entered....", __func__, __LINE__);

	do {
		int frg_cnt;
		skb_frag_t *frag;
		int i = 0, j;
		struct vxge_tx_priv *txd_priv =
			vxge_hw_fifo_txdl_private_get(dtr);

		skb = txd_priv->skb;
		frg_cnt = skb_shinfo(skb)->nr_frags;
		frag = &skb_shinfo(skb)->frags[0];

		vxge_debug_tx(VXGE_TRACE,
			"%s: %s:%d fifo_hw = %p dtr = %p "
			"tcode = 0x%x", fifo->ndev->name, __func__,
			__LINE__, fifo_hw, dtr, t_code);
		/* check skb validity */
		vxge_assert(skb);
		vxge_debug_tx(VXGE_TRACE,
			"%s: %s:%d skb = %p itxd_priv = %p frg_cnt = %d",
			fifo->ndev->name, __func__, __LINE__,
			skb, txd_priv, frg_cnt);
		if (unlikely(t_code)) {
			fifo->stats.tx_errors++;
			vxge_debug_tx(VXGE_ERR,
				"%s: tx: dtr %p completed due to "
				"error t_code %01x", fifo->ndev->name,
				dtr, t_code);
			vxge_hw_fifo_handle_tcode(fifo_hw, dtr, t_code);
		}

		/* for unfragmented skb */
		pci_unmap_single(fifo->pdev, txd_priv->dma_buffers[i++],
				skb_headlen(skb), PCI_DMA_TODEVICE);

		for (j = 0; j < frg_cnt; j++) {
			pci_unmap_page(fifo->pdev,
					txd_priv->dma_buffers[i++],
					frag->size, PCI_DMA_TODEVICE);
			frag += 1;
		}

		vxge_hw_fifo_txdl_free(fifo_hw, dtr);

		/* Updating the statistics block */
		u64_stats_update_begin(&fifo->stats.syncp);
		fifo->stats.tx_frms++;
		fifo->stats.tx_bytes += skb->len;
		u64_stats_update_end(&fifo->stats.syncp);

		*done_skb++ = skb;

		if (--nr_skb <= 0) {
			*more = 1;
			break;
		}

		pkt_cnt++;
		if (pkt_cnt > fifo->indicate_max_pkts)
			break;

	} while (vxge_hw_fifo_txdl_next_completed(fifo_hw,
		&dtr, &t_code) == VXGE_HW_OK);

	*skb_ptr = done_skb;
	if (netif_tx_queue_stopped(fifo->txq))
		netif_tx_wake_queue(fifo->txq);

	vxge_debug_entryexit(VXGE_TRACE,
		"%s: %s:%d Exiting...",
		fifo->ndev->name, __func__, __LINE__);
	return VXGE_HW_OK;
}

/* select a vpath to transmit the packet */
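/*
 * Port-based steering hashes the TCP source and destination ports into a
 * vpath index using the vpath_selector mask table. For example (hypothetical
 * values), with 4 vpaths the mask is vpath_selector[3] = 3, so source port
 * 1000 and destination port 80 select vpath (1000 + 80) & 3 = 0.
 */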
static u32 vxge_get_vpath_no(struct vxgedev *vdev, struct sk_buff *skb)
{
	u16 queue_len, counter = 0;
	if (skb->protocol == htons(ETH_P_IP)) {
		struct iphdr *ip;
		struct tcphdr *th;

		ip = ip_hdr(skb);

		if (!ip_is_fragment(ip)) {
			th = (struct tcphdr *)(((unsigned char *)ip) +
					ip->ihl*4);

			queue_len = vdev->no_of_vpath;
			counter = (ntohs(th->source) +
				ntohs(th->dest)) &
				vdev->vpath_selector[queue_len - 1];
			if (counter >= queue_len)
				counter = queue_len - 1;
		}
	}
	return counter;
}

static enum vxge_hw_status vxge_search_mac_addr_in_list(
	struct vxge_vpath *vpath, u64 del_mac)
{
	struct list_head *entry, *next;
	list_for_each_safe(entry, next, &vpath->mac_addr_list) {
		if (((struct vxge_mac_addrs *)entry)->macaddr == del_mac)
			return TRUE;
	}
	return FALSE;
}

static int vxge_mac_list_add(struct vxge_vpath *vpath, struct macInfo *mac)
{
	struct vxge_mac_addrs *new_mac_entry;
	u8 *mac_address = NULL;

	if (vpath->mac_addr_cnt >= VXGE_MAX_LEARN_MAC_ADDR_CNT)
		return TRUE;

	new_mac_entry = kzalloc(sizeof(struct vxge_mac_addrs), GFP_ATOMIC);
	if (!new_mac_entry) {
		vxge_debug_mem(VXGE_ERR,
			"%s: memory allocation failed",
			VXGE_DRIVER_NAME);
		return FALSE;
	}

	list_add(&new_mac_entry->item, &vpath->mac_addr_list);

	/* Copy the new mac address to the list */
	mac_address = (u8 *)&new_mac_entry->macaddr;
	memcpy(mac_address, mac->macaddr, ETH_ALEN);

	new_mac_entry->state = mac->state;
	vpath->mac_addr_cnt++;

	if (is_multicast_ether_addr(mac->macaddr))
		vpath->mcast_addr_cnt++;

	return TRUE;
}

/* Add a mac address to DA table */
static enum vxge_hw_status
vxge_add_mac_addr(struct vxgedev *vdev, struct macInfo *mac)
{
	enum vxge_hw_status status = VXGE_HW_OK;
	struct vxge_vpath *vpath;
	enum vxge_hw_vpath_mac_addr_add_mode duplicate_mode;

	if (is_multicast_ether_addr(mac->macaddr))
		duplicate_mode = VXGE_HW_VPATH_MAC_ADDR_ADD_DUPLICATE;
	else
		duplicate_mode = VXGE_HW_VPATH_MAC_ADDR_REPLACE_DUPLICATE;

	vpath = &vdev->vpaths[mac->vpath_no];
	status = vxge_hw_vpath_mac_addr_add(vpath->handle, mac->macaddr,
						mac->macmask, duplicate_mode);
	if (status != VXGE_HW_OK) {
		vxge_debug_init(VXGE_ERR,
			"DA config add entry failed for vpath:%d",
			vpath->device_id);
	} else
		if (FALSE == vxge_mac_list_add(vpath, mac))
			status = -EPERM;

	return status;
}

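/*
 * Learn the source MAC address of a transmitted frame and bind it to a
 * vpath. An unknown address is added to the DA table of the first vpath
 * that still has room; once every DA table is full, the first vpath is put
 * into catch-basin mode and further addresses are only tracked in its
 * software list.
 */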
static int vxge_learn_mac(struct vxgedev *vdev, u8 *mac_header)
{
	struct macInfo mac_info;
	u8 *mac_address = NULL;
	u64 mac_addr = 0, vpath_vector = 0;
	int vpath_idx = 0;
	enum vxge_hw_status status = VXGE_HW_OK;
	struct vxge_vpath *vpath = NULL;
	struct __vxge_hw_device *hldev;

	hldev = pci_get_drvdata(vdev->pdev);

	mac_address = (u8 *)&mac_addr;
	memcpy(mac_address, mac_header, ETH_ALEN);

	/* Is this mac address already in the list? */
	for (vpath_idx = 0; vpath_idx < vdev->no_of_vpath; vpath_idx++) {
		vpath = &vdev->vpaths[vpath_idx];
		if (vxge_search_mac_addr_in_list(vpath, mac_addr))
			return vpath_idx;
	}

	memset(&mac_info, 0, sizeof(struct macInfo));
	memcpy(mac_info.macaddr, mac_header, ETH_ALEN);

	/* Any vpath has room to add mac address to its da table? */
	for (vpath_idx = 0; vpath_idx < vdev->no_of_vpath; vpath_idx++) {
		vpath = &vdev->vpaths[vpath_idx];
		if (vpath->mac_addr_cnt < vpath->max_mac_addr_cnt) {
			/* Add this mac address to this vpath */
			mac_info.vpath_no = vpath_idx;
			mac_info.state = VXGE_LL_MAC_ADDR_IN_DA_TABLE;
			status = vxge_add_mac_addr(vdev, &mac_info);
			if (status != VXGE_HW_OK)
				return -EPERM;
			return vpath_idx;
		}
	}

	mac_info.state = VXGE_LL_MAC_ADDR_IN_LIST;
	vpath_idx = 0;
	mac_info.vpath_no = vpath_idx;
	/* Is the first vpath already selected as catch-basin ? */
	vpath = &vdev->vpaths[vpath_idx];
	if (vpath->mac_addr_cnt > vpath->max_mac_addr_cnt) {
		/* Add this mac address to this vpath */
		if (FALSE == vxge_mac_list_add(vpath, &mac_info))
			return -EPERM;
		return vpath_idx;
	}

	/* Select first vpath as catch-basin */
	vpath_vector = vxge_mBIT(vpath->device_id);
	status = vxge_hw_mgmt_reg_write(vpath->vdev->devh,
				vxge_hw_mgmt_reg_type_mrpcim,
				0,
				(ulong)offsetof(
					struct vxge_hw_mrpcim_reg,
					rts_mgr_cbasin_cfg),
				vpath_vector);
	if (status != VXGE_HW_OK) {
		vxge_debug_tx(VXGE_ERR,
			"%s: Unable to set the vpath-%d in catch-basin mode",
			VXGE_DRIVER_NAME, vpath->device_id);
		return -EPERM;
	}

	if (FALSE == vxge_mac_list_add(vpath, &mac_info))
		return -EPERM;

	return vpath_idx;
}

/**
 * vxge_xmit
 * @skb : the socket buffer containing the Tx data.
 * @dev : device pointer.
 *
 * This function is the Tx entry point of the driver. Neterion NIC supports
 * certain protocol assist features on Tx side, namely CSO, S/G, LSO.
 */
static netdev_tx_t
vxge_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct vxge_fifo *fifo = NULL;
	void *dtr_priv;
	void *dtr = NULL;
	struct vxgedev *vdev = NULL;
	enum vxge_hw_status status;
	int frg_cnt, first_frg_len;
	skb_frag_t *frag;
	int i = 0, j = 0, avail;
	u64 dma_pointer;
	struct vxge_tx_priv *txdl_priv = NULL;
	struct __vxge_hw_fifo *fifo_hw;
	int offload_type;
	int vpath_no = 0;

	vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d",
		dev->name, __func__, __LINE__);

	/* A buffer with no data will be dropped */
	if (unlikely(skb->len <= 0)) {
		vxge_debug_tx(VXGE_ERR,
			"%s: Buffer has no data..", dev->name);
		dev_kfree_skb(skb);
		return NETDEV_TX_OK;
	}

	vdev = netdev_priv(dev);

	if (unlikely(!is_vxge_card_up(vdev))) {
		vxge_debug_tx(VXGE_ERR,
			"%s: vdev not initialized", dev->name);
		dev_kfree_skb(skb);
		return NETDEV_TX_OK;
	}

	if (vdev->config.addr_learn_en) {
		vpath_no = vxge_learn_mac(vdev, skb->data + ETH_ALEN);
		if (vpath_no == -EPERM) {
			vxge_debug_tx(VXGE_ERR,
				"%s: Failed to store the mac address",
				dev->name);
			dev_kfree_skb(skb);
			return NETDEV_TX_OK;
		}
	}

	if (vdev->config.tx_steering_type == TX_MULTIQ_STEERING)
		vpath_no = skb_get_queue_mapping(skb);
	else if (vdev->config.tx_steering_type == TX_PORT_STEERING)
		vpath_no = vxge_get_vpath_no(vdev, skb);

	vxge_debug_tx(VXGE_TRACE, "%s: vpath_no= %d", dev->name, vpath_no);

	if (vpath_no >= vdev->no_of_vpath)
		vpath_no = 0;

	fifo = &vdev->vpaths[vpath_no].fifo;
	fifo_hw = fifo->handle;

	if (netif_tx_queue_stopped(fifo->txq))
		return NETDEV_TX_BUSY;

	avail = vxge_hw_fifo_free_txdl_count_get(fifo_hw);
	if (avail == 0) {
		vxge_debug_tx(VXGE_ERR,
			"%s: No free TXDs available", dev->name);
		fifo->stats.txd_not_free++;
		goto _exit0;
	}

	/* Last TXD? Stop tx queue to avoid dropping packets. TX
	 * completion will resume the queue.
	 */
	if (avail == 1)
		netif_tx_stop_queue(fifo->txq);

	status = vxge_hw_fifo_txdl_reserve(fifo_hw, &dtr, &dtr_priv);
	if (unlikely(status != VXGE_HW_OK)) {
		vxge_debug_tx(VXGE_ERR,
			"%s: Out of descriptors.", dev->name);
		fifo->stats.txd_out_of_desc++;
		goto _exit0;
	}

	vxge_debug_tx(VXGE_TRACE,
		"%s: %s:%d fifo_hw = %p dtr = %p dtr_priv = %p",
		dev->name, __func__, __LINE__,
		fifo_hw, dtr, dtr_priv);

	if (vlan_tx_tag_present(skb)) {
		u16 vlan_tag = vlan_tx_tag_get(skb);
		vxge_hw_fifo_txdl_vlan_set(dtr, vlan_tag);
	}

	first_frg_len = skb_headlen(skb);

	dma_pointer = pci_map_single(fifo->pdev, skb->data, first_frg_len,
				PCI_DMA_TODEVICE);

	if (unlikely(pci_dma_mapping_error(fifo->pdev, dma_pointer))) {
		vxge_hw_fifo_txdl_free(fifo_hw, dtr);
		fifo->stats.pci_map_fail++;
		goto _exit0;
	}

	txdl_priv = vxge_hw_fifo_txdl_private_get(dtr);
	txdl_priv->skb = skb;
	txdl_priv->dma_buffers[j] = dma_pointer;

	frg_cnt = skb_shinfo(skb)->nr_frags;
	vxge_debug_tx(VXGE_TRACE,
		"%s: %s:%d skb = %p txdl_priv = %p "
		"frag_cnt = %d dma_pointer = 0x%llx", dev->name,
		__func__, __LINE__, skb, txdl_priv,
		frg_cnt, (unsigned long long)dma_pointer);

	vxge_hw_fifo_txdl_buffer_set(fifo_hw, dtr, j++, dma_pointer,
		first_frg_len);

	frag = &skb_shinfo(skb)->frags[0];
	for (i = 0; i < frg_cnt; i++) {
		/* ignore 0 length fragment */
		if (!frag->size)
			continue;

		dma_pointer = (u64) pci_map_page(fifo->pdev, frag->page,
				frag->page_offset, frag->size,
				PCI_DMA_TODEVICE);

		if (unlikely(pci_dma_mapping_error(fifo->pdev, dma_pointer)))
			goto _exit2;
		vxge_debug_tx(VXGE_TRACE,
			"%s: %s:%d frag = %d dma_pointer = 0x%llx",
			dev->name, __func__, __LINE__, i,
			(unsigned long long)dma_pointer);

		txdl_priv->dma_buffers[j] = dma_pointer;
		vxge_hw_fifo_txdl_buffer_set(fifo_hw, dtr, j++, dma_pointer,
					frag->size);
		frag += 1;
	}

	offload_type = vxge_offload_type(skb);

	if (offload_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) {
		int mss = vxge_tcp_mss(skb);
		if (mss) {
			vxge_debug_tx(VXGE_TRACE, "%s: %s:%d mss = %d",
				dev->name, __func__, __LINE__, mss);
			vxge_hw_fifo_txdl_mss_set(dtr, mss);
		} else {
			vxge_assert(skb->len <=
				dev->mtu + VXGE_HW_MAC_HEADER_MAX_SIZE);
			vxge_assert(0);
			goto _exit1;
		}
	}

	if (skb->ip_summed == CHECKSUM_PARTIAL)
		vxge_hw_fifo_txdl_cksum_set_bits(dtr,
					VXGE_HW_FIFO_TXD_TX_CKO_IPV4_EN |
					VXGE_HW_FIFO_TXD_TX_CKO_TCP_EN |
					VXGE_HW_FIFO_TXD_TX_CKO_UDP_EN);

	vxge_hw_fifo_txdl_post(fifo_hw, dtr);

	vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d Exiting...",
		dev->name, __func__, __LINE__);
	return NETDEV_TX_OK;

_exit2:
	vxge_debug_tx(VXGE_TRACE, "%s: pci_map_page failed", dev->name);
_exit1:
	j = 0;
	frag = &skb_shinfo(skb)->frags[0];

	pci_unmap_single(fifo->pdev, txdl_priv->dma_buffers[j++],
			skb_headlen(skb), PCI_DMA_TODEVICE);

	for (; j < i; j++) {
		pci_unmap_page(fifo->pdev, txdl_priv->dma_buffers[j],
			frag->size, PCI_DMA_TODEVICE);
		frag += 1;
	}

	vxge_hw_fifo_txdl_free(fifo_hw, dtr);
_exit0:
	netif_tx_stop_queue(fifo->txq);
	dev_kfree_skb(skb);

	return NETDEV_TX_OK;
}

/*
 * vxge_rx_term
 *
 * Function will be called by hw function to abort all outstanding receive
 * descriptors.
 */
static void
vxge_rx_term(void *dtrh, enum vxge_hw_rxd_state state, void *userdata)
{
	struct vxge_ring *ring = (struct vxge_ring *)userdata;
	struct vxge_rx_priv *rx_priv =
		vxge_hw_ring_rxd_private_get(dtrh);

	vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d",
		ring->ndev->name, __func__, __LINE__);
	if (state != VXGE_HW_RXD_STATE_POSTED)
		return;

	pci_unmap_single(ring->pdev, rx_priv->data_dma,
		rx_priv->data_size, PCI_DMA_FROMDEVICE);

	dev_kfree_skb(rx_priv->skb);
	rx_priv->skb_data = NULL;

	vxge_debug_entryexit(VXGE_TRACE,
		"%s: %s:%d Exiting...",
		ring->ndev->name, __func__, __LINE__);
}

/*
 * vxge_tx_term
 *
 * Function will be called to abort all outstanding tx descriptors
 */
static void
vxge_tx_term(void *dtrh, enum vxge_hw_txdl_state state, void *userdata)
{
	struct vxge_fifo *fifo = (struct vxge_fifo *)userdata;
	skb_frag_t *frag;
	int i = 0, j, frg_cnt;
	struct vxge_tx_priv *txd_priv = vxge_hw_fifo_txdl_private_get(dtrh);
	struct sk_buff *skb = txd_priv->skb;

	vxge_debug_entryexit(VXGE_TRACE, "%s:%d", __func__, __LINE__);

	if (state != VXGE_HW_TXDL_STATE_POSTED)
		return;

	/* check skb validity */
	vxge_assert(skb);
	frg_cnt = skb_shinfo(skb)->nr_frags;
	frag = &skb_shinfo(skb)->frags[0];

	/* for unfragmented skb */
	pci_unmap_single(fifo->pdev, txd_priv->dma_buffers[i++],
		skb_headlen(skb), PCI_DMA_TODEVICE);

	for (j = 0; j < frg_cnt; j++) {
		pci_unmap_page(fifo->pdev, txd_priv->dma_buffers[i++],
			       frag->size, PCI_DMA_TODEVICE);
		frag += 1;
	}

	dev_kfree_skb(skb);

	vxge_debug_entryexit(VXGE_TRACE,
		"%s:%d Exiting...", __func__, __LINE__);
}

static int vxge_mac_list_del(struct vxge_vpath *vpath, struct macInfo *mac)
{
	struct list_head *entry, *next;
	u64 del_mac = 0;
	u8 *mac_address = (u8 *) (&del_mac);

	/* Copy the mac address to delete from the list */
	memcpy(mac_address, mac->macaddr, ETH_ALEN);

	list_for_each_safe(entry, next, &vpath->mac_addr_list) {
		if (((struct vxge_mac_addrs *)entry)->macaddr == del_mac) {
			list_del(entry);
			kfree((struct vxge_mac_addrs *)entry);
			vpath->mac_addr_cnt--;

			if (is_multicast_ether_addr(mac->macaddr))
				vpath->mcast_addr_cnt--;
			return TRUE;
		}
	}

	return FALSE;
}

/* delete a mac address from DA table */
static enum vxge_hw_status
vxge_del_mac_addr(struct vxgedev *vdev, struct macInfo *mac)
{
	enum vxge_hw_status status = VXGE_HW_OK;
	struct vxge_vpath *vpath;

	vpath = &vdev->vpaths[mac->vpath_no];
	status = vxge_hw_vpath_mac_addr_delete(vpath->handle, mac->macaddr,
						mac->macmask);
	if (status != VXGE_HW_OK) {
		vxge_debug_init(VXGE_ERR,
			"DA config delete entry failed for vpath:%d",
			vpath->device_id);
	} else
		vxge_mac_list_del(vpath, mac);
	return status;
}

/**
 * vxge_set_multicast
 * @dev: pointer to the device structure
 *
 * Entry point for multicast address enable/disable
 * This function is a driver entry point which gets called by the kernel
 * whenever multicast addresses must be enabled/disabled. This also gets
 * called to set/reset promiscuous mode. Depending on the device flags, we
 * determine whether multicast addressing must be enabled, or whether
 * promiscuous mode is to be disabled, etc.
 */
static void vxge_set_multicast(struct net_device *dev)
{
	struct netdev_hw_addr *ha;
	struct vxgedev *vdev;
	int i, mcast_cnt = 0;
	struct __vxge_hw_device *hldev;
	struct vxge_vpath *vpath;
	enum vxge_hw_status status = VXGE_HW_OK;
	struct macInfo mac_info;
	int vpath_idx = 0;
	struct vxge_mac_addrs *mac_entry;
	struct list_head *list_head;
	struct list_head *entry, *next;
	u8 *mac_address = NULL;

	vxge_debug_entryexit(VXGE_TRACE,
		"%s:%d", __func__, __LINE__);

	vdev = netdev_priv(dev);
	hldev = (struct __vxge_hw_device *)vdev->devh;

	if (unlikely(!is_vxge_card_up(vdev)))
		return;

	if ((dev->flags & IFF_ALLMULTI) && (!vdev->all_multi_flg)) {
		for (i = 0; i < vdev->no_of_vpath; i++) {
			vpath = &vdev->vpaths[i];
			vxge_assert(vpath->is_open);
			status = vxge_hw_vpath_mcast_enable(vpath->handle);
			if (status != VXGE_HW_OK)
				vxge_debug_init(VXGE_ERR, "failed to enable "
						"multicast, status %d", status);
			vdev->all_multi_flg = 1;
		}
	} else if (!(dev->flags & IFF_ALLMULTI) && (vdev->all_multi_flg)) {
		for (i = 0; i < vdev->no_of_vpath; i++) {
			vpath = &vdev->vpaths[i];
			vxge_assert(vpath->is_open);
			status = vxge_hw_vpath_mcast_disable(vpath->handle);
			if (status != VXGE_HW_OK)
				vxge_debug_init(VXGE_ERR, "failed to disable "
						"multicast, status %d", status);
			vdev->all_multi_flg = 0;
		}
	}

	if (!vdev->config.addr_learn_en) {
		for (i = 0; i < vdev->no_of_vpath; i++) {
			vpath = &vdev->vpaths[i];
			vxge_assert(vpath->is_open);

			if (dev->flags & IFF_PROMISC)
				status = vxge_hw_vpath_promisc_enable(
					vpath->handle);
			else
				status = vxge_hw_vpath_promisc_disable(
					vpath->handle);
			if (status != VXGE_HW_OK)
				vxge_debug_init(VXGE_ERR, "failed to %s promisc"
					", status %d", dev->flags&IFF_PROMISC ?
					"enable" : "disable", status);
		}
	}

	memset(&mac_info, 0, sizeof(struct macInfo));
	/* Update individual M_CAST address list */
	if ((!vdev->all_multi_flg) && netdev_mc_count(dev)) {
		mcast_cnt = vdev->vpaths[0].mcast_addr_cnt;
		list_head = &vdev->vpaths[0].mac_addr_list;
		if ((netdev_mc_count(dev) +
			(vdev->vpaths[0].mac_addr_cnt - mcast_cnt)) >
				vdev->vpaths[0].max_mac_addr_cnt)
			goto _set_all_mcast;

		/* Delete previous MC's */
		for (i = 0; i < mcast_cnt; i++) {
			list_for_each_safe(entry, next, list_head) {
				mac_entry = (struct vxge_mac_addrs *)entry;
				/* Copy the mac address to delete */
				mac_address = (u8 *)&mac_entry->macaddr;
				memcpy(mac_info.macaddr, mac_address, ETH_ALEN);

				if (is_multicast_ether_addr(mac_info.macaddr)) {
					for (vpath_idx = 0; vpath_idx <
						vdev->no_of_vpath;
						vpath_idx++) {
						mac_info.vpath_no = vpath_idx;
						status = vxge_del_mac_addr(
								vdev,
								&mac_info);
					}
				}
			}
		}

		/* Add new ones */
		netdev_for_each_mc_addr(ha, dev) {
			memcpy(mac_info.macaddr, ha->addr, ETH_ALEN);
			for (vpath_idx = 0; vpath_idx < vdev->no_of_vpath;
					vpath_idx++) {
				mac_info.vpath_no = vpath_idx;
				mac_info.state = VXGE_LL_MAC_ADDR_IN_DA_TABLE;
				status = vxge_add_mac_addr(vdev, &mac_info);
				if (status != VXGE_HW_OK) {
					vxge_debug_init(VXGE_ERR,
1225 "%s:%d Setting individual"
1226 "multicast address failed",
						__func__, __LINE__);
					goto _set_all_mcast;
				}
			}
		}

		return;
_set_all_mcast:
		mcast_cnt = vdev->vpaths[0].mcast_addr_cnt;
		/* Delete previous MC's */
		for (i = 0; i < mcast_cnt; i++) {
			list_for_each_safe(entry, next, list_head) {
				mac_entry = (struct vxge_mac_addrs *)entry;
				/* Copy the mac address to delete */
				mac_address = (u8 *)&mac_entry->macaddr;
				memcpy(mac_info.macaddr, mac_address, ETH_ALEN);

				if (is_multicast_ether_addr(mac_info.macaddr))
					break;
			}

			for (vpath_idx = 0; vpath_idx < vdev->no_of_vpath;
					vpath_idx++) {
				mac_info.vpath_no = vpath_idx;
				status = vxge_del_mac_addr(vdev, &mac_info);
			}
		}

		/* Enable all multicast */
		for (i = 0; i < vdev->no_of_vpath; i++) {
			vpath = &vdev->vpaths[i];
			vxge_assert(vpath->is_open);

			status = vxge_hw_vpath_mcast_enable(vpath->handle);
			if (status != VXGE_HW_OK) {
				vxge_debug_init(VXGE_ERR,
					"%s:%d Enabling all multicasts failed",
					__func__, __LINE__);
			}
			vdev->all_multi_flg = 1;
		}
		dev->flags |= IFF_ALLMULTI;
	}

	vxge_debug_entryexit(VXGE_TRACE,
		"%s:%d Exiting...", __func__, __LINE__);
}

/**
 * vxge_set_mac_addr
 * @dev: pointer to the device structure
 *
 * Update entry "0" (default MAC addr)
 */
static int vxge_set_mac_addr(struct net_device *dev, void *p)
{
	struct sockaddr *addr = p;
	struct vxgedev *vdev;
	struct __vxge_hw_device *hldev;
	enum vxge_hw_status status = VXGE_HW_OK;
	struct macInfo mac_info_new, mac_info_old;
	int vpath_idx = 0;

	vxge_debug_entryexit(VXGE_TRACE, "%s:%d", __func__, __LINE__);

	vdev = netdev_priv(dev);
	hldev = vdev->devh;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EINVAL;

	memset(&mac_info_new, 0, sizeof(struct macInfo));
	memset(&mac_info_old, 0, sizeof(struct macInfo));

	vxge_debug_entryexit(VXGE_TRACE, "%s:%d Exiting...",
		__func__, __LINE__);

	/* Get the old address */
	memcpy(mac_info_old.macaddr, dev->dev_addr, dev->addr_len);

	/* Copy the new address */
	memcpy(mac_info_new.macaddr, addr->sa_data, dev->addr_len);

	/* First delete the old mac address from all the vpaths
	as we can't specify the index while adding new mac address */
	for (vpath_idx = 0; vpath_idx < vdev->no_of_vpath; vpath_idx++) {
		struct vxge_vpath *vpath = &vdev->vpaths[vpath_idx];
		if (!vpath->is_open) {
			/* This can happen when this interface is added/removed
			to the bonding interface. Delete this station address
			from the linked list */
			vxge_mac_list_del(vpath, &mac_info_old);

			/* Add this new address to the linked list
			for later restoring */
			vxge_mac_list_add(vpath, &mac_info_new);

			continue;
		}
		/* Delete the station address */
		mac_info_old.vpath_no = vpath_idx;
		status = vxge_del_mac_addr(vdev, &mac_info_old);
	}

	if (unlikely(!is_vxge_card_up(vdev))) {
		memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
		return VXGE_HW_OK;
	}

	/* Set this mac address to all the vpaths */
	for (vpath_idx = 0; vpath_idx < vdev->no_of_vpath; vpath_idx++) {
		mac_info_new.vpath_no = vpath_idx;
		mac_info_new.state = VXGE_LL_MAC_ADDR_IN_DA_TABLE;
		status = vxge_add_mac_addr(vdev, &mac_info_new);
		if (status != VXGE_HW_OK)
			return -EINVAL;
	}

	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);

	return status;
}

/*
 * vxge_vpath_intr_enable
 * @vdev: pointer to vdev
 * @vp_id: vpath for which to enable the interrupts
 *
 * Enables the interrupts for the vpath
 */
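/*
 * As the code below suggests, in MSI-X mode each vpath appears to own a
 * block of VXGE_HW_VPATH_MSIX_ACTIVE vectors: the first two carry the TIM
 * tx/rx events (tim_msix_id maps them to vectors 0 and 1 of the block),
 * and a single per-function vector, offset VXGE_ALARM_MSIX_ID from the
 * first vpath's block, signals alarms.
 */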
static void vxge_vpath_intr_enable(struct vxgedev *vdev, int vp_id)
{
	struct vxge_vpath *vpath = &vdev->vpaths[vp_id];
	int msix_id = 0;
	int tim_msix_id[4] = {0, 1, 0, 0};
	int alarm_msix_id = VXGE_ALARM_MSIX_ID;

	vxge_hw_vpath_intr_enable(vpath->handle);

	if (vdev->config.intr_type == INTA)
		vxge_hw_vpath_inta_unmask_tx_rx(vpath->handle);
	else {
		vxge_hw_vpath_msix_set(vpath->handle, tim_msix_id,
			alarm_msix_id);

		msix_id = vpath->device_id * VXGE_HW_VPATH_MSIX_ACTIVE;
		vxge_hw_vpath_msix_unmask(vpath->handle, msix_id);
		vxge_hw_vpath_msix_unmask(vpath->handle, msix_id + 1);

		/* enable the alarm vector */
		msix_id = (vpath->handle->vpath->hldev->first_vp_id *
			VXGE_HW_VPATH_MSIX_ACTIVE) + alarm_msix_id;
		vxge_hw_vpath_msix_unmask(vpath->handle, msix_id);
	}
}

/*
 * vxge_vpath_intr_disable
 * @vdev: pointer to vdev
 * @vp_id: vpath for which to disable the interrupts
 *
 * Disables the interrupts for the vpath
 */
static void vxge_vpath_intr_disable(struct vxgedev *vdev, int vp_id)
{
	struct vxge_vpath *vpath = &vdev->vpaths[vp_id];
	struct __vxge_hw_device *hldev;
	int msix_id;

	hldev = pci_get_drvdata(vdev->pdev);

	vxge_hw_vpath_wait_receive_idle(hldev, vpath->device_id);

	vxge_hw_vpath_intr_disable(vpath->handle);

	if (vdev->config.intr_type == INTA)
		vxge_hw_vpath_inta_mask_tx_rx(vpath->handle);
	else {
		msix_id = vpath->device_id * VXGE_HW_VPATH_MSIX_ACTIVE;
		vxge_hw_vpath_msix_mask(vpath->handle, msix_id);
		vxge_hw_vpath_msix_mask(vpath->handle, msix_id + 1);

		/* disable the alarm vector */
		msix_id = (vpath->handle->vpath->hldev->first_vp_id *
			VXGE_HW_VPATH_MSIX_ACTIVE) + VXGE_ALARM_MSIX_ID;
		vxge_hw_vpath_msix_mask(vpath->handle, msix_id);
	}
}

/* Search for a mac address in the DA table */
static enum vxge_hw_status
vxge_search_mac_addr_in_da_table(struct vxge_vpath *vpath, struct macInfo *mac)
{
	enum vxge_hw_status status = VXGE_HW_OK;
	unsigned char macmask[ETH_ALEN];
	unsigned char macaddr[ETH_ALEN];

	status = vxge_hw_vpath_mac_addr_get(vpath->handle,
				macaddr, macmask);
	if (status != VXGE_HW_OK) {
		vxge_debug_init(VXGE_ERR,
			"DA config list entry failed for vpath:%d",
			vpath->device_id);
		return status;
	}

	while (memcmp(mac->macaddr, macaddr, ETH_ALEN)) {
		status = vxge_hw_vpath_mac_addr_get_next(vpath->handle,
				macaddr, macmask);
		if (status != VXGE_HW_OK)
			break;
	}

	return status;
}

/* Store all mac addresses from the list to the DA table */
static enum vxge_hw_status vxge_restore_vpath_mac_addr(struct vxge_vpath *vpath)
{
	enum vxge_hw_status status = VXGE_HW_OK;
	struct macInfo mac_info;
	u8 *mac_address = NULL;
	struct list_head *entry, *next;

	memset(&mac_info, 0, sizeof(struct macInfo));

	if (vpath->is_open) {
		list_for_each_safe(entry, next, &vpath->mac_addr_list) {
			mac_address =
				(u8 *)&
				((struct vxge_mac_addrs *)entry)->macaddr;
			memcpy(mac_info.macaddr, mac_address, ETH_ALEN);
			((struct vxge_mac_addrs *)entry)->state =
				VXGE_LL_MAC_ADDR_IN_DA_TABLE;
			/* does this mac address already exist in da table? */
			status = vxge_search_mac_addr_in_da_table(vpath,
				&mac_info);
			if (status != VXGE_HW_OK) {
				/* Add this mac address to the DA table */
				status = vxge_hw_vpath_mac_addr_add(
					vpath->handle, mac_info.macaddr,
					mac_info.macmask,
					VXGE_HW_VPATH_MAC_ADDR_ADD_DUPLICATE);
				if (status != VXGE_HW_OK) {
					vxge_debug_init(VXGE_ERR,
						"DA add entry failed for vpath:%d",
						vpath->device_id);
					((struct vxge_mac_addrs *)entry)->state
						= VXGE_LL_MAC_ADDR_IN_LIST;
				}
			}
		}
	}

	return status;
}

/* Store all vlan ids from the list to the vid table */
static enum vxge_hw_status
vxge_restore_vpath_vid_table(struct vxge_vpath *vpath)
{
	enum vxge_hw_status status = VXGE_HW_OK;
	struct vxgedev *vdev = vpath->vdev;
	u16 vid;

	if (vdev->vlgrp && vpath->is_open) {

		for (vid = 0; vid < VLAN_N_VID; vid++) {
			if (!vlan_group_get_device(vdev->vlgrp, vid))
				continue;
			/* Add these vlan to the vid table */
			status = vxge_hw_vpath_vid_add(vpath->handle, vid);
		}
	}

	return status;
}

/*
 * vxge_reset_vpath
 * @vdev: pointer to vdev
 * @vp_id: vpath to reset
 *
 * Resets the vpath
 */
static int vxge_reset_vpath(struct vxgedev *vdev, int vp_id)
{
	enum vxge_hw_status status = VXGE_HW_OK;
	struct vxge_vpath *vpath = &vdev->vpaths[vp_id];
	int ret = 0;

	/* check if device is down already */
	if (unlikely(!is_vxge_card_up(vdev)))
		return 0;

	/* is device reset already scheduled */
	if (test_bit(__VXGE_STATE_RESET_CARD, &vdev->state))
		return 0;

	if (vpath->handle) {
		if (vxge_hw_vpath_reset(vpath->handle) == VXGE_HW_OK) {
			if (is_vxge_card_up(vdev) &&
				vxge_hw_vpath_recover_from_reset(vpath->handle)
					!= VXGE_HW_OK) {
				vxge_debug_init(VXGE_ERR,
					"vxge_hw_vpath_recover_from_reset "
					"failed for vpath:%d", vp_id);
				return status;
			}
		} else {
			vxge_debug_init(VXGE_ERR,
				"vxge_hw_vpath_reset failed for "
				"vpath:%d", vp_id);
			return status;
		}
	} else
		return VXGE_HW_FAIL;

	vxge_restore_vpath_mac_addr(vpath);
	vxge_restore_vpath_vid_table(vpath);

	/* Enable all broadcast */
	vxge_hw_vpath_bcast_enable(vpath->handle);

	/* Enable all multicast */
	if (vdev->all_multi_flg) {
		status = vxge_hw_vpath_mcast_enable(vpath->handle);
		if (status != VXGE_HW_OK)
			vxge_debug_init(VXGE_ERR,
				"%s:%d Enabling multicast failed",
				__func__, __LINE__);
	}

	/* Enable the interrupts */
	vxge_vpath_intr_enable(vdev, vp_id);

	smp_wmb();

	/* Enable the flow of traffic through the vpath */
	vxge_hw_vpath_enable(vpath->handle);

	smp_wmb();
	vxge_hw_vpath_rx_doorbell_init(vpath->handle);
	vpath->ring.last_status = VXGE_HW_OK;

	/* Vpath reset done */
	clear_bit(vp_id, &vdev->vp_reset);

	/* Start the vpath queue */
	if (netif_tx_queue_stopped(vpath->fifo.txq))
		netif_tx_wake_queue(vpath->fifo.txq);

	return ret;
}

Jon Mason16fded72011-01-18 15:02:21 +00001582/* Configure CI */
1583static void vxge_config_ci_for_tti_rti(struct vxgedev *vdev)
1584{
1585 int i = 0;
1586
1587 /* Enable CI for RTI */
1588 if (vdev->config.intr_type == MSI_X) {
1589 for (i = 0; i < vdev->no_of_vpath; i++) {
1590 struct __vxge_hw_ring *hw_ring;
1591
1592 hw_ring = vdev->vpaths[i].ring.handle;
1593 vxge_hw_vpath_dynamic_rti_ci_set(hw_ring);
1594 }
1595 }
1596
1597 /* Enable CI for TTI */
1598 for (i = 0; i < vdev->no_of_vpath; i++) {
1599 struct __vxge_hw_fifo *hw_fifo = vdev->vpaths[i].fifo.handle;
1600 vxge_hw_vpath_tti_ci_set(hw_fifo);
1601 /*
1602 * For INTA (with or without NAPI), set CI ON for only one
1603 * vpath (there is only one free-running timer).
1604 */
1605 if ((vdev->config.intr_type == INTA) && (i == 0))
1606 break;
1607 }
1608
1609 return;
1610}
1611
Ramkrishna Vepa703da5a2009-04-01 18:15:13 +00001612static int do_vxge_reset(struct vxgedev *vdev, int event)
1613{
1614 enum vxge_hw_status status;
1615 int ret = 0, vp_id, i;
1616
1617 vxge_debug_entryexit(VXGE_TRACE, "%s:%d", __func__, __LINE__);
1618
1619 if ((event == VXGE_LL_FULL_RESET) || (event == VXGE_LL_START_RESET)) {
1620 /* check if device is down already */
1621 if (unlikely(!is_vxge_card_up(vdev)))
1622 return 0;
1623
1624 /* is reset already scheduled */
1625 if (test_and_set_bit(__VXGE_STATE_RESET_CARD, &vdev->state))
1626 return 0;
1627 }
1628
1629 if (event == VXGE_LL_FULL_RESET) {
Jon Mason2e41f642010-12-10 14:02:59 +00001630 netif_carrier_off(vdev->ndev);
1631
Ramkrishna Vepa703da5a2009-04-01 18:15:13 +00001632 /* wait for all the vpath reset to complete */
1633 for (vp_id = 0; vp_id < vdev->no_of_vpath; vp_id++) {
1634 while (test_bit(vp_id, &vdev->vp_reset))
1635 msleep(50);
1636 }
1637
Jon Mason2e41f642010-12-10 14:02:59 +00001638 netif_carrier_on(vdev->ndev);
1639
Ramkrishna Vepa703da5a2009-04-01 18:15:13 +00001640 /* if execution mode is set to debug, don't reset the adapter */
1641 if (unlikely(vdev->exec_mode)) {
1642 vxge_debug_init(VXGE_ERR,
1643 "%s: execution mode is debug, returning..",
1644 vdev->ndev->name);
Jon Mason7adf7d12010-07-15 08:47:24 +00001645 clear_bit(__VXGE_STATE_CARD_UP, &vdev->state);
1646 netif_tx_stop_all_queues(vdev->ndev);
1647 return 0;
Ramkrishna Vepa703da5a2009-04-01 18:15:13 +00001648 }
1649 }
1650
1651 if (event == VXGE_LL_FULL_RESET) {
Jon Mason4d2a5b42010-11-11 04:25:54 +00001652 vxge_hw_device_wait_receive_idle(vdev->devh);
Ramkrishna Vepa703da5a2009-04-01 18:15:13 +00001653 vxge_hw_device_intr_disable(vdev->devh);
1654
1655 switch (vdev->cric_err_event) {
1656 case VXGE_HW_EVENT_UNKNOWN:
Jon Masond03848e2010-07-15 08:47:23 +00001657 netif_tx_stop_all_queues(vdev->ndev);
Ramkrishna Vepa703da5a2009-04-01 18:15:13 +00001658 vxge_debug_init(VXGE_ERR,
1659 "fatal: %s: Disabling device due to"
1660 "unknown error",
1661 vdev->ndev->name);
1662 ret = -EPERM;
1663 goto out;
1664 case VXGE_HW_EVENT_RESET_START:
1665 break;
1666 case VXGE_HW_EVENT_RESET_COMPLETE:
1667 case VXGE_HW_EVENT_LINK_DOWN:
1668 case VXGE_HW_EVENT_LINK_UP:
1669 case VXGE_HW_EVENT_ALARM_CLEARED:
1670 case VXGE_HW_EVENT_ECCERR:
1671 case VXGE_HW_EVENT_MRPCIM_ECCERR:
1672 ret = -EPERM;
1673 goto out;
1674 case VXGE_HW_EVENT_FIFO_ERR:
1675 case VXGE_HW_EVENT_VPATH_ERR:
1676 break;
1677 case VXGE_HW_EVENT_CRITICAL_ERR:
Jon Masond03848e2010-07-15 08:47:23 +00001678 netif_tx_stop_all_queues(vdev->ndev);
Ramkrishna Vepa703da5a2009-04-01 18:15:13 +00001679 vxge_debug_init(VXGE_ERR,
1680 "fatal: %s: Disabling device due to"
1681 "serious error",
1682 vdev->ndev->name);
1683 /* SOP or device reset required */
1684 /* This event is not currently used */
1685 ret = -EPERM;
1686 goto out;
1687 case VXGE_HW_EVENT_SERR:
Jon Masond03848e2010-07-15 08:47:23 +00001688 netif_tx_stop_all_queues(vdev->ndev);
Ramkrishna Vepa703da5a2009-04-01 18:15:13 +00001689 vxge_debug_init(VXGE_ERR,
1690 "fatal: %s: Disabling device due to"
1691 "serious error",
1692 vdev->ndev->name);
1693 ret = -EPERM;
1694 goto out;
1695 case VXGE_HW_EVENT_SRPCIM_SERR:
1696 case VXGE_HW_EVENT_MRPCIM_SERR:
1697 ret = -EPERM;
1698 goto out;
1699 case VXGE_HW_EVENT_SLOT_FREEZE:
Jon Masond03848e2010-07-15 08:47:23 +00001700 netif_tx_stop_all_queues(vdev->ndev);
Ramkrishna Vepa703da5a2009-04-01 18:15:13 +00001701 vxge_debug_init(VXGE_ERR,
1702 "fatal: %s: Disabling device due to"
1703 "slot freeze",
1704 vdev->ndev->name);
1705 ret = -EPERM;
1706 goto out;
1707 default:
1708 break;
1709
1710 }
1711 }
1712
1713 if ((event == VXGE_LL_FULL_RESET) || (event == VXGE_LL_START_RESET))
Jon Masond03848e2010-07-15 08:47:23 +00001714 netif_tx_stop_all_queues(vdev->ndev);
Ramkrishna Vepa703da5a2009-04-01 18:15:13 +00001715
1716 if (event == VXGE_LL_FULL_RESET) {
1717 status = vxge_reset_all_vpaths(vdev);
1718 if (status != VXGE_HW_OK) {
1719 vxge_debug_init(VXGE_ERR,
1720 "fatal: %s: can not reset vpaths",
1721 vdev->ndev->name);
1722 ret = -EPERM;
1723 goto out;
1724 }
1725 }
1726
1727 if (event == VXGE_LL_COMPL_RESET) {
1728 for (i = 0; i < vdev->no_of_vpath; i++)
1729 if (vdev->vpaths[i].handle) {
1730 if (vxge_hw_vpath_recover_from_reset(
1731 vdev->vpaths[i].handle)
1732 != VXGE_HW_OK) {
1733 vxge_debug_init(VXGE_ERR,
1734 "vxge_hw_vpath_recover_"
1735 "from_reset failed for vpath: "
1736 "%d", i);
1737 ret = -EPERM;
1738 goto out;
1739 }
1740 } else {
1741 vxge_debug_init(VXGE_ERR,
1742 "vxge_hw_vpath_reset failed for "
1743 "vpath:%d", i);
1744 ret = -EPERM;
1745 goto out;
1746 }
1747 }
1748
1749 if ((event == VXGE_LL_FULL_RESET) || (event == VXGE_LL_COMPL_RESET)) {
1750 /* Reprogram the DA table with populated mac addresses */
1751 for (vp_id = 0; vp_id < vdev->no_of_vpath; vp_id++) {
1752 vxge_restore_vpath_mac_addr(&vdev->vpaths[vp_id]);
1753 vxge_restore_vpath_vid_table(&vdev->vpaths[vp_id]);
1754 }
1755
1756 /* enable vpath interrupts */
1757 for (i = 0; i < vdev->no_of_vpath; i++)
1758 vxge_vpath_intr_enable(vdev, i);
1759
1760 vxge_hw_device_intr_enable(vdev->devh);
1761
1762 smp_wmb();
1763
1764 /* Indicate card up */
1765 set_bit(__VXGE_STATE_CARD_UP, &vdev->state);
1766
1767 /* Get the traffic to flow through the vpaths */
1768 for (i = 0; i < vdev->no_of_vpath; i++) {
1769 vxge_hw_vpath_enable(vdev->vpaths[i].handle);
1770 smp_wmb();
1771 vxge_hw_vpath_rx_doorbell_init(vdev->vpaths[i].handle);
1772 }
1773
Jon Masond03848e2010-07-15 08:47:23 +00001774 netif_tx_wake_all_queues(vdev->ndev);
Ramkrishna Vepa703da5a2009-04-01 18:15:13 +00001775 }
1776
Jon Mason16fded72011-01-18 15:02:21 +00001777 /* configure CI */
1778 vxge_config_ci_for_tti_rti(vdev);
1779
Ramkrishna Vepa703da5a2009-04-01 18:15:13 +00001780out:
1781 vxge_debug_entryexit(VXGE_TRACE,
1782 "%s:%d Exiting...", __func__, __LINE__);
1783
1784 /* Indicate reset done */
1785 if ((event == VXGE_LL_FULL_RESET) || (event == VXGE_LL_COMPL_RESET))
1786 clear_bit(__VXGE_STATE_RESET_CARD, &vdev->state);
1787 return ret;
1788}
1789
1790/*
1791 * vxge_reset
1792 * @vdev: pointer to ll device
1793 *
1794 * driver may reset the chip on events of serr, eccerr, etc
1795 */
Jon Mason2e41f642010-12-10 14:02:59 +00001796static void vxge_reset(struct work_struct *work)
Ramkrishna Vepa703da5a2009-04-01 18:15:13 +00001797{
Jon Mason2e41f642010-12-10 14:02:59 +00001798 struct vxgedev *vdev = container_of(work, struct vxgedev, reset_task);
1799
1800 if (!netif_running(vdev->ndev))
1801 return;
1802
1803 do_vxge_reset(vdev, VXGE_LL_FULL_RESET);
Ramkrishna Vepa703da5a2009-04-01 18:15:13 +00001804}
1805
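/*
 * A sketch of how the handler above is expected to be kicked from error
 * paths (illustrative; it assumes the work item was initialized during
 * device setup with INIT_WORK(&vdev->reset_task, vxge_reset), and
 * fatal_error_detected is a placeholder condition):
 *
 *	if (fatal_error_detected)
 *		schedule_work(&vdev->reset_task);
 */
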
1806/**
1807 * vxge_poll - Receive handler when Receive Polling is used.
1808 * @dev: pointer to the device structure.
1809 * @budget: Number of packets budgeted to be processed in this iteration.
1810 *
1811 * This function comes into the picture only when the receive side is
1812 * handled through polling (NAPI in Linux). It does essentially what the
1813 * normal Rx interrupt handler does in terms of descriptor and packet
1814 * processing, but outside of interrupt context. It also processes at
1815 * most a specified number of packets in one iteration; this value is
1816 * passed down by the kernel as the function argument 'budget'.
1817 */
1818static int vxge_poll_msix(struct napi_struct *napi, int budget)
1819{
Jon Mason16fded72011-01-18 15:02:21 +00001820 struct vxge_ring *ring = container_of(napi, struct vxge_ring, napi);
1821 int pkts_processed;
Ramkrishna Vepa703da5a2009-04-01 18:15:13 +00001822 int budget_org = budget;
Ramkrishna Vepa703da5a2009-04-01 18:15:13 +00001823
Jon Mason16fded72011-01-18 15:02:21 +00001824 ring->budget = budget;
1825 ring->pkts_processed = 0;
Ramkrishna Vepa703da5a2009-04-01 18:15:13 +00001826 vxge_hw_vpath_poll_rx(ring->handle);
Jon Mason16fded72011-01-18 15:02:21 +00001827 pkts_processed = ring->pkts_processed;
Ramkrishna Vepa703da5a2009-04-01 18:15:13 +00001828
1829 if (ring->pkts_processed < budget_org) {
1830 napi_complete(napi);
Jon Mason16fded72011-01-18 15:02:21 +00001831
Ramkrishna Vepa703da5a2009-04-01 18:15:13 +00001832 /* Re enable the Rx interrupts for the vpath */
1833 vxge_hw_channel_msix_unmask(
1834 (struct __vxge_hw_channel *)ring->handle,
1835 ring->rx_vector_no);
Jon Mason16fded72011-01-18 15:02:21 +00001836 mmiowb();
Ramkrishna Vepa703da5a2009-04-01 18:15:13 +00001837 }
1838
Jon Mason16fded72011-01-18 15:02:21 +00001839 /* We copy and return the local variable because, after the MSI-X
1840 * interrupt is unmasked above, the interrupt can fire right away
1841 * and preempt this NAPI thread */
1842 return pkts_processed;
Ramkrishna Vepa703da5a2009-04-01 18:15:13 +00001843}
1844
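/*
 * For reference, a minimal NAPI poll callback follows the same contract
 * as vxge_poll_msix() above: consume at most 'budget' packets, and only
 * when fewer than 'budget' were consumed call napi_complete() and
 * re-enable the device interrupt.  Sketch (the example_* names are
 * placeholders, not driver functions):
 *
 *	static int example_poll(struct napi_struct *napi, int budget)
 *	{
 *		int done = example_clean_rx(napi, budget);
 *
 *		if (done < budget) {
 *			napi_complete(napi);
 *			example_unmask_rx_irq(napi);
 *		}
 *		return done;
 *	}
 */
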
1845static int vxge_poll_inta(struct napi_struct *napi, int budget)
1846{
1847 struct vxgedev *vdev = container_of(napi, struct vxgedev, napi);
1848 int pkts_processed = 0;
1849 int i;
1850 int budget_org = budget;
1851 struct vxge_ring *ring;
1852
Joe Perchesd8ee7072010-11-15 10:13:58 +00001853 struct __vxge_hw_device *hldev = pci_get_drvdata(vdev->pdev);
Ramkrishna Vepa703da5a2009-04-01 18:15:13 +00001854
1855 for (i = 0; i < vdev->no_of_vpath; i++) {
1856 ring = &vdev->vpaths[i].ring;
1857 ring->budget = budget;
Jon Mason16fded72011-01-18 15:02:21 +00001858 ring->pkts_processed = 0;
Ramkrishna Vepa703da5a2009-04-01 18:15:13 +00001859 vxge_hw_vpath_poll_rx(ring->handle);
1860 pkts_processed += ring->pkts_processed;
1861 budget -= ring->pkts_processed;
1862 if (budget <= 0)
1863 break;
1864 }
1865
1866 VXGE_COMPLETE_ALL_TX(vdev);
1867
1868 if (pkts_processed < budget_org) {
1869 napi_complete(napi);
1870 /* Re enable the Rx interrupts for the ring */
1871 vxge_hw_device_unmask_all(hldev);
1872 vxge_hw_device_flush_io(hldev);
1873 }
1874
1875 return pkts_processed;
1876}
1877
1878#ifdef CONFIG_NET_POLL_CONTROLLER
1879/**
1880 * vxge_netpoll - netpoll event handler entry point
1881 * @dev : pointer to the device structure.
1882 * Description:
1883 * This function will be called by the upper layer to check for events on
1884 * the interface in situations where interrupts are disabled. It is used
1885 * for specific in-kernel networking tasks, such as remote consoles and
1886 * kernel debugging over the network (for example, netdump in Red Hat).
1887 */
1888static void vxge_netpoll(struct net_device *dev)
1889{
Jon Mason2c913082010-11-11 04:26:03 +00001890 struct __vxge_hw_device *hldev;
Ramkrishna Vepa703da5a2009-04-01 18:15:13 +00001891 struct vxgedev *vdev;
1892
Joe Perches5f54ceb2010-11-15 11:12:30 +00001893 vdev = netdev_priv(dev);
Joe Perchesd8ee7072010-11-15 10:13:58 +00001894 hldev = pci_get_drvdata(vdev->pdev);
Ramkrishna Vepa703da5a2009-04-01 18:15:13 +00001895
1896 vxge_debug_entryexit(VXGE_TRACE, "%s:%d", __func__, __LINE__);
1897
1898 if (pci_channel_offline(vdev->pdev))
1899 return;
1900
1901 disable_irq(dev->irq);
1902 vxge_hw_device_clear_tx_rx(hldev);
1905 VXGE_COMPLETE_ALL_RX(vdev);
1906 VXGE_COMPLETE_ALL_TX(vdev);
1907
1908 enable_irq(dev->irq);
1909
1910 vxge_debug_entryexit(VXGE_TRACE,
1911 "%s:%d Exiting...", __func__, __LINE__);
Ramkrishna Vepa703da5a2009-04-01 18:15:13 +00001912}
1913#endif
1914
1915/* RTH configuration */
1916static enum vxge_hw_status vxge_rth_configure(struct vxgedev *vdev)
1917{
1918 enum vxge_hw_status status = VXGE_HW_OK;
1919 struct vxge_hw_rth_hash_types hash_types;
1920 u8 itable[256] = {0}; /* indirection table */
1921 u8 mtable[256] = {0}; /* CPU to vpath mapping */
1922 int index;
1923
1924 /*
1925 * Filling
1926 * - itable with bucket numbers
1927 * - mtable with bucket-to-vpath mapping
1928 */
1929 for (index = 0; index < (1 << vdev->config.rth_bkt_sz); index++) {
1930 itable[index] = index;
1931 mtable[index] = index % vdev->no_of_vpath;
1932 }
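
	/* For example, with rth_bkt_sz == 2 (four buckets) and two open
	 * vpaths, this fills itable = {0, 1, 2, 3} and mtable = {0, 1, 0, 1},
	 * i.e. the buckets alternate between the two vpaths. */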
1933
Ramkrishna Vepa703da5a2009-04-01 18:15:13 +00001934 /* set indirection table, bucket-to-vpath mapping */
1935 status = vxge_hw_vpath_rts_rth_itable_set(vdev->vp_handles,
1936 vdev->no_of_vpath,
1937 mtable, itable,
1938 vdev->config.rth_bkt_sz);
1939 if (status != VXGE_HW_OK) {
1940 vxge_debug_init(VXGE_ERR,
1941 "RTH indirection table configuration failed "
1942 "for vpath:%d", vdev->vpaths[0].device_id);
1943 return status;
1944 }
1945
Jon Mason47f01db2010-11-11 04:25:53 +00001946 /* Fill RTH hash types */
1947 hash_types.hash_type_tcpipv4_en = vdev->config.rth_hash_type_tcpipv4;
1948 hash_types.hash_type_ipv4_en = vdev->config.rth_hash_type_ipv4;
1949 hash_types.hash_type_tcpipv6_en = vdev->config.rth_hash_type_tcpipv6;
1950 hash_types.hash_type_ipv6_en = vdev->config.rth_hash_type_ipv6;
1951 hash_types.hash_type_tcpipv6ex_en =
1952 vdev->config.rth_hash_type_tcpipv6ex;
1953 hash_types.hash_type_ipv6ex_en = vdev->config.rth_hash_type_ipv6ex;
1954
Ramkrishna Vepa703da5a2009-04-01 18:15:13 +00001955 /*
Jon Mason47f01db2010-11-11 04:25:53 +00001956 * Because the itable_set() method uses the active_table field
1957 * for the target virtual path the RTH config should be updated
1958 * for all VPATHs. The h/w only uses the lowest numbered VPATH
1959 * when steering frames.
1960 */
Ramkrishna Vepa703da5a2009-04-01 18:15:13 +00001961 for (index = 0; index < vdev->no_of_vpath; index++) {
1962 status = vxge_hw_vpath_rts_rth_set(
1963 vdev->vpaths[index].handle,
1964 vdev->config.rth_algorithm,
1965 &hash_types,
1966 vdev->config.rth_bkt_sz);
Ramkrishna Vepa703da5a2009-04-01 18:15:13 +00001967 if (status != VXGE_HW_OK) {
1968 vxge_debug_init(VXGE_ERR,
1969 "RTH configuration failed for vpath:%d",
1970 vdev->vpaths[index].device_id);
1971 return status;
1972 }
1973 }
1974
1975 return status;
1976}
1977
Ramkrishna Vepa703da5a2009-04-01 18:15:13 +00001978/* reset vpaths */
Jon Mason4d2a5b42010-11-11 04:25:54 +00001979enum vxge_hw_status vxge_reset_all_vpaths(struct vxgedev *vdev)
Ramkrishna Vepa703da5a2009-04-01 18:15:13 +00001980{
Ramkrishna Vepa703da5a2009-04-01 18:15:13 +00001981 enum vxge_hw_status status = VXGE_HW_OK;
Jon Mason7adf7d12010-07-15 08:47:24 +00001982 struct vxge_vpath *vpath;
1983 int i;
Ramkrishna Vepa703da5a2009-04-01 18:15:13 +00001984
Jon Mason7adf7d12010-07-15 08:47:24 +00001985 for (i = 0; i < vdev->no_of_vpath; i++) {
1986 vpath = &vdev->vpaths[i];
1987 if (vpath->handle) {
1988 if (vxge_hw_vpath_reset(vpath->handle) == VXGE_HW_OK) {
Ramkrishna Vepa703da5a2009-04-01 18:15:13 +00001989 if (is_vxge_card_up(vdev) &&
1990 vxge_hw_vpath_recover_from_reset(
Jon Mason7adf7d12010-07-15 08:47:24 +00001991 vpath->handle) != VXGE_HW_OK) {
Ramkrishna Vepa703da5a2009-04-01 18:15:13 +00001992 vxge_debug_init(VXGE_ERR,
1993 "vxge_hw_vpath_recover_"
1994 "from_reset failed for vpath: "
1995 "%d", i);
1996 return status;
1997 }
1998 } else {
1999 vxge_debug_init(VXGE_ERR,
2000 "vxge_hw_vpath_reset failed for "
2001 "vpath:%d", i);
2002 return status;
2003 }
2004 }
Jon Mason7adf7d12010-07-15 08:47:24 +00002005 }
2006
Ramkrishna Vepa703da5a2009-04-01 18:15:13 +00002007 return status;
2008}
2009
2010/* close vpaths */
stephen hemminger42821a52010-10-21 07:50:53 +00002011static void vxge_close_vpaths(struct vxgedev *vdev, int index)
Ramkrishna Vepa703da5a2009-04-01 18:15:13 +00002012{
Jon Mason7adf7d12010-07-15 08:47:24 +00002013 struct vxge_vpath *vpath;
Ramkrishna Vepa703da5a2009-04-01 18:15:13 +00002014 int i;
Jon Mason7adf7d12010-07-15 08:47:24 +00002015
Ramkrishna Vepa703da5a2009-04-01 18:15:13 +00002016 for (i = index; i < vdev->no_of_vpath; i++) {
Jon Mason7adf7d12010-07-15 08:47:24 +00002017 vpath = &vdev->vpaths[i];
2018
2019 if (vpath->handle && vpath->is_open) {
2020 vxge_hw_vpath_close(vpath->handle);
Ramkrishna Vepa703da5a2009-04-01 18:15:13 +00002021 vdev->stats.vpaths_open--;
2022 }
Jon Mason7adf7d12010-07-15 08:47:24 +00002023 vpath->is_open = 0;
2024 vpath->handle = NULL;
Ramkrishna Vepa703da5a2009-04-01 18:15:13 +00002025 }
2026}
2027
2028/* open vpaths */
stephen hemminger42821a52010-10-21 07:50:53 +00002029static int vxge_open_vpaths(struct vxgedev *vdev)
Ramkrishna Vepa703da5a2009-04-01 18:15:13 +00002030{
Ramkrishna Vepa703da5a2009-04-01 18:15:13 +00002031 struct vxge_hw_vpath_attr attr;
Jon Mason7adf7d12010-07-15 08:47:24 +00002032 enum vxge_hw_status status;
2033 struct vxge_vpath *vpath;
2034 u32 vp_id = 0;
2035 int i;
Ramkrishna Vepa703da5a2009-04-01 18:15:13 +00002036
2037 for (i = 0; i < vdev->no_of_vpath; i++) {
Jon Mason7adf7d12010-07-15 08:47:24 +00002038 vpath = &vdev->vpaths[i];
Jon Mason7adf7d12010-07-15 08:47:24 +00002039 vxge_assert(vpath->is_configured);
Jon Masone7935c92010-11-11 04:26:00 +00002040
2041 if (!vdev->titan1) {
2042 struct vxge_hw_vp_config *vcfg;
2043 vcfg = &vdev->devh->config.vp_config[vpath->device_id];
2044
2045 vcfg->rti.urange_a = RTI_T1A_RX_URANGE_A;
2046 vcfg->rti.urange_b = RTI_T1A_RX_URANGE_B;
2047 vcfg->rti.urange_c = RTI_T1A_RX_URANGE_C;
2048 vcfg->tti.uec_a = TTI_T1A_TX_UFC_A;
2049 vcfg->tti.uec_b = TTI_T1A_TX_UFC_B;
2050 vcfg->tti.uec_c = TTI_T1A_TX_UFC_C(vdev->mtu);
2051 vcfg->tti.uec_d = TTI_T1A_TX_UFC_D(vdev->mtu);
2052 vcfg->tti.ltimer_val = VXGE_T1A_TTI_LTIMER_VAL;
2053 vcfg->tti.rtimer_val = VXGE_T1A_TTI_RTIMER_VAL;
2054 }
2055
Jon Mason7adf7d12010-07-15 08:47:24 +00002056 attr.vp_id = vpath->device_id;
Ramkrishna Vepa703da5a2009-04-01 18:15:13 +00002057 attr.fifo_attr.callback = vxge_xmit_compl;
2058 attr.fifo_attr.txdl_term = vxge_tx_term;
2059 attr.fifo_attr.per_txdl_space = sizeof(struct vxge_tx_priv);
Jon Mason7adf7d12010-07-15 08:47:24 +00002060 attr.fifo_attr.userdata = &vpath->fifo;
Ramkrishna Vepa703da5a2009-04-01 18:15:13 +00002061
2062 attr.ring_attr.callback = vxge_rx_1b_compl;
2063 attr.ring_attr.rxd_init = vxge_rx_initial_replenish;
2064 attr.ring_attr.rxd_term = vxge_rx_term;
2065 attr.ring_attr.per_rxd_space = sizeof(struct vxge_rx_priv);
Jon Mason7adf7d12010-07-15 08:47:24 +00002066 attr.ring_attr.userdata = &vpath->ring;
Ramkrishna Vepa703da5a2009-04-01 18:15:13 +00002067
Jon Mason7adf7d12010-07-15 08:47:24 +00002068 vpath->ring.ndev = vdev->ndev;
2069 vpath->ring.pdev = vdev->pdev;
Jon Mason528f7272010-12-10 14:02:56 +00002070
Jon Mason7adf7d12010-07-15 08:47:24 +00002071 status = vxge_hw_vpath_open(vdev->devh, &attr, &vpath->handle);
Ramkrishna Vepa703da5a2009-04-01 18:15:13 +00002072 if (status == VXGE_HW_OK) {
Jon Mason7adf7d12010-07-15 08:47:24 +00002073 vpath->fifo.handle =
Ramkrishna Vepa703da5a2009-04-01 18:15:13 +00002074 (struct __vxge_hw_fifo *)attr.fifo_attr.userdata;
Jon Mason7adf7d12010-07-15 08:47:24 +00002075 vpath->ring.handle =
Ramkrishna Vepa703da5a2009-04-01 18:15:13 +00002076 (struct __vxge_hw_ring *)attr.ring_attr.userdata;
Jon Mason7adf7d12010-07-15 08:47:24 +00002077 vpath->fifo.tx_steering_type =
Ramkrishna Vepa703da5a2009-04-01 18:15:13 +00002078 vdev->config.tx_steering_type;
Jon Mason7adf7d12010-07-15 08:47:24 +00002079 vpath->fifo.ndev = vdev->ndev;
2080 vpath->fifo.pdev = vdev->pdev;
Jon Mason98f45da2010-07-15 08:47:25 +00002081 if (vdev->config.tx_steering_type)
2082 vpath->fifo.txq =
2083 netdev_get_tx_queue(vdev->ndev, i);
2084 else
2085 vpath->fifo.txq =
2086 netdev_get_tx_queue(vdev->ndev, 0);
Jon Mason7adf7d12010-07-15 08:47:24 +00002087 vpath->fifo.indicate_max_pkts =
Ramkrishna Vepa703da5a2009-04-01 18:15:13 +00002088 vdev->config.fifo_indicate_max_pkts;
Jon Mason16fded72011-01-18 15:02:21 +00002089 vpath->fifo.tx_vector_no = 0;
Jon Mason7adf7d12010-07-15 08:47:24 +00002090 vpath->ring.rx_vector_no = 0;
Jon Masonb81b3732010-11-11 04:25:58 +00002091 vpath->ring.rx_hwts = vdev->rx_hwts;
Jon Mason7adf7d12010-07-15 08:47:24 +00002092 vpath->is_open = 1;
2093 vdev->vp_handles[i] = vpath->handle;
Jon Mason7adf7d12010-07-15 08:47:24 +00002094 vpath->ring.vlan_tag_strip = vdev->vlan_tag_strip;
Ramkrishna Vepa703da5a2009-04-01 18:15:13 +00002095 vdev->stats.vpaths_open++;
2096 } else {
2097 vdev->stats.vpath_open_fail++;
Jon Mason528f7272010-12-10 14:02:56 +00002098 vxge_debug_init(VXGE_ERR, "%s: vpath: %d failed to "
2099 "open with status: %d",
2100 vdev->ndev->name, vpath->device_id,
2101 status);
Ramkrishna Vepa703da5a2009-04-01 18:15:13 +00002102 vxge_close_vpaths(vdev, 0);
2103 return -EPERM;
2104 }
2105
Jon Mason7adf7d12010-07-15 08:47:24 +00002106 vp_id = vpath->handle->vpath->vp_id;
Ramkrishna Vepa703da5a2009-04-01 18:15:13 +00002107 vdev->vpaths_deployed |= vxge_mBIT(vp_id);
2108 }
Jon Mason528f7272010-12-10 14:02:56 +00002109
Ramkrishna Vepa703da5a2009-04-01 18:15:13 +00002110 return VXGE_HW_OK;
2111}
2112
Jon Mason16fded72011-01-18 15:02:21 +00002113/**
2114 * adaptive_coalesce_tx_interrupts - Changes the interrupt coalescing
2115 * if the interrupts are not within a range
2116 * @fifo: pointer to transmit fifo structure
2117 * Description: The function changes the boundary and restriction timer
2118 * values depending on the traffic
2119 * Return Value: None
2120 */
2121static void adaptive_coalesce_tx_interrupts(struct vxge_fifo *fifo)
2122{
2123 fifo->interrupt_count++;
2124 if (jiffies > fifo->jiffies + HZ / 100) {
2125 struct __vxge_hw_fifo *hw_fifo = fifo->handle;
2126
2127 fifo->jiffies = jiffies;
2128 if (fifo->interrupt_count > VXGE_T1A_MAX_TX_INTERRUPT_COUNT &&
2129 hw_fifo->rtimer != VXGE_TTI_RTIMER_ADAPT_VAL) {
2130 hw_fifo->rtimer = VXGE_TTI_RTIMER_ADAPT_VAL;
2131 vxge_hw_vpath_dynamic_tti_rtimer_set(hw_fifo);
2132 } else if (hw_fifo->rtimer != 0) {
2133 hw_fifo->rtimer = 0;
2134 vxge_hw_vpath_dynamic_tti_rtimer_set(hw_fifo);
2135 }
2136 fifo->interrupt_count = 0;
2137 }
2138}
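
/*
 * adaptive_coalesce_tx_interrupts() samples the interrupt rate over a
 * HZ/100 (10 ms) window: if more than VXGE_T1A_MAX_TX_INTERRUPT_COUNT
 * interrupts arrived within the window, the restriction timer is raised
 * to VXGE_TTI_RTIMER_ADAPT_VAL to throttle them; once the rate drops,
 * the timer is set back to 0.
 */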
2139
2140/**
2141 * adaptive_coalesce_rx_interrupts - Changes the interrupt coalescing
2142 * if the interrupts are not within a range
2143 * @ring: pointer to receive ring structure
2144 * Description: The function increases or decreases the packet counts within
2145 * the ranges of traffic utilization, if the interrupts due to this ring are
2146 * not within a fixed range.
2147 * Return Value: Nothing
2148 */
2149static void adaptive_coalesce_rx_interrupts(struct vxge_ring *ring)
2150{
2151 ring->interrupt_count++;
2152 if (jiffies > ring->jiffies + HZ / 100) {
2153 struct __vxge_hw_ring *hw_ring = ring->handle;
2154
2155 ring->jiffies = jiffies;
2156 if (ring->interrupt_count > VXGE_T1A_MAX_INTERRUPT_COUNT &&
2157 hw_ring->rtimer != VXGE_RTI_RTIMER_ADAPT_VAL) {
2158 hw_ring->rtimer = VXGE_RTI_RTIMER_ADAPT_VAL;
2159 vxge_hw_vpath_dynamic_rti_rtimer_set(hw_ring);
2160 } else if (hw_ring->rtimer != 0) {
2161 hw_ring->rtimer = 0;
2162 vxge_hw_vpath_dynamic_rti_rtimer_set(hw_ring);
2163 }
2164 ring->interrupt_count = 0;
2165 }
2166}
2167
Ramkrishna Vepa703da5a2009-04-01 18:15:13 +00002168/*
2169 * vxge_isr_napi
2170 * @irq: the irq of the device.
2171 * @dev_id: a void pointer to the hldev structure of the Titan device
2172 * @ptregs: pointer to the registers pushed on the stack.
2173 *
2174 * This function is the ISR handler of the device when napi is enabled. It
2175 * identifies the reason for the interrupt and calls the relevant service
2176 * routines.
2177 */
2178static irqreturn_t vxge_isr_napi(int irq, void *dev_id)
2179{
Ramkrishna Vepa703da5a2009-04-01 18:15:13 +00002180 struct net_device *dev;
Sreenivasa Honnura5d165b2009-07-01 21:16:37 +00002181 struct __vxge_hw_device *hldev;
Ramkrishna Vepa703da5a2009-04-01 18:15:13 +00002182 u64 reason;
2183 enum vxge_hw_status status;
Jon Mason2c913082010-11-11 04:26:03 +00002184 struct vxgedev *vdev = (struct vxgedev *)dev_id;
Ramkrishna Vepa703da5a2009-04-01 18:15:13 +00002185
2186 vxge_debug_intr(VXGE_TRACE, "%s:%d", __func__, __LINE__);
2187
Sreenivasa Honnura5d165b2009-07-01 21:16:37 +00002188 dev = vdev->ndev;
Joe Perchesd8ee7072010-11-15 10:13:58 +00002189 hldev = pci_get_drvdata(vdev->pdev);
Ramkrishna Vepa703da5a2009-04-01 18:15:13 +00002190
2191 if (pci_channel_offline(vdev->pdev))
2192 return IRQ_NONE;
2193
2194 if (unlikely(!is_vxge_card_up(vdev)))
Jon Mason4d2a5b42010-11-11 04:25:54 +00002195 return IRQ_HANDLED;
Ramkrishna Vepa703da5a2009-04-01 18:15:13 +00002196
Jon Mason528f7272010-12-10 14:02:56 +00002197 status = vxge_hw_device_begin_irq(hldev, vdev->exec_mode, &reason);
Ramkrishna Vepa703da5a2009-04-01 18:15:13 +00002198 if (status == VXGE_HW_OK) {
2199 vxge_hw_device_mask_all(hldev);
2200
2201 if (reason &
2202 VXGE_HW_TITAN_GENERAL_INT_STATUS_VPATH_TRAFFIC_INT(
2203 vdev->vpaths_deployed >>
2204 (64 - VXGE_HW_MAX_VIRTUAL_PATHS))) {
2205
2206 vxge_hw_device_clear_tx_rx(hldev);
2207 napi_schedule(&vdev->napi);
2208 vxge_debug_intr(VXGE_TRACE,
2209 "%s:%d Exiting...", __func__, __LINE__);
2210 return IRQ_HANDLED;
2211 } else
2212 vxge_hw_device_unmask_all(hldev);
2213 } else if (unlikely((status == VXGE_HW_ERR_VPATH) ||
2214 (status == VXGE_HW_ERR_CRITICAL) ||
2215 (status == VXGE_HW_ERR_FIFO))) {
2216 vxge_hw_device_mask_all(hldev);
2217 vxge_hw_device_flush_io(hldev);
2218 return IRQ_HANDLED;
2219 } else if (unlikely(status == VXGE_HW_ERR_SLOT_FREEZE))
2220 return IRQ_HANDLED;
2221
2222 vxge_debug_intr(VXGE_TRACE, "%s:%d Exiting...", __func__, __LINE__);
2223 return IRQ_NONE;
2224}
2225
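/*
 * In INTA mode the flow is therefore: on a pending traffic interrupt the
 * ISR above masks all device interrupts and schedules NAPI, and
 * vxge_poll_inta() unmasks them again once the budgeted Rx work is done.
 */
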
2226#ifdef CONFIG_PCI_MSI
2227
Jon Mason16fded72011-01-18 15:02:21 +00002228static irqreturn_t vxge_tx_msix_handle(int irq, void *dev_id)
Ramkrishna Vepa703da5a2009-04-01 18:15:13 +00002229{
2230 struct vxge_fifo *fifo = (struct vxge_fifo *)dev_id;
2231
Jon Mason16fded72011-01-18 15:02:21 +00002232 adaptive_coalesce_tx_interrupts(fifo);
2233
2234 vxge_hw_channel_msix_mask((struct __vxge_hw_channel *)fifo->handle,
2235 fifo->tx_vector_no);
2236
2237 vxge_hw_channel_msix_clear((struct __vxge_hw_channel *)fifo->handle,
2238 fifo->tx_vector_no);
2239
Ramkrishna Vepa703da5a2009-04-01 18:15:13 +00002240 VXGE_COMPLETE_VPATH_TX(fifo);
2241
Jon Mason16fded72011-01-18 15:02:21 +00002242 vxge_hw_channel_msix_unmask((struct __vxge_hw_channel *)fifo->handle,
2243 fifo->tx_vector_no);
2244
2245 mmiowb();
2246
Ramkrishna Vepa703da5a2009-04-01 18:15:13 +00002247 return IRQ_HANDLED;
2248}
2249
Jon Mason16fded72011-01-18 15:02:21 +00002250static irqreturn_t vxge_rx_msix_napi_handle(int irq, void *dev_id)
Ramkrishna Vepa703da5a2009-04-01 18:15:13 +00002251{
2252 struct vxge_ring *ring = (struct vxge_ring *)dev_id;
2253
Jon Mason16fded72011-01-18 15:02:21 +00002254 adaptive_coalesce_rx_interrupts(ring);
2255
Ramkrishna Vepa703da5a2009-04-01 18:15:13 +00002256 vxge_hw_channel_msix_mask((struct __vxge_hw_channel *)ring->handle,
Jon Mason16fded72011-01-18 15:02:21 +00002257 ring->rx_vector_no);
2258
2259 vxge_hw_channel_msix_clear((struct __vxge_hw_channel *)ring->handle,
2260 ring->rx_vector_no);
Ramkrishna Vepa703da5a2009-04-01 18:15:13 +00002261
2262 napi_schedule(&ring->napi);
2263 return IRQ_HANDLED;
2264}
2265
2266static irqreturn_t
2267vxge_alarm_msix_handle(int irq, void *dev_id)
2268{
2269 int i;
2270 enum vxge_hw_status status;
2271 struct vxge_vpath *vpath = (struct vxge_vpath *)dev_id;
2272 struct vxgedev *vdev = vpath->vdev;
Sreenivasa Honnurb59c9452010-03-28 22:11:41 +00002273 int msix_id = (vpath->handle->vpath->vp_id *
2274 VXGE_HW_VPATH_MSIX_ACTIVE) + VXGE_ALARM_MSIX_ID;
Ramkrishna Vepa703da5a2009-04-01 18:15:13 +00002275
2276 for (i = 0; i < vdev->no_of_vpath; i++) {
Lucas De Marchi25985ed2011-03-30 22:57:33 -03002277 /* Reduce the chance of losing alarm interrupts by masking
Jon Mason16fded72011-01-18 15:02:21 +00002278 * the vector. A pending bit will be set if an alarm is
2279 * generated, and on unmask the interrupt will fire.
2280 */
Sreenivasa Honnurb59c9452010-03-28 22:11:41 +00002281 vxge_hw_vpath_msix_mask(vdev->vpaths[i].handle, msix_id);
Jon Mason16fded72011-01-18 15:02:21 +00002282 vxge_hw_vpath_msix_clear(vdev->vpaths[i].handle, msix_id);
2283 mmiowb();
Ramkrishna Vepa703da5a2009-04-01 18:15:13 +00002284
2285 status = vxge_hw_vpath_alarm_process(vdev->vpaths[i].handle,
2286 vdev->exec_mode);
2287 if (status == VXGE_HW_OK) {
Ramkrishna Vepa703da5a2009-04-01 18:15:13 +00002288 vxge_hw_vpath_msix_unmask(vdev->vpaths[i].handle,
Jon Mason16fded72011-01-18 15:02:21 +00002289 msix_id);
2290 mmiowb();
Ramkrishna Vepa703da5a2009-04-01 18:15:13 +00002291 continue;
2292 }
2293 vxge_debug_intr(VXGE_ERR,
2294 "%s: vxge_hw_vpath_alarm_process failed %x ",
2295 VXGE_DRIVER_NAME, status);
2296 }
2297 return IRQ_HANDLED;
2298}
2299
2300static int vxge_alloc_msix(struct vxgedev *vdev)
2301{
2302 int j, i, ret = 0;
Sreenivasa Honnurb59c9452010-03-28 22:11:41 +00002303 int msix_intr_vect = 0, temp;
Ramkrishna Vepa703da5a2009-04-01 18:15:13 +00002304 vdev->intr_cnt = 0;
2305
Sreenivasa Honnurb59c9452010-03-28 22:11:41 +00002306start:
Ramkrishna Vepa703da5a2009-04-01 18:15:13 +00002307 /* Tx/Rx MSIX Vectors count */
2308 vdev->intr_cnt = vdev->no_of_vpath * 2;
2309
2310 /* Alarm MSIX Vectors count */
2311 vdev->intr_cnt++;
2312
Joe Perchesbaeb2ff2010-08-11 07:02:48 +00002313 vdev->entries = kcalloc(vdev->intr_cnt, sizeof(struct msix_entry),
2314 GFP_KERNEL);
Ramkrishna Vepa703da5a2009-04-01 18:15:13 +00002315 if (!vdev->entries) {
2316 vxge_debug_init(VXGE_ERR,
2317 "%s: memory allocation failed",
2318 VXGE_DRIVER_NAME);
Michal Schmidtcc413d92010-06-24 04:13:44 +00002319 ret = -ENOMEM;
2320 goto alloc_entries_failed;
Ramkrishna Vepa703da5a2009-04-01 18:15:13 +00002321 }
2322
Joe Perchesbaeb2ff2010-08-11 07:02:48 +00002323 vdev->vxge_entries = kcalloc(vdev->intr_cnt,
2324 sizeof(struct vxge_msix_entry),
2325 GFP_KERNEL);
Ramkrishna Vepa703da5a2009-04-01 18:15:13 +00002326 if (!vdev->vxge_entries) {
2327 vxge_debug_init(VXGE_ERR, "%s: memory allocation failed",
2328 VXGE_DRIVER_NAME);
Michal Schmidtcc413d92010-06-24 04:13:44 +00002329 ret = -ENOMEM;
2330 goto alloc_vxge_entries_failed;
Ramkrishna Vepa703da5a2009-04-01 18:15:13 +00002331 }
2332
Sreenivasa Honnurb59c9452010-03-28 22:11:41 +00002333 for (i = 0, j = 0; i < vdev->no_of_vpath; i++) {
Ramkrishna Vepa703da5a2009-04-01 18:15:13 +00002334
2335 msix_intr_vect = i * VXGE_HW_VPATH_MSIX_ACTIVE;
2336
2337 /* Initialize the fifo vector */
2338 vdev->entries[j].entry = msix_intr_vect;
2339 vdev->vxge_entries[j].entry = msix_intr_vect;
2340 vdev->vxge_entries[j].in_use = 0;
2341 j++;
2342
2343 /* Initialize the ring vector */
2344 vdev->entries[j].entry = msix_intr_vect + 1;
2345 vdev->vxge_entries[j].entry = msix_intr_vect + 1;
2346 vdev->vxge_entries[j].in_use = 0;
2347 j++;
2348 }
2349
2350 /* Initialize the alarm vector */
Sreenivasa Honnurb59c9452010-03-28 22:11:41 +00002351 vdev->entries[j].entry = VXGE_ALARM_MSIX_ID;
2352 vdev->vxge_entries[j].entry = VXGE_ALARM_MSIX_ID;
Ramkrishna Vepa703da5a2009-04-01 18:15:13 +00002353 vdev->vxge_entries[j].in_use = 0;
2354
Sreenivasa Honnurb59c9452010-03-28 22:11:41 +00002355 ret = pci_enable_msix(vdev->pdev, vdev->entries, vdev->intr_cnt);
Ramkrishna Vepa703da5a2009-04-01 18:15:13 +00002356 if (ret > 0) {
2357 vxge_debug_init(VXGE_ERR,
Ramkrishna Vepa703da5a2009-04-01 18:15:13 +00002358 "%s: MSI-X enable failed for %d vectors, ret: %d",
Sreenivasa Honnurb59c9452010-03-28 22:11:41 +00002359 VXGE_DRIVER_NAME, vdev->intr_cnt, ret);
Michal Schmidtcc413d92010-06-24 04:13:44 +00002360 if ((max_config_vpath != VXGE_USE_DEFAULT) || (ret < 3)) {
2361 ret = -ENODEV;
2362 goto enable_msix_failed;
2363 }
2364
Ramkrishna Vepa703da5a2009-04-01 18:15:13 +00002365 kfree(vdev->entries);
2366 kfree(vdev->vxge_entries);
2367 vdev->entries = NULL;
2368 vdev->vxge_entries = NULL;
Sreenivasa Honnurb59c9452010-03-28 22:11:41 +00002369 /* Try with less no of vector by reducing no of vpaths count */
2370 temp = (ret - 1)/2;
2371 vxge_close_vpaths(vdev, temp);
2372 vdev->no_of_vpath = temp;
2373 goto start;
Michal Schmidtcc413d92010-06-24 04:13:44 +00002374 } else if (ret < 0) {
2375 ret = -ENODEV;
2376 goto enable_msix_failed;
2377 }
Ramkrishna Vepa703da5a2009-04-01 18:15:13 +00002378 return 0;
Michal Schmidtcc413d92010-06-24 04:13:44 +00002379
2380enable_msix_failed:
2381 kfree(vdev->vxge_entries);
2382alloc_vxge_entries_failed:
2383 kfree(vdev->entries);
2384alloc_entries_failed:
2385 return ret;
Ramkrishna Vepa703da5a2009-04-01 18:15:13 +00002386}
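
/*
 * The allocation above lays vdev->entries[] out as one (Tx, Rx) vector
 * pair per vpath followed by a single alarm vector, hence
 * intr_cnt == no_of_vpath * 2 + 1.  If pci_enable_msix() can only grant
 * fewer vectors, the driver retries with the number of vpaths those
 * vectors can support ((ret - 1) / 2) instead of falling straight back
 * to INTA.
 */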
2387
2388static int vxge_enable_msix(struct vxgedev *vdev)
2389{
2390
2391 int i, ret = 0;
Ramkrishna Vepa703da5a2009-04-01 18:15:13 +00002392 /* 0 - Tx, 1 - Rx */
Sreenivasa Honnurb59c9452010-03-28 22:11:41 +00002393 int tim_msix_id[4] = {0, 1, 0, 0};
2394
Ramkrishna Vepa703da5a2009-04-01 18:15:13 +00002395 vdev->intr_cnt = 0;
2396
2397 /* allocate msix vectors */
2398 ret = vxge_alloc_msix(vdev);
2399 if (!ret) {
Ramkrishna Vepa703da5a2009-04-01 18:15:13 +00002400 for (i = 0; i < vdev->no_of_vpath; i++) {
Jon Mason7adf7d12010-07-15 08:47:24 +00002401 struct vxge_vpath *vpath = &vdev->vpaths[i];
Ramkrishna Vepa703da5a2009-04-01 18:15:13 +00002402
Jon Mason7adf7d12010-07-15 08:47:24 +00002403 /* If the fifo or ring is not enabled, the MSIX vector for
2404 * it should be set to 0.
2405 */
2406 vpath->ring.rx_vector_no = (vpath->device_id *
2407 VXGE_HW_VPATH_MSIX_ACTIVE) + 1;
Ramkrishna Vepa703da5a2009-04-01 18:15:13 +00002408
Jon Mason16fded72011-01-18 15:02:21 +00002409 vpath->fifo.tx_vector_no = (vpath->device_id *
2410 VXGE_HW_VPATH_MSIX_ACTIVE);
2411
Jon Mason7adf7d12010-07-15 08:47:24 +00002412 vxge_hw_vpath_msix_set(vpath->handle, tim_msix_id,
2413 VXGE_ALARM_MSIX_ID);
Ramkrishna Vepa703da5a2009-04-01 18:15:13 +00002414 }
2415 }
2416
2417 return ret;
2418}
2419
2420static void vxge_rem_msix_isr(struct vxgedev *vdev)
2421{
2422 int intr_cnt;
2423
Sreenivasa Honnurb59c9452010-03-28 22:11:41 +00002424 for (intr_cnt = 0; intr_cnt < (vdev->no_of_vpath * 2 + 1);
Ramkrishna Vepa703da5a2009-04-01 18:15:13 +00002425 intr_cnt++) {
2426 if (vdev->vxge_entries[intr_cnt].in_use) {
2427 synchronize_irq(vdev->entries[intr_cnt].vector);
2428 free_irq(vdev->entries[intr_cnt].vector,
2429 vdev->vxge_entries[intr_cnt].arg);
2430 vdev->vxge_entries[intr_cnt].in_use = 0;
2431 }
2432 }
2433
2434 kfree(vdev->entries);
2435 kfree(vdev->vxge_entries);
2436 vdev->entries = NULL;
2437 vdev->vxge_entries = NULL;
2438
2439 if (vdev->config.intr_type == MSI_X)
2440 pci_disable_msix(vdev->pdev);
2441}
2442#endif
2443
2444static void vxge_rem_isr(struct vxgedev *vdev)
2445{
Jon Mason2c913082010-11-11 04:26:03 +00002446 struct __vxge_hw_device *hldev;
Joe Perchesd8ee7072010-11-15 10:13:58 +00002447 hldev = pci_get_drvdata(vdev->pdev);
Ramkrishna Vepa703da5a2009-04-01 18:15:13 +00002448
2449#ifdef CONFIG_PCI_MSI
2450 if (vdev->config.intr_type == MSI_X) {
2451 vxge_rem_msix_isr(vdev);
2452 } else
2453#endif
2454 if (vdev->config.intr_type == INTA) {
2455 synchronize_irq(vdev->pdev->irq);
Sreenivasa Honnura5d165b2009-07-01 21:16:37 +00002456 free_irq(vdev->pdev->irq, vdev);
Ramkrishna Vepa703da5a2009-04-01 18:15:13 +00002457 }
2458}
2459
2460static int vxge_add_isr(struct vxgedev *vdev)
2461{
2462 int ret = 0;
Ramkrishna Vepa703da5a2009-04-01 18:15:13 +00002463#ifdef CONFIG_PCI_MSI
2464 int vp_idx = 0, intr_idx = 0, intr_cnt = 0, msix_idx = 0, irq_req = 0;
Ramkrishna Vepa703da5a2009-04-01 18:15:13 +00002465 int pci_fun = PCI_FUNC(vdev->pdev->devfn);
2466
2467 if (vdev->config.intr_type == MSI_X)
2468 ret = vxge_enable_msix(vdev);
2469
2470 if (ret) {
2471 vxge_debug_init(VXGE_ERR,
2472 "%s: Enabling MSI-X Failed", VXGE_DRIVER_NAME);
Sreenivasa Honnureb5f10c2009-10-05 01:57:29 +00002473 vxge_debug_init(VXGE_ERR,
2474 "%s: Defaulting to INTA", VXGE_DRIVER_NAME);
2475 vdev->config.intr_type = INTA;
Ramkrishna Vepa703da5a2009-04-01 18:15:13 +00002476 }
2477
2478 if (vdev->config.intr_type == MSI_X) {
2479 for (intr_idx = 0;
2480 intr_idx < (vdev->no_of_vpath *
2481 VXGE_HW_VPATH_MSIX_ACTIVE); intr_idx++) {
2482
2483 msix_idx = intr_idx % VXGE_HW_VPATH_MSIX_ACTIVE;
2484 irq_req = 0;
2485
2486 switch (msix_idx) {
2487 case 0:
2488 snprintf(vdev->desc[intr_cnt], VXGE_INTR_STRLEN,
Sreenivasa Honnurb59c9452010-03-28 22:11:41 +00002489 "%s:vxge:MSI-X %d - Tx - fn:%d vpath:%d",
2490 vdev->ndev->name,
2491 vdev->entries[intr_cnt].entry,
2492 pci_fun, vp_idx);
Ramkrishna Vepa703da5a2009-04-01 18:15:13 +00002493 ret = request_irq(
2494 vdev->entries[intr_cnt].vector,
2495 vxge_tx_msix_handle, 0,
2496 vdev->desc[intr_cnt],
2497 &vdev->vpaths[vp_idx].fifo);
2498 vdev->vxge_entries[intr_cnt].arg =
2499 &vdev->vpaths[vp_idx].fifo;
2500 irq_req = 1;
2501 break;
2502 case 1:
2503 snprintf(vdev->desc[intr_cnt], VXGE_INTR_STRLEN,
Sreenivasa Honnurb59c9452010-03-28 22:11:41 +00002504 "%s:vxge:MSI-X %d - Rx - fn:%d vpath:%d",
2505 vdev->ndev->name,
2506 vdev->entries[intr_cnt].entry,
2507 pci_fun, vp_idx);
Ramkrishna Vepa703da5a2009-04-01 18:15:13 +00002508 ret = request_irq(
2509 vdev->entries[intr_cnt].vector,
2510 vxge_rx_msix_napi_handle,
2511 0,
2512 vdev->desc[intr_cnt],
2513 &vdev->vpaths[vp_idx].ring);
2514 vdev->vxge_entries[intr_cnt].arg =
2515 &vdev->vpaths[vp_idx].ring;
2516 irq_req = 1;
2517 break;
2518 }
2519
2520 if (ret) {
2521 vxge_debug_init(VXGE_ERR,
2522 "%s: MSIX - %d Registration failed",
2523 vdev->ndev->name, intr_cnt);
2524 vxge_rem_msix_isr(vdev);
Sreenivasa Honnureb5f10c2009-10-05 01:57:29 +00002525 vdev->config.intr_type = INTA;
2526 vxge_debug_init(VXGE_ERR,
2527 "%s: Defaulting to INTA"
2528 , vdev->ndev->name);
Ramkrishna Vepa703da5a2009-04-01 18:15:13 +00002529 goto INTA_MODE;
Ramkrishna Vepa703da5a2009-04-01 18:15:13 +00002530 }
2531
2532 if (irq_req) {
2533 /* We requested this msix interrupt */
2534 vdev->vxge_entries[intr_cnt].in_use = 1;
Sreenivasa Honnurb59c9452010-03-28 22:11:41 +00002535 msix_idx += vdev->vpaths[vp_idx].device_id *
2536 VXGE_HW_VPATH_MSIX_ACTIVE;
Ramkrishna Vepa703da5a2009-04-01 18:15:13 +00002537 vxge_hw_vpath_msix_unmask(
2538 vdev->vpaths[vp_idx].handle,
Sreenivasa Honnurb59c9452010-03-28 22:11:41 +00002539 msix_idx);
Ramkrishna Vepa703da5a2009-04-01 18:15:13 +00002540 intr_cnt++;
2541 }
2542
2543 /* Point to next vpath handler */
Joe Perches8e95a202009-12-03 07:58:21 +00002544 if (((intr_idx + 1) % VXGE_HW_VPATH_MSIX_ACTIVE == 0) &&
2545 (vp_idx < (vdev->no_of_vpath - 1)))
2546 vp_idx++;
Ramkrishna Vepa703da5a2009-04-01 18:15:13 +00002547 }
2548
Sreenivasa Honnurb59c9452010-03-28 22:11:41 +00002549 intr_cnt = vdev->no_of_vpath * 2;
Ramkrishna Vepa703da5a2009-04-01 18:15:13 +00002550 snprintf(vdev->desc[intr_cnt], VXGE_INTR_STRLEN,
Sreenivasa Honnurb59c9452010-03-28 22:11:41 +00002551 "%s:vxge:MSI-X %d - Alarm - fn:%d",
2552 vdev->ndev->name,
2553 vdev->entries[intr_cnt].entry,
2554 pci_fun);
Ramkrishna Vepa703da5a2009-04-01 18:15:13 +00002555 /* For Alarm interrupts */
2556 ret = request_irq(vdev->entries[intr_cnt].vector,
2557 vxge_alarm_msix_handle, 0,
2558 vdev->desc[intr_cnt],
Sreenivasa Honnurb59c9452010-03-28 22:11:41 +00002559 &vdev->vpaths[0]);
Ramkrishna Vepa703da5a2009-04-01 18:15:13 +00002560 if (ret) {
2561 vxge_debug_init(VXGE_ERR,
2562 "%s: MSIX - %d Registration failed",
2563 vdev->ndev->name, intr_cnt);
2564 vxge_rem_msix_isr(vdev);
Sreenivasa Honnureb5f10c2009-10-05 01:57:29 +00002565 vdev->config.intr_type = INTA;
2566 vxge_debug_init(VXGE_ERR,
2567 "%s: Defaulting to INTA",
2568 vdev->ndev->name);
Ramkrishna Vepa703da5a2009-04-01 18:15:13 +00002569 goto INTA_MODE;
Ramkrishna Vepa703da5a2009-04-01 18:15:13 +00002570 }
2571
Sreenivasa Honnurb59c9452010-03-28 22:11:41 +00002572 msix_idx = (vdev->vpaths[0].handle->vpath->vp_id *
2573 VXGE_HW_VPATH_MSIX_ACTIVE) + VXGE_ALARM_MSIX_ID;
Ramkrishna Vepa703da5a2009-04-01 18:15:13 +00002574 vxge_hw_vpath_msix_unmask(vdev->vpaths[vp_idx].handle,
Sreenivasa Honnurb59c9452010-03-28 22:11:41 +00002575 msix_idx);
Ramkrishna Vepa703da5a2009-04-01 18:15:13 +00002576 vdev->vxge_entries[intr_cnt].in_use = 1;
Sreenivasa Honnurb59c9452010-03-28 22:11:41 +00002577 vdev->vxge_entries[intr_cnt].arg = &vdev->vpaths[0];
Ramkrishna Vepa703da5a2009-04-01 18:15:13 +00002578 }
2579INTA_MODE:
2580#endif
Ramkrishna Vepa703da5a2009-04-01 18:15:13 +00002581
2582 if (vdev->config.intr_type == INTA) {
Sreenivasa Honnurb59c9452010-03-28 22:11:41 +00002583 snprintf(vdev->desc[0], VXGE_INTR_STRLEN,
2584 "%s:vxge:INTA", vdev->ndev->name);
Sreenivasa Honnureb5f10c2009-10-05 01:57:29 +00002585 vxge_hw_device_set_intr_type(vdev->devh,
2586 VXGE_HW_INTR_MODE_IRQLINE);
Jon Mason16fded72011-01-18 15:02:21 +00002587
2588 vxge_hw_vpath_tti_ci_set(vdev->vpaths[0].fifo.handle);
2589
Ramkrishna Vepa703da5a2009-04-01 18:15:13 +00002590 ret = request_irq((int) vdev->pdev->irq,
2591 vxge_isr_napi,
Sreenivasa Honnura5d165b2009-07-01 21:16:37 +00002592 IRQF_SHARED, vdev->desc[0], vdev);
Ramkrishna Vepa703da5a2009-04-01 18:15:13 +00002593 if (ret) {
2594 vxge_debug_init(VXGE_ERR,
2595 "%s %s-%d: ISR registration failed",
2596 VXGE_DRIVER_NAME, "IRQ", vdev->pdev->irq);
2597 return -ENODEV;
2598 }
2599 vxge_debug_init(VXGE_TRACE,
2600 "new %s-%d line allocated",
2601 "IRQ", vdev->pdev->irq);
2602 }
2603
2604 return VXGE_HW_OK;
2605}
2606
2607static void vxge_poll_vp_reset(unsigned long data)
2608{
2609 struct vxgedev *vdev = (struct vxgedev *)data;
2610 int i, j = 0;
2611
2612 for (i = 0; i < vdev->no_of_vpath; i++) {
2613 if (test_bit(i, &vdev->vp_reset)) {
2614 vxge_reset_vpath(vdev, i);
2615 j++;
2616 }
2617 }
2618 if (j && (vdev->config.intr_type != MSI_X)) {
2619 vxge_hw_device_unmask_all(vdev->devh);
2620 vxge_hw_device_flush_io(vdev->devh);
2621 }
2622
2623 mod_timer(&vdev->vp_reset_timer, jiffies + HZ / 2);
2624}
2625
2626static void vxge_poll_vp_lockup(unsigned long data)
2627{
2628 struct vxgedev *vdev = (struct vxgedev *)data;
Ramkrishna Vepa703da5a2009-04-01 18:15:13 +00002629 enum vxge_hw_status status = VXGE_HW_OK;
Jon Mason7adf7d12010-07-15 08:47:24 +00002630 struct vxge_vpath *vpath;
2631 struct vxge_ring *ring;
2632 int i;
stephen hemminger62ea0552011-06-20 10:35:07 +00002633 unsigned long rx_frms;
Ramkrishna Vepa703da5a2009-04-01 18:15:13 +00002634
2635 for (i = 0; i < vdev->no_of_vpath; i++) {
2636 ring = &vdev->vpaths[i].ring;
stephen hemminger62ea0552011-06-20 10:35:07 +00002637
2638 /* Number of rx frames, truncated to machine word size */
2639 rx_frms = ACCESS_ONCE(ring->stats.rx_frms);
2640
Ramkrishna Vepa703da5a2009-04-01 18:15:13 +00002641 /* Did this vpath receive any packets? */
stephen hemminger62ea0552011-06-20 10:35:07 +00002642 if (ring->stats.prev_rx_frms == rx_frms) {
Ramkrishna Vepa703da5a2009-04-01 18:15:13 +00002643 status = vxge_hw_vpath_check_leak(ring->handle);
2644
2645 /* Did it receive any packets last time? */
2646 if ((VXGE_HW_FAIL == status) &&
2647 (VXGE_HW_FAIL == ring->last_status)) {
2648
2649 /* schedule vpath reset */
2650 if (!test_and_set_bit(i, &vdev->vp_reset)) {
Jon Mason7adf7d12010-07-15 08:47:24 +00002651 vpath = &vdev->vpaths[i];
Ramkrishna Vepa703da5a2009-04-01 18:15:13 +00002652
2653 /* disable interrupts for this vpath */
2654 vxge_vpath_intr_disable(vdev, i);
2655
2656 /* stop the queue for this vpath */
Jon Mason98f45da2010-07-15 08:47:25 +00002657 netif_tx_stop_queue(vpath->fifo.txq);
Ramkrishna Vepa703da5a2009-04-01 18:15:13 +00002658 continue;
2659 }
2660 }
2661 }
stephen hemminger62ea0552011-06-20 10:35:07 +00002662 ring->stats.prev_rx_frms = rx_frms;
Ramkrishna Vepa703da5a2009-04-01 18:15:13 +00002663 ring->last_status = status;
2664 }
2665
2666 /* Check every millisecond */
2667 mod_timer(&vdev->vp_lockup_timer, jiffies + HZ / 1000);
2668}
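
/*
 * Lockup detection summary: a vpath is scheduled for reset only when its
 * rx_frms counter has not advanced between two consecutive 1 ms polls
 * and vxge_hw_vpath_check_leak() reported VXGE_HW_FAIL on both of them;
 * the vpath's interrupts and Tx queue stay stopped until
 * vxge_poll_vp_reset() performs the reset.
 */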
2669
Michał Mirosławfeb990d2011-04-18 13:31:21 +00002670static u32 vxge_fix_features(struct net_device *dev, u32 features)
2671{
2672 u32 changed = dev->features ^ features;
2673
2674 /* Enabling RTH requires some of the logic in vxge_device_register and a
2675 * vpath reset. Due to these restrictions, only allow modification
2676 * while the interface is down.
2677 */
2678 if ((changed & NETIF_F_RXHASH) && netif_running(dev))
2679 features ^= NETIF_F_RXHASH;
2680
2681 return features;
2682}
2683
2684static int vxge_set_features(struct net_device *dev, u32 features)
2685{
2686 struct vxgedev *vdev = netdev_priv(dev);
2687 u32 changed = dev->features ^ features;
2688
2689 if (!(changed & NETIF_F_RXHASH))
2690 return 0;
2691
2692 /* !netif_running() ensured by vxge_fix_features() */
2693
2694 vdev->devh->config.rth_en = !!(features & NETIF_F_RXHASH);
2695 if (vxge_reset_all_vpaths(vdev) != VXGE_HW_OK) {
2696 dev->features = features ^ NETIF_F_RXHASH;
2697 vdev->devh->config.rth_en = !!(dev->features & NETIF_F_RXHASH);
2698 return -EIO;
2699 }
2700
2701 return 0;
2702}
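
/*
 * These two callbacks are wired into the driver's net_device_ops
 * elsewhere in this file.  A minimal sketch of that hookup (the ops
 * structure shown here is illustrative; the field names are those
 * defined by struct net_device_ops):
 *
 *	static const struct net_device_ops example_netdev_ops = {
 *		.ndo_fix_features	= vxge_fix_features,
 *		.ndo_set_features	= vxge_set_features,
 *		...
 *	};
 */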
2703
Ramkrishna Vepa703da5a2009-04-01 18:15:13 +00002704/**
2705 * vxge_open
2706 * @dev: pointer to the device structure.
2707 *
2708 * This function is the open entry point of the driver. It mainly calls a
2709 * function to allocate Rx buffers and inserts them into the buffer
2710 * descriptors and then enables the Rx part of the NIC.
2711 * Return value: '0' on success and an appropriate (-)ve integer as
2712 * defined in errno.h file on failure.
2713 */
Jon Mason528f7272010-12-10 14:02:56 +00002714static int vxge_open(struct net_device *dev)
Ramkrishna Vepa703da5a2009-04-01 18:15:13 +00002715{
2716 enum vxge_hw_status status;
2717 struct vxgedev *vdev;
2718 struct __vxge_hw_device *hldev;
Jon Mason7adf7d12010-07-15 08:47:24 +00002719 struct vxge_vpath *vpath;
Ramkrishna Vepa703da5a2009-04-01 18:15:13 +00002720 int ret = 0;
2721 int i;
2722 u64 val64, function_mode;
Jon Mason528f7272010-12-10 14:02:56 +00002723
Ramkrishna Vepa703da5a2009-04-01 18:15:13 +00002724 vxge_debug_entryexit(VXGE_TRACE,
2725 "%s: %s:%d", dev->name, __func__, __LINE__);
2726
Joe Perches5f54ceb2010-11-15 11:12:30 +00002727 vdev = netdev_priv(dev);
Joe Perchesd8ee7072010-11-15 10:13:58 +00002728 hldev = pci_get_drvdata(vdev->pdev);
Ramkrishna Vepa703da5a2009-04-01 18:15:13 +00002729 function_mode = vdev->config.device_hw_info.function_mode;
2730
2731 /* make sure the link is off by default every time the NIC is
2732 * initialized */
2733 netif_carrier_off(dev);
2734
Ramkrishna Vepa703da5a2009-04-01 18:15:13 +00002735 /* Open VPATHs */
2736 status = vxge_open_vpaths(vdev);
2737 if (status != VXGE_HW_OK) {
2738 vxge_debug_init(VXGE_ERR,
2739 "%s: fatal: Vpath open failed", vdev->ndev->name);
2740 ret = -EPERM;
2741 goto out0;
2742 }
2743
2744 vdev->mtu = dev->mtu;
2745
2746 status = vxge_add_isr(vdev);
2747 if (status != VXGE_HW_OK) {
2748 vxge_debug_init(VXGE_ERR,
2749 "%s: fatal: ISR add failed", dev->name);
2750 ret = -EPERM;
2751 goto out1;
2752 }
2753
Ramkrishna Vepa703da5a2009-04-01 18:15:13 +00002754 if (vdev->config.intr_type != MSI_X) {
2755 netif_napi_add(dev, &vdev->napi, vxge_poll_inta,
2756 vdev->config.napi_weight);
2757 napi_enable(&vdev->napi);
Jon Mason7adf7d12010-07-15 08:47:24 +00002758 for (i = 0; i < vdev->no_of_vpath; i++) {
2759 vpath = &vdev->vpaths[i];
2760 vpath->ring.napi_p = &vdev->napi;
2761 }
Ramkrishna Vepa703da5a2009-04-01 18:15:13 +00002762 } else {
2763 for (i = 0; i < vdev->no_of_vpath; i++) {
Jon Mason7adf7d12010-07-15 08:47:24 +00002764 vpath = &vdev->vpaths[i];
2765 netif_napi_add(dev, &vpath->ring.napi,
Ramkrishna Vepa703da5a2009-04-01 18:15:13 +00002766 vxge_poll_msix, vdev->config.napi_weight);
Jon Mason7adf7d12010-07-15 08:47:24 +00002767 napi_enable(&vpath->ring.napi);
2768 vpath->ring.napi_p = &vpath->ring.napi;
Ramkrishna Vepa703da5a2009-04-01 18:15:13 +00002769 }
2770 }
2771
2772 /* configure RTH */
2773 if (vdev->config.rth_steering) {
2774 status = vxge_rth_configure(vdev);
2775 if (status != VXGE_HW_OK) {
2776 vxge_debug_init(VXGE_ERR,
2777 "%s: fatal: RTH configuration failed",
2778 dev->name);
2779 ret = -EPERM;
2780 goto out2;
2781 }
2782 }
Jon Mason47f01db2010-11-11 04:25:53 +00002783 printk(KERN_INFO "%s: Receive Hashing Offload %s\n", dev->name,
2784 hldev->config.rth_en ? "enabled" : "disabled");
Ramkrishna Vepa703da5a2009-04-01 18:15:13 +00002785
2786 for (i = 0; i < vdev->no_of_vpath; i++) {
Jon Mason7adf7d12010-07-15 08:47:24 +00002787 vpath = &vdev->vpaths[i];
2788
Ramkrishna Vepa703da5a2009-04-01 18:15:13 +00002789 /* set initial mtu before enabling the device */
Jon Mason7adf7d12010-07-15 08:47:24 +00002790 status = vxge_hw_vpath_mtu_set(vpath->handle, vdev->mtu);
Ramkrishna Vepa703da5a2009-04-01 18:15:13 +00002791 if (status != VXGE_HW_OK) {
2792 vxge_debug_init(VXGE_ERR,
2793 "%s: fatal: can not set new MTU", dev->name);
2794 ret = -EPERM;
2795 goto out2;
2796 }
2797 }
2798
2799 VXGE_DEVICE_DEBUG_LEVEL_SET(VXGE_TRACE, VXGE_COMPONENT_LL, vdev);
2800 vxge_debug_init(vdev->level_trace,
2801 "%s: MTU is %d", vdev->ndev->name, vdev->mtu);
2802 VXGE_DEVICE_DEBUG_LEVEL_SET(VXGE_ERR, VXGE_COMPONENT_LL, vdev);
2803
Jon Mason7adf7d12010-07-15 08:47:24 +00002804 /* Restore the DA, VID table and also multicast and promiscuous mode
2805 * states
2806 */
2807 if (vdev->all_multi_flg) {
2808 for (i = 0; i < vdev->no_of_vpath; i++) {
2809 vpath = &vdev->vpaths[i];
2810 vxge_restore_vpath_mac_addr(vpath);
2811 vxge_restore_vpath_vid_table(vpath);
2812
2813 status = vxge_hw_vpath_mcast_enable(vpath->handle);
2814 if (status != VXGE_HW_OK)
2815 vxge_debug_init(VXGE_ERR,
2816 "%s:%d Enabling multicast failed",
2817 __func__, __LINE__);
2818 }
Ramkrishna Vepa703da5a2009-04-01 18:15:13 +00002819 }
2820
2821 /* Enable vpaths to sniff all unicast/multicast traffic that is not
Lucas De Marchi25985ed2011-03-30 22:57:33 -03002822 * addressed to them. We allow promiscuous mode for the PF only
Ramkrishna Vepa703da5a2009-04-01 18:15:13 +00002823 */
2824
2825 val64 = 0;
2826 for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++)
2827 val64 |= VXGE_HW_RXMAC_AUTHORIZE_ALL_ADDR_VP(i);
2828
2829 vxge_hw_mgmt_reg_write(vdev->devh,
2830 vxge_hw_mgmt_reg_type_mrpcim,
2831 0,
2832 (ulong)offsetof(struct vxge_hw_mrpcim_reg,
2833 rxmac_authorize_all_addr),
2834 val64);
2835
2836 vxge_hw_mgmt_reg_write(vdev->devh,
2837 vxge_hw_mgmt_reg_type_mrpcim,
2838 0,
2839 (ulong)offsetof(struct vxge_hw_mrpcim_reg,
2840 rxmac_authorize_all_vid),
2841 val64);
2842
2843 vxge_set_multicast(dev);
2844
2845 /* Enable bcast and mcast for all vpaths */
2846 for (i = 0; i < vdev->no_of_vpath; i++) {
Jon Mason7adf7d12010-07-15 08:47:24 +00002847 vpath = &vdev->vpaths[i];
2848 status = vxge_hw_vpath_bcast_enable(vpath->handle);
Ramkrishna Vepa703da5a2009-04-01 18:15:13 +00002849 if (status != VXGE_HW_OK)
2850 vxge_debug_init(VXGE_ERR,
2851 "%s : Can not enable bcast for vpath "
2852 "id %d", dev->name, i);
2853 if (vdev->config.addr_learn_en) {
Jon Mason7adf7d12010-07-15 08:47:24 +00002854 status = vxge_hw_vpath_mcast_enable(vpath->handle);
Ramkrishna Vepa703da5a2009-04-01 18:15:13 +00002855 if (status != VXGE_HW_OK)
2856 vxge_debug_init(VXGE_ERR,
2857 "%s : Can not enable mcast for vpath "
2858 "id %d", dev->name, i);
2859 }
2860 }
2861
2862 vxge_hw_device_setpause_data(vdev->devh, 0,
2863 vdev->config.tx_pause_enable,
2864 vdev->config.rx_pause_enable);
2865
2866 if (vdev->vp_reset_timer.function == NULL)
2867 vxge_os_timer(vdev->vp_reset_timer,
2868 vxge_poll_vp_reset, vdev, (HZ/2));
2869
Jon Masone7935c92010-11-11 04:26:00 +00002870 /* There is no need to check for RxD leak and RxD lookup on Titan1A */
2871 if (vdev->titan1 && vdev->vp_lockup_timer.function == NULL)
2872 vxge_os_timer(vdev->vp_lockup_timer, vxge_poll_vp_lockup, vdev,
2873 HZ / 2);
Ramkrishna Vepa703da5a2009-04-01 18:15:13 +00002874
2875 set_bit(__VXGE_STATE_CARD_UP, &vdev->state);
2876
2877 smp_wmb();
2878
2879 if (vxge_hw_device_link_state_get(vdev->devh) == VXGE_HW_LINK_UP) {
2880 netif_carrier_on(vdev->ndev);
Joe Perches75f5e1c2010-07-27 11:47:03 +00002881 netdev_notice(vdev->ndev, "Link Up\n");
Ramkrishna Vepa703da5a2009-04-01 18:15:13 +00002882 vdev->stats.link_up++;
2883 }
2884
2885 vxge_hw_device_intr_enable(vdev->devh);
2886
2887 smp_wmb();
2888
2889 for (i = 0; i < vdev->no_of_vpath; i++) {
Jon Mason7adf7d12010-07-15 08:47:24 +00002890 vpath = &vdev->vpaths[i];
2891
2892 vxge_hw_vpath_enable(vpath->handle);
Ramkrishna Vepa703da5a2009-04-01 18:15:13 +00002893 smp_wmb();
Jon Mason7adf7d12010-07-15 08:47:24 +00002894 vxge_hw_vpath_rx_doorbell_init(vpath->handle);
Ramkrishna Vepa703da5a2009-04-01 18:15:13 +00002895 }
2896
Jon Masond03848e2010-07-15 08:47:23 +00002897 netif_tx_start_all_queues(vdev->ndev);

	/* configure CI */
	vxge_config_ci_for_tti_rti(vdev);

	goto out0;

out2:
	vxge_rem_isr(vdev);

	/* Disable napi */
	if (vdev->config.intr_type != MSI_X)
		napi_disable(&vdev->napi);
	else {
		for (i = 0; i < vdev->no_of_vpath; i++)
			napi_disable(&vdev->vpaths[i].ring.napi);
	}

out1:
	vxge_close_vpaths(vdev, 0);
out0:
	vxge_debug_entryexit(VXGE_TRACE,
		"%s: %s:%d  Exiting...",
		dev->name, __func__, __LINE__);
	return ret;
}

/* Loop through the mac address list and delete all the entries */
static void vxge_free_mac_add_list(struct vxge_vpath *vpath)
{
	struct list_head *entry, *next;
	if (list_empty(&vpath->mac_addr_list))
		return;

	list_for_each_safe(entry, next, &vpath->mac_addr_list) {
		list_del(entry);
		kfree((struct vxge_mac_addrs *)entry);
	}
}

static void vxge_napi_del_all(struct vxgedev *vdev)
{
	int i;
	if (vdev->config.intr_type != MSI_X)
		netif_napi_del(&vdev->napi);
	else {
		for (i = 0; i < vdev->no_of_vpath; i++)
			netif_napi_del(&vdev->vpaths[i].ring.napi);
	}
}

static int do_vxge_close(struct net_device *dev, int do_io)
{
	enum vxge_hw_status status;
	struct vxgedev *vdev;
	struct __vxge_hw_device *hldev;
	int i;
	u64 val64, vpath_vector;
	vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d",
		dev->name, __func__, __LINE__);

	vdev = netdev_priv(dev);
	hldev = pci_get_drvdata(vdev->pdev);

	if (unlikely(!is_vxge_card_up(vdev)))
		return 0;

	/* If vxge_handle_crit_err task is executing,
	 * wait till it completes. */
	while (test_and_set_bit(__VXGE_STATE_RESET_CARD, &vdev->state))
		msleep(50);

	if (do_io) {
		/* Put the vpath back in normal mode */
		vpath_vector = vxge_mBIT(vdev->vpaths[0].device_id);
		status = vxge_hw_mgmt_reg_read(vdev->devh,
				vxge_hw_mgmt_reg_type_mrpcim,
				0,
				(ulong)offsetof(
					struct vxge_hw_mrpcim_reg,
					rts_mgr_cbasin_cfg),
				&val64);
		if (status == VXGE_HW_OK) {
			val64 &= ~vpath_vector;
			status = vxge_hw_mgmt_reg_write(vdev->devh,
					vxge_hw_mgmt_reg_type_mrpcim,
					0,
					(ulong)offsetof(
						struct vxge_hw_mrpcim_reg,
						rts_mgr_cbasin_cfg),
					val64);
		}

		/* Remove the function 0 from promiscuous mode */
		vxge_hw_mgmt_reg_write(vdev->devh,
			vxge_hw_mgmt_reg_type_mrpcim,
			0,
			(ulong)offsetof(struct vxge_hw_mrpcim_reg,
				rxmac_authorize_all_addr),
			0);

		vxge_hw_mgmt_reg_write(vdev->devh,
			vxge_hw_mgmt_reg_type_mrpcim,
			0,
			(ulong)offsetof(struct vxge_hw_mrpcim_reg,
				rxmac_authorize_all_vid),
			0);

		smp_wmb();
	}

	if (vdev->titan1)
		del_timer_sync(&vdev->vp_lockup_timer);

	del_timer_sync(&vdev->vp_reset_timer);

	if (do_io)
		vxge_hw_device_wait_receive_idle(hldev);

	clear_bit(__VXGE_STATE_CARD_UP, &vdev->state);

	/* Disable napi */
	if (vdev->config.intr_type != MSI_X)
		napi_disable(&vdev->napi);
	else {
		for (i = 0; i < vdev->no_of_vpath; i++)
			napi_disable(&vdev->vpaths[i].ring.napi);
	}

	netif_carrier_off(vdev->ndev);
	netdev_notice(vdev->ndev, "Link Down\n");
	netif_tx_stop_all_queues(vdev->ndev);

	/* Note that at this point xmit() is stopped by upper layer */
	if (do_io)
		vxge_hw_device_intr_disable(vdev->devh);

	vxge_rem_isr(vdev);

	vxge_napi_del_all(vdev);

	if (do_io)
		vxge_reset_all_vpaths(vdev);

	vxge_close_vpaths(vdev, 0);

	vxge_debug_entryexit(VXGE_TRACE,
		"%s: %s:%d  Exiting...", dev->name, __func__, __LINE__);

	clear_bit(__VXGE_STATE_RESET_CARD, &vdev->state);

	return 0;
}

/**
 * vxge_close
 * @dev: device pointer.
 *
 * This is the stop entry point of the driver. It needs to undo exactly
 * whatever was done by the open entry point, thus it's usually referred to
 * as the close function. Among other things, this function stops the
 * Rx side of the NIC and frees all the Rx buffers in the Rx rings.
 * Return value: '0' on success and an appropriate (-)ve integer as
 * defined in errno.h file on failure.
 */
static int vxge_close(struct net_device *dev)
{
	do_vxge_close(dev, 1);
	return 0;
}

/**
 * vxge_change_mtu
 * @dev: net device pointer.
 * @new_mtu: the new MTU size for the device.
 *
 * A driver entry point to change MTU size for the device. Before changing
 * the MTU the device must be stopped.
 */
static int vxge_change_mtu(struct net_device *dev, int new_mtu)
{
	struct vxgedev *vdev = netdev_priv(dev);

	vxge_debug_entryexit(vdev->level_trace,
		"%s:%d", __func__, __LINE__);
	if ((new_mtu < VXGE_HW_MIN_MTU) || (new_mtu > VXGE_HW_MAX_MTU)) {
		vxge_debug_init(vdev->level_err,
			"%s: mtu size is invalid", dev->name);
		return -EPERM;
	}

	/* check if device is down already */
	if (unlikely(!is_vxge_card_up(vdev))) {
		/* just store new value, will use later on open() */
		dev->mtu = new_mtu;
		vxge_debug_init(vdev->level_err,
			"%s", "device is down on MTU change");
		return 0;
	}

	vxge_debug_init(vdev->level_trace,
		"trying to apply new MTU %d", new_mtu);

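	/* Applying a new MTU requires the full close/open cycle below, so
	 * the link bounces and traffic stops while the rings are re-sized.
	 */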
	if (vxge_close(dev))
		return -EIO;

	dev->mtu = new_mtu;
	vdev->mtu = new_mtu;

	if (vxge_open(dev))
		return -EIO;

	vxge_debug_init(vdev->level_trace,
		"%s: MTU changed to %d", vdev->ndev->name, new_mtu);

	vxge_debug_entryexit(vdev->level_trace,
		"%s:%d  Exiting...", __func__, __LINE__);

	return 0;
}

/**
 * vxge_get_stats64
 * @dev: pointer to the device structure
 * @net_stats: pointer to struct rtnl_link_stats64
 *
 */
static struct rtnl_link_stats64 *
vxge_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *net_stats)
{
	struct vxgedev *vdev = netdev_priv(dev);
	int k;

	/* net_stats already zeroed by caller */
	for (k = 0; k < vdev->no_of_vpath; k++) {
		struct vxge_ring_stats *rxstats = &vdev->vpaths[k].ring.stats;
		struct vxge_fifo_stats *txstats = &vdev->vpaths[k].fifo.stats;
		unsigned int start;
		u64 packets, bytes, multicast;

		do {
			start = u64_stats_fetch_begin(&rxstats->syncp);

			packets   = rxstats->rx_frms;
			multicast = rxstats->rx_mcast;
			bytes     = rxstats->rx_bytes;
		} while (u64_stats_fetch_retry(&rxstats->syncp, start));

		net_stats->rx_packets += packets;
		net_stats->rx_bytes += bytes;
		net_stats->multicast += multicast;

		net_stats->rx_errors += rxstats->rx_errors;
		net_stats->rx_dropped += rxstats->rx_dropped;

		do {
			start = u64_stats_fetch_begin(&txstats->syncp);

			packets = txstats->tx_frms;
			bytes   = txstats->tx_bytes;
		} while (u64_stats_fetch_retry(&txstats->syncp, start));

		net_stats->tx_packets += packets;
		net_stats->tx_bytes += bytes;
		net_stats->tx_errors += txstats->tx_errors;
	}

	return net_stats;
}
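
/* The fetch_begin/fetch_retry loops above pair with writer-side updates in
 * the rx/tx fast paths, which (as a sketch of the usual u64_stats pattern,
 * not a quote of this driver's hot path) look like:
 *
 *	u64_stats_update_begin(&rxstats->syncp);
 *	rxstats->rx_frms++;
 *	rxstats->rx_bytes += skb->len;
 *	u64_stats_update_end(&rxstats->syncp);
 *
 * so 64-bit counters read back consistently even on 32-bit hosts.
 */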

static enum vxge_hw_status vxge_timestamp_config(struct __vxge_hw_device *devh)
{
	enum vxge_hw_status status;
	u64 val64;

	/* Timestamp is passed to the driver via the FCS, therefore we
	 * must disable the FCS stripping by the adapter.  Since this is
	 * required for the driver to load (due to a hardware bug),
	 * there is no need to do anything special here.
	 */
	val64 = VXGE_HW_XMAC_TIMESTAMP_EN |
		VXGE_HW_XMAC_TIMESTAMP_USE_LINK_ID(0) |
		VXGE_HW_XMAC_TIMESTAMP_INTERVAL(0);

	status = vxge_hw_mgmt_reg_write(devh,
					vxge_hw_mgmt_reg_type_mrpcim,
					0,
					offsetof(struct vxge_hw_mrpcim_reg,
						 xmac_timestamp),
					val64);
	vxge_hw_device_flush_io(devh);
	devh->config.hwts_en = VXGE_HW_HWTS_ENABLE;
	return status;
}

static int vxge_hwtstamp_ioctl(struct vxgedev *vdev, void __user *data)
{
	struct hwtstamp_config config;
	int i;

	if (copy_from_user(&config, data, sizeof(config)))
		return -EFAULT;

	/* reserved for future extensions */
	if (config.flags)
		return -EINVAL;

	/* Transmit HW Timestamp not supported */
	switch (config.tx_type) {
	case HWTSTAMP_TX_OFF:
		break;
	case HWTSTAMP_TX_ON:
	default:
		return -ERANGE;
	}

	switch (config.rx_filter) {
	case HWTSTAMP_FILTER_NONE:
		vdev->rx_hwts = 0;
		config.rx_filter = HWTSTAMP_FILTER_NONE;
		break;

	case HWTSTAMP_FILTER_ALL:
	case HWTSTAMP_FILTER_SOME:
	case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
	case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
	case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
		if (vdev->devh->config.hwts_en != VXGE_HW_HWTS_ENABLE)
			return -EFAULT;

		vdev->rx_hwts = 1;
		config.rx_filter = HWTSTAMP_FILTER_ALL;
		break;

	default:
		return -ERANGE;
	}

	for (i = 0; i < vdev->no_of_vpath; i++)
		vdev->vpaths[i].ring.rx_hwts = vdev->rx_hwts;

	if (copy_to_user(data, &config, sizeof(config)))
		return -EFAULT;

	return 0;
}
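
/* Illustrative userspace sketch (not part of the driver): RX timestamping
 * is requested through the standard SIOCSHWTSTAMP path that lands in
 * vxge_hwtstamp_ioctl() above; "eth0" and sock_fd are assumptions.
 *
 *	struct hwtstamp_config cfg = {
 *		.tx_type   = HWTSTAMP_TX_OFF,
 *		.rx_filter = HWTSTAMP_FILTER_ALL,
 *	};
 *	struct ifreq ifr;
 *
 *	memset(&ifr, 0, sizeof(ifr));
 *	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
 *	ifr.ifr_data = (void *)&cfg;
 *	if (ioctl(sock_fd, SIOCSHWTSTAMP, &ifr) < 0)
 *		perror("SIOCSHWTSTAMP");
 */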

/**
 * vxge_ioctl
 * @dev: Device pointer.
 * @rq: An IOCTL specific structure, that can contain a pointer to
 *       a proprietary structure used to pass information to the driver.
 * @cmd: This is used to distinguish between the different commands that
 *       can be passed to the IOCTL functions.
 *
 * Entry point for the Ioctl.
 */
static int vxge_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct vxgedev *vdev = netdev_priv(dev);
	int ret;

	switch (cmd) {
	case SIOCSHWTSTAMP:
		ret = vxge_hwtstamp_ioctl(vdev, rq->ifr_data);
		if (ret)
			return ret;
		break;
	default:
		return -EOPNOTSUPP;
	}

	return 0;
}

/**
 * vxge_tx_watchdog
 * @dev: pointer to net device structure
 *
 * Watchdog for transmit side.
 * This function is triggered if the Tx Queue is stopped
 * for a pre-defined amount of time when the Interface is still up.
 */
static void vxge_tx_watchdog(struct net_device *dev)
{
	struct vxgedev *vdev;

	vxge_debug_entryexit(VXGE_TRACE, "%s:%d", __func__, __LINE__);

	vdev = netdev_priv(dev);

	vdev->cric_err_event = VXGE_HW_EVENT_RESET_START;

	schedule_work(&vdev->reset_task);
	vxge_debug_entryexit(VXGE_TRACE,
		"%s:%d  Exiting...", __func__, __LINE__);
}

/**
 * vxge_vlan_rx_register
 * @dev: net device pointer.
 * @grp: vlan group
 *
 * Vlan group registration
 */
static void
vxge_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
{
	struct vxgedev *vdev;
	struct vxge_vpath *vpath;
	int vp;
	u64 vid;
	enum vxge_hw_status status;
	int i;

	vxge_debug_entryexit(VXGE_TRACE, "%s:%d", __func__, __LINE__);

	vdev = netdev_priv(dev);

	vpath = &vdev->vpaths[0];
	if ((NULL == grp) && (vpath->is_open)) {
		/* Get the first vlan */
		status = vxge_hw_vpath_vid_get(vpath->handle, &vid);

		while (status == VXGE_HW_OK) {

			/* Delete this vlan from the vid table */
			for (vp = 0; vp < vdev->no_of_vpath; vp++) {
				vpath = &vdev->vpaths[vp];
				if (!vpath->is_open)
					continue;

				vxge_hw_vpath_vid_delete(vpath->handle, vid);
			}

			/* Get the next vlan to be deleted */
			vpath = &vdev->vpaths[0];
			status = vxge_hw_vpath_vid_get(vpath->handle, &vid);
		}
	}

	vdev->vlgrp = grp;

	for (i = 0; i < vdev->no_of_vpath; i++) {
		if (vdev->vpaths[i].is_configured)
			vdev->vpaths[i].ring.vlgrp = grp;
	}

	vxge_debug_entryexit(VXGE_TRACE,
		"%s:%d  Exiting...", __func__, __LINE__);
}

/**
 * vxge_vlan_rx_add_vid
 * @dev: net device pointer.
 * @vid: vid
 *
 * Add the vlan id to the device's vlan id table
 */
static void
vxge_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
{
	struct vxgedev *vdev;
	struct vxge_vpath *vpath;
	int vp_id;

	vdev = netdev_priv(dev);

	/* Add this vlan to the vid table */
	for (vp_id = 0; vp_id < vdev->no_of_vpath; vp_id++) {
		vpath = &vdev->vpaths[vp_id];
		if (!vpath->is_open)
			continue;
		vxge_hw_vpath_vid_add(vpath->handle, vid);
	}
}

/**
 * vxge_vlan_rx_kill_vid
 * @dev: net device pointer.
 * @vid: vid
 *
 * Remove the vlan id from the device's vlan id table
 */
static void
vxge_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
{
	struct vxgedev *vdev;
	struct vxge_vpath *vpath;
	int vp_id;

	vxge_debug_entryexit(VXGE_TRACE, "%s:%d", __func__, __LINE__);

	vdev = netdev_priv(dev);

	vlan_group_set_device(vdev->vlgrp, vid, NULL);

	/* Delete this vlan from the vid table */
	for (vp_id = 0; vp_id < vdev->no_of_vpath; vp_id++) {
		vpath = &vdev->vpaths[vp_id];
		if (!vpath->is_open)
			continue;
		vxge_hw_vpath_vid_delete(vpath->handle, vid);
	}
	vxge_debug_entryexit(VXGE_TRACE,
		"%s:%d  Exiting...", __func__, __LINE__);
}

static const struct net_device_ops vxge_netdev_ops = {
	.ndo_open               = vxge_open,
	.ndo_stop               = vxge_close,
	.ndo_get_stats64        = vxge_get_stats64,
	.ndo_start_xmit         = vxge_xmit,
	.ndo_validate_addr      = eth_validate_addr,
	.ndo_set_multicast_list = vxge_set_multicast,
	.ndo_do_ioctl           = vxge_ioctl,
	.ndo_set_mac_address    = vxge_set_mac_addr,
	.ndo_change_mtu         = vxge_change_mtu,
	.ndo_fix_features	= vxge_fix_features,
	.ndo_set_features	= vxge_set_features,
	.ndo_vlan_rx_register   = vxge_vlan_rx_register,
	.ndo_vlan_rx_kill_vid   = vxge_vlan_rx_kill_vid,
	.ndo_vlan_rx_add_vid	= vxge_vlan_rx_add_vid,
	.ndo_tx_timeout         = vxge_tx_watchdog,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller    = vxge_netpoll,
#endif
};

static int __devinit vxge_device_register(struct __vxge_hw_device *hldev,
					  struct vxge_config *config,
					  int high_dma, int no_of_vpath,
					  struct vxgedev **vdev_out)
{
	struct net_device *ndev;
	enum vxge_hw_status status = VXGE_HW_OK;
	struct vxgedev *vdev;
	int ret = 0, no_of_queue = 1;
	u64 stat;

	*vdev_out = NULL;
	if (config->tx_steering_type)
		no_of_queue = no_of_vpath;

	ndev = alloc_etherdev_mq(sizeof(struct vxgedev),
			no_of_queue);
	if (ndev == NULL) {
		vxge_debug_init(
			vxge_hw_device_trace_level_get(hldev),
			"%s : device allocation failed", __func__);
		ret = -ENODEV;
		goto _out0;
	}

	vxge_debug_entryexit(
		vxge_hw_device_trace_level_get(hldev),
		"%s: %s:%d  Entering...",
		ndev->name, __func__, __LINE__);

	vdev = netdev_priv(ndev);
	memset(vdev, 0, sizeof(struct vxgedev));

	vdev->ndev = ndev;
	vdev->devh = hldev;
	vdev->pdev = hldev->pdev;
	memcpy(&vdev->config, config, sizeof(struct vxge_config));
	vdev->rx_hwts = 0;
	vdev->titan1 = (vdev->pdev->revision == VXGE_HW_TITAN1_PCI_REVISION);

	SET_NETDEV_DEV(ndev, &vdev->pdev->dev);

	ndev->hw_features = NETIF_F_RXCSUM | NETIF_F_SG |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
		NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_HW_VLAN_TX;
	if (vdev->config.rth_steering != NO_STEERING)
		ndev->hw_features |= NETIF_F_RXHASH;

	ndev->features |= ndev->hw_features |
		NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;

	/* Driver entry points */
	ndev->irq = vdev->pdev->irq;
	ndev->base_addr = (unsigned long) hldev->bar0;

	ndev->netdev_ops = &vxge_netdev_ops;

	ndev->watchdog_timeo = VXGE_LL_WATCH_DOG_TIMEOUT;
	INIT_WORK(&vdev->reset_task, vxge_reset);

	vxge_initialize_ethtool_ops(ndev);

	/* Allocate memory for vpath */
	vdev->vpaths = kzalloc((sizeof(struct vxge_vpath)) *
				no_of_vpath, GFP_KERNEL);
	if (!vdev->vpaths) {
		vxge_debug_init(VXGE_ERR,
			"%s: vpath memory allocation failed",
			vdev->ndev->name);
		ret = -ENOMEM;
		goto _out1;
	}

	vxge_debug_init(vxge_hw_device_trace_level_get(hldev),
		"%s : checksumming enabled", __func__);

	if (high_dma) {
		ndev->features |= NETIF_F_HIGHDMA;
		vxge_debug_init(vxge_hw_device_trace_level_get(hldev),
			"%s : using High DMA", __func__);
	}

	ret = register_netdev(ndev);
	if (ret) {
		vxge_debug_init(vxge_hw_device_trace_level_get(hldev),
			"%s: %s : device registration failed!",
			ndev->name, __func__);
		goto _out2;
	}

	/*  Set the factory defined MAC address initially */
	ndev->addr_len = ETH_ALEN;

	/* Make Link state as off at this point, when the Link change
	 * interrupt comes the state will be automatically changed to
	 * the right state.
	 */
	netif_carrier_off(ndev);

	vxge_debug_init(vxge_hw_device_trace_level_get(hldev),
		"%s: Ethernet device registered",
		ndev->name);

	hldev->ndev = ndev;
	*vdev_out = vdev;

	/* Resetting the Device stats */
	status = vxge_hw_mrpcim_stats_access(
		hldev,
		VXGE_HW_STATS_OP_CLEAR_ALL_STATS,
		0,
		0,
		&stat);

	if (status == VXGE_HW_ERR_PRIVILAGED_OPEARATION)
		vxge_debug_init(
			vxge_hw_device_trace_level_get(hldev),
			"%s: device stats clear returns "
			"VXGE_HW_ERR_PRIVILAGED_OPEARATION", ndev->name);

	vxge_debug_entryexit(vxge_hw_device_trace_level_get(hldev),
		"%s: %s:%d  Exiting...",
		ndev->name, __func__, __LINE__);

	return ret;
_out2:
	kfree(vdev->vpaths);
_out1:
	free_netdev(ndev);
_out0:
	return ret;
}

/*
 * vxge_device_unregister
 *
 * This function will unregister and free network device
 */
static void vxge_device_unregister(struct __vxge_hw_device *hldev)
{
	struct vxgedev *vdev;
	struct net_device *dev;
	char buf[IFNAMSIZ];
	u32 level_trace;

	dev = hldev->ndev;
	vdev = netdev_priv(dev);
	/* vdev is freed along with the netdev below; cache the trace level
	 * so the final log calls don't touch freed memory. */
	level_trace = vdev->level_trace;

	vxge_debug_entryexit(level_trace, "%s: %s:%d", vdev->ndev->name,
			     __func__, __LINE__);

	strncpy(buf, dev->name, IFNAMSIZ);

	flush_work_sync(&vdev->reset_task);

	/* in 2.6 will call stop() if device is up */
	unregister_netdev(dev);

	kfree(vdev->vpaths);

	/* we are safe to free it now */
	free_netdev(dev);

	vxge_debug_init(level_trace, "%s: ethernet device unregistered",
			buf);
	vxge_debug_entryexit(level_trace, "%s: %s:%d  Exiting...", buf,
			     __func__, __LINE__);
}

/*
 * vxge_callback_crit_err
 *
 * This function is called by the alarm handler in interrupt context.
 * Driver must analyze it based on the event type.
 */
static void
vxge_callback_crit_err(struct __vxge_hw_device *hldev,
		       enum vxge_hw_event type, u64 vp_id)
{
	struct net_device *dev = hldev->ndev;
	struct vxgedev *vdev = netdev_priv(dev);
	struct vxge_vpath *vpath = NULL;
	int vpath_idx;

	vxge_debug_entryexit(vdev->level_trace,
		"%s: %s:%d", vdev->ndev->name, __func__, __LINE__);

	/* Note: This event type should be used for device wide
	 * indications only - Serious errors, Slot freeze and critical errors
	 */
	vdev->cric_err_event = type;

	for (vpath_idx = 0; vpath_idx < vdev->no_of_vpath; vpath_idx++) {
		vpath = &vdev->vpaths[vpath_idx];
		if (vpath->device_id == vp_id)
			break;
	}

	if (!test_bit(__VXGE_STATE_RESET_CARD, &vdev->state)) {
		if (type == VXGE_HW_EVENT_SLOT_FREEZE) {
			vxge_debug_init(VXGE_ERR,
				"%s: Slot is frozen", vdev->ndev->name);
		} else if (type == VXGE_HW_EVENT_SERR) {
			vxge_debug_init(VXGE_ERR,
				"%s: Encountered Serious Error",
				vdev->ndev->name);
		} else if (type == VXGE_HW_EVENT_CRITICAL_ERR)
			vxge_debug_init(VXGE_ERR,
				"%s: Encountered Critical Error",
				vdev->ndev->name);
	}

	if ((type == VXGE_HW_EVENT_SERR) ||
	    (type == VXGE_HW_EVENT_SLOT_FREEZE)) {
		if (unlikely(vdev->exec_mode))
			clear_bit(__VXGE_STATE_CARD_UP, &vdev->state);
	} else if (type == VXGE_HW_EVENT_CRITICAL_ERR) {
		vxge_hw_device_mask_all(hldev);
		if (unlikely(vdev->exec_mode))
			clear_bit(__VXGE_STATE_CARD_UP, &vdev->state);
	} else if ((type == VXGE_HW_EVENT_FIFO_ERR) ||
		   (type == VXGE_HW_EVENT_VPATH_ERR)) {

		if (unlikely(vdev->exec_mode))
			clear_bit(__VXGE_STATE_CARD_UP, &vdev->state);
		else {
			/* check if this vpath is already set for reset */
			if (!test_and_set_bit(vpath_idx, &vdev->vp_reset)) {

				/* disable interrupts for this vpath */
				vxge_vpath_intr_disable(vdev, vpath_idx);

				/* stop the queue for this vpath */
				netif_tx_stop_queue(vpath->fifo.txq);
			}
		}
	}

	vxge_debug_entryexit(vdev->level_trace,
		"%s: %s:%d  Exiting...",
		vdev->ndev->name, __func__, __LINE__);
}

static void verify_bandwidth(void)
{
	int i, band_width, total = 0, equal_priority = 0;

	/* 1. If user enters 0 for some fifo, give equal priority to all */
	for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
		if (bw_percentage[i] == 0) {
			equal_priority = 1;
			break;
		}
	}

	if (!equal_priority) {
		/* 2. If sum exceeds 100, give equal priority to all */
		for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
			if (bw_percentage[i] == 0xFF)
				break;

			total += bw_percentage[i];
			if (total > VXGE_HW_VPATH_BANDWIDTH_MAX) {
				equal_priority = 1;
				break;
			}
		}
	}

	if (!equal_priority) {
		/* Is all the bandwidth consumed? */
		if (total < VXGE_HW_VPATH_BANDWIDTH_MAX) {
			if (i < VXGE_HW_MAX_VIRTUAL_PATHS) {
				/* Split rest of bw equally among next VPs */
				band_width =
				    (VXGE_HW_VPATH_BANDWIDTH_MAX - total) /
					(VXGE_HW_MAX_VIRTUAL_PATHS - i);
				if (band_width < 2) /* min of 2% */
					equal_priority = 1;
				else {
					for (; i < VXGE_HW_MAX_VIRTUAL_PATHS;
						i++)
						bw_percentage[i] =
							band_width;
				}
			}
		} else if (i < VXGE_HW_MAX_VIRTUAL_PATHS)
			equal_priority = 1;
	}

	if (equal_priority) {
		vxge_debug_init(VXGE_ERR,
			"%s: Assigning equal bandwidth to all the vpaths",
			VXGE_DRIVER_NAME);
		bw_percentage[0] = VXGE_HW_VPATH_BANDWIDTH_MAX /
					VXGE_HW_MAX_VIRTUAL_PATHS;
		for (i = 1; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++)
			bw_percentage[i] = bw_percentage[0];
	}
}
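
/* Worked example for verify_bandwidth() above, assuming
 * VXGE_HW_VPATH_BANDWIDTH_MAX is 100 (percent) and 17 vpaths: with
 * bw_percentage = {40, 30, 0xFF, ...} the summing loop stops at i = 2 with
 * total = 70, the remaining 30% is split over the 15 unspecified vpaths as
 * 30 / 15 = 2% each, and the result still sums to 100.  Had the remainder
 * worked out to less than 2% per vpath, everything would fall back to
 * equal priority instead.
 */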

/*
 * Vpath configuration
 */
static int __devinit vxge_config_vpaths(
			struct vxge_hw_device_config *device_config,
			u64 vpath_mask, struct vxge_config *config_param)
{
	int i, no_of_vpaths = 0, default_no_vpath = 0, temp;
	u32 txdl_size, txdl_per_memblock;

	temp = driver_config->vpath_per_dev;
	if ((driver_config->vpath_per_dev == VXGE_USE_DEFAULT) &&
	    (max_config_dev == VXGE_MAX_CONFIG_DEV)) {
		/* No more CPUs available. Return vpath count as zero. */
		if (driver_config->g_no_cpus == -1)
			return 0;

		if (!driver_config->g_no_cpus)
			driver_config->g_no_cpus = num_online_cpus();

		driver_config->vpath_per_dev = driver_config->g_no_cpus >> 1;
		if (!driver_config->vpath_per_dev)
			driver_config->vpath_per_dev = 1;

		for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++)
			if (!vxge_bVALn(vpath_mask, i, 1))
				continue;
			else
				default_no_vpath++;
		if (default_no_vpath < driver_config->vpath_per_dev)
			driver_config->vpath_per_dev = default_no_vpath;

		driver_config->g_no_cpus = driver_config->g_no_cpus -
				(driver_config->vpath_per_dev * 2);
		if (driver_config->g_no_cpus <= 0)
			driver_config->g_no_cpus = -1;
	}

	if (driver_config->vpath_per_dev == 1) {
		vxge_debug_ll_config(VXGE_TRACE,
			"%s: Disable tx and rx steering, "
			"as single vpath is configured", VXGE_DRIVER_NAME);
		config_param->rth_steering = NO_STEERING;
		config_param->tx_steering_type = NO_STEERING;
		device_config->rth_en = 0;
	}

	/* configure bandwidth */
	for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++)
		device_config->vp_config[i].min_bandwidth = bw_percentage[i];

	for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
		device_config->vp_config[i].vp_id = i;
		device_config->vp_config[i].mtu = VXGE_HW_DEFAULT_MTU;
		if (no_of_vpaths < driver_config->vpath_per_dev) {
			if (!vxge_bVALn(vpath_mask, i, 1)) {
				vxge_debug_ll_config(VXGE_TRACE,
					"%s: vpath: %d is not available",
					VXGE_DRIVER_NAME, i);
				continue;
			} else {
				vxge_debug_ll_config(VXGE_TRACE,
					"%s: vpath: %d available",
					VXGE_DRIVER_NAME, i);
				no_of_vpaths++;
			}
		} else {
			vxge_debug_ll_config(VXGE_TRACE,
				"%s: vpath: %d is not configured, "
				"max_config_vpath exceeded",
				VXGE_DRIVER_NAME, i);
			break;
		}

		/* Configure Tx fifo's */
		device_config->vp_config[i].fifo.enable =
						VXGE_HW_FIFO_ENABLE;
		device_config->vp_config[i].fifo.max_frags =
				MAX_SKB_FRAGS + 1;
		device_config->vp_config[i].fifo.memblock_size =
			VXGE_HW_MIN_FIFO_MEMBLOCK_SIZE;

		txdl_size = device_config->vp_config[i].fifo.max_frags *
				sizeof(struct vxge_hw_fifo_txd);
		txdl_per_memblock = VXGE_HW_MIN_FIFO_MEMBLOCK_SIZE / txdl_size;

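		/* fifo_blocks below is a ceiling division: enough memblocks,
		 * at txdl_per_memblock descriptors each, to hold
		 * VXGE_DEF_FIFO_LENGTH descriptors.
		 */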
		device_config->vp_config[i].fifo.fifo_blocks =
			((VXGE_DEF_FIFO_LENGTH - 1) / txdl_per_memblock) + 1;

		device_config->vp_config[i].fifo.intr =
				VXGE_HW_FIFO_QUEUE_INTR_DISABLE;

		/* Configure tti properties */
		device_config->vp_config[i].tti.intr_enable =
					VXGE_HW_TIM_INTR_ENABLE;

		device_config->vp_config[i].tti.btimer_val =
			(VXGE_TTI_BTIMER_VAL * 1000) / 272;
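		/* The usec-to-tick conversions here and in the rti values
		 * below assume the TIM timers count in units of roughly
		 * 272 ns, hence val * 1000 / 272; the factor is inferred
		 * from these constants rather than documented in this file.
		 */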

		device_config->vp_config[i].tti.timer_ac_en =
				VXGE_HW_TIM_TIMER_AC_ENABLE;

		/* For msi-x with napi (each vector has a handler of its own) -
		 * Set CI to OFF for all vpaths
		 */
		device_config->vp_config[i].tti.timer_ci_en =
			VXGE_HW_TIM_TIMER_CI_DISABLE;

		device_config->vp_config[i].tti.timer_ri_en =
				VXGE_HW_TIM_TIMER_RI_DISABLE;

		device_config->vp_config[i].tti.util_sel =
			VXGE_HW_TIM_UTIL_SEL_LEGACY_TX_NET_UTIL;

		device_config->vp_config[i].tti.ltimer_val =
			(VXGE_TTI_LTIMER_VAL * 1000) / 272;

		device_config->vp_config[i].tti.rtimer_val =
			(VXGE_TTI_RTIMER_VAL * 1000) / 272;

		device_config->vp_config[i].tti.urange_a = TTI_TX_URANGE_A;
		device_config->vp_config[i].tti.urange_b = TTI_TX_URANGE_B;
		device_config->vp_config[i].tti.urange_c = TTI_TX_URANGE_C;
		device_config->vp_config[i].tti.uec_a = TTI_TX_UFC_A;
		device_config->vp_config[i].tti.uec_b = TTI_TX_UFC_B;
		device_config->vp_config[i].tti.uec_c = TTI_TX_UFC_C;
		device_config->vp_config[i].tti.uec_d = TTI_TX_UFC_D;

		/* Configure Rx rings */
		device_config->vp_config[i].ring.enable =
						VXGE_HW_RING_ENABLE;

		device_config->vp_config[i].ring.ring_blocks =
						VXGE_HW_DEF_RING_BLOCKS;

		device_config->vp_config[i].ring.buffer_mode =
			VXGE_HW_RING_RXD_BUFFER_MODE_1;

		device_config->vp_config[i].ring.rxds_limit =
				VXGE_HW_DEF_RING_RXDS_LIMIT;

		device_config->vp_config[i].ring.scatter_mode =
					VXGE_HW_RING_SCATTER_MODE_A;

		/* Configure rti properties */
		device_config->vp_config[i].rti.intr_enable =
					VXGE_HW_TIM_INTR_ENABLE;

		device_config->vp_config[i].rti.btimer_val =
			(VXGE_RTI_BTIMER_VAL * 1000) / 272;

		device_config->vp_config[i].rti.timer_ac_en =
						VXGE_HW_TIM_TIMER_AC_ENABLE;

		device_config->vp_config[i].rti.timer_ci_en =
						VXGE_HW_TIM_TIMER_CI_DISABLE;

		device_config->vp_config[i].rti.timer_ri_en =
						VXGE_HW_TIM_TIMER_RI_DISABLE;

		device_config->vp_config[i].rti.util_sel =
				VXGE_HW_TIM_UTIL_SEL_LEGACY_RX_NET_UTIL;

		device_config->vp_config[i].rti.urange_a =
						RTI_RX_URANGE_A;
		device_config->vp_config[i].rti.urange_b =
						RTI_RX_URANGE_B;
		device_config->vp_config[i].rti.urange_c =
						RTI_RX_URANGE_C;
		device_config->vp_config[i].rti.uec_a = RTI_RX_UFC_A;
		device_config->vp_config[i].rti.uec_b = RTI_RX_UFC_B;
		device_config->vp_config[i].rti.uec_c = RTI_RX_UFC_C;
		device_config->vp_config[i].rti.uec_d = RTI_RX_UFC_D;

		device_config->vp_config[i].rti.rtimer_val =
			(VXGE_RTI_RTIMER_VAL * 1000) / 272;

		device_config->vp_config[i].rti.ltimer_val =
			(VXGE_RTI_LTIMER_VAL * 1000) / 272;

		device_config->vp_config[i].rpa_strip_vlan_tag =
			vlan_tag_strip;
	}

	driver_config->vpath_per_dev = temp;
	return no_of_vpaths;
}

/* initialize device configurations */
static void __devinit vxge_device_config_init(
				struct vxge_hw_device_config *device_config,
				int *intr_type)
{
	/* Used for CQRQ/SRQ. */
	device_config->dma_blockpool_initial =
			VXGE_HW_INITIAL_DMA_BLOCK_POOL_SIZE;

	device_config->dma_blockpool_max =
			VXGE_HW_MAX_DMA_BLOCK_POOL_SIZE;

	if (max_mac_vpath > VXGE_MAX_MAC_ADDR_COUNT)
		max_mac_vpath = VXGE_MAX_MAC_ADDR_COUNT;

#ifndef CONFIG_PCI_MSI
	vxge_debug_init(VXGE_ERR,
		"%s: This Kernel does not support "
		"MSI-X. Defaulting to INTA", VXGE_DRIVER_NAME);
	*intr_type = INTA;
#endif

	/* Configure whether MSI-X or IRQL. */
	switch (*intr_type) {
	case INTA:
		device_config->intr_mode = VXGE_HW_INTR_MODE_IRQLINE;
		break;

	case MSI_X:
		device_config->intr_mode = VXGE_HW_INTR_MODE_MSIX_ONE_SHOT;
		break;
	}

	/* Timer period between device poll */
	device_config->device_poll_millis = VXGE_TIMER_DELAY;

	/* Configure mac based steering. */
	device_config->rts_mac_en = addr_learn_en;

	/* Configure Vpaths */
	device_config->rth_it_type = VXGE_HW_RTH_IT_TYPE_MULTI_IT;

	vxge_debug_ll_config(VXGE_TRACE, "%s : Device Config Params ",
			__func__);
	vxge_debug_ll_config(VXGE_TRACE, "intr_mode : %d",
			device_config->intr_mode);
	vxge_debug_ll_config(VXGE_TRACE, "device_poll_millis : %d",
			device_config->device_poll_millis);
	vxge_debug_ll_config(VXGE_TRACE, "rth_en : %d",
			device_config->rth_en);
	vxge_debug_ll_config(VXGE_TRACE, "rth_it_type : %d",
			device_config->rth_it_type);
}

static void __devinit vxge_print_parm(struct vxgedev *vdev, u64 vpath_mask)
{
	int i;

	vxge_debug_init(VXGE_TRACE,
		"%s: %d Vpath(s) opened",
		vdev->ndev->name, vdev->no_of_vpath);

	switch (vdev->config.intr_type) {
	case INTA:
		vxge_debug_init(VXGE_TRACE,
			"%s: Interrupt type INTA", vdev->ndev->name);
		break;

	case MSI_X:
		vxge_debug_init(VXGE_TRACE,
			"%s: Interrupt type MSI-X", vdev->ndev->name);
		break;
	}

	if (vdev->config.rth_steering) {
		vxge_debug_init(VXGE_TRACE,
			"%s: RTH steering enabled for TCP_IPV4",
			vdev->ndev->name);
	} else {
		vxge_debug_init(VXGE_TRACE,
			"%s: RTH steering disabled", vdev->ndev->name);
	}

	switch (vdev->config.tx_steering_type) {
	case NO_STEERING:
		vxge_debug_init(VXGE_TRACE,
			"%s: Tx steering disabled", vdev->ndev->name);
		break;
	case TX_PRIORITY_STEERING:
		vxge_debug_init(VXGE_TRACE,
			"%s: Unsupported tx steering option",
			vdev->ndev->name);
		vxge_debug_init(VXGE_TRACE,
			"%s: Tx steering disabled", vdev->ndev->name);
		vdev->config.tx_steering_type = 0;
		break;
	case TX_VLAN_STEERING:
		vxge_debug_init(VXGE_TRACE,
			"%s: Unsupported tx steering option",
			vdev->ndev->name);
		vxge_debug_init(VXGE_TRACE,
			"%s: Tx steering disabled", vdev->ndev->name);
		vdev->config.tx_steering_type = 0;
		break;
	case TX_MULTIQ_STEERING:
		vxge_debug_init(VXGE_TRACE,
			"%s: Tx multiqueue steering enabled",
			vdev->ndev->name);
		break;
	case TX_PORT_STEERING:
		vxge_debug_init(VXGE_TRACE,
			"%s: Tx port steering enabled",
			vdev->ndev->name);
		break;
	default:
		vxge_debug_init(VXGE_ERR,
			"%s: Unsupported tx steering type",
			vdev->ndev->name);
		vxge_debug_init(VXGE_TRACE,
			"%s: Tx steering disabled", vdev->ndev->name);
		vdev->config.tx_steering_type = 0;
	}

	if (vdev->config.addr_learn_en)
		vxge_debug_init(VXGE_TRACE,
			"%s: MAC Address learning enabled", vdev->ndev->name);

	for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
		if (!vxge_bVALn(vpath_mask, i, 1))
			continue;
		vxge_debug_ll_config(VXGE_TRACE,
			"%s: MTU size - %d", vdev->ndev->name,
			((struct __vxge_hw_device *)(vdev->devh))->
				config.vp_config[i].mtu);
		vxge_debug_init(VXGE_TRACE,
			"%s: VLAN tag stripping %s", vdev->ndev->name,
			((struct __vxge_hw_device *)(vdev->devh))->
				config.vp_config[i].rpa_strip_vlan_tag
			? "Enabled" : "Disabled");
		vxge_debug_ll_config(VXGE_TRACE,
			"%s: Max frags : %d", vdev->ndev->name,
			((struct __vxge_hw_device *)(vdev->devh))->
				config.vp_config[i].fifo.max_frags);
		break;
	}
}

#ifdef CONFIG_PM
/**
 * vxge_pm_suspend - vxge power management suspend entry point
 *
 */
static int vxge_pm_suspend(struct pci_dev *pdev, pm_message_t state)
{
	return -ENOSYS;
}
/**
 * vxge_pm_resume - vxge power management resume entry point
 *
 */
static int vxge_pm_resume(struct pci_dev *pdev)
{
	return -ENOSYS;
}

#endif

/**
 * vxge_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t vxge_io_error_detected(struct pci_dev *pdev,
						pci_channel_state_t state)
{
	struct __vxge_hw_device *hldev = pci_get_drvdata(pdev);
	struct net_device *netdev = hldev->ndev;

	netif_device_detach(netdev);

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	if (netif_running(netdev)) {
		/* Bring down the card, while avoiding PCI I/O */
		do_vxge_close(netdev, 0);
	}

	pci_disable_device(pdev);

	return PCI_ERS_RESULT_NEED_RESET;
}

/**
 * vxge_io_slot_reset - called after the pci bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.
 * At this point, the card has experienced a hard reset,
 * followed by fixups by BIOS, and has its config space
 * set up identically to what it was at cold boot.
 */
static pci_ers_result_t vxge_io_slot_reset(struct pci_dev *pdev)
{
	struct __vxge_hw_device *hldev = pci_get_drvdata(pdev);
	struct net_device *netdev = hldev->ndev;

	struct vxgedev *vdev = netdev_priv(netdev);

	if (pci_enable_device(pdev)) {
		netdev_err(netdev, "Cannot re-enable device after reset\n");
		return PCI_ERS_RESULT_DISCONNECT;
	}

	pci_set_master(pdev);
	do_vxge_reset(vdev, VXGE_LL_FULL_RESET);

	return PCI_ERS_RESULT_RECOVERED;
}

/**
 * vxge_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells
 * us that it's OK to resume normal operation.
 */
static void vxge_io_resume(struct pci_dev *pdev)
{
	struct __vxge_hw_device *hldev = pci_get_drvdata(pdev);
	struct net_device *netdev = hldev->ndev;

	if (netif_running(netdev)) {
		if (vxge_open(netdev)) {
			netdev_err(netdev,
				   "Can't bring device back up after reset\n");
			return;
		}
	}

	netif_device_attach(netdev);
}

static inline u32 vxge_get_num_vfs(u64 function_mode)
{
	u32 num_functions = 0;

	switch (function_mode) {
	case VXGE_HW_FUNCTION_MODE_MULTI_FUNCTION:
	case VXGE_HW_FUNCTION_MODE_SRIOV_8:
		num_functions = 8;
		break;
	case VXGE_HW_FUNCTION_MODE_SINGLE_FUNCTION:
		num_functions = 1;
		break;
	case VXGE_HW_FUNCTION_MODE_SRIOV:
	case VXGE_HW_FUNCTION_MODE_MRIOV:
	case VXGE_HW_FUNCTION_MODE_MULTI_FUNCTION_17:
		num_functions = 17;
		break;
	case VXGE_HW_FUNCTION_MODE_SRIOV_4:
		num_functions = 4;
		break;
	case VXGE_HW_FUNCTION_MODE_MULTI_FUNCTION_2:
		num_functions = 2;
		break;
	case VXGE_HW_FUNCTION_MODE_MRIOV_8:
		num_functions = 8; /* TODO */
		break;
	}
	return num_functions;
}

int vxge_fw_upgrade(struct vxgedev *vdev, char *fw_name, int override)
{
	struct __vxge_hw_device *hldev = vdev->devh;
	u32 maj, min, bld, cmaj, cmin, cbld;
	enum vxge_hw_status status;
	const struct firmware *fw;
	int ret;

	ret = request_firmware(&fw, fw_name, &vdev->pdev->dev);
	if (ret) {
		vxge_debug_init(VXGE_ERR, "%s: Firmware file '%s' not found",
				VXGE_DRIVER_NAME, fw_name);
		goto out;
	}

	/* Load the new firmware onto the adapter */
	status = vxge_update_fw_image(hldev, fw->data, fw->size);
	if (status != VXGE_HW_OK) {
		vxge_debug_init(VXGE_ERR,
				"%s: FW image download to adapter failed '%s'.",
				VXGE_DRIVER_NAME, fw_name);
		ret = -EIO;
		goto out;
	}

	/* Read the version of the new firmware */
	status = vxge_hw_upgrade_read_version(hldev, &maj, &min, &bld);
	if (status != VXGE_HW_OK) {
		vxge_debug_init(VXGE_ERR,
				"%s: Upgrade read version failed '%s'.",
				VXGE_DRIVER_NAME, fw_name);
		ret = -EIO;
		goto out;
	}

	cmaj = vdev->config.device_hw_info.fw_version.major;
	cmin = vdev->config.device_hw_info.fw_version.minor;
	cbld = vdev->config.device_hw_info.fw_version.build;
	/* It's possible the version in /lib/firmware is not the latest version.
	 * If so, we could get into a loop of trying to upgrade to the latest
	 * and flashing the older version.
	 */
	if (VXGE_FW_VER(maj, min, bld) == VXGE_FW_VER(cmaj, cmin, cbld) &&
	    !override) {
		ret = -EINVAL;
		goto out;
	}

	printk(KERN_NOTICE "Upgrade to firmware version %d.%d.%d commencing\n",
	       maj, min, bld);

	/* Flash the adapter with the new firmware */
	status = vxge_hw_flash_fw(hldev);
	if (status != VXGE_HW_OK) {
		vxge_debug_init(VXGE_ERR, "%s: Upgrade commit failed '%s'.",
				VXGE_DRIVER_NAME, fw_name);
		ret = -EIO;
		goto out;
	}

	printk(KERN_NOTICE "Upgrade of firmware successful!  Adapter must be "
	       "hard reset before using, thus requiring a system reboot or a "
	       "hotplug event.\n");

out:
	release_firmware(fw);
	return ret;
}
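
/* vxge_fw_upgrade() also backs user-requested flashing; the usual trigger
 * is ethtool's flash operation (e.g. "ethtool -f ethN vxge/X3fw.ncf"),
 * assuming the wiring in vxge-ethtool.c, in addition to the automatic
 * probe-time check in vxge_probe_fw_update() below.
 */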

static int vxge_probe_fw_update(struct vxgedev *vdev)
{
	u32 maj, min, bld;
	int ret, gpxe = 0;
	char *fw_name;

	maj = vdev->config.device_hw_info.fw_version.major;
	min = vdev->config.device_hw_info.fw_version.minor;
	bld = vdev->config.device_hw_info.fw_version.build;

	if (VXGE_FW_VER(maj, min, bld) == VXGE_CERT_FW_VER)
		return 0;

	/* Ignore the build number when determining if the current firmware is
	 * "too new" to load the driver
	 */
	if (VXGE_FW_VER(maj, min, 0) > VXGE_CERT_FW_VER) {
		vxge_debug_init(VXGE_ERR, "%s: Firmware newer than last known "
				"version, unable to load driver\n",
				VXGE_DRIVER_NAME);
		return -EINVAL;
	}

	/* Firmware 1.4.4 and older cannot be upgraded, and is too ancient to
	 * work with this driver.
	 */
	if (VXGE_FW_VER(maj, min, bld) <= VXGE_FW_DEAD_VER) {
		vxge_debug_init(VXGE_ERR, "%s: Firmware %d.%d.%d cannot be "
				"upgraded\n", VXGE_DRIVER_NAME, maj, min, bld);
		return -EINVAL;
	}

	/* Pick the firmware file: use the gPXE variant if the adapter
	 * carries gPXE EPROM images
	 */
	if (VXGE_FW_VER(maj, min, bld) >= VXGE_EPROM_FW_VER) {
		int i;
		for (i = 0; i < VXGE_HW_MAX_ROM_IMAGES; i++)
			if (vdev->devh->eprom_versions[i]) {
				gpxe = 1;
				break;
			}
	}
	if (gpxe)
		fw_name = "vxge/X3fw-pxe.ncf";
	else
		fw_name = "vxge/X3fw.ncf";

	ret = vxge_fw_upgrade(vdev, fw_name, 0);
	/* -EINVAL and -ENOENT are not fatal errors for flashing firmware on
	 * probe, so ignore them.  A successful flash also aborts the probe
	 * here, since the adapter needs a hard reset before the new image
	 * is usable.
	 */
	if (ret != -EINVAL && ret != -ENOENT)
		return -EIO;
	else
		ret = 0;

	if (VXGE_FW_VER(VXGE_CERT_FW_VER_MAJOR, VXGE_CERT_FW_VER_MINOR, 0) >
	    VXGE_FW_VER(maj, min, 0)) {
		vxge_debug_init(VXGE_ERR, "%s: Firmware %d.%d.%d is too old to"
				" be used with this driver.\n"
				"Please get the latest version from "
				"ftp://ftp.s2io.com/pub/X3100-Drivers/FIRMWARE",
				VXGE_DRIVER_NAME, maj, min, bld);
		return -EINVAL;
	}

	return ret;
}
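
/* The version checks above depend on VXGE_FW_VER() packing major/minor/build
 * into one monotonically comparable integer (assumed definition in
 * vxge-main.h: ((maj) << 16) + ((min) << 8) + (bld)), so for example
 * VXGE_FW_VER(1, 8, 0) > VXGE_FW_VER(1, 4, 4).
 */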

static int __devinit is_sriov_initialized(struct pci_dev *pdev)
{
	int pos;
	u16 ctrl;

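	/* The VF Enable bit in the SR-IOV control register is sticky across
	 * driver reloads, so finding it set means SR-IOV was already brought
	 * up (by firmware, BIOS, or a previous load) and the probe path must
	 * not call pci_enable_sriov() again.
	 */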
	pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
	if (pos) {
		pci_read_config_word(pdev, pos + PCI_SRIOV_CTRL, &ctrl);
		if (ctrl & PCI_SRIOV_CTRL_VFE)
			return 1;
	}
	return 0;
}

/**
 * vxge_probe
 * @pdev : structure containing the PCI related information of the device.
 * @pre: List of PCI devices supported by the driver listed in vxge_id_table.
 * Description:
 * This function is called when a new PCI device gets detected and initializes
 * it.
 * Return value:
 * returns 0 on success and negative on failure.
 *
 */
static int __devinit
vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre)
{
	struct __vxge_hw_device *hldev;
	enum vxge_hw_status status;
	int ret;
	int high_dma = 0;
	u64 vpath_mask = 0;
	struct vxgedev *vdev;
	struct vxge_config *ll_config = NULL;
	struct vxge_hw_device_config *device_config = NULL;
	struct vxge_hw_device_attr attr;
	int i, j, no_of_vpath = 0, max_vpath_supported = 0;
	u8 *macaddr;
	struct vxge_mac_addrs *entry;
	static int bus = -1, device = -1;
	u32 host_type;
	u8 new_device = 0;
	enum vxge_hw_status is_privileged;
	u32 function_mode;
	u32 num_vfs = 0;

	vxge_debug_entryexit(VXGE_TRACE, "%s:%d", __func__, __LINE__);
	attr.pdev = pdev;

	/* In SRIOV-17 mode, functions of the same adapter
	 * can be deployed on different buses
	 */
	if (((bus != pdev->bus->number) || (device != PCI_SLOT(pdev->devfn))) &&
	    !pdev->is_virtfn)
		new_device = 1;

	bus = pdev->bus->number;
	device = PCI_SLOT(pdev->devfn);

	if (new_device) {
		if (driver_config->config_dev_cnt &&
		   (driver_config->config_dev_cnt !=
			driver_config->total_dev_cnt))
			vxge_debug_init(VXGE_ERR,
				"%s: Configured %d of %d devices",
				VXGE_DRIVER_NAME,
				driver_config->config_dev_cnt,
				driver_config->total_dev_cnt);
		driver_config->config_dev_cnt = 0;
		driver_config->total_dev_cnt = 0;
	}

	/* The CPU-based vpath-count calculation now applies to individual
	 * functions as well.
	 */
	driver_config->g_no_cpus = 0;
	driver_config->vpath_per_dev = max_config_vpath;

	driver_config->total_dev_cnt++;
	if (++driver_config->config_dev_cnt > max_config_dev) {
		ret = 0;
		goto _exit0;
	}

	device_config = kzalloc(sizeof(struct vxge_hw_device_config),
		GFP_KERNEL);
	if (!device_config) {
		ret = -ENOMEM;
		vxge_debug_init(VXGE_ERR,
			"device_config : malloc failed %s %d",
			__FILE__, __LINE__);
		goto _exit0;
	}

	ll_config = kzalloc(sizeof(struct vxge_config), GFP_KERNEL);
	if (!ll_config) {
		ret = -ENOMEM;
		vxge_debug_init(VXGE_ERR,
			"device_config : malloc failed %s %d",
			__FILE__, __LINE__);
		goto _exit0;
	}
	ll_config->tx_steering_type = TX_MULTIQ_STEERING;
	ll_config->intr_type = MSI_X;
	ll_config->napi_weight = NEW_NAPI_WEIGHT;
	ll_config->rth_steering = RTH_STEERING;

	/* get the default configuration parameters */
	vxge_hw_device_config_default_get(device_config);

	/* initialize configuration parameters */
	vxge_device_config_init(device_config, &ll_config->intr_type);

	ret = pci_enable_device(pdev);
	if (ret) {
		vxge_debug_init(VXGE_ERR,
			"%s : cannot enable PCI device", __func__);
		goto _exit0;
	}

	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
		vxge_debug_ll_config(VXGE_TRACE,
			"%s : using 64bit DMA", __func__);

		high_dma = 1;

		if (pci_set_consistent_dma_mask(pdev,
						DMA_BIT_MASK(64))) {
			vxge_debug_init(VXGE_ERR,
				"%s : unable to obtain 64bit DMA for "
				"consistent allocations", __func__);
			ret = -ENOMEM;
			goto _exit1;
		}
	} else if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) {
		vxge_debug_ll_config(VXGE_TRACE,
			"%s : using 32bit DMA", __func__);
	} else {
		ret = -ENOMEM;
		goto _exit1;
	}

	ret = pci_request_region(pdev, 0, VXGE_DRIVER_NAME);
	if (ret) {
		vxge_debug_init(VXGE_ERR,
			"%s : request regions failed", __func__);
		goto _exit1;
	}

	pci_set_master(pdev);

	attr.bar0 = pci_ioremap_bar(pdev, 0);
	if (!attr.bar0) {
		vxge_debug_init(VXGE_ERR,
			"%s : cannot remap io memory bar0", __func__);
		ret = -ENODEV;
		goto _exit2;
	}
	vxge_debug_ll_config(VXGE_TRACE,
		"pci ioremap bar0: %p:0x%llx",
		attr.bar0,
		(unsigned long long)pci_resource_start(pdev, 0));

	status = vxge_hw_device_hw_info_get(attr.bar0,
			&ll_config->device_hw_info);
	if (status != VXGE_HW_OK) {
		vxge_debug_init(VXGE_ERR,
			"%s: Reading of hardware info failed. "
			"Please try upgrading the firmware.", VXGE_DRIVER_NAME);
		ret = -EINVAL;
		goto _exit3;
	}

	vpath_mask = ll_config->device_hw_info.vpath_mask;
	if (vpath_mask == 0) {
		vxge_debug_ll_config(VXGE_TRACE,
			"%s: No vpaths available in device", VXGE_DRIVER_NAME);
		ret = -EINVAL;
		goto _exit3;
	}

	vxge_debug_ll_config(VXGE_TRACE,
		"%s:%d  Vpath mask = %llx", __func__, __LINE__,
		(unsigned long long)vpath_mask);

	function_mode = ll_config->device_hw_info.function_mode;
	host_type = ll_config->device_hw_info.host_type;
	is_privileged = __vxge_hw_device_is_privilaged(host_type,
		ll_config->device_hw_info.func_id);

	/* Check how many vpaths are available */
	for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
		if (!((vpath_mask) & vxge_mBIT(i)))
			continue;
		max_vpath_supported++;
	}

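	/* vxge_get_num_vfs() returns the total function count for this
	 * function mode; one of those functions is the PF itself, hence
	 * the "- 1" when sizing num_vfs.
	 */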
Sreenivasa Honnurcb27ec62010-04-08 01:48:57 -07004536 if (new_device)
4537 num_vfs = vxge_get_num_vfs(function_mode) - 1;
4538
Sivakumar Subramani5dbc9012009-06-16 18:48:55 +00004539 /* Enable SRIOV mode, if firmware has SRIOV support and if it is a PF */
Jon Masonc92bf702010-12-10 14:02:57 +00004540 if (is_sriov(function_mode) && !is_sriov_initialized(pdev) &&
4541 (ll_config->intr_type != INTA)) {
4542 ret = pci_enable_sriov(pdev, num_vfs);
Sreenivasa Honnurcb27ec62010-04-08 01:48:57 -07004543 if (ret)
4544 vxge_debug_ll_config(VXGE_ERR,
4545 "Failed in enabling SRIOV mode: %d\n", ret);
Jon Masonc92bf702010-12-10 14:02:57 +00004546 /* No need to fail out, as an error here is non-fatal */
Sivakumar Subramani5dbc9012009-06-16 18:48:55 +00004547 }
4548
Ramkrishna Vepa703da5a2009-04-01 18:15:13 +00004549 /*
4550 * Configure vpaths and get driver configured number of vpaths
4551 * which is less than or equal to the maximum vpaths per function.
4552 */
Prarit Bhargava7dad1712010-06-02 05:51:19 -07004553 no_of_vpath = vxge_config_vpaths(device_config, vpath_mask, ll_config);
Ramkrishna Vepa703da5a2009-04-01 18:15:13 +00004554 if (!no_of_vpath) {
4555 vxge_debug_ll_config(VXGE_ERR,
4556 "%s: No more vpaths to configure", VXGE_DRIVER_NAME);
4557 ret = 0;
Sreenivasa Honnur7975d1e2009-07-01 21:12:23 +00004558 goto _exit3;
Ramkrishna Vepa703da5a2009-04-01 18:15:13 +00004559 }
4560
4561 /* Setting driver callbacks */
4562 attr.uld_callbacks.link_up = vxge_callback_link_up;
4563 attr.uld_callbacks.link_down = vxge_callback_link_down;
4564 attr.uld_callbacks.crit_err = vxge_callback_crit_err;
4565
4566 status = vxge_hw_device_initialize(&hldev, &attr, device_config);
4567 if (status != VXGE_HW_OK) {
4568 vxge_debug_init(VXGE_ERR,
4569 "Failed to initialize device (%d)", status);
4570 ret = -EINVAL;
Sreenivasa Honnur7975d1e2009-07-01 21:12:23 +00004571 goto _exit3;
Ramkrishna Vepa703da5a2009-04-01 18:15:13 +00004572 }
4573
Jon Masone8ac1752010-11-11 04:25:57 +00004574 if (VXGE_FW_VER(ll_config->device_hw_info.fw_version.major,
4575 ll_config->device_hw_info.fw_version.minor,
4576 ll_config->device_hw_info.fw_version.build) >=
4577 VXGE_EPROM_FW_VER) {
4578 struct eprom_image img[VXGE_HW_MAX_ROM_IMAGES];
4579
4580 status = vxge_hw_vpath_eprom_img_ver_get(hldev, img);
4581 if (status != VXGE_HW_OK) {
4582 vxge_debug_init(VXGE_ERR, "%s: Reading of EPROM failed",
4583 VXGE_DRIVER_NAME);
4584 /* This is a non-fatal error, continue */
4585 }
4586
4587 for (i = 0; i < VXGE_HW_MAX_ROM_IMAGES; i++) {
4588 hldev->eprom_versions[i] = img[i].version;
4589 if (!img[i].is_valid)
4590 break;
4591 vxge_debug_init(VXGE_TRACE, "%s: EPROM %d, version "
Jon Mason1d15f812011-01-18 15:02:20 +00004592 "%d.%d.%d.%d", VXGE_DRIVER_NAME, i,
Jon Masone8ac1752010-11-11 04:25:57 +00004593 VXGE_EPROM_IMG_MAJOR(img[i].version),
4594 VXGE_EPROM_IMG_MINOR(img[i].version),
4595 VXGE_EPROM_IMG_FIX(img[i].version),
4596 VXGE_EPROM_IMG_BUILD(img[i].version));
4597 }
4598 }
4599
Sreenivasa Honnurfa41fd12009-10-05 01:56:35 +00004600 /* if FCS stripping is not disabled in MAC fail driver load */
Jon Masonb81b3732010-11-11 04:25:58 +00004601 status = vxge_hw_vpath_strip_fcs_check(hldev, vpath_mask);
4602 if (status != VXGE_HW_OK) {
4603		vxge_debug_init(VXGE_ERR, "%s: FCS stripping is enabled in MAC,"
4604			" failing driver load", VXGE_DRIVER_NAME);
Sreenivasa Honnurfa41fd12009-10-05 01:56:35 +00004605 ret = -EINVAL;
4606 goto _exit4;
4607 }
4608
Jon Masoncd883a72011-04-08 11:11:21 +00004609	/* Always enable HWTS. This will always cause the FCS to be invalid,
4610	 * because HWTS uses the FCS as the location of the timestamp. The HW
4611	 * FCS checking will still correctly determine if there is a valid
4612	 * checksum, and the FCS is removed by the driver anyway, so no
4613	 * functionality is lost. Since HWTS is always enabled, we simply use
4614	 * the ioctl call to set whether or not the driver should pay
4615	 * attention to it.
4616	 */
4617 if (is_privileged == VXGE_HW_OK) {
4618 status = vxge_timestamp_config(hldev);
4619 if (status != VXGE_HW_OK) {
4620 vxge_debug_init(VXGE_ERR, "%s: HWTS enable failed",
4621 VXGE_DRIVER_NAME);
4622 ret = -EFAULT;
4623 goto _exit4;
4624 }
4625 }
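	/* The ioctl mentioned in the comment above is presumably the standard
	 * SIOCSHWTSTAMP request carrying a struct hwtstamp_config (see
	 * linux/net_tstamp.h, included by this file). A minimal handler of
	 * that shape, with illustrative names only, might look like:
	 *
	 *	static int vxge_hwtstamp_set(struct net_device *dev,
	 *				     struct ifreq *rq)
	 *	{
	 *		struct hwtstamp_config config;
	 *
	 *		if (copy_from_user(&config, rq->ifr_data,
	 *				   sizeof(config)))
	 *			return -EFAULT;
	 *
	 *		// decide, per config.rx_filter, whether received
	 *		// HWTS values should be reported to the stack
	 *
	 *		if (copy_to_user(rq->ifr_data, &config,
	 *				 sizeof(config)))
	 *			return -EFAULT;
	 *		return 0;
	 *	}
	 */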
4626
Ramkrishna Vepa703da5a2009-04-01 18:15:13 +00004627 vxge_hw_device_debug_set(hldev, VXGE_ERR, VXGE_COMPONENT_LL);
4628
4629 /* set private device info */
4630 pci_set_drvdata(pdev, hldev);
4631
Prarit Bhargava7dad1712010-06-02 05:51:19 -07004632 ll_config->fifo_indicate_max_pkts = VXGE_FIFO_INDICATE_MAX_PKTS;
4633 ll_config->addr_learn_en = addr_learn_en;
4634 ll_config->rth_algorithm = RTH_ALG_JENKINS;
Jon Mason47f01db2010-11-11 04:25:53 +00004635 ll_config->rth_hash_type_tcpipv4 = 1;
4636 ll_config->rth_hash_type_ipv4 = 0;
4637 ll_config->rth_hash_type_tcpipv6 = 0;
4638 ll_config->rth_hash_type_ipv6 = 0;
4639 ll_config->rth_hash_type_tcpipv6ex = 0;
4640 ll_config->rth_hash_type_ipv6ex = 0;
Prarit Bhargava7dad1712010-06-02 05:51:19 -07004641 ll_config->rth_bkt_sz = RTH_BUCKET_SIZE;
4642 ll_config->tx_pause_enable = VXGE_PAUSE_CTRL_ENABLE;
4643 ll_config->rx_pause_enable = VXGE_PAUSE_CTRL_ENABLE;
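	/* RTH (Receive Traffic Hashing) is configured above to hash only
	 * TCP/IPv4 flows with the Jenkins algorithm; hash buckets are then
	 * mapped onto the configured vpaths via vpath_selector below.
	 * Conceptually (assuming rth_bkt_sz is the log2 of the bucket count,
	 * as the name suggests):
	 *
	 *	bucket = jenkins_hash(saddr, daddr, sport, dport) &
	 *		 ((1 << rth_bkt_sz) - 1);
	 */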
Ramkrishna Vepa703da5a2009-04-01 18:15:13 +00004644
Jon Masone8ac1752010-11-11 04:25:57 +00004645 ret = vxge_device_register(hldev, ll_config, high_dma, no_of_vpath,
4646 &vdev);
4647 if (ret) {
Ramkrishna Vepa703da5a2009-04-01 18:15:13 +00004648 ret = -EINVAL;
Sreenivasa Honnur7975d1e2009-07-01 21:12:23 +00004649 goto _exit4;
Ramkrishna Vepa703da5a2009-04-01 18:15:13 +00004650 }
4651
Jon Masone8ac1752010-11-11 04:25:57 +00004652 ret = vxge_probe_fw_update(vdev);
4653 if (ret)
4654 goto _exit5;
4655
Ramkrishna Vepa703da5a2009-04-01 18:15:13 +00004656 vxge_hw_device_debug_set(hldev, VXGE_TRACE, VXGE_COMPONENT_LL);
4657 VXGE_COPY_DEBUG_INFO_TO_LL(vdev, vxge_hw_device_error_level_get(hldev),
4658 vxge_hw_device_trace_level_get(hldev));
4659
4660 /* set private HW device info */
Ramkrishna Vepa703da5a2009-04-01 18:15:13 +00004661 vdev->mtu = VXGE_HW_DEFAULT_MTU;
4662 vdev->bar0 = attr.bar0;
Ramkrishna Vepa703da5a2009-04-01 18:15:13 +00004663 vdev->max_vpath_supported = max_vpath_supported;
4664 vdev->no_of_vpath = no_of_vpath;
4665
4666 /* Virtual Path count */
4667 for (i = 0, j = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
4668 if (!vxge_bVALn(vpath_mask, i, 1))
4669 continue;
4670 if (j >= vdev->no_of_vpath)
4671 break;
4672
4673 vdev->vpaths[j].is_configured = 1;
4674 vdev->vpaths[j].device_id = i;
Ramkrishna Vepa703da5a2009-04-01 18:15:13 +00004675 vdev->vpaths[j].ring.driver_id = j;
4676 vdev->vpaths[j].vdev = vdev;
4677 vdev->vpaths[j].max_mac_addr_cnt = max_mac_vpath;
4678 memcpy((u8 *)vdev->vpaths[j].macaddr,
Prarit Bhargava7dad1712010-06-02 05:51:19 -07004679 ll_config->device_hw_info.mac_addrs[i],
Ramkrishna Vepa703da5a2009-04-01 18:15:13 +00004680 ETH_ALEN);
4681
4682 /* Initialize the mac address list header */
4683 INIT_LIST_HEAD(&vdev->vpaths[j].mac_addr_list);
4684
4685 vdev->vpaths[j].mac_addr_cnt = 0;
4686 vdev->vpaths[j].mcast_addr_cnt = 0;
4687 j++;
4688 }
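	/* Worked example of the mapping above: with bits 0, 3 and 5 set in
	 * vpath_mask and no_of_vpath == 2, vpaths[0] gets device_id 0 and
	 * vpaths[1] gets device_id 3; bit 5 is skipped once j reaches
	 * no_of_vpath.
	 */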
4689 vdev->exec_mode = VXGE_EXEC_MODE_DISABLE;
4690 vdev->max_config_port = max_config_port;
4691
4692 vdev->vlan_tag_strip = vlan_tag_strip;
4693
4694 /* map the hashing selector table to the configured vpaths */
4695 for (i = 0; i < vdev->no_of_vpath; i++)
4696 vdev->vpath_selector[i] = vpath_selector[i];
4697
4698 macaddr = (u8 *)vdev->vpaths[0].macaddr;
4699
Prarit Bhargava7dad1712010-06-02 05:51:19 -07004700 ll_config->device_hw_info.serial_number[VXGE_HW_INFO_LEN - 1] = '\0';
4701 ll_config->device_hw_info.product_desc[VXGE_HW_INFO_LEN - 1] = '\0';
4702 ll_config->device_hw_info.part_number[VXGE_HW_INFO_LEN - 1] = '\0';
Ramkrishna Vepa703da5a2009-04-01 18:15:13 +00004703
4704 vxge_debug_init(VXGE_TRACE, "%s: SERIAL NUMBER: %s",
Prarit Bhargava7dad1712010-06-02 05:51:19 -07004705 vdev->ndev->name, ll_config->device_hw_info.serial_number);
Ramkrishna Vepa703da5a2009-04-01 18:15:13 +00004706
4707 vxge_debug_init(VXGE_TRACE, "%s: PART NUMBER: %s",
Prarit Bhargava7dad1712010-06-02 05:51:19 -07004708 vdev->ndev->name, ll_config->device_hw_info.part_number);
Ramkrishna Vepa703da5a2009-04-01 18:15:13 +00004709
4710 vxge_debug_init(VXGE_TRACE, "%s: Neterion %s Server Adapter",
Prarit Bhargava7dad1712010-06-02 05:51:19 -07004711 vdev->ndev->name, ll_config->device_hw_info.product_desc);
Ramkrishna Vepa703da5a2009-04-01 18:15:13 +00004712
hartleysbf54e732010-01-05 06:59:23 +00004713 vxge_debug_init(VXGE_TRACE, "%s: MAC ADDR: %pM",
4714 vdev->ndev->name, macaddr);
Ramkrishna Vepa703da5a2009-04-01 18:15:13 +00004715
4716 vxge_debug_init(VXGE_TRACE, "%s: Link Width x%d",
4717 vdev->ndev->name, vxge_hw_device_link_width_get(hldev));
4718
4719 vxge_debug_init(VXGE_TRACE,
4720 "%s: Firmware version : %s Date : %s", vdev->ndev->name,
Prarit Bhargava7dad1712010-06-02 05:51:19 -07004721 ll_config->device_hw_info.fw_version.version,
4722 ll_config->device_hw_info.fw_date.date);
Ramkrishna Vepa703da5a2009-04-01 18:15:13 +00004723
Sreenivasa Honnur0a25bdc2009-07-01 21:18:06 +00004724 if (new_device) {
Prarit Bhargava7dad1712010-06-02 05:51:19 -07004725 switch (ll_config->device_hw_info.function_mode) {
Sreenivasa Honnur0a25bdc2009-07-01 21:18:06 +00004726 case VXGE_HW_FUNCTION_MODE_SINGLE_FUNCTION:
4727 vxge_debug_init(VXGE_TRACE,
4728 "%s: Single Function Mode Enabled", vdev->ndev->name);
4729 break;
4730 case VXGE_HW_FUNCTION_MODE_MULTI_FUNCTION:
4731 vxge_debug_init(VXGE_TRACE,
4732 "%s: Multi Function Mode Enabled", vdev->ndev->name);
4733 break;
4734 case VXGE_HW_FUNCTION_MODE_SRIOV:
4735 vxge_debug_init(VXGE_TRACE,
4736 "%s: Single Root IOV Mode Enabled", vdev->ndev->name);
4737 break;
4738 case VXGE_HW_FUNCTION_MODE_MRIOV:
4739 vxge_debug_init(VXGE_TRACE,
4740 "%s: Multi Root IOV Mode Enabled", vdev->ndev->name);
4741 break;
4742 }
4743 }
4744
Ramkrishna Vepa703da5a2009-04-01 18:15:13 +00004745 vxge_print_parm(vdev, vpath_mask);
4746
4747	/* Store the fw version for the ethtool option */
Prarit Bhargava7dad1712010-06-02 05:51:19 -07004748 strcpy(vdev->fw_version, ll_config->device_hw_info.fw_version.version);
Ramkrishna Vepa703da5a2009-04-01 18:15:13 +00004749 memcpy(vdev->ndev->dev_addr, (u8 *)vdev->vpaths[0].macaddr, ETH_ALEN);
4750 memcpy(vdev->ndev->perm_addr, vdev->ndev->dev_addr, ETH_ALEN);
4751
4752 /* Copy the station mac address to the list */
4753 for (i = 0; i < vdev->no_of_vpath; i++) {
Joe Perchese80be0b2010-11-27 23:05:45 +00004754 entry = kzalloc(sizeof(struct vxge_mac_addrs), GFP_KERNEL);
Ramkrishna Vepa703da5a2009-04-01 18:15:13 +00004755		if (!entry) {
4756 vxge_debug_init(VXGE_ERR,
4757 "%s: mac_addr_list : memory allocation failed",
4758 vdev->ndev->name);
4759			ret = -ENOMEM;
Jon Masone8ac1752010-11-11 04:25:57 +00004760 goto _exit6;
Ramkrishna Vepa703da5a2009-04-01 18:15:13 +00004761 }
4762 macaddr = (u8 *)&entry->macaddr;
4763 memcpy(macaddr, vdev->ndev->dev_addr, ETH_ALEN);
4764 list_add(&entry->item, &vdev->vpaths[i].mac_addr_list);
4765 vdev->vpaths[i].mac_addr_cnt = 1;
4766 }
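	/* The entries list_add()ed above are presumably unwound by
	 * vxge_free_mac_add_list() in the usual way; a sketch of such a
	 * teardown with the standard list helpers:
	 *
	 *	struct vxge_mac_addrs *entry, *next;
	 *
	 *	list_for_each_entry_safe(entry, next,
	 *				 &vpath->mac_addr_list, item) {
	 *		list_del(&entry->item);
	 *		kfree(entry);
	 *	}
	 */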
4767
Sreenivasa Honnur914d0d72009-07-01 21:13:12 +00004768 kfree(device_config);
Sreenivasa Honnureb5f10c2009-10-05 01:57:29 +00004769
4770 /*
4771 * INTA is shared in multi-function mode. This is unlike the INTA
4772 * implementation in MR mode, where each VH has its own INTA message.
4773 * - INTA is masked (disabled) as long as at least one function sets
4774 * its TITAN_MASK_ALL_INT.ALARM bit.
4775 * - INTA is unmasked (enabled) when all enabled functions have cleared
4776 * their own TITAN_MASK_ALL_INT.ALARM bit.
4777 * The TITAN_MASK_ALL_INT ALARM & TRAFFIC bits are cleared on power up.
4778 * Though this driver leaves the top level interrupts unmasked while
4779 * leaving the required module interrupt bits masked on exit, there
4780	 * could be a rogue driver around that does not follow this procedure,
4781 * resulting in a failure to generate interrupts. The following code is
4782 * present to prevent such a failure.
4783 */
4784
Prarit Bhargava7dad1712010-06-02 05:51:19 -07004785 if (ll_config->device_hw_info.function_mode ==
Sreenivasa Honnureb5f10c2009-10-05 01:57:29 +00004786 VXGE_HW_FUNCTION_MODE_MULTI_FUNCTION)
4787 if (vdev->config.intr_type == INTA)
4788 vxge_hw_device_unmask_all(hldev);
4789
Ramkrishna Vepa703da5a2009-04-01 18:15:13 +00004790 vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d Exiting...",
4791 vdev->ndev->name, __func__, __LINE__);
4792
4793 vxge_hw_device_debug_set(hldev, VXGE_ERR, VXGE_COMPONENT_LL);
4794 VXGE_COPY_DEBUG_INFO_TO_LL(vdev, vxge_hw_device_error_level_get(hldev),
4795 vxge_hw_device_trace_level_get(hldev));
4796
Prarit Bhargava7dad1712010-06-02 05:51:19 -07004797 kfree(ll_config);
Ramkrishna Vepa703da5a2009-04-01 18:15:13 +00004798 return 0;
4799
Jon Masone8ac1752010-11-11 04:25:57 +00004800_exit6:
Ramkrishna Vepa703da5a2009-04-01 18:15:13 +00004801 for (i = 0; i < vdev->no_of_vpath; i++)
4802 vxge_free_mac_add_list(&vdev->vpaths[i]);
Jon Masone8ac1752010-11-11 04:25:57 +00004803_exit5:
Ramkrishna Vepa703da5a2009-04-01 18:15:13 +00004804 vxge_device_unregister(hldev);
Sreenivasa Honnur7975d1e2009-07-01 21:12:23 +00004805_exit4:
Jon Mason6cca2002011-01-18 15:02:19 +00004806 pci_set_drvdata(pdev, NULL);
Ramkrishna Vepa703da5a2009-04-01 18:15:13 +00004807 vxge_hw_device_terminate(hldev);
Jon Mason6cca2002011-01-18 15:02:19 +00004808 pci_disable_sriov(pdev);
Ramkrishna Vepa703da5a2009-04-01 18:15:13 +00004809_exit3:
4810 iounmap(attr.bar0);
4811_exit2:
Jon Masondc66daa2010-12-10 14:02:58 +00004812 pci_release_region(pdev, 0);
Ramkrishna Vepa703da5a2009-04-01 18:15:13 +00004813_exit1:
4814 pci_disable_device(pdev);
4815_exit0:
Prarit Bhargava7dad1712010-06-02 05:51:19 -07004816 kfree(ll_config);
Ramkrishna Vepa703da5a2009-04-01 18:15:13 +00004817 kfree(device_config);
4818 driver_config->config_dev_cnt--;
Jon Mason6cca2002011-01-18 15:02:19 +00004819 driver_config->total_dev_cnt--;
Ramkrishna Vepa703da5a2009-04-01 18:15:13 +00004820 return ret;
4821}
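/* Summary of the probe error-unwind ladder above, in LIFO order:
 *	_exit6: free the per-vpath MAC address lists
 *	_exit5: unregister the net device
 *	_exit4: clear drvdata, terminate the HW device, disable SRIOV
 *	_exit3: iounmap BAR0
 *	_exit2: release PCI region 0
 *	_exit1: disable the PCI device
 *	_exit0: free ll_config/device_config and drop the device counts
 */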
4822
4823/**
4824 * vxge_remove - Free the PCI device
4825 * @pdev: structure containing the PCI-related information of the device.
4826 * Description: This function is called by the PCI subsystem to release a
4827 * PCI device and free up all resources held by the device.
4828 */
Jon Mason2c913082010-11-11 04:26:03 +00004829static void __devexit vxge_remove(struct pci_dev *pdev)
Ramkrishna Vepa703da5a2009-04-01 18:15:13 +00004830{
Jon Mason2c913082010-11-11 04:26:03 +00004831 struct __vxge_hw_device *hldev;
Jon Mason6cca2002011-01-18 15:02:19 +00004832 struct vxgedev *vdev;
4833 int i;
Ramkrishna Vepa703da5a2009-04-01 18:15:13 +00004834
Joe Perchesd8ee7072010-11-15 10:13:58 +00004835 hldev = pci_get_drvdata(pdev);
Ramkrishna Vepa703da5a2009-04-01 18:15:13 +00004836 if (hldev == NULL)
4837 return;
Jon Mason2c913082010-11-11 04:26:03 +00004838
Jon Mason6cca2002011-01-18 15:02:19 +00004839 vdev = netdev_priv(hldev->ndev);
Ramkrishna Vepa703da5a2009-04-01 18:15:13 +00004840
Jon Mason2c913082010-11-11 04:26:03 +00004841 vxge_debug_entryexit(vdev->level_trace, "%s:%d", __func__, __LINE__);
Jon Mason2c913082010-11-11 04:26:03 +00004842	vxge_debug_init(vdev->level_trace, "%s: removing PCI device...",
4843 __func__);
Ramkrishna Vepa703da5a2009-04-01 18:15:13 +00004844
Jon Mason6cca2002011-01-18 15:02:19 +00004845 for (i = 0; i < vdev->no_of_vpath; i++)
Ramkrishna Vepa703da5a2009-04-01 18:15:13 +00004846 vxge_free_mac_add_list(&vdev->vpaths[i]);
Ramkrishna Vepa703da5a2009-04-01 18:15:13 +00004847
Jon Mason6cca2002011-01-18 15:02:19 +00004848 vxge_device_unregister(hldev);
4849 pci_set_drvdata(pdev, NULL);
4850 /* Do not call pci_disable_sriov here, as it will break child devices */
4851 vxge_hw_device_terminate(hldev);
Ramkrishna Vepa703da5a2009-04-01 18:15:13 +00004852 iounmap(vdev->bar0);
Jon Mason6cca2002011-01-18 15:02:19 +00004853 pci_release_region(pdev, 0);
4854 pci_disable_device(pdev);
4855 driver_config->config_dev_cnt--;
4856 driver_config->total_dev_cnt--;
Ramkrishna Vepa703da5a2009-04-01 18:15:13 +00004857
Jon Mason2c913082010-11-11 04:26:03 +00004858 vxge_debug_init(vdev->level_trace, "%s:%d Device unregistered",
4859 __func__, __LINE__);
Jon Mason2c913082010-11-11 04:26:03 +00004860 vxge_debug_entryexit(vdev->level_trace, "%s:%d Exiting...", __func__,
4861 __LINE__);
Ramkrishna Vepa703da5a2009-04-01 18:15:13 +00004862}
4863
4864static struct pci_error_handlers vxge_err_handler = {
4865 .error_detected = vxge_io_error_detected,
4866 .slot_reset = vxge_io_slot_reset,
4867 .resume = vxge_io_resume,
4868};
4869
4870static struct pci_driver vxge_driver = {
4871 .name = VXGE_DRIVER_NAME,
4872 .id_table = vxge_id_table,
4873 .probe = vxge_probe,
4874 .remove = __devexit_p(vxge_remove),
4875#ifdef CONFIG_PM
4876 .suspend = vxge_pm_suspend,
4877 .resume = vxge_pm_resume,
4878#endif
4879 .err_handler = &vxge_err_handler,
4880};
4881
4882static int __init
4883vxge_starter(void)
4884{
4885 int ret = 0;
Ramkrishna Vepa703da5a2009-04-01 18:15:13 +00004886
Joe Perches75f5e1c2010-07-27 11:47:03 +00004887 pr_info("Copyright(c) 2002-2010 Exar Corp.\n");
4888 pr_info("Driver version: %s\n", DRV_VERSION);
Ramkrishna Vepa703da5a2009-04-01 18:15:13 +00004889
4890 verify_bandwidth();
4891
4892 driver_config = kzalloc(sizeof(struct vxge_drv_config), GFP_KERNEL);
4893 if (!driver_config)
4894 return -ENOMEM;
4895
4896 ret = pci_register_driver(&vxge_driver);
Jon Mason528f7272010-12-10 14:02:56 +00004897 if (ret) {
4898 kfree(driver_config);
4899 goto err;
4900 }
Ramkrishna Vepa703da5a2009-04-01 18:15:13 +00004901
4902 if (driver_config->config_dev_cnt &&
4903 (driver_config->config_dev_cnt != driver_config->total_dev_cnt))
4904 vxge_debug_init(VXGE_ERR,
4905 "%s: Configured %d of %d devices",
4906 VXGE_DRIVER_NAME, driver_config->config_dev_cnt,
4907 driver_config->total_dev_cnt);
Jon Mason528f7272010-12-10 14:02:56 +00004908err:
Ramkrishna Vepa703da5a2009-04-01 18:15:13 +00004909 return ret;
4910}
4911
4912static void __exit
4913vxge_closer(void)
4914{
4915 pci_unregister_driver(&vxge_driver);
4916 kfree(driver_config);
4917}
4918module_init(vxge_starter);
4919module_exit(vxge_closer);