/******************************************************************************
 * This software may be used and distributed according to the terms of
 * the GNU General Public License (GPL), incorporated herein by reference.
 * Drivers based on or derived from this code fall under the GPL and must
 * retain the authorship, copyright and license notice. This file is not
 * a complete program and may only be used when the entire operating
 * system is licensed under the GPL.
 * See the file COPYING in this distribution for more information.
 *
 * vxge-main.h: Driver for Neterion Inc's X3100 Series 10GbE PCIe I/O
 *              Virtualized Server Adapter.
 * Copyright(c) 2002-2009 Neterion Inc.
 ******************************************************************************/
#ifndef VXGE_MAIN_H
#define VXGE_MAIN_H

#include "vxge-traffic.h"
#include "vxge-config.h"
#include "vxge-version.h"
#include <linux/list.h>

#define VXGE_DRIVER_NAME		"vxge"
#define VXGE_DRIVER_VENDOR		"Neterion, Inc"
#define VXGE_DRIVER_FW_VERSION_MAJOR	1

#define DRV_VERSION	VXGE_VERSION_MAJOR"."VXGE_VERSION_MINOR"."\
	VXGE_VERSION_FIX"."VXGE_VERSION_BUILD"-"\
	VXGE_VERSION_FOR

#define PCI_DEVICE_ID_TITAN_WIN		0x5733
#define PCI_DEVICE_ID_TITAN_UNI		0x5833
#define VXGE_USE_DEFAULT		0xffffffff
#define VXGE_HW_VPATH_MSIX_ACTIVE	4
#define VXGE_HW_RXSYNC_FREQ_CNT		4
#define VXGE_LL_WATCH_DOG_TIMEOUT	(15 * HZ)
#define VXGE_LL_RX_COPY_THRESHOLD	256
#define VXGE_DEF_FIFO_LENGTH		84

#define NO_STEERING		0
#define PORT_STEERING		0x1
#define RTH_STEERING		0x2
#define RX_TOS_STEERING		0x3
#define RX_VLAN_STEERING	0x4
#define RTH_BUCKET_SIZE		4

#define TX_PRIORITY_STEERING	1
#define TX_VLAN_STEERING	2
#define TX_PORT_STEERING	3
#define TX_MULTIQ_STEERING	4

#define VXGE_HW_MAC_ADDR_LEARN_DEFAULT VXGE_HW_RTS_MAC_DISABLE

#define VXGE_TTI_BTIMER_VAL	250000

#define VXGE_TTI_LTIMER_VAL	1000
#define VXGE_TTI_RTIMER_VAL	0
#define VXGE_RTI_BTIMER_VAL	250
#define VXGE_RTI_LTIMER_VAL	100
#define VXGE_RTI_RTIMER_VAL	0
#define VXGE_FIFO_INDICATE_MAX_PKTS	VXGE_DEF_FIFO_LENGTH
#define VXGE_ISR_POLLING_CNT	8
#define VXGE_MAX_CONFIG_DEV	0xFF
#define VXGE_EXEC_MODE_DISABLE	0
#define VXGE_EXEC_MODE_ENABLE	1
#define VXGE_MAX_CONFIG_PORT	1
#define VXGE_ALL_VID_DISABLE	0
#define VXGE_ALL_VID_ENABLE	1
#define VXGE_PAUSE_CTRL_DISABLE	0
#define VXGE_PAUSE_CTRL_ENABLE	1

#define TTI_TX_URANGE_A	5
#define TTI_TX_URANGE_B	15
#define TTI_TX_URANGE_C	40
#define TTI_TX_UFC_A	5
#define TTI_TX_UFC_B	40
#define TTI_TX_UFC_C	60
#define TTI_TX_UFC_D	100

#define RTI_RX_URANGE_A	5
#define RTI_RX_URANGE_B	15
#define RTI_RX_URANGE_C	40
#define RTI_RX_UFC_A	1
#define RTI_RX_UFC_B	5
#define RTI_RX_UFC_C	10
#define RTI_RX_UFC_D	15
/* Timer period in milliseconds */
#define VXGE_TIMER_DELAY	10000

#define VXGE_LL_MAX_FRAME_SIZE(dev) ((dev)->mtu + VXGE_HW_MAC_HEADER_MAX_SIZE)

enum vxge_reset_event {
        /* reset events */
        VXGE_LL_VPATH_RESET = 0,
        VXGE_LL_DEVICE_RESET = 1,
        VXGE_LL_FULL_RESET = 2,
        VXGE_LL_START_RESET = 3,
        VXGE_LL_COMPL_RESET = 4
};
/* These flags represent the device's temporary state */
enum vxge_device_state_t {
        __VXGE_STATE_RESET_CARD = 0,
        __VXGE_STATE_CARD_UP
};

enum vxge_mac_addr_state {
        /* mac address states */
        VXGE_LL_MAC_ADDR_IN_LIST = 0,
        VXGE_LL_MAC_ADDR_IN_DA_TABLE = 1
};

struct vxge_drv_config {
        int config_dev_cnt;
        int total_dev_cnt;
        int g_no_cpus;
        unsigned int vpath_per_dev;
};

struct macInfo {
        unsigned char macaddr[ETH_ALEN];
        unsigned char macmask[ETH_ALEN];
        unsigned int vpath_no;
        enum vxge_mac_addr_state state;
};

struct vxge_config {
        int tx_pause_enable;
        int rx_pause_enable;

#define NEW_NAPI_WEIGHT 64
        int napi_weight;
#define VXGE_GRO_DONOT_AGGREGATE 0
#define VXGE_GRO_ALWAYS_AGGREGATE 1
        int gro_enable;
        int intr_type;
#define INTA 0
#define MSI 1
#define MSI_X 2

        int addr_learn_en;

        int rth_steering;
        int rth_algorithm;
        int rth_hash_type_tcpipv4;
        int rth_hash_type_ipv4;
        int rth_hash_type_tcpipv6;
        int rth_hash_type_ipv6;
        int rth_hash_type_tcpipv6ex;
        int rth_hash_type_ipv6ex;
        int rth_bkt_sz;
        int rth_jhash_golden_ratio;
        int tx_steering_type;
        int fifo_indicate_max_pkts;
        struct vxge_hw_device_hw_info device_hw_info;
};
struct vxge_msix_entry {
        /* Mimicking the kernel's struct msix_entry. */
        u16 vector;
        u16 entry;
        u16 in_use;
        void *arg;
};

/* Software Statistics */

struct vxge_sw_stats {
        /* Network Stats (interface stats) */
        struct net_device_stats net_stats;

        /* Tx */
        u64 tx_frms;
        u64 tx_errors;
        u64 tx_bytes;
        u64 txd_not_free;
        u64 txd_out_of_desc;

        /* Virtual Path */
        u64 vpaths_open;
        u64 vpath_open_fail;

        /* Rx */
        u64 rx_frms;
        u64 rx_errors;
        u64 rx_bytes;
        u64 rx_mcast;

        /* Misc. */
        u64 link_up;
        u64 link_down;
        u64 pci_map_fail;
        u64 skb_alloc_fail;
};

struct vxge_mac_addrs {
        struct list_head item;
        u64 macaddr;
        u64 macmask;
        enum vxge_mac_addr_state state;
};

struct vxgedev;

struct vxge_fifo_stats {
        u64 tx_frms;
        u64 tx_errors;
        u64 tx_bytes;
        u64 txd_not_free;
        u64 txd_out_of_desc;
        u64 pci_map_fail;
};

struct vxge_fifo {
        struct net_device *ndev;
        struct pci_dev *pdev;
        struct __vxge_hw_fifo *handle;

        /* The vpath id maintained in the driver -
         * 0 to 'maximum_vpaths_in_function - 1'
         */
        int driver_id;
        int tx_steering_type;
        int indicate_max_pkts;
        spinlock_t tx_lock;
        /* flag used to maintain queue state when MULTIQ is not enabled */
#define VPATH_QUEUE_START	0
#define VPATH_QUEUE_STOP	1
        int queue_state;

        /* Tx stats */
        struct vxge_fifo_stats stats;
} ____cacheline_aligned;

struct vxge_ring_stats {
        u64 prev_rx_frms;
        u64 rx_frms;
        u64 rx_errors;
        u64 rx_dropped;
        u64 rx_bytes;
        u64 rx_mcast;
        u64 pci_map_fail;
        u64 skb_alloc_fail;
};

struct vxge_ring {
        struct net_device *ndev;
        struct pci_dev *pdev;
        struct __vxge_hw_ring *handle;
        /* The vpath id maintained in the driver -
         * 0 to 'maximum_vpaths_in_function - 1'
         */
        int driver_id;

        /* copy of the flag indicating whether rx_csum is to be used */
        u32 rx_csum;

        int pkts_processed;
        int budget;
        int gro_enable;

        struct napi_struct napi;
        struct napi_struct *napi_p;

#define VXGE_MAX_MAC_ADDR_COUNT	30

        int vlan_tag_strip;
        struct vlan_group *vlgrp;
        int rx_vector_no;
        enum vxge_hw_status last_status;

        /* Rx stats */
        struct vxge_ring_stats stats;
} ____cacheline_aligned;
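
/*
 * Both the per-vpath Tx (vxge_fifo) and Rx (vxge_ring) structures above are
 * ____cacheline_aligned, presumably so that a vpath's Tx and Rx fast-path
 * state start on separate cache lines and do not falsely share a line.
 */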

struct vxge_vpath {

        struct vxge_fifo fifo;
        struct vxge_ring ring;

        struct __vxge_hw_vpath_handle *handle;

        /* Actual vpath id for this vpath in the device - 0 to 16 */
        int device_id;
        int max_mac_addr_cnt;
        int is_configured;
        int is_open;
        struct vxgedev *vdev;
        u8 macaddr[ETH_ALEN];
        u8 macmask[ETH_ALEN];

#define VXGE_MAX_LEARN_MAC_ADDR_CNT	2048
        /* mac addresses currently programmed into NIC */
        u16 mac_addr_cnt;
        u16 mcast_addr_cnt;
        struct list_head mac_addr_list;

        u32 level_err;
        u32 level_trace;
};
#define VXGE_COPY_DEBUG_INFO_TO_LL(vdev, err, trace) {	\
        for (i = 0; i < vdev->no_of_vpath; i++) {	\
                vdev->vpaths[i].level_err = err;	\
                vdev->vpaths[i].level_trace = trace;	\
        }						\
        vdev->level_err = err;				\
        vdev->level_trace = trace;			\
}
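
/*
 * Note: VXGE_COPY_DEBUG_INFO_TO_LL() is not a hygienic macro - it expands in
 * the caller's scope and expects a loop counter 'i' to already be declared
 * there, alongside the 'vdev' pointer it is passed.
 */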

struct vxgedev {
        struct net_device *ndev;
        struct pci_dev *pdev;
        struct __vxge_hw_device *devh;
        struct vlan_group *vlgrp;
        int vlan_tag_strip;
        struct vxge_config config;
        unsigned long state;

        /* Indicates which vpath to reset */
        unsigned long vp_reset;

        /* Timer used for polling vpath resets */
        struct timer_list vp_reset_timer;

        /* Timer used for polling vpath lockup */
        struct timer_list vp_lockup_timer;

        /*
         * Flags to track whether device is in All Multicast
         * or in promiscuous mode.
         */
        u16 all_multi_flg;

        /* A flag indicating whether rx_csum is to be used or not. */
        u32 rx_csum;

        struct vxge_msix_entry *vxge_entries;
        struct msix_entry *entries;
        /*
         * 4 MSI-X vectors for each of up to 17 vpaths:
         * 68 vectors in total
         */
#define VXGE_MAX_REQUESTED_MSIX	68
#define VXGE_INTR_STRLEN	80
        char desc[VXGE_MAX_REQUESTED_MSIX][VXGE_INTR_STRLEN];

        enum vxge_hw_event cric_err_event;

        int max_vpath_supported;
        int no_of_vpath;

        struct napi_struct napi;
        /* A debug option: when enabled, on an error condition the driver
         * will do the following:
         * - mask all interrupts
         * - not clear the source of the alarm
         * - gracefully stop all I/O
         * A diagnostic dump of registers and stats at this point
         * reveals very useful information.
         */
        int exec_mode;
        int max_config_port;
        struct vxge_vpath *vpaths;

        struct __vxge_hw_vpath_handle *vp_handles[VXGE_HW_MAX_VIRTUAL_PATHS];
        void __iomem *bar0;
        struct vxge_sw_stats stats;
        int mtu;
        /* Below variables are used for vpath selection to transmit a packet */
        u8 vpath_selector[VXGE_HW_MAX_VIRTUAL_PATHS];
        u64 vpaths_deployed;

        u32 intr_cnt;
        u32 level_err;
        u32 level_trace;
        char fw_version[VXGE_HW_FW_STRLEN];
};

struct vxge_rx_priv {
        struct sk_buff *skb;
        unsigned char *skb_data;
        dma_addr_t data_dma;
        dma_addr_t data_size;
};

struct vxge_tx_priv {
        struct sk_buff *skb;
        dma_addr_t dma_buffers[MAX_SKB_FRAGS+1];
};

#define VXGE_MODULE_PARAM_INT(p, val) \
        static int p = val; \
        module_param(p, int, 0)
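
/*
 * Usage sketch (the parameter name below is hypothetical, not necessarily one
 * the driver defines):
 *
 *	VXGE_MODULE_PARAM_INT(max_config_vpath, VXGE_USE_DEFAULT);
 *
 * expands to a file-scope 'static int max_config_vpath = VXGE_USE_DEFAULT;'
 * registered via module_param(), so the value can be overridden when the
 * vxge module is loaded.
 */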

#define vxge_os_bug(fmt...)	{ printk(fmt); BUG(); }

#define vxge_os_timer(timer, handle, arg, exp) do {	\
                init_timer(&timer);			\
                timer.function = handle;		\
                timer.data = (unsigned long) arg;	\
                mod_timer(&timer, (jiffies + exp));	\
        } while (0)
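
/*
 * A minimal usage sketch, assuming a timer callback of the usual
 * 'void handler(unsigned long data)' form (the handler name here is
 * hypothetical):
 *
 *	vxge_os_timer(vdev->vp_lockup_timer, vxge_poll_vp_lockup, vdev,
 *		      HZ / 1000);
 *
 * initializes the timer, points it at the handler with 'vdev' as its
 * argument, and schedules it 'exp' jiffies (here HZ / 1000) from now.
 */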

int __devinit vxge_device_register(struct __vxge_hw_device *devh,
        struct vxge_config *config,
        int high_dma, int no_of_vpath,
        struct vxgedev **vdev);

void vxge_device_unregister(struct __vxge_hw_device *devh);

void vxge_vpath_intr_enable(struct vxgedev *vdev, int vp_id);

void vxge_vpath_intr_disable(struct vxgedev *vdev, int vp_id);

void vxge_callback_link_up(struct __vxge_hw_device *devh);

void vxge_callback_link_down(struct __vxge_hw_device *devh);

enum vxge_hw_status vxge_add_mac_addr(struct vxgedev *vdev,
        struct macInfo *mac);

int vxge_mac_list_del(struct vxge_vpath *vpath, struct macInfo *mac);

int vxge_reset(struct vxgedev *vdev);

enum vxge_hw_status
vxge_rx_1b_compl(struct __vxge_hw_ring *ringh, void *dtr,
        u8 t_code, void *userdata);

enum vxge_hw_status
vxge_xmit_compl(struct __vxge_hw_fifo *fifo_hw, void *dtr,
        enum vxge_hw_fifo_tcode t_code, void *userdata,
        struct sk_buff ***skb_ptr, int nr_skbs, int *more);

int vxge_close(struct net_device *dev);

int vxge_open(struct net_device *dev);

void vxge_close_vpaths(struct vxgedev *vdev, int index);

int vxge_open_vpaths(struct vxgedev *vdev);

enum vxge_hw_status vxge_reset_all_vpaths(struct vxgedev *vdev);

void vxge_stop_all_tx_queue(struct vxgedev *vdev);

void vxge_stop_tx_queue(struct vxge_fifo *fifo);

void vxge_start_all_tx_queue(struct vxgedev *vdev);

void vxge_wake_tx_queue(struct vxge_fifo *fifo, struct sk_buff *skb);

enum vxge_hw_status vxge_del_mac_addr(struct vxgedev *vdev,
        struct macInfo *mac);

int vxge_mac_list_add(struct vxge_vpath *vpath,
        struct macInfo *mac);

void vxge_free_mac_add_list(struct vxge_vpath *vpath);

enum vxge_hw_status vxge_restore_vpath_mac_addr(struct vxge_vpath *vpath);

enum vxge_hw_status vxge_restore_vpath_vid_table(struct vxge_vpath *vpath);

int do_vxge_close(struct net_device *dev, int do_io);
extern void initialize_ethtool_ops(struct net_device *ndev);
/**
 * #define VXGE_DEBUG_INIT: debug for initialization functions
 * #define VXGE_DEBUG_TX : debug transmit related functions
 * #define VXGE_DEBUG_RX : debug receive related functions
 * #define VXGE_DEBUG_MEM : debug memory module
 * #define VXGE_DEBUG_LOCK: debug locks
 * #define VXGE_DEBUG_SEM : debug semaphores
 * #define VXGE_DEBUG_ENTRYEXIT: debug functions by adding entry/exit statements
 */
#define VXGE_DEBUG_INIT		0x00000001
#define VXGE_DEBUG_TX		0x00000002
#define VXGE_DEBUG_RX		0x00000004
#define VXGE_DEBUG_MEM		0x00000008
#define VXGE_DEBUG_LOCK		0x00000010
#define VXGE_DEBUG_SEM		0x00000020
#define VXGE_DEBUG_ENTRYEXIT	0x00000040
#define VXGE_DEBUG_INTR		0x00000080
#define VXGE_DEBUG_LL_CONFIG	0x00000100

/* Debug tracing for VXGE driver */
#ifndef VXGE_DEBUG_MASK
#define VXGE_DEBUG_MASK	0x0
#endif
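
/*
 * For example, building the driver with
 * -DVXGE_DEBUG_MASK='(VXGE_DEBUG_INIT | VXGE_DEBUG_TX)' compiles in the
 * vxge_debug_init() and vxge_debug_tx() trace calls below, while the other
 * vxge_debug_*() macros expand to nothing.
 */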

#if (VXGE_DEBUG_LL_CONFIG & VXGE_DEBUG_MASK)
#define vxge_debug_ll_config(level, fmt, ...) \
        vxge_debug_ll(level, VXGE_DEBUG_LL_CONFIG, fmt, __VA_ARGS__)
#else
#define vxge_debug_ll_config(level, fmt, ...)
#endif

#if (VXGE_DEBUG_INIT & VXGE_DEBUG_MASK)
#define vxge_debug_init(level, fmt, ...) \
        vxge_debug_ll(level, VXGE_DEBUG_INIT, fmt, __VA_ARGS__)
#else
#define vxge_debug_init(level, fmt, ...)
#endif

#if (VXGE_DEBUG_TX & VXGE_DEBUG_MASK)
#define vxge_debug_tx(level, fmt, ...) \
        vxge_debug_ll(level, VXGE_DEBUG_TX, fmt, __VA_ARGS__)
#else
#define vxge_debug_tx(level, fmt, ...)
#endif

#if (VXGE_DEBUG_RX & VXGE_DEBUG_MASK)
#define vxge_debug_rx(level, fmt, ...) \
        vxge_debug_ll(level, VXGE_DEBUG_RX, fmt, __VA_ARGS__)
#else
#define vxge_debug_rx(level, fmt, ...)
#endif

#if (VXGE_DEBUG_MEM & VXGE_DEBUG_MASK)
#define vxge_debug_mem(level, fmt, ...) \
        vxge_debug_ll(level, VXGE_DEBUG_MEM, fmt, __VA_ARGS__)
#else
#define vxge_debug_mem(level, fmt, ...)
#endif

#if (VXGE_DEBUG_ENTRYEXIT & VXGE_DEBUG_MASK)
#define vxge_debug_entryexit(level, fmt, ...) \
        vxge_debug_ll(level, VXGE_DEBUG_ENTRYEXIT, fmt, __VA_ARGS__)
#else
#define vxge_debug_entryexit(level, fmt, ...)
#endif

#if (VXGE_DEBUG_INTR & VXGE_DEBUG_MASK)
#define vxge_debug_intr(level, fmt, ...) \
        vxge_debug_ll(level, VXGE_DEBUG_INTR, fmt, __VA_ARGS__)
#else
#define vxge_debug_intr(level, fmt, ...)
#endif

#define VXGE_DEVICE_DEBUG_LEVEL_SET(level, mask, vdev) {		\
        vxge_hw_device_debug_set((struct __vxge_hw_device *)vdev->devh, \
                level, mask);						\
        VXGE_COPY_DEBUG_INFO_TO_LL(vdev,				\
                vxge_hw_device_error_level_get((struct __vxge_hw_device *) \
                        vdev->devh),					\
                vxge_hw_device_trace_level_get((struct __vxge_hw_device *) \
                        vdev->devh));					\
}

#ifdef NETIF_F_GSO
#define vxge_tcp_mss(skb) (skb_shinfo(skb)->gso_size)
#define vxge_udp_mss(skb) (skb_shinfo(skb)->gso_size)
#define vxge_offload_type(skb) (skb_shinfo(skb)->gso_type)
#endif

#endif /* VXGE_MAIN_H */