/*
 * Copyright (C) 2005 - 2009 ServerEngines
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation.  The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Contact Information:
 * linux-drivers@serverengines.com
 *
 * ServerEngines
 * 209 N. Fair Oaks Ave
 * Sunnyvale, CA 94085
 */

#ifndef BE_H
#define BE_H

#include <linux/pci.h>
#include <linux/etherdevice.h>
#include <linux/version.h>
#include <linux/delay.h>
#include <net/tcp.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include <linux/if_vlan.h>
#include <linux/workqueue.h>
#include <linux/interrupt.h>
#include <linux/inet_lro.h>

#include "be_hw.h"

#define DRV_VER			"2.0.348"
#define DRV_NAME		"be2net"
#define BE_NAME			"ServerEngines BladeEngine2 10Gbps NIC"
#define OC_NAME			"Emulex OneConnect 10Gbps NIC"
#define DRV_DESC		BE_NAME " Driver"

#define BE_VENDOR_ID		0x19a2
#define BE_DEVICE_ID1		0x211
#define OC_DEVICE_ID1		0x700
#define OC_DEVICE_ID2		0x701

static inline char *nic_name(struct pci_dev *pdev)
{
	if (pdev->device == OC_DEVICE_ID1 || pdev->device == OC_DEVICE_ID2)
		return OC_NAME;
	else
		return BE_NAME;
}

/* Number of bytes of an RX frame that are copied to skb->data */
#define BE_HDR_LEN		64
#define BE_MAX_JUMBO_FRAME_SIZE	9018
#define BE_MIN_MTU		256

#define BE_NUM_VLANS_SUPPORTED	64
#define BE_MAX_EQD		96
#define BE_MAX_TX_FRAG_COUNT	30

#define EVNT_Q_LEN		1024
#define TX_Q_LEN		2048
#define TX_CQ_LEN		1024
#define RX_Q_LEN		1024	/* Does not support any other value */
#define RX_CQ_LEN		1024
#define MCC_Q_LEN		64	/* total size not to exceed 8 pages */
#define MCC_CQ_LEN		256

#define BE_NAPI_WEIGHT		64
#define MAX_RX_POST		BE_NAPI_WEIGHT /* Frags posted at a time */
#define RX_FRAGS_REFILL_WM	(RX_Q_LEN - MAX_RX_POST)

#define BE_MAX_LRO_DESCRIPTORS	16
#define BE_MAX_FRAGS_PER_FRAME	16

struct be_dma_mem {
	void *va;
	dma_addr_t dma;
	u32 size;
};

struct be_queue_info {
	struct be_dma_mem dma_mem;
	u16 len;
	u16 entry_size;	/* Size of an element in the queue */
	u16 id;
	u16 tail, head;
	bool created;
	atomic_t used;	/* Number of valid elements in the queue */
};

struct be_ctrl_info {
	u8 __iomem *csr;
	u8 __iomem *db;		/* Door Bell */
	u8 __iomem *pcicfg;	/* PCI config space */
	int pci_func;

	/* Mbox used for cmd request/response */
	spinlock_t cmd_lock;	/* For serializing cmds to BE card */
	struct be_dma_mem mbox_mem;
	/* Mbox mem is adjusted to align to 16 bytes. The allocated addr
	 * is stored for freeing purpose */
	struct be_dma_mem mbox_mem_alloced;
};

#include "be_cmds.h"

struct be_drvr_stats {
	u32 be_tx_reqs;		/* number of TX requests initiated */
	u32 be_tx_stops;	/* number of times TX Q was stopped */
	u32 be_fwd_reqs;	/* number of send reqs through forwarding i/f */
	u32 be_tx_wrbs;		/* number of tx WRBs used */
	u32 be_tx_events;	/* number of tx completion events */
	u32 be_tx_compl;	/* number of tx completion entries processed */
	ulong be_tx_jiffies;
	u64 be_tx_bytes;
	u64 be_tx_bytes_prev;
	u32 be_tx_rate;

	u32 cache_barrier[16];

	u32 be_ethrx_post_fail;	/* number of ethrx buffer alloc failures */
	u32 be_polls;		/* number of times NAPI called poll function */
	u32 be_rx_events;	/* number of ucast rx completion events */
	u32 be_rx_compl;	/* number of rx completion entries processed */
	u32 be_lro_hgram_data[8];	/* histogram of LRO data packets */
	u32 be_lro_hgram_ack[8];	/* histogram of LRO ACKs */
	ulong be_rx_jiffies;
	u64 be_rx_bytes;
	u64 be_rx_bytes_prev;
	u32 be_rx_rate;
	/* number of non ether type II frames dropped where
	 * frame len > length field of Mac Hdr */
	u32 be_802_3_dropped_frames;
	/* number of non ether type II frames malformed where
	 * frame len < length field of Mac Hdr */
	u32 be_802_3_malformed_frames;
	u32 be_rxcp_err;	/* Num rx completion entries w/ err set. */
	ulong rx_fps_jiffies;	/* jiffies at last FPS calc */
	u32 be_rx_frags;
	u32 be_prev_rx_frags;
	u32 be_rx_fps;		/* Rx frags per second */
};

struct be_stats_obj {
	struct be_drvr_stats drvr_stats;
	struct net_device_stats net_stats;
	struct be_dma_mem cmd;
};

struct be_eq_obj {
	struct be_queue_info q;
	char desc[32];

	/* Adaptive interrupt coalescing (AIC) info */
	bool enable_aic;
	u16 min_eqd;		/* in usecs */
	u16 max_eqd;		/* in usecs */
	u16 cur_eqd;		/* in usecs */

	struct napi_struct napi;
};

struct be_tx_obj {
	struct be_queue_info q;
	struct be_queue_info cq;
	/* Remember the skbs that were transmitted */
	struct sk_buff *sent_skb_list[TX_Q_LEN];
};

/* Struct to remember the pages posted for rx frags */
struct be_rx_page_info {
	struct page *page;
	dma_addr_t bus;
	u16 page_offset;
	bool last_page_user;
};

struct be_rx_obj {
	struct be_queue_info q;
	struct be_queue_info cq;
	struct be_rx_page_info page_info_tbl[RX_Q_LEN];
	struct net_lro_mgr lro_mgr;
	struct net_lro_desc lro_desc[BE_MAX_LRO_DESCRIPTORS];
};

#define BE_NUM_MSIX_VECTORS		2	/* 1 each for Tx and Rx */
struct be_adapter {
	struct pci_dev *pdev;
	struct net_device *netdev;

	/* Mbox, pci config, csr address information */
	struct be_ctrl_info ctrl;

	struct msix_entry msix_entries[BE_NUM_MSIX_VECTORS];
	bool msix_enabled;
	bool isr_registered;

	/* TX Rings */
	struct be_eq_obj tx_eq;
	struct be_tx_obj tx_obj;

	u32 cache_line_break[8];

	/* Rx rings */
	struct be_eq_obj rx_eq;
	struct be_rx_obj rx_obj;
	u32 big_page_size;	/* Compounded page size shared by rx wrbs */
	bool rx_post_starved;	/* Zero rx frags have been posted to BE */

	struct vlan_group *vlan_grp;
	u16 num_vlans;
	u8 vlan_tag[VLAN_GROUP_ARRAY_LEN];

	struct be_stats_obj stats;
	/* Work queue used to perform periodic tasks like getting statistics */
	struct delayed_work work;

	/* Ethtool knobs and info */
	bool rx_csum;		/* BE card must perform rx-checksumming */
	u32 max_rx_coal;
	char fw_ver[FW_VER_LEN];
	u32 if_handle;		/* Used to configure filtering */
	u32 pmac_id;		/* MAC addr handle used by BE card */

	struct be_link_info link;
	u32 port_num;
};

extern struct ethtool_ops be_ethtool_ops;

#define drvr_stats(adapter)		(&adapter->stats.drvr_stats)

#define BE_SET_NETDEV_OPS(netdev, ops)	(netdev->netdev_ops = ops)

static inline u32 MODULO(u16 val, u16 limit)
{
	BUG_ON(limit & (limit - 1));
	return val & (limit - 1);
}

static inline void index_adv(u16 *index, u16 val, u16 limit)
{
	*index = MODULO((*index + val), limit);
}

static inline void index_inc(u16 *index, u16 limit)
{
	*index = MODULO((*index + 1), limit);
}
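
/*
 * Illustrative usage only (not part of the driver): the queue lengths defined
 * above are powers of 2, so MODULO() reduces to a mask and index_inc()/
 * index_adv() give cheap ring-buffer wrap-around.  For a hypothetical
 * be_queue_info 'txq' with txq.len == TX_Q_LEN (2048):
 *
 *	txq.head = 2047;
 *	index_inc(&txq.head, txq.len);		// head wraps from 2047 to 0
 *	index_adv(&txq.tail, 3, txq.len);	// tail advances by 3, mod 2048
 *	atomic_add(1, &txq.used);		// caller tracks occupancy
 */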

#define PAGE_SHIFT_4K		12
#define PAGE_SIZE_4K		(1 << PAGE_SHIFT_4K)

/* Returns number of pages spanned by the data starting at the given addr */
#define PAGES_4K_SPANNED(_address, size)				\
		((u32)((((size_t)(_address) & (PAGE_SIZE_4K - 1)) +	\
			(size) + (PAGE_SIZE_4K - 1)) >> PAGE_SHIFT_4K))

/* Byte offset into the page corresponding to given address */
#define OFFSET_IN_PAGE(addr)						\
		((size_t)(addr) & (PAGE_SIZE_4K - 1))
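
/*
 * Worked example (illustrative only): a buffer starting at page offset 0x100
 * with size 8192 spans three 4K pages, since
 * (0x100 + 8192 + 4095) >> 12 == 3; OFFSET_IN_PAGE() of that address
 * evaluates to 0x100.
 */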

/* Returns bit offset within a DWORD of a bitfield */
#define AMAP_BIT_OFFSET(_struct, field)					\
		(((size_t)&(((_struct *)0)->field)) % 32)

/* Returns the bit mask of the field that is NOT shifted into location. */
static inline u32 amap_mask(u32 bitsize)
{
	return (bitsize == 32 ? 0xFFFFFFFF : (1 << bitsize) - 1);
}

static inline void
amap_set(void *ptr, u32 dw_offset, u32 mask, u32 offset, u32 value)
{
	u32 *dw = (u32 *) ptr + dw_offset;
	*dw &= ~(mask << offset);
	*dw |= (mask & value) << offset;
}

#define AMAP_SET_BITS(_struct, field, ptr, val)				\
		amap_set(ptr,						\
			offsetof(_struct, field)/32,			\
			amap_mask(sizeof(((_struct *)0)->field)),	\
			AMAP_BIT_OFFSET(_struct, field),		\
			val)

static inline u32 amap_get(void *ptr, u32 dw_offset, u32 mask, u32 offset)
{
	u32 *dw = (u32 *) ptr;
	return mask & (*(dw + dw_offset) >> offset);
}

#define AMAP_GET_BITS(_struct, field, ptr)				\
		amap_get(ptr,						\
			offsetof(_struct, field)/32,			\
			amap_mask(sizeof(((_struct *)0)->field)),	\
			AMAP_BIT_OFFSET(_struct, field))
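
/*
 * Illustrative usage only: AMAP_SET_BITS()/AMAP_GET_BITS() operate on the
 * bit-level "amap" structs from be_hw.h, where each field is declared as a
 * u8 array whose length is the field width in bits, so sizeof(field) is the
 * bit count and offsetof(field) is the bit offset.  Assuming an amap struct
 * such as 'struct amap_eth_hdr_wrb' with a 'num_wrb' field and 'hdr'
 * pointing at a zeroed header buffer:
 *
 *	AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, 2);
 *	wrb_cnt = AMAP_GET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr);
 */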

#define be_dws_cpu_to_le(wrb, len)	swap_dws(wrb, len)
#define be_dws_le_to_cpu(wrb, len)	swap_dws(wrb, len)
static inline void swap_dws(void *wrb, int len)
{
#ifdef __BIG_ENDIAN
	u32 *dw = wrb;
	BUG_ON(len % 4);
	do {
		*dw = cpu_to_le32(*dw);
		dw++;
		len -= 4;
	} while (len);
#endif				/* __BIG_ENDIAN */
}
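
/*
 * Illustrative note: the hardware consumes descriptor dwords in little-endian
 * order, so on little-endian CPUs swap_dws() compiles away and the be_dws_*
 * wrappers are no-ops.  A caller might convert a WRB in place before ringing
 * the doorbell, e.g.:
 *
 *	be_dws_cpu_to_le(wrb, sizeof(*wrb));
 */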

static inline u8 is_tcp_pkt(struct sk_buff *skb)
{
	u8 val = 0;

	if (ip_hdr(skb)->version == 4)
		val = (ip_hdr(skb)->protocol == IPPROTO_TCP);
	else if (ip_hdr(skb)->version == 6)
		val = (ipv6_hdr(skb)->nexthdr == NEXTHDR_TCP);

	return val;
}

static inline u8 is_udp_pkt(struct sk_buff *skb)
{
	u8 val = 0;

	if (ip_hdr(skb)->version == 4)
		val = (ip_hdr(skb)->protocol == IPPROTO_UDP);
	else if (ip_hdr(skb)->version == 6)
		val = (ipv6_hdr(skb)->nexthdr == NEXTHDR_UDP);

	return val;
}

#endif				/* BE_H */