blob: 88f30d254095d04a76c6ceb723a37e1ed8656d75 [file] [log] [blame]
Eilon Greenstein34f80b02008-06-23 20:33:01 -07001/* bnx2x_main.c: Broadcom Everest network driver.
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002 *
Eilon Greensteind05c26c2009-01-17 23:26:13 -08003 * Copyright (c) 2007-2009 Broadcom Corporation
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation.
8 *
Eilon Greenstein24e3fce2008-06-12 14:30:28 -07009 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
10 * Written by: Eliezer Tamir
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011 * Based on code from Michael Chan's bnx2 driver
12 * UDP CSUM errata workaround by Arik Gendelman
Eilon Greensteinca003922009-08-12 22:53:28 -070013 * Slowpath and fastpath rework by Vladislav Zolotarov
Eliezer Tamirc14423f2008-02-28 11:49:42 -080014 * Statistics and Link management by Yitchak Gertner
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020015 *
16 */
17
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020018#include <linux/module.h>
19#include <linux/moduleparam.h>
20#include <linux/kernel.h>
21#include <linux/device.h> /* for dev_info() */
22#include <linux/timer.h>
23#include <linux/errno.h>
24#include <linux/ioport.h>
25#include <linux/slab.h>
26#include <linux/vmalloc.h>
27#include <linux/interrupt.h>
28#include <linux/pci.h>
29#include <linux/init.h>
30#include <linux/netdevice.h>
31#include <linux/etherdevice.h>
32#include <linux/skbuff.h>
33#include <linux/dma-mapping.h>
34#include <linux/bitops.h>
35#include <linux/irq.h>
36#include <linux/delay.h>
37#include <asm/byteorder.h>
38#include <linux/time.h>
39#include <linux/ethtool.h>
40#include <linux/mii.h>
Eilon Greenstein0c6671b2009-01-14 21:26:51 -080041#include <linux/if_vlan.h>
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020042#include <net/ip.h>
43#include <net/tcp.h>
44#include <net/checksum.h>
Eilon Greenstein34f80b02008-06-23 20:33:01 -070045#include <net/ip6_checksum.h>
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020046#include <linux/workqueue.h>
47#include <linux/crc32.h>
Eilon Greenstein34f80b02008-06-23 20:33:01 -070048#include <linux/crc32c.h>
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020049#include <linux/prefetch.h>
50#include <linux/zlib.h>
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020051#include <linux/io.h>
52
Eilon Greenstein359d8b12009-02-12 08:38:25 +000053
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020054#include "bnx2x.h"
55#include "bnx2x_init.h"
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -070056#include "bnx2x_init_ops.h"
Eilon Greenstein0a64ea52009-03-02 08:01:12 +000057#include "bnx2x_dump.h"
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020058
Eilon Greenstein573dd782009-07-29 00:20:11 +000059#define DRV_MODULE_VERSION "1.48.114-1"
60#define DRV_MODULE_RELDATE "2009/07/29"
Eilon Greenstein34f80b02008-06-23 20:33:01 -070061#define BNX2X_BC_VER 0x040200
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020062
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -070063#include <linux/firmware.h>
64#include "bnx2x_fw_file_hdr.h"
65/* FW files */
66#define FW_FILE_PREFIX_E1 "bnx2x-e1-"
67#define FW_FILE_PREFIX_E1H "bnx2x-e1h-"
68
Eilon Greenstein34f80b02008-06-23 20:33:01 -070069/* Time in jiffies before concluding the transmitter is hung */
70#define TX_TIMEOUT (5*HZ)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020071
Andrew Morton53a10562008-02-09 23:16:41 -080072static char version[] __devinitdata =
Eilon Greenstein34f80b02008-06-23 20:33:01 -070073 "Broadcom NetXtreme II 5771x 10Gigabit Ethernet Driver "
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020074 DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
75
Eilon Greenstein24e3fce2008-06-12 14:30:28 -070076MODULE_AUTHOR("Eliezer Tamir");
Eilon Greensteine47d7e62009-01-14 06:44:28 +000077MODULE_DESCRIPTION("Broadcom NetXtreme II BCM57710/57711/57711E Driver");
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020078MODULE_LICENSE("GPL");
79MODULE_VERSION(DRV_MODULE_VERSION);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020080
Eilon Greenstein555f6c72009-02-12 08:36:11 +000081static int multi_mode = 1;
82module_param(multi_mode, int, 0);
Eilon Greensteinca003922009-08-12 22:53:28 -070083MODULE_PARM_DESC(multi_mode, " Multi queue mode "
84 "(0 Disable; 1 Enable (default))");
85
86static int num_rx_queues;
87module_param(num_rx_queues, int, 0);
88MODULE_PARM_DESC(num_rx_queues, " Number of Rx queues for multi_mode=1"
89 " (default is half number of CPUs)");
90
91static int num_tx_queues;
92module_param(num_tx_queues, int, 0);
93MODULE_PARM_DESC(num_tx_queues, " Number of Tx queues for multi_mode=1"
94 " (default is half number of CPUs)");
Eilon Greenstein555f6c72009-02-12 08:36:11 +000095
Eilon Greenstein19680c42008-08-13 15:47:33 -070096static int disable_tpa;
Eilon Greenstein19680c42008-08-13 15:47:33 -070097module_param(disable_tpa, int, 0);
Eilon Greenstein9898f862009-02-12 08:38:27 +000098MODULE_PARM_DESC(disable_tpa, " Disable the TPA (LRO) feature");
Eilon Greenstein8badd272009-02-12 08:36:15 +000099
100static int int_mode;
101module_param(int_mode, int, 0);
102MODULE_PARM_DESC(int_mode, " Force interrupt mode (1 INT#x; 2 MSI)");
103
Eilon Greenstein9898f862009-02-12 08:38:27 +0000104static int poll;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200105module_param(poll, int, 0);
Eilon Greenstein9898f862009-02-12 08:38:27 +0000106MODULE_PARM_DESC(poll, " Use polling (for debug)");
Eilon Greenstein8d5726c2009-02-12 08:37:19 +0000107
108static int mrrs = -1;
109module_param(mrrs, int, 0);
110MODULE_PARM_DESC(mrrs, " Force Max Read Req Size (0..3) (for debug)");
111
Eilon Greenstein9898f862009-02-12 08:38:27 +0000112static int debug;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200113module_param(debug, int, 0);
Eilon Greenstein9898f862009-02-12 08:38:27 +0000114MODULE_PARM_DESC(debug, " Default debug msglevel");
115
116static int load_count[3]; /* 0-common, 1-port0, 2-port1 */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200117
Eilon Greenstein1cf167f2009-01-14 21:22:18 -0800118static struct workqueue_struct *bnx2x_wq;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200119
120enum bnx2x_board_type {
121 BCM57710 = 0,
Eilon Greenstein34f80b02008-06-23 20:33:01 -0700122 BCM57711 = 1,
123 BCM57711E = 2,
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200124};
125
Eilon Greenstein34f80b02008-06-23 20:33:01 -0700126/* indexed by board_type, above */
Andrew Morton53a10562008-02-09 23:16:41 -0800127static struct {
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200128 char *name;
129} board_info[] __devinitdata = {
Eilon Greenstein34f80b02008-06-23 20:33:01 -0700130 { "Broadcom NetXtreme II BCM57710 XGb" },
131 { "Broadcom NetXtreme II BCM57711 XGb" },
132 { "Broadcom NetXtreme II BCM57711E XGb" }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200133};
134
Eilon Greenstein34f80b02008-06-23 20:33:01 -0700135
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200136static const struct pci_device_id bnx2x_pci_tbl[] = {
137 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57710,
138 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57710 },
Eilon Greenstein34f80b02008-06-23 20:33:01 -0700139 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57711,
140 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57711 },
141 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57711E,
142 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57711E },
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200143 { 0 }
144};
145
146MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);
147
148/****************************************************************************
149* General service functions
150****************************************************************************/
151
/* used only at init
 * locking is done by mcp
 */
static void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
{
	/* Indirect GRC write via PCI config space: program the GRC
	 * address window, write the data, then park the window back
	 * on the (harmless) vendor-ID offset so a later stray config
	 * access cannot hit the last GRC address. */
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);
}
162
/* Indirect GRC read via the PCI config-space window (counterpart of
 * bnx2x_reg_wr_ind(): used only at init, locking is done by mcp).
 * Returns the 32-bit value read from GRC address @addr. */
static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
{
	u32 val;

	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
	pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
	/* restore the window to a benign offset after the read */
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);

	return val;
}
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200174
/* DMAE "GO" registers, one per command slot; writing 1 to
 * dmae_reg_go_c[idx] starts the command loaded at slot idx
 * (see bnx2x_post_dmae() below). */
static const u32 dmae_reg_go_c[] = {
	DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
	DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
	DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11,
	DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
};
181
182/* copy command into DMAE command memory and set DMAE command go */
183static void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae,
184 int idx)
185{
186 u32 cmd_offset;
187 int i;
188
189 cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
190 for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
191 REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));
192
Eilon Greensteinad8d3942008-06-23 20:29:02 -0700193 DP(BNX2X_MSG_OFF, "DMAE cmd[%d].%d (0x%08x) : 0x%08x\n",
194 idx, i, cmd_offset + i*4, *(((u32 *)dmae) + i));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200195 }
196 REG_WR(bp, dmae_reg_go_c[idx], 1);
197}
198
/* Write @len32 dwords from the DMA-able buffer at @dma_addr to GRC
 * address @dst_addr using the DMAE engine.  Falls back to indirect
 * register writes while DMAE is not ready.  Takes bp->dmae_mutex and
 * may sleep, so it must be called from process context. */
void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
		      u32 len32)
{
	struct dmae_command *dmae = &bp->init_dmae;
	u32 *wb_comp = bnx2x_sp(bp, wb_comp);
	int cnt = 200;		/* completion-poll budget */

	if (!bp->dmae_ready) {
		u32 *data = bnx2x_sp(bp, wb_data[0]);

		DP(BNX2X_MSG_OFF, "DMAE is not ready (dst_addr %08x len32 %d)"
		   " using indirect\n", dst_addr, len32);
		bnx2x_init_ind_wr(bp, dst_addr, data, len32);
		return;
	}

	/* bp->init_dmae and the wb_comp dword are shared - serialize */
	mutex_lock(&bp->dmae_mutex);

	memset(dmae, 0, sizeof(struct dmae_command));

	/* PCI (host memory) -> GRC transfer, completion written back to
	 * host memory; endianity swap mode depends on host byte order */
	dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
			DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
			DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
			(BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
			(BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae->src_addr_lo = U64_LO(dma_addr);
	dmae->src_addr_hi = U64_HI(dma_addr);
	dmae->dst_addr_lo = dst_addr >> 2;	/* GRC address in dwords */
	dmae->dst_addr_hi = 0;
	dmae->len = len32;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	DP(BNX2X_MSG_OFF, "DMAE: opcode 0x%08x\n"
	   DP_LEVEL "src_addr [%x:%08x] len [%d *4] "
		    "dst_addr [%x:%08x (%08x)]\n"
	   DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
	   dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
	   dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, dst_addr,
	   dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);
	DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
	   bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
	   bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

	/* arm the completion dword before starting the transfer */
	*wb_comp = 0;

	bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));

	udelay(5);

	/* poll until the engine writes DMAE_COMP_VAL back, or the
	 * budget (cnt) runs out */
	while (*wb_comp != DMAE_COMP_VAL) {
		DP(BNX2X_MSG_OFF, "wb_comp 0x%08x\n", *wb_comp);

		if (!cnt) {
			BNX2X_ERR("DMAE timeout!\n");
			break;
		}
		cnt--;
		/* adjust delay for emulation/FPGA */
		if (CHIP_REV_IS_SLOW(bp))
			msleep(100);
		else
			udelay(5);
	}

	mutex_unlock(&bp->dmae_mutex);
}
272
/* Read @len32 dwords from GRC address @src_addr into the slowpath
 * wb_data buffer using the DMAE engine (mirror of bnx2x_write_dmae()).
 * Falls back to indirect register reads while DMAE is not ready.
 * Takes bp->dmae_mutex and may sleep - process context only. */
void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
{
	struct dmae_command *dmae = &bp->init_dmae;
	u32 *wb_comp = bnx2x_sp(bp, wb_comp);
	int cnt = 200;		/* completion-poll budget */

	if (!bp->dmae_ready) {
		u32 *data = bnx2x_sp(bp, wb_data[0]);
		int i;

		DP(BNX2X_MSG_OFF, "DMAE is not ready (src_addr %08x len32 %d)"
		   " using indirect\n", src_addr, len32);
		for (i = 0; i < len32; i++)
			data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
		return;
	}

	mutex_lock(&bp->dmae_mutex);

	/* clear the landing area before the transfer */
	memset(bnx2x_sp(bp, wb_data[0]), 0, sizeof(u32) * 4);
	memset(dmae, 0, sizeof(struct dmae_command));

	/* GRC -> PCI (host memory) transfer, completion written back
	 * to host memory */
	dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
			DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
			DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
			(BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
			(BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae->src_addr_lo = src_addr >> 2;	/* GRC address in dwords */
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
	dmae->len = len32;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	DP(BNX2X_MSG_OFF, "DMAE: opcode 0x%08x\n"
	   DP_LEVEL "src_addr [%x:%08x] len [%d *4] "
		    "dst_addr [%x:%08x (%08x)]\n"
	   DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
	   dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
	   dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, src_addr,
	   dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);

	/* arm the completion dword before starting the transfer */
	*wb_comp = 0;

	bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));

	udelay(5);

	/* poll for completion, bounded by cnt */
	while (*wb_comp != DMAE_COMP_VAL) {

		if (!cnt) {
			BNX2X_ERR("DMAE timeout!\n");
			break;
		}
		cnt--;
		/* adjust delay for emulation/FPGA */
		if (CHIP_REV_IS_SLOW(bp))
			msleep(100);
		else
			udelay(5);
	}
	DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
	   bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
	   bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

	mutex_unlock(&bp->dmae_mutex);
}
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200347
Eilon Greensteinad8d3942008-06-23 20:29:02 -0700348/* used only for slowpath so not inlined */
349static void bnx2x_wb_wr(struct bnx2x *bp, int reg, u32 val_hi, u32 val_lo)
350{
351 u32 wb_write[2];
352
353 wb_write[0] = val_hi;
354 wb_write[1] = val_lo;
355 REG_WR_DMAE(bp, reg, wb_write, 2);
356}
357
#ifdef USE_WB_RD
/* Read a 64-bit write-back pair (two dwords, high first) from @reg
 * via DMAE and fold it into a u64. */
static u64 bnx2x_wb_rd(struct bnx2x *bp, int reg)
{
	u32 buf[2];

	REG_RD_DMAE(bp, reg, buf, 2);

	return HILO_U64(buf[0], buf[1]);
}
#endif
368
/* Scan the assert lists of the four storm processors (XSTORM, TSTORM,
 * CSTORM, USTORM) and print every valid entry.  The four sections are
 * deliberately parallel; only the BAR and the per-storm offset macros
 * differ.  Returns the total number of asserts found. */
static int bnx2x_mc_assert(struct bnx2x *bp)
{
	char last_idx;
	int i, rc = 0;
	u32 row0, row1, row2, row3;

	/* XSTORM */
	last_idx = REG_RD8(bp, BAR_XSTRORM_INTMEM +
			   XSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		/* each entry is 4 consecutive dwords */
		row0 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 12);

		/* the first invalid opcode terminates the list */
		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* TSTORM */
	last_idx = REG_RD8(bp, BAR_TSTRORM_INTMEM +
			   TSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* CSTORM */
	last_idx = REG_RD8(bp, BAR_CSTRORM_INTMEM +
			   CSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* USTORM */
	last_idx = REG_RD8(bp, BAR_USTRORM_INTMEM +
			   USTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	return rc;
}
Eliezer Tamirc14423f2008-02-28 11:49:42 -0800489
/* Dump the firmware (MCP) trace buffer from the scratchpad to the
 * kernel log.  The trace is a ring: "mark" (read from scratch+0xf104)
 * points at the current write position, so the buffer is printed in
 * two passes - from mark to the end, then from the start (0xF108) up
 * to mark - to reproduce chronological order.  The 0x08000000 bias
 * appears to convert mark from an MCP-view address to a scratchpad
 * offset - NOTE(review): confirm against the MCP firmware docs. */
static void bnx2x_fw_dump(struct bnx2x *bp)
{
	u32 mark, offset;
	__be32 data[9];
	int word;

	mark = REG_RD(bp, MCP_REG_MCPR_SCRATCH + 0xf104);
	mark = ((mark + 0x3) & ~0x3);	/* round up to dword alignment */
	printk(KERN_ERR PFX "begin fw dump (mark 0x%x)\n", mark);

	printk(KERN_ERR PFX);
	for (offset = mark - 0x08000000; offset <= 0xF900; offset += 0x8*4) {
		/* read 8 dwords and print them as a NUL-terminated
		 * string (data[8] is the terminator; htonl keeps the
		 * firmware's byte order intact for printing) */
		for (word = 0; word < 8; word++)
			data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
						  offset + 4*word));
		data[8] = 0x0;
		printk(KERN_CONT "%s", (char *)data);
	}
	for (offset = 0xF108; offset <= mark - 0x08000000; offset += 0x8*4) {
		for (word = 0; word < 8; word++)
			data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
						  offset + 4*word));
		data[8] = 0x0;
		printk(KERN_CONT "%s", (char *)data);
	}
	printk(KERN_ERR PFX "end of fw dump\n");
}
517
/* Crash-dump helper: freezes statistics collection, then dumps the
 * driver's view of all Rx/Tx queue indices and a window of each ring
 * (descriptors around the current consumer/producer), followed by the
 * firmware trace and the storm assert lists. */
static void bnx2x_panic_dump(struct bnx2x *bp)
{
	int i;
	u16 j, start, end;

	/* stop the stats machinery first so it does not race the dump */
	bp->stats_state = STATS_STATE_DISABLED;
	DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");

	BNX2X_ERR("begin crash dump -----------------\n");

	/* Indices */
	/* Common */
	BNX2X_ERR("def_c_idx(%u) def_u_idx(%u) def_x_idx(%u)"
		  " def_t_idx(%u) def_att_idx(%u) attn_state(%u)"
		  " spq_prod_idx(%u)\n",
		  bp->def_c_idx, bp->def_u_idx, bp->def_x_idx, bp->def_t_idx,
		  bp->def_att_idx, bp->attn_state, bp->spq_prod_idx);

	/* Rx */
	for_each_rx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		BNX2X_ERR("fp%d: rx_bd_prod(%x) rx_bd_cons(%x)"
			  " *rx_bd_cons_sb(%x) rx_comp_prod(%x)"
			  " rx_comp_cons(%x) *rx_cons_sb(%x)\n",
			  i, fp->rx_bd_prod, fp->rx_bd_cons,
			  le16_to_cpu(*fp->rx_bd_cons_sb), fp->rx_comp_prod,
			  fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
		BNX2X_ERR(" rx_sge_prod(%x) last_max_sge(%x)"
			  " fp_u_idx(%x) *sb_u_idx(%x)\n",
			  fp->rx_sge_prod, fp->last_max_sge,
			  le16_to_cpu(fp->fp_u_idx),
			  fp->status_blk->u_status_block.status_block_index);
	}

	/* Tx */
	for_each_tx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		BNX2X_ERR("fp%d: tx_pkt_prod(%x) tx_pkt_cons(%x)"
			  " tx_bd_prod(%x) tx_bd_cons(%x) *tx_cons_sb(%x)\n",
			  i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
			  fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb));
		BNX2X_ERR(" fp_c_idx(%x) *sb_c_idx(%x)"
			  " tx_db_prod(%x)\n", le16_to_cpu(fp->fp_c_idx),
			  fp->status_blk->c_status_block.status_block_index,
			  fp->tx_db.data.prod);
	}

	/* Rings */
	/* Rx */
	for_each_rx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		/* dump a window of the Rx BD ring around the SB consumer
		 * (10 entries before, ~503 after; ring macros wrap) */
		start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
		end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
		for (j = start; j != end; j = RX_BD(j + 1)) {
			u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
			struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];

			BNX2X_ERR("fp%d: rx_bd[%x]=[%x:%x] sw_bd=[%p]\n",
				  i, j, rx_bd[1], rx_bd[0], sw_bd->skb);
		}

		/* SGE ring: dump from producer back to the last max SGE */
		start = RX_SGE(fp->rx_sge_prod);
		end = RX_SGE(fp->last_max_sge);
		for (j = start; j != end; j = RX_SGE(j + 1)) {
			u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j];
			struct sw_rx_page *sw_page = &fp->rx_page_ring[j];

			BNX2X_ERR("fp%d: rx_sge[%x]=[%x:%x] sw_page=[%p]\n",
				  i, j, rx_sge[1], rx_sge[0], sw_page->page);
		}

		/* completion queue window around the consumer */
		start = RCQ_BD(fp->rx_comp_cons - 10);
		end = RCQ_BD(fp->rx_comp_cons + 503);
		for (j = start; j != end; j = RCQ_BD(j + 1)) {
			u32 *cqe = (u32 *)&fp->rx_comp_ring[j];

			BNX2X_ERR("fp%d: cqe[%x]=[%x:%x:%x:%x]\n",
				  i, j, cqe[0], cqe[1], cqe[2], cqe[3]);
		}
	}

	/* Tx */
	for_each_tx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		/* SW Tx buffer window around the SB consumer */
		start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10);
		end = TX_BD(le16_to_cpu(*fp->tx_cons_sb) + 245);
		for (j = start; j != end; j = TX_BD(j + 1)) {
			struct sw_tx_bd *sw_bd = &fp->tx_buf_ring[j];

			BNX2X_ERR("fp%d: packet[%x]=[%p,%x]\n",
				  i, j, sw_bd->skb, sw_bd->first_bd);
		}

		/* HW Tx BD window around the driver consumer */
		start = TX_BD(fp->tx_bd_cons - 10);
		end = TX_BD(fp->tx_bd_cons + 254);
		for (j = start; j != end; j = TX_BD(j + 1)) {
			u32 *tx_bd = (u32 *)&fp->tx_desc_ring[j];

			BNX2X_ERR("fp%d: tx_bd[%x]=[%x:%x:%x:%x]\n",
				  i, j, tx_bd[0], tx_bd[1], tx_bd[2], tx_bd[3]);
		}
	}

	bnx2x_fw_dump(bp);
	bnx2x_mc_assert(bp);
	BNX2X_ERR("end crash dump -----------------\n");
}
629
/* Enable chip interrupts in the HC (host coalescing) block for this
 * port, programming the config register for the interrupt mode in use
 * (MSI-X, MSI or INTx), and - on E1H - the leading/trailing edge
 * attention masks.  The MMIO write ordering here is deliberate. */
static void bnx2x_int_enable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	u32 val = REG_RD(bp, addr);
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
	int msi = (bp->flags & USING_MSI_FLAG) ? 1 : 0;

	if (msix) {
		/* MSI-X: per-vector interrupts, no single-ISR, no INTx */
		val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			 HC_CONFIG_0_REG_INT_LINE_EN_0);
		val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);
	} else if (msi) {
		/* MSI: single ISR, no INTx line */
		val &= ~HC_CONFIG_0_REG_INT_LINE_EN_0;
		val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);
	} else {
		/* INTx: first write with MSI/MSI-X also set, then clear
		 * the MSI/MSI-X enable for the final write below -
		 * NOTE(review): this two-step write looks like a HW
		 * workaround; confirm against the HC errata */
		val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_INT_LINE_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);

		DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
		   val, port, addr);

		REG_WR(bp, addr, val);

		val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
	}

	DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x) mode %s\n",
	   val, port, addr, (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));

	REG_WR(bp, addr, val);
	/*
	 * Ensure that HC_CONFIG is written before leading/trailing edge config
	 */
	mmiowb();
	barrier();

	if (CHIP_IS_E1H(bp)) {
		/* init leading/trailing edge */
		if (IS_E1HMF(bp)) {
			/* multi-function: only this VN's bit plus 0xee0f */
			val = (0xee0f | (1 << (BP_E1HVN(bp) + 4)));
			if (bp->port.pmf)
				/* enable nig and gpio3 attention */
				val |= 0x1100;
		} else
			val = 0xffff;

		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
	}

	/* Make sure that interrupts are indeed enabled from here on */
	mmiowb();
}
689
/* Disable all chip interrupt sources (single-ISR, MSI/MSI-X, INTx
 * line and attention bits) in this port's HC config register, and
 * verify the write by reading the register back. */
static void bnx2x_int_disable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	u32 val = REG_RD(bp, addr);

	val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
		 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
		 HC_CONFIG_0_REG_INT_LINE_EN_0 |
		 HC_CONFIG_0_REG_ATTN_BIT_EN_0);

	DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
	   val, port, addr);

	/* flush all outstanding writes */
	mmiowb();

	REG_WR(bp, addr, val);
	/* read back to make sure the disable actually reached the HW */
	if (REG_RD(bp, addr) != val)
		BNX2X_ERR("BUG! proper val not read from IGU!\n");

}
712
/* Quiesce all interrupt activity: raise intr_sem so ISRs become no-ops,
 * optionally mask the HW (@disable_hw), wait for all in-flight ISRs to
 * finish, and drain the slowpath work.  Order matters: the semaphore must
 * be visible before we synchronize the vectors.
 */
static void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
{
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
	int i, offset;

	/* disable interrupt handling */
	atomic_inc(&bp->intr_sem);
	smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */

	if (disable_hw)
		/* prevent the HW from sending interrupts */
		bnx2x_int_disable(bp);

	/* make sure all ISRs are done */
	if (msix) {
		/* vector 0 is the slowpath; fastpath vectors follow it */
		synchronize_irq(bp->msix_table[0].vector);
		offset = 1;
		for_each_queue(bp, i)
			synchronize_irq(bp->msix_table[i + offset].vector);
	} else
		synchronize_irq(bp->pdev->irq);

	/* make sure sp_task is not running */
	cancel_delayed_work(&bp->sp_task);
	flush_workqueue(bnx2x_wq);
}
739
Eilon Greenstein34f80b02008-06-23 20:33:01 -0700740/* fast path */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200741
742/*
Eilon Greenstein34f80b02008-06-23 20:33:01 -0700743 * General service functions
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200744 */
745
/* Acknowledge a status block to the IGU: writes one igu_ack_register word
 * (status block id, storm id, new index, interrupt mode, update flag) to
 * the per-port HC command register, then makes sure the ACK is posted.
 */
static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 sb_id,
				u8 storm, u16 index, u8 op, u8 update)
{
	u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
		       COMMAND_REG_INT_ACK);
	struct igu_ack_register igu_ack;

	/* pack all ack fields into the single 32-bit register image */
	igu_ack.status_block_index = index;
	igu_ack.sb_id_and_flags =
			((sb_id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |
			 (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) |
			 (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
			 (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));

	DP(BNX2X_MSG_OFF, "write 0x%08x to HC addr 0x%x\n",
	   (*(u32 *)&igu_ack), hc_addr);
	REG_WR(bp, hc_addr, (*(u32 *)&igu_ack));

	/* Make sure that ACK is written */
	mmiowb();
	barrier();
}
768
769static inline u16 bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
770{
771 struct host_status_block *fpsb = fp->status_blk;
772 u16 rc = 0;
773
774 barrier(); /* status block is written to by the chip */
775 if (fp->fp_c_idx != fpsb->c_status_block.status_block_index) {
776 fp->fp_c_idx = fpsb->c_status_block.status_block_index;
777 rc |= 1;
778 }
779 if (fp->fp_u_idx != fpsb->u_status_block.status_block_index) {
780 fp->fp_u_idx = fpsb->u_status_block.status_block_index;
781 rc |= 2;
782 }
783 return rc;
784}
785
/* Read the per-port SIMD interrupt mask from the HC command register;
 * the read also acknowledges the interrupt.  Returns the raw mask value.
 */
static u16 bnx2x_ack_int(struct bnx2x *bp)
{
	u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
		       COMMAND_REG_SIMD_MASK);
	u32 result = REG_RD(bp, hc_addr);

	DP(BNX2X_MSG_OFF, "read 0x%08x from HC addr 0x%x\n",
	   result, hc_addr);

	return result;
}
797
798
799/*
800 * fast path service functions
801 */
802
Vladislav Zolotarove8b5fc52009-01-26 12:36:42 -0800803static inline int bnx2x_has_tx_work_unload(struct bnx2x_fastpath *fp)
804{
805 /* Tell compiler that consumer and producer can change */
806 barrier();
807 return (fp->tx_pkt_prod != fp->tx_pkt_cons);
Eilon Greenstein237907c2009-01-14 06:42:44 +0000808}
809
/* free skb in the packet ring at pos idx
 * return idx of last bd freed
 */
static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			     u16 idx)
{
	struct sw_tx_bd *tx_buf = &fp->tx_buf_ring[idx];
	struct eth_tx_start_bd *tx_start_bd;
	struct eth_tx_bd *tx_data_bd;
	struct sk_buff *skb = tx_buf->skb;
	u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
	int nbd;

	DP(BNX2X_MSG_OFF, "pkt_idx %d buff @(%p)->skb %p\n",
	   idx, tx_buf, skb);

	/* unmap first bd */
	DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
	tx_start_bd = &fp->tx_desc_ring[bd_idx].start_bd;
	pci_unmap_single(bp->pdev, BD_UNMAP_ADDR(tx_start_bd),
			 BD_UNMAP_LEN(tx_start_bd), PCI_DMA_TODEVICE);

	/* nbd counts the BDs of the packet; the start BD itself is
	 * accounted for separately */
	nbd = le16_to_cpu(tx_start_bd->nbd) - 1;
#ifdef BNX2X_STOP_ON_ERROR
	if ((nbd - 1) > (MAX_SKB_FRAGS + 2)) {
		BNX2X_ERR("BAD nbd!\n");
		bnx2x_panic();
	}
#endif
	/* consumer index of the BD right after this packet */
	new_cons = nbd + tx_buf->first_bd;

	/* Get the next bd */
	bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

	/* Skip a parse bd... */
	--nbd;
	bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

	/* ...and the TSO split header bd since they have no mapping */
	if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) {
		--nbd;
		bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
	}

	/* now free frags */
	while (nbd > 0) {

		DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx);
		tx_data_bd = &fp->tx_desc_ring[bd_idx].reg_bd;
		pci_unmap_page(bp->pdev, BD_UNMAP_ADDR(tx_data_bd),
			       BD_UNMAP_LEN(tx_data_bd), PCI_DMA_TODEVICE);
		if (--nbd)
			bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
	}

	/* release skb */
	WARN_ON(!skb);
	/* _any variant: may be called from hard-irq context */
	dev_kfree_skb_any(skb);
	tx_buf->first_bd = 0;
	tx_buf->skb = NULL;

	return new_cons;
}
873
Eilon Greenstein34f80b02008-06-23 20:33:01 -0700874static inline u16 bnx2x_tx_avail(struct bnx2x_fastpath *fp)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200875{
Eilon Greenstein34f80b02008-06-23 20:33:01 -0700876 s16 used;
877 u16 prod;
878 u16 cons;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200879
Eilon Greenstein34f80b02008-06-23 20:33:01 -0700880 barrier(); /* Tell compiler that prod and cons can change */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200881 prod = fp->tx_bd_prod;
882 cons = fp->tx_bd_cons;
883
Eilon Greenstein34f80b02008-06-23 20:33:01 -0700884 /* NUM_TX_RINGS = number of "next-page" entries
885 It will be used as a threshold */
886 used = SUB_S16(prod, cons) + (s16)NUM_TX_RINGS;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200887
Eilon Greenstein34f80b02008-06-23 20:33:01 -0700888#ifdef BNX2X_STOP_ON_ERROR
Ilpo Järvinen53e5e962008-07-25 21:40:45 -0700889 WARN_ON(used < 0);
890 WARN_ON(used > fp->bp->tx_ring_size);
891 WARN_ON((fp->bp->tx_ring_size - used) > MAX_TX_AVAIL);
Eilon Greenstein34f80b02008-06-23 20:33:01 -0700892#endif
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200893
Eilon Greenstein34f80b02008-06-23 20:33:01 -0700894 return (s16)(fp->bp->tx_ring_size) - used;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200895}
896
/* TX completion handler for one fastpath: walk the completion consumer up
 * to the HW consumer taken from the status block, free each completed
 * packet, then wake the netdev TX queue if it was stopped and enough BDs
 * have been released.
 */
static void bnx2x_tx_int(struct bnx2x_fastpath *fp)
{
	struct bnx2x *bp = fp->bp;
	struct netdev_queue *txq;
	u16 hw_cons, sw_cons, bd_cons = fp->tx_bd_cons;
	int done = 0;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return;
#endif

	/* TX queues are indexed after the RX queues in the netdev */
	txq = netdev_get_tx_queue(bp->dev, fp->index - bp->num_rx_queues);
	hw_cons = le16_to_cpu(*fp->tx_cons_sb);
	sw_cons = fp->tx_pkt_cons;

	while (sw_cons != hw_cons) {
		u16 pkt_cons;

		pkt_cons = TX_BD(sw_cons);

		/* prefetch(bp->tx_buf_ring[pkt_cons].skb); */

		DP(NETIF_MSG_TX_DONE, "hw_cons %u sw_cons %u pkt_cons %u\n",
		   hw_cons, sw_cons, pkt_cons);

/* if (NEXT_TX_IDX(sw_cons) != hw_cons) {
			rmb();
			prefetch(fp->tx_buf_ring[NEXT_TX_IDX(sw_cons)].skb);
		}
*/
		bd_cons = bnx2x_free_tx_pkt(bp, fp, pkt_cons);
		sw_cons++;
		done++;
	}

	fp->tx_pkt_cons = sw_cons;
	fp->tx_bd_cons = bd_cons;

	/* TBD need a thresh? */
	if (unlikely(netif_tx_queue_stopped(txq))) {

		/* Need to make the tx_bd_cons update visible to start_xmit()
		 * before checking for netif_tx_queue_stopped().  Without the
		 * memory barrier, there is a small possibility that
		 * start_xmit() will miss it and cause the queue to be stopped
		 * forever.
		 */
		smp_mb();

		/* re-check under the barrier; only wake while the device is
		 * open and there is room for a worst-case packet */
		if ((netif_tx_queue_stopped(txq)) &&
		    (bp->state == BNX2X_STATE_OPEN) &&
		    (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3))
			netif_tx_wake_queue(txq);
	}
}
953
Eilon Greenstein3196a882008-08-13 15:58:49 -0700954
/* Slowpath (ramrod) completion dispatcher: advance either the fastpath
 * state machine (for non-leading queues, fp->index != 0) or the global
 * bp->state machine according to which ramrod command completed.  Each
 * transition ends with mb() so bnx2x_wait_ramrod() pollers see it.
 */
static void bnx2x_sp_event(struct bnx2x_fastpath *fp,
			   union eth_rx_cqe *rr_cqe)
{
	struct bnx2x *bp = fp->bp;
	int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
	int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);

	DP(BNX2X_MSG_SP,
	   "fp %d cid %d got ramrod #%d state is %x type is %d\n",
	   fp->index, cid, command, bp->state,
	   rr_cqe->ramrod_cqe.ramrod_type);

	/* a ramrod completed - a slot on the slowpath queue is free again */
	bp->spq_left++;

	if (fp->index) {
		/* per-queue (non-leading) ramrods */
		switch (command | fp->state) {
		case (RAMROD_CMD_ID_ETH_CLIENT_SETUP |
						BNX2X_FP_STATE_OPENING):
			DP(NETIF_MSG_IFUP, "got MULTI[%d] setup ramrod\n",
			   cid);
			fp->state = BNX2X_FP_STATE_OPEN;
			break;

		case (RAMROD_CMD_ID_ETH_HALT | BNX2X_FP_STATE_HALTING):
			DP(NETIF_MSG_IFDOWN, "got MULTI[%d] halt ramrod\n",
			   cid);
			fp->state = BNX2X_FP_STATE_HALTED;
			break;

		default:
			BNX2X_ERR("unexpected MC reply (%d) "
				  "fp->state is %x\n", command, fp->state);
			break;
		}
		mb(); /* force bnx2x_wait_ramrod() to see the change */
		return;
	}

	/* leading-queue ramrods drive the global device state */
	switch (command | bp->state) {
	case (RAMROD_CMD_ID_ETH_PORT_SETUP | BNX2X_STATE_OPENING_WAIT4_PORT):
		DP(NETIF_MSG_IFUP, "got setup ramrod\n");
		bp->state = BNX2X_STATE_OPEN;
		break;

	case (RAMROD_CMD_ID_ETH_HALT | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got halt ramrod\n");
		bp->state = BNX2X_STATE_CLOSING_WAIT4_DELETE;
		fp->state = BNX2X_FP_STATE_HALTED;
		break;

	case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got delete ramrod for MULTI[%d]\n", cid);
		bnx2x_fp(bp, cid, state) = BNX2X_FP_STATE_CLOSED;
		break;


	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_OPEN):
	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_DIAG):
		DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
		bp->set_mac_pending = 0;
		break;

	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_CLOSING_WAIT4_HALT):
	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_DISABLED):
		DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n");
		break;

	default:
		BNX2X_ERR("unexpected MC reply (%d) bp->state is %x\n",
			  command, bp->state);
		break;
	}
	mb(); /* force bnx2x_wait_ramrod() to see the change */
}
1029
/* Release one RX SGE entry: unmap and free its page and clear the SGE
 * descriptor.  "Next page" ring entries (page == NULL) are left alone.
 */
static inline void bnx2x_free_rx_sge(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
	struct page *page = sw_buf->page;
	struct eth_rx_sge *sge = &fp->rx_sge_ring[index];

	/* Skip "next page" elements */
	if (!page)
		return;

	pci_unmap_page(bp->pdev, pci_unmap_addr(sw_buf, mapping),
		       SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);
	__free_pages(page, PAGES_PER_SGE_SHIFT);

	/* clear both the SW shadow and the HW descriptor */
	sw_buf->page = NULL;
	sge->addr_hi = 0;
	sge->addr_lo = 0;
}
1049
/* Free the first @last entries of the fastpath RX SGE ring. */
static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp,
					   struct bnx2x_fastpath *fp, int last)
{
	int idx;

	for (idx = 0; idx < last; idx++)
		bnx2x_free_rx_sge(bp, fp, idx);
}
1058
/* Allocate and DMA-map a fresh page (compound, PAGES_PER_SGE_SHIFT order)
 * for RX SGE slot @index, recording the mapping in the SW shadow ring and
 * the bus address in the HW SGE descriptor.
 * Returns 0 on success, -ENOMEM on allocation or mapping failure.
 */
static inline int bnx2x_alloc_rx_sge(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct page *page = alloc_pages(GFP_ATOMIC, PAGES_PER_SGE_SHIFT);
	struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
	struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
	dma_addr_t mapping;

	if (unlikely(page == NULL))
		return -ENOMEM;

	mapping = pci_map_page(bp->pdev, page, 0, SGE_PAGE_SIZE*PAGES_PER_SGE,
			       PCI_DMA_FROMDEVICE);
	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		/* mapping failed - give the page back */
		__free_pages(page, PAGES_PER_SGE_SHIFT);
		return -ENOMEM;
	}

	sw_buf->page = page;
	pci_unmap_addr_set(sw_buf, mapping, mapping);

	/* publish the 64-bit bus address to the HW descriptor */
	sge->addr_hi = cpu_to_le32(U64_HI(mapping));
	sge->addr_lo = cpu_to_le32(U64_LO(mapping));

	return 0;
}
1085
/* Allocate and DMA-map a new RX skb for BD slot @index, filling in the SW
 * ring entry and the HW RX BD with the mapped address.
 * Returns 0 on success, -ENOMEM on allocation or mapping failure.
 */
static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct sk_buff *skb;
	struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
	struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
	dma_addr_t mapping;

	skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
	if (unlikely(skb == NULL))
		return -ENOMEM;

	mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_size,
				 PCI_DMA_FROMDEVICE);
	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		/* mapping failed - drop the freshly allocated skb */
		dev_kfree_skb(skb);
		return -ENOMEM;
	}

	rx_buf->skb = skb;
	pci_unmap_addr_set(rx_buf, mapping, mapping);

	/* publish the 64-bit bus address to the HW descriptor */
	rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

	return 0;
}
1113
/* note that we are not allocating a new skb,
 * we are just moving one from cons to prod
 * we are not creating a new mapping,
 * so there is no need to check for dma_mapping_error().
 */
static void bnx2x_reuse_rx_skb(struct bnx2x_fastpath *fp,
			       struct sk_buff *skb, u16 cons, u16 prod)
{
	struct bnx2x *bp = fp->bp;
	struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
	struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
	struct eth_rx_bd *cons_bd = &fp->rx_desc_ring[cons];
	struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];

	/* hand the (header-sized) region back to the device before reuse */
	pci_dma_sync_single_for_device(bp->pdev,
				       pci_unmap_addr(cons_rx_buf, mapping),
				       RX_COPY_THRESH, PCI_DMA_FROMDEVICE);

	/* carry the skb, its mapping and the HW BD over to the prod slot */
	prod_rx_buf->skb = cons_rx_buf->skb;
	pci_unmap_addr_set(prod_rx_buf, mapping,
			   pci_unmap_addr(cons_rx_buf, mapping));
	*prod_bd = *cons_bd;
}
1137
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07001138static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
1139 u16 idx)
1140{
1141 u16 last_max = fp->last_max_sge;
1142
1143 if (SUB_S16(idx, last_max) > 0)
1144 fp->last_max_sge = idx;
1145}
1146
1147static void bnx2x_clear_sge_mask_next_elems(struct bnx2x_fastpath *fp)
1148{
1149 int i, j;
1150
1151 for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
1152 int idx = RX_SGE_CNT * i - 1;
1153
1154 for (j = 0; j < 2; j++) {
1155 SGE_MASK_CLEAR_BIT(fp, idx);
1156 idx--;
1157 }
1158 }
1159}
1160
/* Advance the SGE producer after a TPA completion: mark the pages consumed
 * by this CQE in the SGE mask, then push rx_sge_prod forward over every
 * fully-consumed mask element (refilling the mask as we go).
 */
static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
				  struct eth_fast_path_rx_cqe *fp_cqe)
{
	struct bnx2x *bp = fp->bp;
	/* number of SGE pages used by the part of the packet beyond the BD */
	u16 sge_len = SGE_PAGE_ALIGN(le16_to_cpu(fp_cqe->pkt_len) -
				     le16_to_cpu(fp_cqe->len_on_bd)) >>
		      SGE_PAGE_SHIFT;
	u16 last_max, last_elem, first_elem;
	u16 delta = 0;
	u16 i;

	if (!sge_len)
		return;

	/* First mark all used pages */
	for (i = 0; i < sge_len; i++)
		SGE_MASK_CLEAR_BIT(fp, RX_SGE(le16_to_cpu(fp_cqe->sgl[i])));

	DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
	   sge_len - 1, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));

	/* Here we assume that the last SGE index is the biggest */
	prefetch((void *)(fp->sge_mask));
	bnx2x_update_last_max_sge(fp, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));

	last_max = RX_SGE(fp->last_max_sge);
	last_elem = last_max >> RX_SGE_MASK_ELEM_SHIFT;
	first_elem = RX_SGE(fp->rx_sge_prod) >> RX_SGE_MASK_ELEM_SHIFT;

	/* If ring is not full */
	if (last_elem + 1 != first_elem)
		last_elem++;

	/* Now update the prod */
	for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
		/* stop at the first element that still has outstanding
		 * (cleared) pages */
		if (likely(fp->sge_mask[i]))
			break;

		/* element fully consumed - reset its mask and advance */
		fp->sge_mask[i] = RX_SGE_MASK_ELEM_ONE_MASK;
		delta += RX_SGE_MASK_ELEM_SZ;
	}

	if (delta > 0) {
		fp->rx_sge_prod += delta;
		/* clear page-end entries */
		bnx2x_clear_sge_mask_next_elems(fp);
	}

	DP(NETIF_MSG_RX_STATUS,
	   "fp->last_max_sge = %d  fp->rx_sge_prod = %d\n",
	   fp->last_max_sge, fp->rx_sge_prod);
}
1213
1214static inline void bnx2x_init_sge_ring_bit_mask(struct bnx2x_fastpath *fp)
1215{
1216 /* Set the mask to all 1-s: it's faster to compare to 0 than to 0xf-s */
1217 memset(fp->sge_mask, 0xff,
1218 (NUM_RX_SGE >> RX_SGE_MASK_ELEM_SHIFT)*sizeof(u64));
1219
Eilon Greenstein33471622008-08-13 15:59:08 -07001220 /* Clear the two last indices in the page to 1:
1221 these are the indices that correspond to the "next" element,
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07001222 hence will never be indicated and should be removed from
1223 the calculations. */
1224 bnx2x_clear_sge_mask_next_elems(fp);
1225}
1226
/* Begin TPA aggregation on bin @queue: park the partially-received skb
 * from the cons slot into the TPA pool and put the pool's spare skb on
 * the prod BD so the ring stays full while aggregation continues.
 */
static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
			    struct sk_buff *skb, u16 cons, u16 prod)
{
	struct bnx2x *bp = fp->bp;
	struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
	struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
	struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
	dma_addr_t mapping;

	/* move empty skb from pool to prod and map it */
	prod_rx_buf->skb = fp->tpa_pool[queue].skb;
	mapping = pci_map_single(bp->pdev, fp->tpa_pool[queue].skb->data,
				 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
	pci_unmap_addr_set(prod_rx_buf, mapping, mapping);

	/* move partial skb from cons to pool (don't unmap yet) */
	fp->tpa_pool[queue] = *cons_rx_buf;

	/* mark bin state as start - print error if current state != stop */
	if (fp->tpa_state[queue] != BNX2X_TPA_STOP)
		BNX2X_ERR("start of bin not in stop [%d]\n", queue);

	fp->tpa_state[queue] = BNX2X_TPA_START;

	/* point prod_bd to new skb */
	prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

#ifdef BNX2X_STOP_ON_ERROR
	fp->tpa_queue_used |= (1 << queue);
#ifdef __powerpc64__
	DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
#else
	DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
#endif
	   fp->tpa_queue_used);
#endif
}
1265
/* Attach the SGE pages listed in @fp_cqe to @skb as page fragments,
 * replenishing each consumed SGE slot with a fresh page.  On a refill
 * failure the walk stops and a negative errno is returned (the packet
 * will be dropped by the caller); returns 0 on success.
 */
static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			       struct sk_buff *skb,
			       struct eth_fast_path_rx_cqe *fp_cqe,
			       u16 cqe_idx)
{
	struct sw_rx_page *rx_pg, old_rx_pg;
	u16 len_on_bd = le16_to_cpu(fp_cqe->len_on_bd);
	u32 i, frag_len, frag_size, pages;
	int err;
	int j;

	/* bytes carried by the SGEs (everything past the first BD) */
	frag_size = le16_to_cpu(fp_cqe->pkt_len) - len_on_bd;
	pages = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT;

	/* This is needed in order to enable forwarding support */
	if (frag_size)
		skb_shinfo(skb)->gso_size = min((u32)SGE_PAGE_SIZE,
					       max(frag_size, (u32)len_on_bd));

#ifdef BNX2X_STOP_ON_ERROR
	if (pages >
	    min((u32)8, (u32)MAX_SKB_FRAGS) * SGE_PAGE_SIZE * PAGES_PER_SGE) {
		BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
			  pages, cqe_idx);
		BNX2X_ERR("fp_cqe->pkt_len = %d  fp_cqe->len_on_bd = %d\n",
			  fp_cqe->pkt_len, len_on_bd);
		bnx2x_panic();
		return -EINVAL;
	}
#endif

	/* Run through the SGL and compose the fragmented skb */
	for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
		u16 sge_idx = RX_SGE(le16_to_cpu(fp_cqe->sgl[j]));

		/* FW gives the indices of the SGE as if the ring is an array
		   (meaning that "next" element will consume 2 indices) */
		frag_len = min(frag_size, (u32)(SGE_PAGE_SIZE*PAGES_PER_SGE));
		rx_pg = &fp->rx_page_ring[sge_idx];
		/* keep a copy: the slot is about to be re-filled */
		old_rx_pg = *rx_pg;

		/* If we fail to allocate a substitute page, we simply stop
		   where we are and drop the whole packet */
		err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
		if (unlikely(err)) {
			fp->eth_q_stats.rx_skb_alloc_failed++;
			return err;
		}

		/* Unmap the page as we r going to pass it to the stack */
		pci_unmap_page(bp->pdev, pci_unmap_addr(&old_rx_pg, mapping),
			      SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);

		/* Add one frag and update the appropriate fields in the skb */
		skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);

		skb->data_len += frag_len;
		skb->truesize += frag_len;
		skb->len += frag_len;

		frag_size -= frag_len;
	}

	return 0;
}
1331
/* Complete TPA aggregation on bin @queue: unmap the pooled skb, fix up
 * its IP checksum, attach the SGE fragments and hand it to the stack,
 * replacing the pool entry with a freshly allocated skb.  If the new skb
 * allocation fails the aggregated packet is dropped and the buffer stays
 * in the bin.  The bin always ends in BNX2X_TPA_STOP.
 */
static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			   u16 queue, int pad, int len, union eth_rx_cqe *cqe,
			   u16 cqe_idx)
{
	struct sw_rx_bd *rx_buf = &fp->tpa_pool[queue];
	struct sk_buff *skb = rx_buf->skb;
	/* alloc new skb */
	struct sk_buff *new_skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);

	/* Unmap skb in the pool anyway, as we are going to change
	   pool entry status to BNX2X_TPA_STOP even if new skb allocation
	   fails. */
	pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
			 bp->rx_buf_size, PCI_DMA_FROMDEVICE);

	if (likely(new_skb)) {
		/* fix ip xsum and give it to the stack */
		/* (no need to map the new skb) */
#ifdef BCM_VLAN
		int is_vlan_cqe =
			(le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
			 PARSING_FLAGS_VLAN);
		int is_not_hwaccel_vlan_cqe =
			(is_vlan_cqe && (!(bp->flags & HW_VLAN_RX_FLAG)));
#endif

		prefetch(skb);
		prefetch(((char *)(skb)) + 128);

#ifdef BNX2X_STOP_ON_ERROR
		if (pad + len > bp->rx_buf_size) {
			BNX2X_ERR("skb_put is about to fail... "
				  "pad %d  len %d  rx_buf_size %d\n",
				  pad, len, bp->rx_buf_size);
			bnx2x_panic();
			return;
		}
#endif

		skb_reserve(skb, pad);
		skb_put(skb, len);

		skb->protocol = eth_type_trans(skb, bp->dev);
		/* HW validated the checksum for the aggregated packet */
		skb->ip_summed = CHECKSUM_UNNECESSARY;

		{
			struct iphdr *iph;

			iph = (struct iphdr *)skb->data;
#ifdef BCM_VLAN
			/* If there is no Rx VLAN offloading -
			   take VLAN tag into an account */
			if (unlikely(is_not_hwaccel_vlan_cqe))
				iph = (struct iphdr *)((u8 *)iph + VLAN_HLEN);
#endif
			/* recompute the IP header checksum: aggregation
			   changed the total length */
			iph->check = 0;
			iph->check = ip_fast_csum((u8 *)iph, iph->ihl);
		}

		if (!bnx2x_fill_frag_skb(bp, fp, skb,
					 &cqe->fast_path_cqe, cqe_idx)) {
#ifdef BCM_VLAN
			if ((bp->vlgrp != NULL) && is_vlan_cqe &&
			    (!is_not_hwaccel_vlan_cqe))
				vlan_hwaccel_receive_skb(skb, bp->vlgrp,
						le16_to_cpu(cqe->fast_path_cqe.
							    vlan_tag));
			else
#endif
				netif_receive_skb(skb);
		} else {
			DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages"
			   " - dropping packet!\n");
			dev_kfree_skb(skb);
		}


		/* put new skb in bin */
		fp->tpa_pool[queue].skb = new_skb;

	} else {
		/* else drop the packet and keep the buffer in the bin */
		DP(NETIF_MSG_RX_STATUS,
		   "Failed to allocate new skb - dropping packet!\n");
		fp->eth_q_stats.rx_skb_alloc_failed++;
	}

	fp->tpa_state[queue] = BNX2X_TPA_STOP;
}
1421
/* Publish new RX producers (BD, CQE and SGE) to the USTORM internal
 * memory.  The wmb() before the writes is required on weakly-ordered
 * archs so the FW never sees a producer ahead of its BD/SGE contents.
 */
static inline void bnx2x_update_rx_prod(struct bnx2x *bp,
					struct bnx2x_fastpath *fp,
					u16 bd_prod, u16 rx_comp_prod,
					u16 rx_sge_prod)
{
	struct ustorm_eth_rx_producers rx_prods = {0};
	int i;

	/* Update producers */
	rx_prods.bd_prod = bd_prod;
	rx_prods.cqe_prod = rx_comp_prod;
	rx_prods.sge_prod = rx_sge_prod;

	/*
	 * Make sure that the BD and SGE data is updated before updating the
	 * producers since FW might read the BD/SGE right after the producer
	 * is updated.
	 * This is only applicable for weak-ordered memory model archs such
	 * as IA-64. The following barrier is also mandatory since FW will
	 * assumes BDs must have buffers.
	 */
	wmb();

	/* copy the producer struct word-by-word into USTORM memory */
	for (i = 0; i < sizeof(struct ustorm_eth_rx_producers)/4; i++)
		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_RX_PRODS_OFFSET(BP_PORT(bp), fp->cl_id) + i*4,
		       ((u32 *)&rx_prods)[i]);

	mmiowb(); /* keep prod updates ordered */

	DP(NETIF_MSG_RX_STATUS,
	   "queue[%d]:  wrote  bd_prod %u  cqe_prod %u  sge_prod %u\n",
	   fp->index, bd_prod, rx_comp_prod, rx_sge_prod);
}
1456
/*
 * bnx2x_rx_int - service the Rx completion queue of one fastpath queue.
 *
 * @fp:     fastpath (per-queue) context whose completion ring is polled
 * @budget: maximum number of packets to pass up the stack in this call
 *
 * Walks the Rx completion queue (RCQ) from the software consumer up to the
 * hardware consumer taken from the status block, dispatching each CQE as a
 * slowpath event, a TPA (LRO aggregation) start/stop, or a regular packet.
 * Returns the number of packets processed (<= budget), for the NAPI core.
 */
static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
{
	struct bnx2x *bp = fp->bp;
	u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
	u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
	int rx_pkt = 0;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return 0;
#endif

	/* CQ "next element" is of the size of the regular element,
	   that's why it's ok here */
	hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
	if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
		hw_comp_cons++;

	/* Snapshot the ring indices; bd_prod_fw tracks the producer value
	   that will eventually be reported back to the firmware */
	bd_cons = fp->rx_bd_cons;
	bd_prod = fp->rx_bd_prod;
	bd_prod_fw = bd_prod;
	sw_comp_cons = fp->rx_comp_cons;
	sw_comp_prod = fp->rx_comp_prod;

	/* Memory barrier necessary as speculative reads of the rx
	 * buffer can be ahead of the index in the status block
	 */
	rmb();

	DP(NETIF_MSG_RX_STATUS,
	   "queue[%d]: hw_comp_cons %u sw_comp_cons %u\n",
	   fp->index, hw_comp_cons, sw_comp_cons);

	while (sw_comp_cons != hw_comp_cons) {
		struct sw_rx_bd *rx_buf = NULL;
		struct sk_buff *skb;
		union eth_rx_cqe *cqe;
		u8 cqe_fp_flags;
		u16 len, pad;

		/* Mask raw indices down to actual ring positions */
		comp_ring_cons = RCQ_BD(sw_comp_cons);
		bd_prod = RX_BD(bd_prod);
		bd_cons = RX_BD(bd_cons);

		cqe = &fp->rx_comp_ring[comp_ring_cons];
		cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;

		DP(NETIF_MSG_RX_STATUS, "CQE type %x err %x status %x"
		   " queue %x vlan %x len %u\n", CQE_TYPE(cqe_fp_flags),
		   cqe_fp_flags, cqe->fast_path_cqe.status_flags,
		   le32_to_cpu(cqe->fast_path_cqe.rss_hash_result),
		   le16_to_cpu(cqe->fast_path_cqe.vlan_tag),
		   le16_to_cpu(cqe->fast_path_cqe.pkt_len));

		/* is this a slowpath msg? */
		if (unlikely(CQE_TYPE(cqe_fp_flags))) {
			bnx2x_sp_event(fp, cqe);
			goto next_cqe;

		/* this is an rx packet */
		} else {
			rx_buf = &fp->rx_buf_ring[bd_cons];
			skb = rx_buf->skb;
			len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
			pad = cqe->fast_path_cqe.placement_offset;

			/* If CQE is marked both TPA_START and TPA_END
			   it is a non-TPA CQE */
			if ((!fp->disable_tpa) &&
			    (TPA_TYPE(cqe_fp_flags) !=
			     (TPA_TYPE_START | TPA_TYPE_END))) {
				u16 queue = cqe->fast_path_cqe.queue_index;

				if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_START) {
					DP(NETIF_MSG_RX_STATUS,
					   "calling tpa_start on queue %d\n",
					   queue);

					bnx2x_tpa_start(fp, queue, skb,
							bd_cons, bd_prod);
					goto next_rx;
				}

				if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_END) {
					DP(NETIF_MSG_RX_STATUS,
					   "calling tpa_stop on queue %d\n",
					   queue);

					if (!BNX2X_RX_SUM_FIX(cqe))
						BNX2X_ERR("STOP on none TCP "
							  "data\n");

					/* This is a size of the linear data
					   on this skb */
					len = le16_to_cpu(cqe->fast_path_cqe.
								len_on_bd);
					bnx2x_tpa_stop(bp, fp, queue, pad,
						       len, cqe, comp_ring_cons);
#ifdef BNX2X_STOP_ON_ERROR
					if (bp->panic)
						return 0;
#endif

					bnx2x_update_sge_prod(fp,
							&cqe->fast_path_cqe);
					goto next_cqe;
				}
			}

			/* Sync only the header area the CPU may have
			   touched back to the device's view */
			pci_dma_sync_single_for_device(bp->pdev,
					pci_unmap_addr(rx_buf, mapping),
						       pad + RX_COPY_THRESH,
						       PCI_DMA_FROMDEVICE);
			prefetch(skb);
			prefetch(((char *)(skb)) + 128);

			/* is this an error packet? */
			/* NOTE: ETH_RX_ERROR_FALGS is the (typo'd) macro name
			   from the firmware headers - do not "fix" it here */
			if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
				DP(NETIF_MSG_RX_ERR,
				   "ERROR flags %x rx packet %u\n",
				   cqe_fp_flags, sw_comp_cons);
				fp->eth_q_stats.rx_err_discard_pkt++;
				goto reuse_rx;
			}

			/* Since we don't have a jumbo ring
			 * copy small packets if mtu > 1500
			 */
			if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
			    (len <= RX_COPY_THRESH)) {
				struct sk_buff *new_skb;

				new_skb = netdev_alloc_skb(bp->dev,
							   len + pad);
				if (new_skb == NULL) {
					DP(NETIF_MSG_RX_ERR,
					   "ERROR packet dropped "
					   "because of alloc failure\n");
					fp->eth_q_stats.rx_skb_alloc_failed++;
					goto reuse_rx;
				}

				/* aligned copy */
				skb_copy_from_linear_data_offset(skb, pad,
						    new_skb->data + pad, len);
				skb_reserve(new_skb, pad);
				skb_put(new_skb, len);

				/* original buffer stays on the ring */
				bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);

				skb = new_skb;

			} else if (bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0) {
				/* replacement buffer posted - hand the full
				   skb up the stack */
				pci_unmap_single(bp->pdev,
					pci_unmap_addr(rx_buf, mapping),
						 bp->rx_buf_size,
						 PCI_DMA_FROMDEVICE);
				skb_reserve(skb, pad);
				skb_put(skb, len);

			} else {
				DP(NETIF_MSG_RX_ERR,
				   "ERROR packet dropped because "
				   "of alloc failure\n");
				fp->eth_q_stats.rx_skb_alloc_failed++;
reuse_rx:
				/* recycle the buffer back to the BD ring and
				   drop the packet */
				bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
				goto next_rx;
			}

			skb->protocol = eth_type_trans(skb, bp->dev);

			/* Trust the HW checksum only when Rx csum offload is
			   enabled and the CQE reports it as valid */
			skb->ip_summed = CHECKSUM_NONE;
			if (bp->rx_csum) {
				if (likely(BNX2X_RX_CSUM_OK(cqe)))
					skb->ip_summed = CHECKSUM_UNNECESSARY;
				else
					fp->eth_q_stats.hw_csum_err++;
			}
		}

		skb_record_rx_queue(skb, fp->index);
#ifdef BCM_VLAN
		if ((bp->vlgrp != NULL) && (bp->flags & HW_VLAN_RX_FLAG) &&
		    (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
		     PARSING_FLAGS_VLAN))
			vlan_hwaccel_receive_skb(skb, bp->vlgrp,
				le16_to_cpu(cqe->fast_path_cqe.vlan_tag));
		else
#endif
			netif_receive_skb(skb);


next_rx:
		rx_buf->skb = NULL;

		bd_cons = NEXT_RX_IDX(bd_cons);
		bd_prod = NEXT_RX_IDX(bd_prod);
		bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
		rx_pkt++;
next_cqe:
		sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
		sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);

		if (rx_pkt == budget)
			break;
	} /* while */

	/* Publish the new ring state and tell the chip about it */
	fp->rx_bd_cons = bd_cons;
	fp->rx_bd_prod = bd_prod_fw;
	fp->rx_comp_cons = sw_comp_cons;
	fp->rx_comp_prod = sw_comp_prod;

	/* Update producers */
	bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
			     fp->rx_sge_prod);

	fp->rx_pkt += rx_pkt;
	fp->rx_calls++;

	return rx_pkt;
}
1679
/*
 * bnx2x_msix_fp_int - MSI-X interrupt handler for one fastpath queue.
 *
 * @irq:       interrupt number (unused)
 * @fp_cookie: the struct bnx2x_fastpath registered with request_irq()
 *
 * For an Rx queue the real work is deferred to NAPI; for a Tx queue the
 * completions are processed inline and the status-block interrupt is
 * re-enabled here.  Always returns IRQ_HANDLED.
 */
static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
{
	struct bnx2x_fastpath *fp = fp_cookie;
	struct bnx2x *bp = fp->bp;

	/* Return here if interrupt is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
		return IRQ_HANDLED;
	}

	DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB [%d:%d]\n",
	   fp->index, fp->sb_id);
	/* mask further interrupts from this status block until serviced */
	bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif
	/* Handle Rx or Tx according to MSI-X vector */
	if (fp->is_rx_queue) {
		prefetch(fp->rx_cons_sb);
		prefetch(&fp->status_blk->u_status_block.status_block_index);

		/* Rx is polled from NAPI context; interrupts are re-enabled
		   by the poll routine when work is done */
		napi_schedule(&bnx2x_fp(bp, fp->index, napi));

	} else {
		prefetch(fp->tx_cons_sb);
		prefetch(&fp->status_blk->c_status_block.status_block_index);

		bnx2x_update_fpsb_idx(fp);
		/* barrier: read the SB index before consuming completions */
		rmb();
		bnx2x_tx_int(fp);

		/* Re-enable interrupts */
		bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID,
			     le16_to_cpu(fp->fp_u_idx), IGU_INT_NOP, 1);
		bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID,
			     le16_to_cpu(fp->fp_c_idx), IGU_INT_ENABLE, 1);
	}

	return IRQ_HANDLED;
}
1723
/*
 * bnx2x_interrupt - INTA/MSI (non MSI-X) interrupt handler.
 *
 * @irq:          interrupt number (unused)
 * @dev_instance: the net_device registered with request_irq()
 *
 * Decodes the aggregated interrupt status: bit 0 is the slowpath event
 * (handed to the sp_task workqueue), each fastpath status block owns bit
 * (0x2 << sb_id).  Returns IRQ_NONE when the (possibly shared) interrupt
 * was not ours, IRQ_HANDLED otherwise.
 */
static irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
{
	struct bnx2x *bp = netdev_priv(dev_instance);
	u16 status = bnx2x_ack_int(bp);
	u16 mask;
	int i;

	/* Return here if interrupt is shared and it's not for us */
	if (unlikely(status == 0)) {
		DP(NETIF_MSG_INTR, "not our interrupt!\n");
		return IRQ_NONE;
	}
	DP(NETIF_MSG_INTR, "got an interrupt status 0x%x\n", status);

	/* Return here if interrupt is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
		return IRQ_HANDLED;
	}

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif

	for (i = 0; i < BNX2X_NUM_QUEUES(bp); i++) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		mask = 0x2 << fp->sb_id;
		if (status & mask) {
			/* Handle Rx or Tx according to SB id */
			if (fp->is_rx_queue) {
				prefetch(fp->rx_cons_sb);
				prefetch(&fp->status_blk->u_status_block.
							status_block_index);

				/* defer Rx processing to NAPI */
				napi_schedule(&bnx2x_fp(bp, fp->index, napi));

			} else {
				prefetch(fp->tx_cons_sb);
				prefetch(&fp->status_blk->c_status_block.
							status_block_index);

				bnx2x_update_fpsb_idx(fp);
				/* read SB index before Tx completions */
				rmb();
				bnx2x_tx_int(fp);

				/* Re-enable interrupts */
				bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID,
					     le16_to_cpu(fp->fp_u_idx),
					     IGU_INT_NOP, 1);
				bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID,
					     le16_to_cpu(fp->fp_c_idx),
					     IGU_INT_ENABLE, 1);
			}
			status &= ~mask;
		}
	}


	/* bit 0: slowpath events, serviced from process context */
	if (unlikely(status & 0x1)) {
		queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);

		status &= ~0x1;
		if (!status)
			return IRQ_HANDLED;
	}

	if (status)
		DP(NETIF_MSG_INTR, "got an unknown interrupt! (status %u)\n",
		   status);

	return IRQ_HANDLED;
}
1798
1799/* end of fast path */
1800
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07001801static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event);
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07001802
1803/* Link */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001804
1805/*
1806 * General service functions
1807 */
1808
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07001809static int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
Eliezer Tamirf1410642008-02-28 11:51:50 -08001810{
Eliezer Tamirf1410642008-02-28 11:51:50 -08001811 u32 lock_status;
1812 u32 resource_bit = (1 << resource);
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07001813 int func = BP_FUNC(bp);
1814 u32 hw_lock_control_reg;
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07001815 int cnt;
Eliezer Tamirf1410642008-02-28 11:51:50 -08001816
1817 /* Validating that the resource is within range */
1818 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1819 DP(NETIF_MSG_HW,
1820 "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1821 resource, HW_LOCK_MAX_RESOURCE_VALUE);
1822 return -EINVAL;
1823 }
1824
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07001825 if (func <= 5) {
1826 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1827 } else {
1828 hw_lock_control_reg =
1829 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1830 }
1831
Eliezer Tamirf1410642008-02-28 11:51:50 -08001832 /* Validating that the resource is not already taken */
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07001833 lock_status = REG_RD(bp, hw_lock_control_reg);
Eliezer Tamirf1410642008-02-28 11:51:50 -08001834 if (lock_status & resource_bit) {
1835 DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
1836 lock_status, resource_bit);
1837 return -EEXIST;
1838 }
1839
Eilon Greenstein46230472008-08-25 15:23:30 -07001840 /* Try for 5 second every 5ms */
1841 for (cnt = 0; cnt < 1000; cnt++) {
Eliezer Tamirf1410642008-02-28 11:51:50 -08001842 /* Try to acquire the lock */
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07001843 REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
1844 lock_status = REG_RD(bp, hw_lock_control_reg);
Eliezer Tamirf1410642008-02-28 11:51:50 -08001845 if (lock_status & resource_bit)
1846 return 0;
1847
1848 msleep(5);
1849 }
1850 DP(NETIF_MSG_HW, "Timeout\n");
1851 return -EAGAIN;
1852}
1853
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07001854static int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
Eliezer Tamirf1410642008-02-28 11:51:50 -08001855{
1856 u32 lock_status;
1857 u32 resource_bit = (1 << resource);
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07001858 int func = BP_FUNC(bp);
1859 u32 hw_lock_control_reg;
Eliezer Tamirf1410642008-02-28 11:51:50 -08001860
1861 /* Validating that the resource is within range */
1862 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1863 DP(NETIF_MSG_HW,
1864 "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1865 resource, HW_LOCK_MAX_RESOURCE_VALUE);
1866 return -EINVAL;
1867 }
1868
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07001869 if (func <= 5) {
1870 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1871 } else {
1872 hw_lock_control_reg =
1873 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1874 }
1875
Eliezer Tamirf1410642008-02-28 11:51:50 -08001876 /* Validating that the resource is currently taken */
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07001877 lock_status = REG_RD(bp, hw_lock_control_reg);
Eliezer Tamirf1410642008-02-28 11:51:50 -08001878 if (!(lock_status & resource_bit)) {
1879 DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
1880 lock_status, resource_bit);
1881 return -EFAULT;
1882 }
1883
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07001884 REG_WR(bp, hw_lock_control_reg, resource_bit);
Eliezer Tamirf1410642008-02-28 11:51:50 -08001885 return 0;
1886}
1887
/* HW Lock for shared dual port PHYs */
/*
 * bnx2x_acquire_phy_lock - serialize access to a (possibly shared) PHY.
 * Takes the software mutex first, then the MDIO HW lock when the board
 * requires one; released in the opposite order by bnx2x_release_phy_lock().
 */
static void bnx2x_acquire_phy_lock(struct bnx2x *bp)
{
	mutex_lock(&bp->port.phy_mutex);

	if (bp->port.need_hw_lock)
		bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
}
1896
/*
 * bnx2x_release_phy_lock - counterpart of bnx2x_acquire_phy_lock().
 * Drops the MDIO HW lock (if the board uses one) before releasing the
 * software mutex, i.e. in reverse acquisition order.
 */
static void bnx2x_release_phy_lock(struct bnx2x *bp)
{
	if (bp->port.need_hw_lock)
		bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);

	mutex_unlock(&bp->port.phy_mutex);
}
1904
Eilon Greenstein4acac6a2009-02-12 08:36:52 +00001905int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port)
1906{
1907 /* The GPIO should be swapped if swap register is set and active */
1908 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
1909 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
1910 int gpio_shift = gpio_num +
1911 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1912 u32 gpio_mask = (1 << gpio_shift);
1913 u32 gpio_reg;
1914 int value;
1915
1916 if (gpio_num > MISC_REGISTERS_GPIO_3) {
1917 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1918 return -EINVAL;
1919 }
1920
1921 /* read GPIO value */
1922 gpio_reg = REG_RD(bp, MISC_REG_GPIO);
1923
1924 /* get the requested pin value */
1925 if ((gpio_reg & gpio_mask) == gpio_mask)
1926 value = 1;
1927 else
1928 value = 0;
1929
1930 DP(NETIF_MSG_LINK, "pin %d value 0x%x\n", gpio_num, value);
1931
1932 return value;
1933}
1934
Eilon Greenstein17de50b2008-08-13 15:56:59 -07001935int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
Eliezer Tamirf1410642008-02-28 11:51:50 -08001936{
1937 /* The GPIO should be swapped if swap register is set and active */
1938 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
Eilon Greenstein17de50b2008-08-13 15:56:59 -07001939 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
Eliezer Tamirf1410642008-02-28 11:51:50 -08001940 int gpio_shift = gpio_num +
1941 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1942 u32 gpio_mask = (1 << gpio_shift);
1943 u32 gpio_reg;
1944
1945 if (gpio_num > MISC_REGISTERS_GPIO_3) {
1946 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1947 return -EINVAL;
1948 }
1949
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07001950 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
Eliezer Tamirf1410642008-02-28 11:51:50 -08001951 /* read GPIO and mask except the float bits */
1952 gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);
1953
1954 switch (mode) {
1955 case MISC_REGISTERS_GPIO_OUTPUT_LOW:
1956 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output low\n",
1957 gpio_num, gpio_shift);
1958 /* clear FLOAT and set CLR */
1959 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1960 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
1961 break;
1962
1963 case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
1964 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output high\n",
1965 gpio_num, gpio_shift);
1966 /* clear FLOAT and set SET */
1967 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1968 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
1969 break;
1970
Eilon Greenstein17de50b2008-08-13 15:56:59 -07001971 case MISC_REGISTERS_GPIO_INPUT_HI_Z:
Eliezer Tamirf1410642008-02-28 11:51:50 -08001972 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n",
1973 gpio_num, gpio_shift);
1974 /* set FLOAT */
1975 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1976 break;
1977
1978 default:
1979 break;
1980 }
1981
1982 REG_WR(bp, MISC_REG_GPIO, gpio_reg);
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07001983 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
Eliezer Tamirf1410642008-02-28 11:51:50 -08001984
1985 return 0;
1986}
1987
Eilon Greenstein4acac6a2009-02-12 08:36:52 +00001988int bnx2x_set_gpio_int(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
1989{
1990 /* The GPIO should be swapped if swap register is set and active */
1991 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
1992 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
1993 int gpio_shift = gpio_num +
1994 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1995 u32 gpio_mask = (1 << gpio_shift);
1996 u32 gpio_reg;
1997
1998 if (gpio_num > MISC_REGISTERS_GPIO_3) {
1999 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
2000 return -EINVAL;
2001 }
2002
2003 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
2004 /* read GPIO int */
2005 gpio_reg = REG_RD(bp, MISC_REG_GPIO_INT);
2006
2007 switch (mode) {
2008 case MISC_REGISTERS_GPIO_INT_OUTPUT_CLR:
2009 DP(NETIF_MSG_LINK, "Clear GPIO INT %d (shift %d) -> "
2010 "output low\n", gpio_num, gpio_shift);
2011 /* clear SET and set CLR */
2012 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
2013 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
2014 break;
2015
2016 case MISC_REGISTERS_GPIO_INT_OUTPUT_SET:
2017 DP(NETIF_MSG_LINK, "Set GPIO INT %d (shift %d) -> "
2018 "output high\n", gpio_num, gpio_shift);
2019 /* clear CLR and set SET */
2020 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
2021 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
2022 break;
2023
2024 default:
2025 break;
2026 }
2027
2028 REG_WR(bp, MISC_REG_GPIO_INT, gpio_reg);
2029 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
2030
2031 return 0;
2032}
2033
Eliezer Tamirf1410642008-02-28 11:51:50 -08002034static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
2035{
2036 u32 spio_mask = (1 << spio_num);
2037 u32 spio_reg;
2038
2039 if ((spio_num < MISC_REGISTERS_SPIO_4) ||
2040 (spio_num > MISC_REGISTERS_SPIO_7)) {
2041 BNX2X_ERR("Invalid SPIO %d\n", spio_num);
2042 return -EINVAL;
2043 }
2044
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07002045 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
Eliezer Tamirf1410642008-02-28 11:51:50 -08002046 /* read SPIO and mask except the float bits */
2047 spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT);
2048
2049 switch (mode) {
Eilon Greenstein6378c022008-08-13 15:59:25 -07002050 case MISC_REGISTERS_SPIO_OUTPUT_LOW:
Eliezer Tamirf1410642008-02-28 11:51:50 -08002051 DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num);
2052 /* clear FLOAT and set CLR */
2053 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
2054 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_CLR_POS);
2055 break;
2056
Eilon Greenstein6378c022008-08-13 15:59:25 -07002057 case MISC_REGISTERS_SPIO_OUTPUT_HIGH:
Eliezer Tamirf1410642008-02-28 11:51:50 -08002058 DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num);
2059 /* clear FLOAT and set SET */
2060 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
2061 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_SET_POS);
2062 break;
2063
2064 case MISC_REGISTERS_SPIO_INPUT_HI_Z:
2065 DP(NETIF_MSG_LINK, "Set SPIO %d -> input\n", spio_num);
2066 /* set FLOAT */
2067 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
2068 break;
2069
2070 default:
2071 break;
2072 }
2073
2074 REG_WR(bp, MISC_REG_SPIO, spio_reg);
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07002075 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
Eliezer Tamirf1410642008-02-28 11:51:50 -08002076
2077 return 0;
2078}
2079
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07002080static void bnx2x_calc_fc_adv(struct bnx2x *bp)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002081{
Eilon Greensteinad33ea32009-01-14 21:24:57 -08002082 switch (bp->link_vars.ieee_fc &
2083 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) {
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07002084 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002085 bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07002086 ADVERTISED_Pause);
Eliezer Tamirf1410642008-02-28 11:51:50 -08002087 break;
Eilon Greenstein356e2382009-02-12 08:38:32 +00002088
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07002089 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002090 bp->port.advertising |= (ADVERTISED_Asym_Pause |
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07002091 ADVERTISED_Pause);
Eliezer Tamirf1410642008-02-28 11:51:50 -08002092 break;
Eilon Greenstein356e2382009-02-12 08:38:32 +00002093
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07002094 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002095 bp->port.advertising |= ADVERTISED_Asym_Pause;
Eliezer Tamirf1410642008-02-28 11:51:50 -08002096 break;
Eilon Greenstein356e2382009-02-12 08:38:32 +00002097
Eliezer Tamirf1410642008-02-28 11:51:50 -08002098 default:
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002099 bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07002100 ADVERTISED_Pause);
Eliezer Tamirf1410642008-02-28 11:51:50 -08002101 break;
2102 }
2103}
2104
/*
 * bnx2x_link_report - print the current link state to the kernel log and
 * sync the netdev carrier flag accordingly.
 *
 * The "Link is Up" line is assembled from several consecutive printk()
 * calls (speed, duplex, flow control) - their order composes the message.
 */
static void bnx2x_link_report(struct bnx2x *bp)
{
	/* a disabled function always reports link down */
	if (bp->state == BNX2X_STATE_DISABLED) {
		netif_carrier_off(bp->dev);
		printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);
		return;
	}

	if (bp->link_vars.link_up) {
		if (bp->state == BNX2X_STATE_OPEN)
			netif_carrier_on(bp->dev);
		printk(KERN_INFO PFX "%s NIC Link is Up, ", bp->dev->name);

		printk("%d Mbps ", bp->link_vars.line_speed);

		if (bp->link_vars.duplex == DUPLEX_FULL)
			printk("full duplex");
		else
			printk("half duplex");

		/* append the negotiated flow-control directions, if any */
		if (bp->link_vars.flow_ctrl != BNX2X_FLOW_CTRL_NONE) {
			if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) {
				printk(", receive ");
				if (bp->link_vars.flow_ctrl &
				    BNX2X_FLOW_CTRL_TX)
					printk("& transmit ");
			} else {
				printk(", transmit ");
			}
			printk("flow control ON");
		}
		printk("\n");

	} else { /* link_down */
		netif_carrier_off(bp->dev);
		printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);
	}
}
2143
/*
 * bnx2x_initial_phy_init - bring up the PHY/link for the first time.
 *
 * @bp:        driver context
 * @load_mode: LOAD_* mode; LOAD_DIAG selects XGXS loopback for self-test
 *
 * Requires a working bootcode (MCP); returns the bnx2x_phy_init() result
 * on success paths, or -EINVAL when the bootcode is missing.
 */
static u8 bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode)
{
	if (!BP_NOMCP(bp)) {
		u8 rc;

		/* Initialize link parameters structure variables */
		/* It is recommended to turn off RX FC for jumbo frames
		   for better performance */
		if (bp->dev->mtu > 5000)
			bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_TX;
		else
			bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;

		bnx2x_acquire_phy_lock(bp);

		if (load_mode == LOAD_DIAG)
			bp->link_params.loopback_mode = LOOPBACK_XGXS_10;

		rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);

		bnx2x_release_phy_lock(bp);

		/* reflect the negotiated pause mode in ethtool state */
		bnx2x_calc_fc_adv(bp);

		/* on emulation/FPGA, report link state immediately */
		if (CHIP_REV_IS_SLOW(bp) && bp->link_vars.link_up) {
			bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
			bnx2x_link_report(bp);
		}

		return rc;
	}
	BNX2X_ERR("Bootcode is missing - can not initialize link\n");
	return -EINVAL;
}
2178
/*
 * bnx2x_link_set - (re)program the link with the current parameters.
 * PHY access is serialized via the phy lock; refreshes the advertised
 * pause bits afterwards.  No-op (with error log) without bootcode.
 */
static void bnx2x_link_set(struct bnx2x *bp)
{
	if (!BP_NOMCP(bp)) {
		bnx2x_acquire_phy_lock(bp);
		bnx2x_phy_init(&bp->link_params, &bp->link_vars);
		bnx2x_release_phy_lock(bp);

		bnx2x_calc_fc_adv(bp);
	} else
		BNX2X_ERR("Bootcode is missing - can not set link\n");
}
2190
/*
 * bnx2x__link_reset - bring the link down under the phy lock.
 * The trailing '1' asks the link code for a full reset of the external
 * PHY as well - NOTE(review): inferred from the call site; confirm
 * against bnx2x_link_reset()'s prototype.  No-op without bootcode.
 */
static void bnx2x__link_reset(struct bnx2x *bp)
{
	if (!BP_NOMCP(bp)) {
		bnx2x_acquire_phy_lock(bp);
		bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1);
		bnx2x_release_phy_lock(bp);
	} else
		BNX2X_ERR("Bootcode is missing - can not reset link\n");
}
2200
2201static u8 bnx2x_link_test(struct bnx2x *bp)
2202{
2203 u8 rc;
2204
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07002205 bnx2x_acquire_phy_lock(bp);
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07002206 rc = bnx2x_test_link(&bp->link_params, &bp->link_vars);
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07002207 bnx2x_release_phy_lock(bp);
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07002208
2209 return rc;
2210}
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002211
/* Initialize the per-port rate-shaping and fairness contexts (bp->cmng)
 * from the current line speed.  Called on multi-function devices when
 * the link comes up, before the per-VN min/max contexts are programmed.
 */
static void bnx2x_init_port_minmax(struct bnx2x *bp)
{
	/* line_speed is in Mbps, so r_param is bytes per msec */
	u32 r_param = bp->link_vars.line_speed / 8;
	u32 fair_periodic_timeout_usec;
	u32 t_fair;

	memset(&(bp->cmng.rs_vars), 0,
	       sizeof(struct rate_shaping_vars_per_port));
	memset(&(bp->cmng.fair_vars), 0, sizeof(struct fairness_vars_per_port));

	/* 100 usec in SDM ticks = 25 since each tick is 4 usec */
	bp->cmng.rs_vars.rs_periodic_timeout = RS_PERIODIC_TIMEOUT_USEC / 4;

	/* this is the threshold below which no timer arming will occur
	   1.25 coefficient is for the threshold to be a little bigger
	   than the real time, to compensate for timer in-accuracy */
	bp->cmng.rs_vars.rs_threshold =
				(RS_PERIODIC_TIMEOUT_USEC * r_param * 5) / 4;

	/* resolution of fairness timer */
	fair_periodic_timeout_usec = QM_ARB_BYTES / r_param;
	/* for 10G it is 1000usec. for 1G it is 10000usec. */
	t_fair = T_FAIR_COEF / bp->link_vars.line_speed;

	/* this is the threshold below which we won't arm the timer anymore */
	bp->cmng.fair_vars.fair_threshold = QM_ARB_BYTES;

	/* we multiply by 1e3/8 to get bytes/msec.
	   We don't want the credits to pass a credit
	   of the t_fair*FAIR_MEM (algorithm resolution) */
	bp->cmng.fair_vars.upper_bound = r_param * t_fair * FAIR_MEM;
	/* since each tick is 4 usec */
	bp->cmng.fair_vars.fairness_timeout = fair_periodic_timeout_usec / 4;
}
2246
/* Calculates the sum of vn_min_rates.
   It's needed for further normalizing of the min_rates.
   Returns:
     sum of vn_min_rates.
       or
     0 - if all the min_rates are 0.
   In the latter case fairness algorithm should be deactivated.
   If not all min_rates are zero then those that are zeroes will be set to 1.
 */
static void bnx2x_calc_vn_weight_sum(struct bnx2x *bp)
{
	int all_zero = 1;
	int port = BP_PORT(bp);
	int vn;

	bp->vn_weight_sum = 0;
	for (vn = VN_0; vn < E1HVN_MAX; vn++) {
		/* E1H function numbering: func = 2*vn + port */
		int func = 2*vn + port;
		u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
		/* min BW config field is in units of 100 Mbps */
		u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
				   FUNC_MF_CFG_MIN_BW_SHIFT) * 100;

		/* Skip hidden vns */
		if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE)
			continue;

		/* If min rate is zero - set it to 1 */
		if (!vn_min_rate)
			vn_min_rate = DEF_MIN_RATE;
		else
			all_zero = 0;

		bp->vn_weight_sum += vn_min_rate;
	}

	/* ... only if all min rates are zeros - disable fairness */
	if (all_zero)
		bp->vn_weight_sum = 0;
}
2286
/* Program the per-VN (virtual NIC) rate-shaping and fairness contexts
 * for the given function into XSTORM internal memory.
 * Min/max bandwidth come from the shared-memory MF configuration; a
 * hidden function gets both forced to zero.  Assumes
 * bnx2x_calc_vn_weight_sum() has already updated bp->vn_weight_sum.
 */
static void bnx2x_init_vn_minmax(struct bnx2x *bp, int func)
{
	struct rate_shaping_vars_per_vn m_rs_vn;
	struct fairness_vars_per_vn m_fair_vn;
	u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
	u16 vn_min_rate, vn_max_rate;
	int i;

	/* If function is hidden - set min and max to zeroes */
	if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
		vn_min_rate = 0;
		vn_max_rate = 0;

	} else {
		/* BW config fields are in units of 100 Mbps */
		vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
				FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
		/* If fairness is enabled (not all min rates are zeroes) and
		   if current min rate is zero - set it to 1.
		   This is a requirement of the algorithm. */
		if (bp->vn_weight_sum && (vn_min_rate == 0))
			vn_min_rate = DEF_MIN_RATE;
		vn_max_rate = ((vn_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
				FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
	}

	DP(NETIF_MSG_IFUP,
	   "func %d: vn_min_rate=%d vn_max_rate=%d vn_weight_sum=%d\n",
	   func, vn_min_rate, vn_max_rate, bp->vn_weight_sum);

	memset(&m_rs_vn, 0, sizeof(struct rate_shaping_vars_per_vn));
	memset(&m_fair_vn, 0, sizeof(struct fairness_vars_per_vn));

	/* global vn counter - maximal Mbps for this vn */
	m_rs_vn.vn_counter.rate = vn_max_rate;

	/* quota - number of bytes transmitted in this period */
	m_rs_vn.vn_counter.quota =
				(vn_max_rate * RS_PERIODIC_TIMEOUT_USEC) / 8;

	if (bp->vn_weight_sum) {
		/* credit for each period of the fairness algorithm:
		   number of bytes in T_FAIR (the vn share the port rate).
		   vn_weight_sum should not be larger than 10000, thus
		   T_FAIR_COEF / (8 * vn_weight_sum) will always be greater
		   than zero */
		m_fair_vn.vn_credit_delta =
			max((u32)(vn_min_rate * (T_FAIR_COEF /
						 (8 * bp->vn_weight_sum))),
			    (u32)(bp->cmng.fair_vars.fair_threshold * 2));
		DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta=%d\n",
		   m_fair_vn.vn_credit_delta);
	}

	/* Store it to internal memory (one u32 at a time) */
	for (i = 0; i < sizeof(struct rate_shaping_vars_per_vn)/4; i++)
		REG_WR(bp, BAR_XSTRORM_INTMEM +
		       XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func) + i * 4,
		       ((u32 *)(&m_rs_vn))[i]);

	for (i = 0; i < sizeof(struct fairness_vars_per_vn)/4; i++)
		REG_WR(bp, BAR_XSTRORM_INTMEM +
		       XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func) + i * 4,
		       ((u32 *)(&m_fair_vn))[i]);
}
2351
Eilon Greenstein8a1c38d2009-02-12 08:36:40 +00002352
/* This function is called upon link interrupt (NIG attention):
 * stops/re-syncs statistics, re-reads the link state, programs
 * dropless flow control on E1H, reports the link, and on
 * multi-function devices refreshes the bandwidth contexts and signals
 * the other functions on the same port via general attention bits.
 */
static void bnx2x_link_attn(struct bnx2x *bp)
{
	/* Make sure that we are synced with the current statistics */
	bnx2x_stats_handle(bp, STATS_EVENT_STOP);

	bnx2x_link_update(&bp->link_params, &bp->link_vars);

	if (bp->link_vars.link_up) {

		/* dropless flow control */
		if (CHIP_IS_E1H(bp)) {
			int port = BP_PORT(bp);
			u32 pause_enabled = 0;

			if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
				pause_enabled = 1;

			/* tell USTORM firmware whether TX pause is active */
			REG_WR(bp, BAR_USTRORM_INTMEM +
			       USTORM_ETH_PAUSE_ENABLED_OFFSET(port),
			       pause_enabled);
		}

		if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
			struct host_port_stats *pstats;

			pstats = bnx2x_sp(bp, port_stats);
			/* reset old bmac stats */
			memset(&(pstats->mac_stx[0]), 0,
			       sizeof(struct mac_stx));
		}
		if ((bp->state == BNX2X_STATE_OPEN) ||
		    (bp->state == BNX2X_STATE_DISABLED))
			bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
	}

	/* indicate link status */
	bnx2x_link_report(bp);

	if (IS_E1HMF(bp)) {
		int port = BP_PORT(bp);
		int func;
		int vn;

		for (vn = VN_0; vn < E1HVN_MAX; vn++) {
			if (vn == BP_E1HVN(bp))
				continue;

			func = ((vn << 1) | port);

			/* Set the attention towards other drivers
			   on the same port */
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
			       (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
		}

		if (bp->link_vars.link_up) {
			int i;

			/* Init rate shaping and fairness contexts */
			bnx2x_init_port_minmax(bp);

			for (vn = VN_0; vn < E1HVN_MAX; vn++)
				bnx2x_init_vn_minmax(bp, 2*vn + port);

			/* Store it to internal memory */
			for (i = 0;
			     i < sizeof(struct cmng_struct_per_port) / 4; i++)
				REG_WR(bp, BAR_XSTRORM_INTMEM +
				  XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i*4,
				       ((u32 *)(&bp->cmng))[i]);
		}
	}
}
2427
/* Refresh the cached link status outside the link-interrupt path
 * (e.g. on a PMF link-assert general attention).  Also re-reads this
 * function's MF config and recomputes the fairness weight sum before
 * reporting the link state.  Does nothing unless the device is OPEN.
 */
static void bnx2x__link_status_update(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);

	if (bp->state != BNX2X_STATE_OPEN)
		return;

	bnx2x_link_status_update(&bp->link_params, &bp->link_vars);

	if (bp->link_vars.link_up)
		bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
	else
		bnx2x_stats_handle(bp, STATS_EVENT_STOP);

	bp->mf_config = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
	bnx2x_calc_vn_weight_sum(bp);

	/* indicate link status */
	bnx2x_link_report(bp);
}
2448
/* Take over the PMF (port management function) role for this function:
 * enable NIG attention for our E1H VN in the HC leading/trailing edge
 * registers and kick the statistics state machine with a PMF event.
 */
static void bnx2x_pmf_update(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 val;

	bp->port.pmf = 1;
	DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);

	/* enable nig attention */
	val = (0xff0f | (1 << (BP_E1HVN(bp) + 4)));
	REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
	REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);

	bnx2x_stats_handle(bp, STATS_EVENT_PMF);
}
2464
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07002465/* end of Link */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002466
2467/* slow path */
2468
2469/*
2470 * General service functions
2471 */
2472
/* send the MCP a request, block until there is a reply */
/* Writes (command | seq) to this function's driver mailbox and polls
 * the firmware mailbox until the sequence numbers match.
 * Returns the firmware reply masked by FW_MSG_CODE_MASK, or 0 on
 * timeout (with a firmware dump).  Sleeps, so must not be called from
 * atomic context.
 */
u32 bnx2x_fw_command(struct bnx2x *bp, u32 command)
{
	int func = BP_FUNC(bp);
	u32 seq = ++bp->fw_seq;
	u32 rc = 0;
	u32 cnt = 1;
	/* emulation/FPGA (CHIP_REV_IS_SLOW) gets a 10x longer poll delay */
	u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;

	SHMEM_WR(bp, func_mb[func].drv_mb_header, (command | seq));
	DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq));

	do {
		/* let the FW do its magic ... */
		msleep(delay);

		rc = SHMEM_RD(bp, func_mb[func].fw_mb_header);

		/* Give the FW up to 200 polls (2s at 10ms, 20s on slow
		   chips where delay is 100ms) */
	} while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 200));

	DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n",
	   cnt*delay, rc, seq);

	/* is this a reply to our command? */
	if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK))
		rc &= FW_MSG_CODE_MASK;
	else {
		/* FW BUG! */
		BNX2X_ERR("FW failed to respond!\n");
		bnx2x_fw_dump(bp);
		rc = 0;
	}

	return rc;
}
2509
2510static void bnx2x_set_storm_rx_mode(struct bnx2x *bp);
2511static void bnx2x_set_mac_addr_e1h(struct bnx2x *bp, int set);
2512static void bnx2x_set_rx_mode(struct net_device *dev);
2513
/* Quiesce an E1H function when the MCP disables it via a DCC event:
 * stop RX filtering and TX, disable this function's LLH in the NIG,
 * detach the MAC address, clear the multicast hash and drop carrier.
 * Counterpart of bnx2x_e1h_enable().
 */
static void bnx2x_e1h_disable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int i;

	bp->rx_mode = BNX2X_RX_MODE_NONE;
	bnx2x_set_storm_rx_mode(bp);

	netif_tx_disable(bp->dev);
	bp->dev->trans_start = jiffies;	/* prevent tx timeout */

	REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);

	bnx2x_set_mac_addr_e1h(bp, 0);

	/* clear the multicast hash table */
	for (i = 0; i < MC_HASH_SIZE; i++)
		REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);

	netif_carrier_off(bp->dev);
}
2534
/* Re-enable an E1H function after an MCP DCC "enable" event: restore
 * the LLH enable bit and MAC address, wake the TX queues and reprogram
 * the RX filters.  Counterpart of bnx2x_e1h_disable().
 */
static void bnx2x_e1h_enable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);

	REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);

	bnx2x_set_mac_addr_e1h(bp, 1);

	/* Tx queue should be only reenabled */
	netif_tx_wake_all_queues(bp->dev);

	/* Initialize the receive filter. */
	bnx2x_set_rx_mode(bp->dev);
}
2549
/* Recompute and reprogram the port/VN bandwidth (min/max) contexts,
 * e.g. after a DCC bandwidth-allocation event.  Only the PMF writes
 * the merged port context to XSTORM internal memory and signals the
 * other functions on the port via general attention.
 */
static void bnx2x_update_min_max(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int vn, i;

	/* Init rate shaping and fairness contexts */
	bnx2x_init_port_minmax(bp);

	bnx2x_calc_vn_weight_sum(bp);

	for (vn = VN_0; vn < E1HVN_MAX; vn++)
		bnx2x_init_vn_minmax(bp, 2*vn + port);

	if (bp->port.pmf) {
		int func;

		/* Set the attention towards other drivers on the same port */
		for (vn = VN_0; vn < E1HVN_MAX; vn++) {
			if (vn == BP_E1HVN(bp))
				continue;

			func = ((vn << 1) | port);
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
			       (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
		}

		/* Store it to internal memory */
		for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
			REG_WR(bp, BAR_XSTRORM_INTMEM +
			       XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i*4,
			       ((u32 *)(&bp->cmng))[i]);
	}
}
2583
/* Handle a DCC event signalled by the MCP through the function
 * mailbox: PF enable/disable and bandwidth re-allocation.  Handled
 * event bits are cleared; if any bits remain unhandled the driver
 * acks DCC_FAILURE to the MCP, otherwise DCC_OK.
 */
static void bnx2x_dcc_event(struct bnx2x *bp, u32 dcc_event)
{
	int func = BP_FUNC(bp);

	DP(BNX2X_MSG_MCP, "dcc_event 0x%x\n", dcc_event);
	/* refresh our cached view of the MF configuration */
	bp->mf_config = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);

	if (dcc_event & DRV_STATUS_DCC_DISABLE_ENABLE_PF) {

		if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
			DP(NETIF_MSG_IFDOWN, "mf_cfg function disabled\n");
			bp->state = BNX2X_STATE_DISABLED;

			bnx2x_e1h_disable(bp);
		} else {
			DP(NETIF_MSG_IFUP, "mf_cfg function enabled\n");
			bp->state = BNX2X_STATE_OPEN;

			bnx2x_e1h_enable(bp);
		}
		dcc_event &= ~DRV_STATUS_DCC_DISABLE_ENABLE_PF;
	}
	if (dcc_event & DRV_STATUS_DCC_BANDWIDTH_ALLOCATION) {

		bnx2x_update_min_max(bp);
		dcc_event &= ~DRV_STATUS_DCC_BANDWIDTH_ALLOCATION;
	}

	/* Report results to MCP */
	if (dcc_event)
		bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_FAILURE);
	else
		bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_OK);
}
2618
/* the slow path queue is odd since completions arrive on the fastpath ring */
/* Post one slow-path queue element (command + 64-bit data) for
 * connection @cid and ring the XSTORM producer doorbell.  @common sets
 * the "common ramrod" bit in the SPE header.  Serialized by
 * bp->spq_lock (BH-disabling spinlock).
 * Returns 0 on success, -EIO while panicking (BNX2X_STOP_ON_ERROR),
 * or -EBUSY (after triggering a panic) when the SPQ is full.
 */
static int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
			 u32 data_hi, u32 data_lo, int common)
{
	int func = BP_FUNC(bp);

	DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
	   "SPQE (%x:%x) command %d hw_cid %x data (%x:%x) left %x\n",
	   (u32)U64_HI(bp->spq_mapping), (u32)(U64_LO(bp->spq_mapping) +
	   (void *)bp->spq_prod_bd - (void *)bp->spq), command,
	   HW_CID(bp, cid), data_hi, data_lo, bp->spq_left);

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return -EIO;
#endif

	spin_lock_bh(&bp->spq_lock);

	if (!bp->spq_left) {
		BNX2X_ERR("BUG! SPQ ring full!\n");
		spin_unlock_bh(&bp->spq_lock);
		bnx2x_panic();
		return -EBUSY;
	}

	/* CID needs port number to be encoded in it */
	bp->spq_prod_bd->hdr.conn_and_cmd_data =
			cpu_to_le32(((command << SPE_HDR_CMD_ID_SHIFT) |
				     HW_CID(bp, cid)));
	bp->spq_prod_bd->hdr.type = cpu_to_le16(ETH_CONNECTION_TYPE);
	if (common)
		bp->spq_prod_bd->hdr.type |=
			cpu_to_le16((1 << SPE_HDR_COMMON_RAMROD_SHIFT));

	bp->spq_prod_bd->data.mac_config_addr.hi = cpu_to_le32(data_hi);
	bp->spq_prod_bd->data.mac_config_addr.lo = cpu_to_le32(data_lo);

	bp->spq_left--;

	/* advance the producer, wrapping at the end of the ring */
	if (bp->spq_prod_bd == bp->spq_last_bd) {
		bp->spq_prod_bd = bp->spq;
		bp->spq_prod_idx = 0;
		DP(NETIF_MSG_TIMER, "end of spq\n");

	} else {
		bp->spq_prod_bd++;
		bp->spq_prod_idx++;
	}

	/* Make sure that BD data is updated before writing the producer */
	wmb();

	REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
	       bp->spq_prod_idx);

	mmiowb();

	spin_unlock_bh(&bp->spq_lock);
	return 0;
}
2680
/* acquire split MCP access lock register */
/* Repeatedly write bit 31 to the ALR register (GRCBASE_MCP + 0x9c) and
 * read it back until the bit sticks (lock granted), sleeping 5 ms
 * between attempts, up to 1000 tries.  Returns 0 on success, -EBUSY on
 * timeout.  Released by bnx2x_release_alr().
 */
static int bnx2x_acquire_alr(struct bnx2x *bp)
{
	u32 i, j, val;
	int rc = 0;

	might_sleep();
	i = 100;
	for (j = 0; j < i*10; j++) {
		/* request the lock by setting bit 31 */
		val = (1UL << 31);
		REG_WR(bp, GRCBASE_MCP + 0x9c, val);
		val = REG_RD(bp, GRCBASE_MCP + 0x9c);
		if (val & (1L << 31))
			break;

		msleep(5);
	}
	if (!(val & (1L << 31))) {
		BNX2X_ERR("Cannot acquire MCP access lock register\n");
		rc = -EBUSY;
	}

	return rc;
}
2705
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07002706/* release split MCP access lock register */
2707static void bnx2x_release_alr(struct bnx2x *bp)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002708{
2709 u32 val = 0;
2710
2711 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2712}
2713
/* Compare the default status block indices written by the chip against
 * our cached copies, refresh the cache, and return a bitmask of what
 * changed: 1 = attention bits, 2 = CSTORM, 4 = USTORM, 8 = XSTORM,
 * 16 = TSTORM.  0 means nothing new to service.
 */
static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
{
	struct host_def_status_block *def_sb = bp->def_status_blk;
	u16 rc = 0;

	barrier(); /* status block is written to by the chip */
	if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
		bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
		rc |= 1;
	}
	if (bp->def_c_idx != def_sb->c_def_status_block.status_block_index) {
		bp->def_c_idx = def_sb->c_def_status_block.status_block_index;
		rc |= 2;
	}
	if (bp->def_u_idx != def_sb->u_def_status_block.status_block_index) {
		bp->def_u_idx = def_sb->u_def_status_block.status_block_index;
		rc |= 4;
	}
	if (bp->def_x_idx != def_sb->x_def_status_block.status_block_index) {
		bp->def_x_idx = def_sb->x_def_status_block.status_block_index;
		rc |= 8;
	}
	if (bp->def_t_idx != def_sb->t_def_status_block.status_block_index) {
		bp->def_t_idx = def_sb->t_def_status_block.status_block_index;
		rc |= 16;
	}
	return rc;
}
2742
2743/*
2744 * slow path service functions
2745 */
2746
/* Handle newly asserted attention bits: mask them in the AEU (under the
 * per-port HW lock), record them in bp->attn_state, service the
 * hard-wired sources (NIG/link, SW timer, GPIOs, general attentions)
 * and finally write the asserted set to the HC ATTN_BITS_SET command
 * register.  The NIG interrupt mask is saved and zeroed around
 * bnx2x_link_attn() and restored at the end under the PHY lock.
 */
static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
{
	int port = BP_PORT(bp);
	u32 hc_addr = (HC_REG_COMMAND_REG + port*32 +
		       COMMAND_REG_ATTN_BITS_SET);
	u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
			      MISC_REG_AEU_MASK_ATTN_FUNC_0;
	u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
				       NIG_REG_MASK_INTERRUPT_PORT0;
	u32 aeu_mask;
	u32 nig_mask = 0;

	/* seeing a bit both asserted and already recorded is an IGU bug */
	if (bp->attn_state & asserted)
		BNX2X_ERR("IGU ERROR\n");

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
	aeu_mask = REG_RD(bp, aeu_addr);

	DP(NETIF_MSG_HW, "aeu_mask %x newly asserted %x\n",
	   aeu_mask, asserted);
	aeu_mask &= ~(asserted & 0xff);
	DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);

	REG_WR(bp, aeu_addr, aeu_mask);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);

	DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
	bp->attn_state |= asserted;
	DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);

	if (asserted & ATTN_HARD_WIRED_MASK) {
		if (asserted & ATTN_NIG_FOR_FUNC) {

			bnx2x_acquire_phy_lock(bp);

			/* save nig interrupt mask */
			nig_mask = REG_RD(bp, nig_int_mask_addr);
			REG_WR(bp, nig_int_mask_addr, 0);

			bnx2x_link_attn(bp);

			/* handle unicore attn? */
		}
		if (asserted & ATTN_SW_TIMER_4_FUNC)
			DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");

		if (asserted & GPIO_2_FUNC)
			DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");

		if (asserted & GPIO_3_FUNC)
			DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");

		if (asserted & GPIO_4_FUNC)
			DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");

		/* general attentions 1-3 belong to port 0, 4-6 to port 1;
		   writing 0 acknowledges them */
		if (port == 0) {
			if (asserted & ATTN_GENERAL_ATTN_1) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
			}
			if (asserted & ATTN_GENERAL_ATTN_2) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
			}
			if (asserted & ATTN_GENERAL_ATTN_3) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
			}
		} else {
			if (asserted & ATTN_GENERAL_ATTN_4) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
			}
			if (asserted & ATTN_GENERAL_ATTN_5) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
			}
			if (asserted & ATTN_GENERAL_ATTN_6) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
			}
		}

	} /* if hardwired */

	DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
	   asserted, hc_addr);
	REG_WR(bp, hc_addr, asserted);

	/* now set back the mask */
	if (asserted & ATTN_NIG_FOR_FUNC) {
		REG_WR(bp, nig_int_mask_addr, nig_mask);
		bnx2x_release_phy_lock(bp);
	}
}
2842
Eilon Greensteinfd4ef402009-07-21 05:47:27 +00002843static inline void bnx2x_fan_failure(struct bnx2x *bp)
2844{
2845 int port = BP_PORT(bp);
2846
2847 /* mark the failure */
2848 bp->link_params.ext_phy_config &= ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
2849 bp->link_params.ext_phy_config |= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
2850 SHMEM_WR(bp, dev_info.port_hw_config[port].external_phy_config,
2851 bp->link_params.ext_phy_config);
2852
2853 /* log the failure */
2854 printk(KERN_ERR PFX "Fan Failure on Network Controller %s has caused"
2855 " the driver to shutdown the card to prevent permanent"
2856 " damage. Please contact Dell Support for assistance\n",
2857 bp->dev->name);
2858}
/* Service attention group 0: SPIO5 (fan failure, with per-PHY-type
 * GPIO shutdown), GPIO3 module-detect events, and fatal HW block
 * attentions (HW_INTERRUT_ASSERT_SET_0 -> panic).
 */
static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
{
	int port = BP_PORT(bp);
	int reg_offset;
	u32 val, swap_val, swap_override;

	reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
			     MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);

	if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {

		/* mask SPIO5 in the AEU enable register */
		val = REG_RD(bp, reg_offset);
		val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
		REG_WR(bp, reg_offset, val);

		BNX2X_ERR("SPIO5 hw attention\n");

		/* Fan failure attention */
		switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
			/* Low power mode is controlled by GPIO 2 */
			bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
				       MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
			/* The PHY reset is controlled by GPIO 1 */
			bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
				       MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
			/* The PHY reset is controlled by GPIO 1 */
			/* fake the port number to cancel the swap done in
			   set_gpio() */
			swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
			swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
			port = (swap_val && swap_override) ^ 1;
			bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
				       MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
			break;

		default:
			break;
		}
		bnx2x_fan_failure(bp);
	}

	if (attn & (AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 |
		    AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1)) {
		bnx2x_acquire_phy_lock(bp);
		bnx2x_handle_module_detect_int(&bp->link_params);
		bnx2x_release_phy_lock(bp);
	}

	if (attn & HW_INTERRUT_ASSERT_SET_0) {

		/* mask the fatal attentions, report and die */
		val = REG_RD(bp, reg_offset);
		val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
		REG_WR(bp, reg_offset, val);

		BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
			  (attn & HW_INTERRUT_ASSERT_SET_0));
		bnx2x_panic();
	}
}
2922
/* Service attention group 1: doorbell queue (DORQ) errors and fatal
 * HW block attentions (HW_INTERRUT_ASSERT_SET_1 -> panic).
 */
static inline void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
{
	u32 val;

	if (attn & AEU_INPUTS_ATTN_BITS_DOORBELLQ_HW_INTERRUPT) {

		/* register name suggests clear-on-read semantics —
		 * confirm against the DORQ block spec */
		val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
		BNX2X_ERR("DB hw attention 0x%x\n", val);
		/* DORQ discard attention */
		if (val & 0x2)
			BNX2X_ERR("FATAL error from DORQ\n");
	}

	if (attn & HW_INTERRUT_ASSERT_SET_1) {

		int port = BP_PORT(bp);
		int reg_offset;

		reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
				     MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);

		/* mask the fatal attentions, report and die */
		val = REG_RD(bp, reg_offset);
		val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
		REG_WR(bp, reg_offset, val);

		BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
			  (attn & HW_INTERRUT_ASSERT_SET_1));
		bnx2x_panic();
	}
}
2953
/* Service attention group 2: CFC and PXP errors and fatal HW block
 * attentions (HW_INTERRUT_ASSERT_SET_2 -> panic).
 */
static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
{
	u32 val;

	if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {

		val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
		BNX2X_ERR("CFC hw attention 0x%x\n", val);
		/* CFC error attention */
		if (val & 0x2)
			BNX2X_ERR("FATAL error from CFC\n");
	}

	if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {

		val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
		BNX2X_ERR("PXP hw attention 0x%x\n", val);
		/* RQ_USDMDP_FIFO_OVERFLOW */
		if (val & 0x18000)
			BNX2X_ERR("FATAL error from PXP\n");
	}

	if (attn & HW_INTERRUT_ASSERT_SET_2) {

		int port = BP_PORT(bp);
		int reg_offset;

		reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
				     MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);

		/* mask the fatal attentions, report and die */
		val = REG_RD(bp, reg_offset);
		val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
		REG_WR(bp, reg_offset, val);

		BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
			  (attn & HW_INTERRUT_ASSERT_SET_2));
		bnx2x_panic();
	}
}
2993
/* Handle deasserted attentions routed to AEU group 3: the Everest
 * general attentions (PMF link event, microcode/MCP asserts) and the
 * latched attention signals (GRC time-out / reserved).
 */
static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
{
	u32 val;

	if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {

		if (attn & BNX2X_PMF_LINK_ASSERT) {
			int func = BP_FUNC(bp);

			/* ack the general attention for this function */
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
			/* the MCP reports pending events via drv_status in
			 * shared memory */
			val = SHMEM_RD(bp, func_mb[func].drv_status);
			if (val & DRV_STATUS_DCC_EVENT_MASK)
				bnx2x_dcc_event(bp,
					    (val & DRV_STATUS_DCC_EVENT_MASK));
			bnx2x__link_status_update(bp);
			/* take over the port-management (PMF) role if the
			 * MCP just assigned it to this function */
			if ((bp->port.pmf == 0) && (val & DRV_STATUS_PMF))
				bnx2x_pmf_update(bp);

		} else if (attn & BNX2X_MC_ASSERT_BITS) {

			/* microcode assert - ack all four storm attention
			 * bits and give up */
			BNX2X_ERR("MC assert!\n");
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);
			bnx2x_panic();

		} else if (attn & BNX2X_MCP_ASSERT) {

			/* management CPU assert - dump its firmware state
			 * but keep the driver running */
			BNX2X_ERR("MCP assert!\n");
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);
			bnx2x_fw_dump(bp);

		} else
			BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn);
	}

	if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
		BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn);
		if (attn & BNX2X_GRC_TIMEOUT) {
			/* the detail register only exists on E1H */
			val = CHIP_IS_E1H(bp) ?
				REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN) : 0;
			BNX2X_ERR("GRC time-out 0x%08x\n", val);
		}
		if (attn & BNX2X_GRC_RSV) {
			val = CHIP_IS_E1H(bp) ?
				REG_RD(bp, MISC_REG_GRC_RSV_ATTN) : 0;
			BNX2X_ERR("GRC reserved 0x%08x\n", val);
		}
		/* clear the latch - 0x7ff presumably covers all latched
		 * signal bits; confirm against the AEU documentation */
		REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
	}
}
3046
/* Process all newly deasserted attention bits: read the after-invert
 * attention signals, dispatch them to the per-group handlers, clear the
 * bits in the HC, and re-enable them in the AEU mask.  Serialized
 * against the MCP and the other port via the ALR and the per-port
 * attention-mask HW lock.
 */
static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
{
	struct attn_route attn;
	struct attn_route group_mask;
	int port = BP_PORT(bp);
	int index;
	u32 reg_addr;
	u32 val;
	u32 aeu_mask;

	/* need to take HW lock because MCP or other port might also
	   try to handle this event */
	bnx2x_acquire_alr(bp);

	/* read all four after-invert attention signal words for this port */
	attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
	attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
	attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
	attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
	DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x\n",
	   attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3]);

	/* each deasserted bit selects one dynamic attention group; only
	 * the signals enabled in that group's mask are handled */
	for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
		if (deasserted & (1 << index)) {
			group_mask = bp->attn_group[index];

			DP(NETIF_MSG_HW, "group[%d]: %08x %08x %08x %08x\n",
			   index, group_mask.sig[0], group_mask.sig[1],
			   group_mask.sig[2], group_mask.sig[3]);

			bnx2x_attn_int_deasserted3(bp,
					attn.sig[3] & group_mask.sig[3]);
			bnx2x_attn_int_deasserted1(bp,
					attn.sig[1] & group_mask.sig[1]);
			bnx2x_attn_int_deasserted2(bp,
					attn.sig[2] & group_mask.sig[2]);
			bnx2x_attn_int_deasserted0(bp,
					attn.sig[0] & group_mask.sig[0]);

			if ((attn.sig[0] & group_mask.sig[0] &
						HW_PRTY_ASSERT_SET_0) ||
			    (attn.sig[1] & group_mask.sig[1] &
						HW_PRTY_ASSERT_SET_1) ||
			    (attn.sig[2] & group_mask.sig[2] &
						HW_PRTY_ASSERT_SET_2))
				BNX2X_ERR("FATAL HW block parity attention\n");
		}
	}

	bnx2x_release_alr(bp);

	/* clear the handled attention bits in the host coalescing block */
	reg_addr = (HC_REG_COMMAND_REG + port*32 + COMMAND_REG_ATTN_BITS_CLR);

	val = ~deasserted;
	DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
	   val, reg_addr);
	REG_WR(bp, reg_addr, val);

	/* every deasserted bit should currently be tracked as asserted */
	if (~bp->attn_state & deasserted)
		BNX2X_ERR("IGU ERROR\n");

	reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
			  MISC_REG_AEU_MASK_ATTN_FUNC_0;

	/* the AEU mask is shared with the MCP - protect the RMW */
	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
	aeu_mask = REG_RD(bp, reg_addr);

	DP(NETIF_MSG_HW, "aeu_mask %x newly deasserted %x\n",
	   aeu_mask, deasserted);
	/* re-enable the handled attention lines (low 8 bits only) */
	aeu_mask |= (deasserted & 0xff);
	DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);

	REG_WR(bp, reg_addr, aeu_mask);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);

	DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
	bp->attn_state &= ~deasserted;
	DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
}
3125
3126static void bnx2x_attn_int(struct bnx2x *bp)
3127{
3128 /* read local copy of bits */
Eilon Greenstein68d59482009-01-14 21:27:36 -08003129 u32 attn_bits = le32_to_cpu(bp->def_status_blk->atten_status_block.
3130 attn_bits);
3131 u32 attn_ack = le32_to_cpu(bp->def_status_blk->atten_status_block.
3132 attn_bits_ack);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003133 u32 attn_state = bp->attn_state;
3134
3135 /* look for changed bits */
3136 u32 asserted = attn_bits & ~attn_ack & ~attn_state;
3137 u32 deasserted = ~attn_bits & attn_ack & attn_state;
3138
3139 DP(NETIF_MSG_HW,
3140 "attn_bits %x attn_ack %x asserted %x deasserted %x\n",
3141 attn_bits, attn_ack, asserted, deasserted);
3142
3143 if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
Eilon Greenstein34f80b02008-06-23 20:33:01 -07003144 BNX2X_ERR("BAD attention state\n");
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003145
3146 /* handle bits that were raised */
3147 if (asserted)
3148 bnx2x_attn_int_asserted(bp, asserted);
3149
3150 if (deasserted)
3151 bnx2x_attn_int_deasserted(bp, deasserted);
3152}
3153
/* Slow-path work item: handle any pending slow-path events indicated by
 * the default status block, then acknowledge all its indices.  Only the
 * final TSTORM ack re-enables the interrupt line that
 * bnx2x_msix_sp_int() disabled.
 */
static void bnx2x_sp_task(struct work_struct *work)
{
	struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work);
	u16 status;


	/* Return here if interrupt is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
		return;
	}

	/* status has a bit set for each storm index that advanced */
	status = bnx2x_update_dsb_idx(bp);
/* if (status == 0)				     */
/*	BNX2X_ERR("spurious slowpath interrupt!\n"); */

	DP(NETIF_MSG_INTR, "got a slowpath interrupt (updated %x)\n", status);

	/* HW attentions */
	if (status & 0x1)
		bnx2x_attn_int(bp);

	/* ack all default SB indices; keep the interrupt disabled (NOP)
	 * until the very last ack, which re-enables it */
	bnx2x_ack_sb(bp, DEF_SB_ID, ATTENTION_ID, le16_to_cpu(bp->def_att_idx),
		     IGU_INT_NOP, 1);
	bnx2x_ack_sb(bp, DEF_SB_ID, USTORM_ID, le16_to_cpu(bp->def_u_idx),
		     IGU_INT_NOP, 1);
	bnx2x_ack_sb(bp, DEF_SB_ID, CSTORM_ID, le16_to_cpu(bp->def_c_idx),
		     IGU_INT_NOP, 1);
	bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, le16_to_cpu(bp->def_x_idx),
		     IGU_INT_NOP, 1);
	bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, le16_to_cpu(bp->def_t_idx),
		     IGU_INT_ENABLE, 1);

}
3188
/* MSI-X slow-path interrupt handler: disable further slow-path
 * interrupts via the IGU and defer the actual work to bnx2x_sp_task()
 * on the driver workqueue.
 */
static irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
{
	struct net_device *dev = dev_instance;
	struct bnx2x *bp = netdev_priv(dev);

	/* Return here if interrupt is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
		return IRQ_HANDLED;
	}

	/* mask the line until the work item re-enables it */
	bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, 0, IGU_INT_DISABLE, 0);

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif

	queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);

	return IRQ_HANDLED;
}
3211
/* end of slow path */

/* Statistics */

/****************************************************************************
* Macros
*
* NOTE: these are classic multi-evaluation macros - every argument may be
* expanded more than once, so callers must pass side-effect-free lvalues.
* The UPDATE_*/SUB_* helpers also rely on locals declared by the caller
* (diff, pstats, estats, qstats, new, old, *client pointers).
****************************************************************************/

/* sum[hi:lo] += add[hi:lo] */
#define ADD_64(s_hi, a_hi, s_lo, a_lo) \
	do { \
		s_lo += a_lo; \
		s_hi += a_hi + ((s_lo < a_lo) ? 1 : 0); \
	} while (0)

/* difference = minuend - subtrahend */
#define DIFF_64(d_hi, m_hi, s_hi, d_lo, m_lo, s_lo) \
	do { \
		if (m_lo < s_lo) { \
			/* underflow */ \
			d_hi = m_hi - s_hi; \
			if (d_hi > 0) { \
				/* we can 'loan' 1 */ \
				d_hi--; \
				d_lo = m_lo + (UINT_MAX - s_lo) + 1; \
			} else { \
				/* m_hi <= s_hi */ \
				d_hi = 0; \
				d_lo = 0; \
			} \
		} else { \
			/* m_lo >= s_lo */ \
			if (m_hi < s_hi) { \
				d_hi = 0; \
				d_lo = 0; \
			} else { \
				/* m_hi >= s_hi */ \
				d_hi = m_hi - s_hi; \
				d_lo = m_lo - s_lo; \
			} \
		} \
	} while (0)

/* fold the delta of 64-bit MAC counter 's' into both mac_stx shadows;
 * mac_stx[0] holds the last raw HW value, mac_stx[1] the accumulator */
#define UPDATE_STAT64(s, t) \
	do { \
		DIFF_64(diff.hi, new->s##_hi, pstats->mac_stx[0].t##_hi, \
			diff.lo, new->s##_lo, pstats->mac_stx[0].t##_lo); \
		pstats->mac_stx[0].t##_hi = new->s##_hi; \
		pstats->mac_stx[0].t##_lo = new->s##_lo; \
		ADD_64(pstats->mac_stx[1].t##_hi, diff.hi, \
		       pstats->mac_stx[1].t##_lo, diff.lo); \
	} while (0)

/* fold the delta of 64-bit NIG counter 's' into estats->t */
#define UPDATE_STAT64_NIG(s, t) \
	do { \
		DIFF_64(diff.hi, new->s##_hi, old->s##_hi, \
			diff.lo, new->s##_lo, old->s##_lo); \
		ADD_64(estats->t##_hi, diff.hi, \
		       estats->t##_lo, diff.lo); \
	} while (0)

/* sum[hi:lo] += add */
#define ADD_EXTEND_64(s_hi, s_lo, a) \
	do { \
		s_lo += a; \
		s_hi += (s_lo < a) ? 1 : 0; \
	} while (0)

/* extend 32-bit HW counter 's' into the 64-bit mac_stx[1] accumulator */
#define UPDATE_EXTEND_STAT(s) \
	do { \
		ADD_EXTEND_64(pstats->mac_stx[1].s##_hi, \
			      pstats->mac_stx[1].s##_lo, \
			      new->s); \
	} while (0)

/* fold the delta of little-endian TSTORM counter 's' into qstats->t and
 * remember the new raw value */
#define UPDATE_EXTEND_TSTAT(s, t) \
	do { \
		diff = le32_to_cpu(tclient->s) - le32_to_cpu(old_tclient->s); \
		old_tclient->s = tclient->s; \
		ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
	} while (0)

/* same as UPDATE_EXTEND_TSTAT but for the USTORM client statistics */
#define UPDATE_EXTEND_USTAT(s, t) \
	do { \
		diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
		old_uclient->s = uclient->s; \
		ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
	} while (0)

/* same as UPDATE_EXTEND_TSTAT but for the XSTORM client statistics */
#define UPDATE_EXTEND_XSTAT(s, t) \
	do { \
		diff = le32_to_cpu(xclient->s) - le32_to_cpu(old_xclient->s); \
		old_xclient->s = xclient->s; \
		ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
	} while (0)

/* minuend -= subtrahend */
#define SUB_64(m_hi, s_hi, m_lo, s_lo) \
	do { \
		DIFF_64(m_hi, m_hi, s_hi, m_lo, m_lo, s_lo); \
	} while (0)

/* minuend[hi:lo] -= subtrahend */
#define SUB_EXTEND_64(m_hi, m_lo, s) \
	do { \
		SUB_64(m_hi, 0, m_lo, s); \
	} while (0)

/* subtract the delta of USTORM counter 's' from qstats->t */
#define SUB_EXTEND_USTAT(s, t) \
	do { \
		diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
		SUB_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
	} while (0)

/*
 * General service functions
 */
3329
3330static inline long bnx2x_hilo(u32 *hiref)
3331{
3332 u32 lo = *(hiref + 1);
3333#if (BITS_PER_LONG == 64)
3334 u32 hi = *hiref;
3335
3336 return HILO_U64(hi, lo);
3337#else
3338 return lo;
3339#endif
3340}
3341
3342/*
3343 * Init service functions
3344 */
3345
/* Post a statistics-query ramrod to the firmware, asking every active
 * queue (and, for the PMF, the port) to report its counters.  A no-op
 * while a previous query is still pending.
 */
static void bnx2x_storm_stats_post(struct bnx2x *bp)
{
	if (!bp->stats_pending) {
		struct eth_query_ramrod_data ramrod_data = {0};
		int i, rc;

		/* tag the request so stale completions can be detected */
		ramrod_data.drv_counter = bp->stats_counter++;
		ramrod_data.collect_port = bp->port.pmf ? 1 : 0;
		/* request stats for every client id in use */
		for_each_queue(bp, i)
			ramrod_data.ctr_id_vector |= (1 << bp->fp[i].cl_id);

		rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_STAT_QUERY, 0,
				   ((u32 *)&ramrod_data)[1],
				   ((u32 *)&ramrod_data)[0], 0);
		if (rc == 0) {
			/* stats ramrod has its own slot on the spq -
			 * give back the one bnx2x_sp_post() consumed */
			bp->spq_left++;
			bp->stats_pending = 1;
		}
	}
}
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003367
/* Kick off the HW statistics DMAE transfers that were programmed into
 * the slow-path dmae[] array: build a "loader" command that DMAs the
 * first real command into the DMAE engine, which then chains through
 * the rest.  Completion is signalled by stats_comp == DMAE_COMP_VAL.
 */
static void bnx2x_hw_stats_post(struct bnx2x *bp)
{
	struct dmae_command *dmae = &bp->stats_dmae;
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	/* pre-mark complete; emulation/FPGA (slow) chips skip HW stats */
	*stats_comp = DMAE_COMP_VAL;
	if (CHIP_REV_IS_SLOW(bp))
		return;

	/* loader */
	if (bp->executer_idx) {
		int loader_idx = PMF_DMAE_C(bp);

		memset(dmae, 0, sizeof(struct dmae_command));

		dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
				DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
				DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
				DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
				DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
				(BP_PORT(bp) ? DMAE_CMD_PORT_1 :
					       DMAE_CMD_PORT_0) |
				(BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
		/* source: the first programmed command in host memory */
		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, dmae[0]));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, dmae[0]));
		/* destination: the DMAE command memory slot after the
		 * loader's own slot */
		dmae->dst_addr_lo = (DMAE_REG_CMD_MEM +
				     sizeof(struct dmae_command) *
				     (loader_idx + 1)) >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = sizeof(struct dmae_command) >> 2;
		if (CHIP_IS_E1(bp))
			dmae->len--;
		/* completion triggers the freshly loaded command */
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx + 1] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

		*stats_comp = 0;
		bnx2x_post_dmae(bp, dmae, loader_idx);

	} else if (bp->func_stx) {
		/* nothing chained - post the single function-stats command */
		*stats_comp = 0;
		bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
	}
}
3415
3416static int bnx2x_stats_comp(struct bnx2x *bp)
3417{
3418 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3419 int cnt = 10;
3420
3421 might_sleep();
3422 while (*stats_comp != DMAE_COMP_VAL) {
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07003423 if (!cnt) {
3424 BNX2X_ERR("timeout waiting for stats finished\n");
3425 break;
3426 }
3427 cnt--;
Yitchak Gertner12469402008-08-13 15:52:08 -07003428 msleep(1);
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07003429 }
3430 return 1;
3431}
3432
3433/*
3434 * Statistics service functions
3435 */
3436
/* On becoming the PMF on a multi-function (E1H) device, read the port
 * statistics accumulated so far by the previous PMF from the shmem
 * port_stx area back into host memory, in two DMAE reads (the area is
 * larger than one maximum-length read).  Synchronous: waits for the
 * transfer to complete before returning.
 */
static void bnx2x_stats_pmf_update(struct bnx2x *bp)
{
	struct dmae_command *dmae;
	u32 opcode;
	int loader_idx = PMF_DMAE_C(bp);
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	/* sanity */
	if (!IS_E1HMF(bp) || !bp->port.pmf || !bp->port.port_stx) {
		BNX2X_ERR("BUG!\n");
		return;
	}

	bp->executer_idx = 0;

	/* common GRC->PCI read opcode; the completion-destination flag is
	 * added per command below */
	opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
		  DMAE_CMD_C_ENABLE |
		  DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
		  DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
		  DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
		  (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
		  (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));

	/* first chunk: maximum-length read, completion chains to the
	 * next command via the GRC "go" register */
	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
	dmae->src_addr_lo = bp->port.port_stx >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
	dmae->len = DMAE_LEN32_RD_MAX;
	dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
	dmae->comp_addr_hi = 0;
	dmae->comp_val = 1;

	/* second chunk: the remainder, completion written to stats_comp
	 * in host memory */
	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
	dmae->src_addr_lo = (bp->port.port_stx >> 2) + DMAE_LEN32_RD_MAX;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats) +
				   DMAE_LEN32_RD_MAX * 4);
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats) +
				   DMAE_LEN32_RD_MAX * 4);
	dmae->len = (sizeof(struct host_port_stats) >> 2) - DMAE_LEN32_RD_MAX;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	*stats_comp = 0;
	bnx2x_hw_stats_post(bp);
	bnx2x_stats_comp(bp);
}
3491
/* Build the full chain of statistics DMAE commands for the PMF: write
 * host port/function stats out to shmem for the MCP, then read the
 * MAC (BMAC or EMAC, depending on the active link) and NIG counters
 * into host memory.  The commands are only programmed here; they are
 * executed later by bnx2x_hw_stats_post().  The last command signals
 * stats_comp; all earlier ones chain via the DMAE "go" registers.
 */
static void bnx2x_port_stats_init(struct bnx2x *bp)
{
	struct dmae_command *dmae;
	int port = BP_PORT(bp);
	int vn = BP_E1HVN(bp);
	u32 opcode;
	int loader_idx = PMF_DMAE_C(bp);
	u32 mac_addr;
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	/* sanity */
	if (!bp->link_vars.link_up || !bp->port.pmf) {
		BNX2X_ERR("BUG!\n");
		return;
	}

	bp->executer_idx = 0;

	/* MCP */
	opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
		  DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
		  DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
		  DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
		  DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
		  (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
		  (vn << DMAE_CMD_E1HVN_SHIFT));

	/* publish port statistics to shmem if the MCP assigned an area */
	if (bp->port.port_stx) {

		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
		dmae->dst_addr_lo = bp->port.port_stx >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = sizeof(struct host_port_stats) >> 2;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;
	}

	/* likewise for the per-function statistics area */
	if (bp->func_stx) {

		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
		dmae->dst_addr_lo = bp->func_stx >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = sizeof(struct host_func_stats) >> 2;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;
	}

	/* MAC */
	opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
		  DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
		  DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
		  DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
		  DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
		  (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
		  (vn << DMAE_CMD_E1HVN_SHIFT));

	if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {

		mac_addr = (port ? NIG_REG_INGRESS_BMAC1_MEM :
				   NIG_REG_INGRESS_BMAC0_MEM);

		/* BIGMAC_REGISTER_TX_STAT_GTPKT ..
		   BIGMAC_REGISTER_TX_STAT_GTBYT */
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (mac_addr +
				     BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
		dmae->len = (8 + BIGMAC_REGISTER_TX_STAT_GTBYT -
			     BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

		/* BIGMAC_REGISTER_RX_STAT_GR64 ..
		   BIGMAC_REGISTER_RX_STAT_GRIPJ */
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (mac_addr +
				     BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
				offsetof(struct bmac_stats, rx_stat_gr64_lo));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
				offsetof(struct bmac_stats, rx_stat_gr64_lo));
		dmae->len = (8 + BIGMAC_REGISTER_RX_STAT_GRIPJ -
			     BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

	} else if (bp->link_vars.mac_type == MAC_TYPE_EMAC) {

		mac_addr = (port ? GRCBASE_EMAC1 : GRCBASE_EMAC0);

		/* EMAC_REG_EMAC_RX_STAT_AC (EMAC_REG_EMAC_RX_STAT_AC_COUNT)*/
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (mac_addr +
				     EMAC_REG_EMAC_RX_STAT_AC) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
		dmae->len = EMAC_REG_EMAC_RX_STAT_AC_COUNT;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

		/* EMAC_REG_EMAC_RX_STAT_AC_28 */
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (mac_addr +
				     EMAC_REG_EMAC_RX_STAT_AC_28) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
		     offsetof(struct emac_stats, rx_stat_falsecarriererrors));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
		     offsetof(struct emac_stats, rx_stat_falsecarriererrors));
		dmae->len = 1;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

		/* EMAC_REG_EMAC_TX_STAT_AC (EMAC_REG_EMAC_TX_STAT_AC_COUNT)*/
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (mac_addr +
				     EMAC_REG_EMAC_TX_STAT_AC) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
			offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
			offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
		dmae->len = EMAC_REG_EMAC_TX_STAT_AC_COUNT;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;
	}

	/* NIG */
	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = opcode;
	dmae->src_addr_lo = (port ? NIG_REG_STAT1_BRB_DISCARD :
				    NIG_REG_STAT0_BRB_DISCARD) >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats));
	dmae->len = (sizeof(struct nig_stats) - 4*sizeof(u32)) >> 2;
	dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
	dmae->comp_addr_hi = 0;
	dmae->comp_val = 1;

	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = opcode;
	dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT0 :
				    NIG_REG_STAT0_EGRESS_MAC_PKT0) >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
			offsetof(struct nig_stats, egress_mac_pkt0_lo));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
			offsetof(struct nig_stats, egress_mac_pkt0_lo));
	dmae->len = (2*sizeof(u32)) >> 2;
	dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
	dmae->comp_addr_hi = 0;
	dmae->comp_val = 1;

	/* final command: completion goes to host memory (stats_comp)
	 * instead of chaining onward */
	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
			DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
			DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
			(port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
			(vn << DMAE_CMD_E1HVN_SHIFT));
	dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT1 :
				    NIG_REG_STAT0_EGRESS_MAC_PKT1) >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
			offsetof(struct nig_stats, egress_mac_pkt1_lo));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
			offsetof(struct nig_stats, egress_mac_pkt1_lo));
	dmae->len = (2*sizeof(u32)) >> 2;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	*stats_comp = 0;
}
3699
/* Program the single statistics DMAE command for a non-PMF function:
 * write the host function-statistics block to the shmem area (func_stx)
 * assigned by the MCP, completing to stats_comp in host memory.
 */
static void bnx2x_func_stats_init(struct bnx2x *bp)
{
	struct dmae_command *dmae = &bp->stats_dmae;
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	/* sanity */
	if (!bp->func_stx) {
		BNX2X_ERR("BUG!\n");
		return;
	}

	bp->executer_idx = 0;
	memset(dmae, 0, sizeof(struct dmae_command));

	dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
			DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
			DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
			(BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
			(BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
	dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
	dmae->dst_addr_lo = bp->func_stx >> 2;
	dmae->dst_addr_hi = 0;
	dmae->len = sizeof(struct host_func_stats) >> 2;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	*stats_comp = 0;
}
3735
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07003736static void bnx2x_stats_start(struct bnx2x *bp)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003737{
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07003738 if (bp->port.pmf)
3739 bnx2x_port_stats_init(bp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003740
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07003741 else if (bp->func_stx)
3742 bnx2x_func_stats_init(bp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003743
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07003744 bnx2x_hw_stats_post(bp);
3745 bnx2x_storm_stats_post(bp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003746}
3747
/* Statistics state-machine action for taking over the PMF role: wait
 * for any in-flight DMAE, pull the previous PMF's accumulated port
 * stats from shmem, then start normal collection.
 */
static void bnx2x_stats_pmf_start(struct bnx2x *bp)
{
	bnx2x_stats_comp(bp);
	bnx2x_stats_pmf_update(bp);
	bnx2x_stats_start(bp);
}
3754
/* Handler for a LINK_UP event while already enabled: drain the pending
 * DMAE completion, then restart collection from scratch.
 */
static void bnx2x_stats_restart(struct bnx2x *bp)
{
	bnx2x_stats_comp(bp);
	bnx2x_stats_start(bp);
}
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003760
/* Fold the freshly DMAE'd BigMAC counters into the port statistics block
 * and the driver's ethtool statistics.  UPDATE_STAT64() references the
 * 'new', 'pstats' and 'diff' locals implicitly (macro defined elsewhere
 * in this file — presumably it widens each HW counter delta into the
 * 64-bit hi/lo software mirror; confirm against the macro definition).
 */
static void bnx2x_bmac_stats_update(struct bnx2x *bp)
{
	struct bmac_stats *new = bnx2x_sp(bp, mac_stats.bmac_stats);
	struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
	struct bnx2x_eth_stats *estats = &bp->eth_stats;
	struct {
		u32 lo;
		u32 hi;
	} diff;		/* scratch used by the UPDATE_STAT64() macro */

	/* Rx counters */
	UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets);
	UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors);
	UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts);
	UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong);
	UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments);
	UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers);
	UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived);
	/* grxpf is accumulated into two destinations */
	UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered);
	UPDATE_STAT64(rx_stat_grxpf, rx_stat_bmac_xpf);
	/* Tx counters; gtxpf likewise feeds two destinations */
	UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent);
	UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone);
	UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets);
	UPDATE_STAT64(tx_stat_gt127,
				tx_stat_etherstatspkts65octetsto127octets);
	UPDATE_STAT64(tx_stat_gt255,
				tx_stat_etherstatspkts128octetsto255octets);
	UPDATE_STAT64(tx_stat_gt511,
				tx_stat_etherstatspkts256octetsto511octets);
	UPDATE_STAT64(tx_stat_gt1023,
				tx_stat_etherstatspkts512octetsto1023octets);
	UPDATE_STAT64(tx_stat_gt1518,
				tx_stat_etherstatspkts1024octetsto1522octets);
	UPDATE_STAT64(tx_stat_gt2047, tx_stat_bmac_2047);
	UPDATE_STAT64(tx_stat_gt4095, tx_stat_bmac_4095);
	UPDATE_STAT64(tx_stat_gt9216, tx_stat_bmac_9216);
	UPDATE_STAT64(tx_stat_gt16383, tx_stat_bmac_16383);
	UPDATE_STAT64(tx_stat_gterr,
				tx_stat_dot3statsinternalmactransmiterrors);
	UPDATE_STAT64(tx_stat_gtufl, tx_stat_bmac_ufl);

	/* export the pause counters from the mac_stx[1] copy of the
	 * port stats into the driver's ethtool statistics */
	estats->pause_frames_received_hi =
				pstats->mac_stx[1].rx_stat_bmac_xpf_hi;
	estats->pause_frames_received_lo =
				pstats->mac_stx[1].rx_stat_bmac_xpf_lo;

	estats->pause_frames_sent_hi =
				pstats->mac_stx[1].tx_stat_outxoffsent_hi;
	estats->pause_frames_sent_lo =
				pstats->mac_stx[1].tx_stat_outxoffsent_lo;
}
3811
/* Fold the freshly DMAE'd EMAC (1G MAC) counters into the port statistics
 * block and the driver's ethtool statistics.  UPDATE_EXTEND_STAT()
 * references the 'new' and 'pstats' locals implicitly (macro defined
 * elsewhere in this file — presumably extending the 32-bit HW counter
 * into the 64-bit SW mirror; confirm against the macro definition).
 */
static void bnx2x_emac_stats_update(struct bnx2x *bp)
{
	struct emac_stats *new = bnx2x_sp(bp, mac_stats.emac_stats);
	struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
	struct bnx2x_eth_stats *estats = &bp->eth_stats;

	UPDATE_EXTEND_STAT(rx_stat_ifhcinbadoctets);
	UPDATE_EXTEND_STAT(tx_stat_ifhcoutbadoctets);
	UPDATE_EXTEND_STAT(rx_stat_dot3statsfcserrors);
	UPDATE_EXTEND_STAT(rx_stat_dot3statsalignmenterrors);
	UPDATE_EXTEND_STAT(rx_stat_dot3statscarriersenseerrors);
	UPDATE_EXTEND_STAT(rx_stat_falsecarriererrors);
	UPDATE_EXTEND_STAT(rx_stat_etherstatsundersizepkts);
	UPDATE_EXTEND_STAT(rx_stat_dot3statsframestoolong);
	UPDATE_EXTEND_STAT(rx_stat_etherstatsfragments);
	UPDATE_EXTEND_STAT(rx_stat_etherstatsjabbers);
	UPDATE_EXTEND_STAT(rx_stat_maccontrolframesreceived);
	UPDATE_EXTEND_STAT(rx_stat_xoffstateentered);
	UPDATE_EXTEND_STAT(rx_stat_xonpauseframesreceived);
	UPDATE_EXTEND_STAT(rx_stat_xoffpauseframesreceived);
	UPDATE_EXTEND_STAT(tx_stat_outxonsent);
	UPDATE_EXTEND_STAT(tx_stat_outxoffsent);
	UPDATE_EXTEND_STAT(tx_stat_flowcontroldone);
	UPDATE_EXTEND_STAT(tx_stat_etherstatscollisions);
	UPDATE_EXTEND_STAT(tx_stat_dot3statssinglecollisionframes);
	UPDATE_EXTEND_STAT(tx_stat_dot3statsmultiplecollisionframes);
	UPDATE_EXTEND_STAT(tx_stat_dot3statsdeferredtransmissions);
	UPDATE_EXTEND_STAT(tx_stat_dot3statsexcessivecollisions);
	UPDATE_EXTEND_STAT(tx_stat_dot3statslatecollisions);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts64octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts65octetsto127octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts128octetsto255octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts256octetsto511octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts512octetsto1023octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts1024octetsto1522octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspktsover1522octets);
	UPDATE_EXTEND_STAT(tx_stat_dot3statsinternalmactransmiterrors);

	/* Unlike the BMAC, the EMAC reports XON and XOFF pause frames
	 * separately; sum both directions into the ethtool counters */
	estats->pause_frames_received_hi =
			pstats->mac_stx[1].rx_stat_xonpauseframesreceived_hi;
	estats->pause_frames_received_lo =
			pstats->mac_stx[1].rx_stat_xonpauseframesreceived_lo;
	ADD_64(estats->pause_frames_received_hi,
	       pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_hi,
	       estats->pause_frames_received_lo,
	       pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_lo);

	estats->pause_frames_sent_hi =
			pstats->mac_stx[1].tx_stat_outxonsent_hi;
	estats->pause_frames_sent_lo =
			pstats->mac_stx[1].tx_stat_outxonsent_lo;
	ADD_64(estats->pause_frames_sent_hi,
	       pstats->mac_stx[1].tx_stat_outxoffsent_hi,
	       estats->pause_frames_sent_lo,
	       pstats->mac_stx[1].tx_stat_outxoffsent_lo);
}
3868
/* Process the HW (MAC + NIG) statistics that were just DMAE'd into the
 * slowpath buffer: dispatch to the active MAC's update routine, extend
 * the NIG counters, and publish the result into bp->eth_stats.
 *
 * Returns 0 on success, -1 if no MAC is active (should not happen once
 * stats are being DMAE'd).
 */
static int bnx2x_hw_stats_update(struct bnx2x *bp)
{
	struct nig_stats *new = bnx2x_sp(bp, nig_stats);
	struct nig_stats *old = &(bp->port.old_nig_stats);
	struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
	struct bnx2x_eth_stats *estats = &bp->eth_stats;
	struct {
		u32 lo;
		u32 hi;
	} diff;		/* scratch used by the UPDATE_STAT64_NIG() macro */
	u32 nig_timer_max;

	if (bp->link_vars.mac_type == MAC_TYPE_BMAC)
		bnx2x_bmac_stats_update(bp);

	else if (bp->link_vars.mac_type == MAC_TYPE_EMAC)
		bnx2x_emac_stats_update(bp);

	else { /* unreached */
		BNX2X_ERR("stats updated by DMAE but no MAC active\n");
		return -1;
	}

	/* NIG counters wrap; accumulate the delta vs. the saved snapshot */
	ADD_EXTEND_64(pstats->brb_drop_hi, pstats->brb_drop_lo,
		      new->brb_discard - old->brb_discard);
	ADD_EXTEND_64(estats->brb_truncate_hi, estats->brb_truncate_lo,
		      new->brb_truncate - old->brb_truncate);

	UPDATE_STAT64_NIG(egress_mac_pkt0,
					etherstatspkts1024octetsto1522octets);
	UPDATE_STAT64_NIG(egress_mac_pkt1, etherstatspktsover1522octets);

	/* remember the raw NIG values for the next delta computation */
	memcpy(old, new, sizeof(struct nig_stats));

	/* publish the per-port MAC stats into the ethtool block */
	memcpy(&(estats->rx_stat_ifhcinbadoctets_hi), &(pstats->mac_stx[1]),
	       sizeof(struct mac_stx));
	estats->brb_drop_hi = pstats->brb_drop_hi;
	estats->brb_drop_lo = pstats->brb_drop_lo;

	/* mark the port-stats buffer as consistent (start == end) */
	pstats->host_port_stats_start = ++pstats->host_port_stats_end;

	/* report (once per change) the max NIG timer seen by the MCP */
	nig_timer_max = SHMEM_RD(bp, port_mb[BP_PORT(bp)].stat_nig_timer);
	if (nig_timer_max != estats->nig_timer_max) {
		estats->nig_timer_max = nig_timer_max;
		BNX2X_ERR("NIG timer max (%u)\n", estats->nig_timer_max);
	}

	return 0;
}
3918
/* Fold the per-client statistics reported by the FW storms (t/u/x-storm)
 * into the per-queue, per-function and driver-global statistics blocks.
 *
 * Returns 0 on success, or a negative value if any storm has not yet
 * posted a snapshot matching bp->stats_counter (caller retries later;
 * see bnx2x_stats_update()).
 */
static int bnx2x_storm_stats_update(struct bnx2x *bp)
{
	struct eth_stats_query *stats = bnx2x_sp(bp, fw_stats);
	struct tstorm_per_port_stats *tport =
					&stats->tstorm_common.port_statistics;
	struct host_func_stats *fstats = bnx2x_sp(bp, func_stats);
	struct bnx2x_eth_stats *estats = &bp->eth_stats;
	int i;

	/* start the function totals from the saved base (everything past
	 * the first two u32s, i.e. past host_func_stats_start/end) */
	memcpy(&(fstats->total_bytes_received_hi),
	       &(bnx2x_sp(bp, func_stats_base)->total_bytes_received_hi),
	       sizeof(struct host_func_stats) - 2*sizeof(u32));
	/* these are re-accumulated from the per-queue stats below */
	estats->error_bytes_received_hi = 0;
	estats->error_bytes_received_lo = 0;
	estats->etherstatsoverrsizepkts_hi = 0;
	estats->etherstatsoverrsizepkts_lo = 0;
	estats->no_buff_discard_hi = 0;
	estats->no_buff_discard_lo = 0;

	for_each_rx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];
		int cl_id = fp->cl_id;
		struct tstorm_per_client_stats *tclient =
				&stats->tstorm_common.client_statistics[cl_id];
		struct tstorm_per_client_stats *old_tclient = &fp->old_tclient;
		struct ustorm_per_client_stats *uclient =
				&stats->ustorm_common.client_statistics[cl_id];
		struct ustorm_per_client_stats *old_uclient = &fp->old_uclient;
		struct xstorm_per_client_stats *xclient =
				&stats->xstorm_common.client_statistics[cl_id];
		struct xstorm_per_client_stats *old_xclient = &fp->old_xclient;
		struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats;
		u32 diff;	/* scratch for the UPDATE_EXTEND_* macros */

		/* are storm stats valid?  each storm stamps its snapshot
		 * with a counter that must be one behind ours */
		if ((u16)(le16_to_cpu(xclient->stats_counter) + 1) !=
							bp->stats_counter) {
			DP(BNX2X_MSG_STATS, "[%d] stats not updated by xstorm"
			   " xstorm counter (%d) != stats_counter (%d)\n",
			   i, xclient->stats_counter, bp->stats_counter);
			return -1;
		}
		if ((u16)(le16_to_cpu(tclient->stats_counter) + 1) !=
							bp->stats_counter) {
			DP(BNX2X_MSG_STATS, "[%d] stats not updated by tstorm"
			   " tstorm counter (%d) != stats_counter (%d)\n",
			   i, tclient->stats_counter, bp->stats_counter);
			return -2;
		}
		if ((u16)(le16_to_cpu(uclient->stats_counter) + 1) !=
							bp->stats_counter) {
			DP(BNX2X_MSG_STATS, "[%d] stats not updated by ustorm"
			   " ustorm counter (%d) != stats_counter (%d)\n",
			   i, uclient->stats_counter, bp->stats_counter);
			return -4;
		}

		/* Rx bytes = bcast + mcast + ucast (+ error bytes below) */
		qstats->total_bytes_received_hi =
			le32_to_cpu(tclient->rcv_broadcast_bytes.hi);
		qstats->total_bytes_received_lo =
			le32_to_cpu(tclient->rcv_broadcast_bytes.lo);

		ADD_64(qstats->total_bytes_received_hi,
		       le32_to_cpu(tclient->rcv_multicast_bytes.hi),
		       qstats->total_bytes_received_lo,
		       le32_to_cpu(tclient->rcv_multicast_bytes.lo));

		ADD_64(qstats->total_bytes_received_hi,
		       le32_to_cpu(tclient->rcv_unicast_bytes.hi),
		       qstats->total_bytes_received_lo,
		       le32_to_cpu(tclient->rcv_unicast_bytes.lo));

		/* valid bytes = total before the error bytes are added */
		qstats->valid_bytes_received_hi =
					qstats->total_bytes_received_hi;
		qstats->valid_bytes_received_lo =
					qstats->total_bytes_received_lo;

		qstats->error_bytes_received_hi =
				le32_to_cpu(tclient->rcv_error_bytes.hi);
		qstats->error_bytes_received_lo =
				le32_to_cpu(tclient->rcv_error_bytes.lo);

		ADD_64(qstats->total_bytes_received_hi,
		       qstats->error_bytes_received_hi,
		       qstats->total_bytes_received_lo,
		       qstats->error_bytes_received_lo);

		UPDATE_EXTEND_TSTAT(rcv_unicast_pkts,
					total_unicast_packets_received);
		UPDATE_EXTEND_TSTAT(rcv_multicast_pkts,
					total_multicast_packets_received);
		UPDATE_EXTEND_TSTAT(rcv_broadcast_pkts,
					total_broadcast_packets_received);
		UPDATE_EXTEND_TSTAT(packets_too_big_discard,
					etherstatsoverrsizepkts);
		UPDATE_EXTEND_TSTAT(no_buff_discard, no_buff_discard);

		/* packets dropped by the ustorm for lack of buffers were
		 * counted as received by the tstorm — subtract them back
		 * out, and account them as no_buff discards instead */
		SUB_EXTEND_USTAT(ucast_no_buff_pkts,
					total_unicast_packets_received);
		SUB_EXTEND_USTAT(mcast_no_buff_pkts,
					total_multicast_packets_received);
		SUB_EXTEND_USTAT(bcast_no_buff_pkts,
					total_broadcast_packets_received);
		UPDATE_EXTEND_USTAT(ucast_no_buff_pkts, no_buff_discard);
		UPDATE_EXTEND_USTAT(mcast_no_buff_pkts, no_buff_discard);
		UPDATE_EXTEND_USTAT(bcast_no_buff_pkts, no_buff_discard);

		/* Tx bytes = ucast + mcast + bcast */
		qstats->total_bytes_transmitted_hi =
				le32_to_cpu(xclient->unicast_bytes_sent.hi);
		qstats->total_bytes_transmitted_lo =
				le32_to_cpu(xclient->unicast_bytes_sent.lo);

		ADD_64(qstats->total_bytes_transmitted_hi,
		       le32_to_cpu(xclient->multicast_bytes_sent.hi),
		       qstats->total_bytes_transmitted_lo,
		       le32_to_cpu(xclient->multicast_bytes_sent.lo));

		ADD_64(qstats->total_bytes_transmitted_hi,
		       le32_to_cpu(xclient->broadcast_bytes_sent.hi),
		       qstats->total_bytes_transmitted_lo,
		       le32_to_cpu(xclient->broadcast_bytes_sent.lo));

		UPDATE_EXTEND_XSTAT(unicast_pkts_sent,
					total_unicast_packets_transmitted);
		UPDATE_EXTEND_XSTAT(multicast_pkts_sent,
					total_multicast_packets_transmitted);
		UPDATE_EXTEND_XSTAT(broadcast_pkts_sent,
					total_broadcast_packets_transmitted);

		/* remembered raw (not extended) for the debug dump and
		 * for nstats->rx_dropped in bnx2x_net_stats_update() */
		old_tclient->checksum_discard = tclient->checksum_discard;
		old_tclient->ttl0_discard = tclient->ttl0_discard;

		/* accumulate this queue into the per-function totals */
		ADD_64(fstats->total_bytes_received_hi,
		       qstats->total_bytes_received_hi,
		       fstats->total_bytes_received_lo,
		       qstats->total_bytes_received_lo);
		ADD_64(fstats->total_bytes_transmitted_hi,
		       qstats->total_bytes_transmitted_hi,
		       fstats->total_bytes_transmitted_lo,
		       qstats->total_bytes_transmitted_lo);
		ADD_64(fstats->total_unicast_packets_received_hi,
		       qstats->total_unicast_packets_received_hi,
		       fstats->total_unicast_packets_received_lo,
		       qstats->total_unicast_packets_received_lo);
		ADD_64(fstats->total_multicast_packets_received_hi,
		       qstats->total_multicast_packets_received_hi,
		       fstats->total_multicast_packets_received_lo,
		       qstats->total_multicast_packets_received_lo);
		ADD_64(fstats->total_broadcast_packets_received_hi,
		       qstats->total_broadcast_packets_received_hi,
		       fstats->total_broadcast_packets_received_lo,
		       qstats->total_broadcast_packets_received_lo);
		ADD_64(fstats->total_unicast_packets_transmitted_hi,
		       qstats->total_unicast_packets_transmitted_hi,
		       fstats->total_unicast_packets_transmitted_lo,
		       qstats->total_unicast_packets_transmitted_lo);
		ADD_64(fstats->total_multicast_packets_transmitted_hi,
		       qstats->total_multicast_packets_transmitted_hi,
		       fstats->total_multicast_packets_transmitted_lo,
		       qstats->total_multicast_packets_transmitted_lo);
		ADD_64(fstats->total_broadcast_packets_transmitted_hi,
		       qstats->total_broadcast_packets_transmitted_hi,
		       fstats->total_broadcast_packets_transmitted_lo,
		       qstats->total_broadcast_packets_transmitted_lo);
		ADD_64(fstats->valid_bytes_received_hi,
		       qstats->valid_bytes_received_hi,
		       fstats->valid_bytes_received_lo,
		       qstats->valid_bytes_received_lo);

		/* and into the driver-global error counters */
		ADD_64(estats->error_bytes_received_hi,
		       qstats->error_bytes_received_hi,
		       estats->error_bytes_received_lo,
		       qstats->error_bytes_received_lo);
		ADD_64(estats->etherstatsoverrsizepkts_hi,
		       qstats->etherstatsoverrsizepkts_hi,
		       estats->etherstatsoverrsizepkts_lo,
		       qstats->etherstatsoverrsizepkts_lo);
		ADD_64(estats->no_buff_discard_hi, qstats->no_buff_discard_hi,
		       estats->no_buff_discard_lo, qstats->no_buff_discard_lo);
	}

	/* bad octets seen by the MAC count as received by the function */
	ADD_64(fstats->total_bytes_received_hi,
	       estats->rx_stat_ifhcinbadoctets_hi,
	       fstats->total_bytes_received_lo,
	       estats->rx_stat_ifhcinbadoctets_lo);

	/* mirror the function totals into the ethtool stats (same layout,
	 * again skipping the two leading u32s) */
	memcpy(estats, &(fstats->total_bytes_received_hi),
	       sizeof(struct host_func_stats) - 2*sizeof(u32));

	ADD_64(estats->etherstatsoverrsizepkts_hi,
	       estats->rx_stat_dot3statsframestoolong_hi,
	       estats->etherstatsoverrsizepkts_lo,
	       estats->rx_stat_dot3statsframestoolong_lo);
	ADD_64(estats->error_bytes_received_hi,
	       estats->rx_stat_ifhcinbadoctets_hi,
	       estats->error_bytes_received_lo,
	       estats->rx_stat_ifhcinbadoctets_lo);

	/* per-port tstorm counters are only meaningful on the PMF */
	if (bp->port.pmf) {
		estats->mac_filter_discard =
				le32_to_cpu(tport->mac_filter_discard);
		estats->xxoverflow_discard =
				le32_to_cpu(tport->xxoverflow_discard);
		estats->brb_truncate_discard =
				le32_to_cpu(tport->brb_truncate_discard);
		estats->mac_discard = le32_to_cpu(tport->mac_discard);
	}

	/* mark the function-stats buffer as consistent (start == end) */
	fstats->host_func_stats_start = ++fstats->host_func_stats_end;

	bp->stats_pending = 0;

	return 0;
}
4133
/* Translate the driver's ethtool statistics (bp->eth_stats) into the
 * standard netdev counters in bp->dev->stats.  bnx2x_hilo() collapses a
 * hi/lo u32 pair into a single scalar.
 */
static void bnx2x_net_stats_update(struct bnx2x *bp)
{
	struct bnx2x_eth_stats *estats = &bp->eth_stats;
	struct net_device_stats *nstats = &bp->dev->stats;
	int i;

	nstats->rx_packets =
		bnx2x_hilo(&estats->total_unicast_packets_received_hi) +
		bnx2x_hilo(&estats->total_multicast_packets_received_hi) +
		bnx2x_hilo(&estats->total_broadcast_packets_received_hi);

	nstats->tx_packets =
		bnx2x_hilo(&estats->total_unicast_packets_transmitted_hi) +
		bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi) +
		bnx2x_hilo(&estats->total_broadcast_packets_transmitted_hi);

	nstats->rx_bytes = bnx2x_hilo(&estats->total_bytes_received_hi);

	nstats->tx_bytes = bnx2x_hilo(&estats->total_bytes_transmitted_hi);

	/* rx_dropped = MAC discards + per-queue L3/L4 checksum discards
	 * (snapshotted into old_tclient by bnx2x_storm_stats_update()) */
	nstats->rx_dropped = estats->mac_discard;
	for_each_rx_queue(bp, i)
		nstats->rx_dropped +=
			le32_to_cpu(bp->fp[i].old_tclient.checksum_discard);

	nstats->tx_dropped = 0;

	nstats->multicast =
		bnx2x_hilo(&estats->total_multicast_packets_received_hi);

	nstats->collisions =
		bnx2x_hilo(&estats->tx_stat_etherstatscollisions_hi);

	nstats->rx_length_errors =
		bnx2x_hilo(&estats->rx_stat_etherstatsundersizepkts_hi) +
		bnx2x_hilo(&estats->etherstatsoverrsizepkts_hi);
	nstats->rx_over_errors = bnx2x_hilo(&estats->brb_drop_hi) +
				 bnx2x_hilo(&estats->brb_truncate_hi);
	nstats->rx_crc_errors =
		bnx2x_hilo(&estats->rx_stat_dot3statsfcserrors_hi);
	nstats->rx_frame_errors =
		bnx2x_hilo(&estats->rx_stat_dot3statsalignmenterrors_hi);
	nstats->rx_fifo_errors = bnx2x_hilo(&estats->no_buff_discard_hi);
	nstats->rx_missed_errors = estats->xxoverflow_discard;

	/* aggregate of all the rx error classes computed above */
	nstats->rx_errors = nstats->rx_length_errors +
			    nstats->rx_over_errors +
			    nstats->rx_crc_errors +
			    nstats->rx_frame_errors +
			    nstats->rx_fifo_errors +
			    nstats->rx_missed_errors;

	nstats->tx_aborted_errors =
		bnx2x_hilo(&estats->tx_stat_dot3statslatecollisions_hi) +
		bnx2x_hilo(&estats->tx_stat_dot3statsexcessivecollisions_hi);
	nstats->tx_carrier_errors =
		bnx2x_hilo(&estats->rx_stat_dot3statscarriersenseerrors_hi);
	nstats->tx_fifo_errors = 0;
	nstats->tx_heartbeat_errors = 0;
	nstats->tx_window_errors = 0;

	nstats->tx_errors = nstats->tx_aborted_errors +
			    nstats->tx_carrier_errors +
	    bnx2x_hilo(&estats->tx_stat_dot3statsinternalmactransmiterrors_hi);
}
4199
4200static void bnx2x_drv_stats_update(struct bnx2x *bp)
4201{
4202 struct bnx2x_eth_stats *estats = &bp->eth_stats;
4203 int i;
4204
4205 estats->driver_xoff = 0;
4206 estats->rx_err_discard_pkt = 0;
4207 estats->rx_skb_alloc_failed = 0;
4208 estats->hw_csum_err = 0;
Eilon Greensteinca003922009-08-12 22:53:28 -07004209 for_each_rx_queue(bp, i) {
Eilon Greensteinde832a52009-02-12 08:36:33 +00004210 struct bnx2x_eth_q_stats *qstats = &bp->fp[i].eth_q_stats;
4211
4212 estats->driver_xoff += qstats->driver_xoff;
4213 estats->rx_err_discard_pkt += qstats->rx_err_discard_pkt;
4214 estats->rx_skb_alloc_failed += qstats->rx_skb_alloc_failed;
4215 estats->hw_csum_err += qstats->hw_csum_err;
4216 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004217}
4218
/* UPDATE-event handler while statistics are enabled: consume the finished
 * DMAE snapshot, refresh all statistics layers, optionally dump a debug
 * summary, and post the next round of requests.
 */
static void bnx2x_stats_update(struct bnx2x *bp)
{
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	/* previous DMAE round not finished yet — try again next tick */
	if (*stats_comp != DMAE_COMP_VAL)
		return;

	if (bp->port.pmf)
		bnx2x_hw_stats_update(bp);

	/* storm stats may lag the DMAE stats; tolerate up to 3 misses
	 * before declaring the FW stuck */
	if (bnx2x_storm_stats_update(bp) && (bp->stats_pending++ == 3)) {
		BNX2X_ERR("storm stats were not updated for 3 times\n");
		bnx2x_panic();
		return;
	}

	bnx2x_net_stats_update(bp);
	bnx2x_drv_stats_update(bp);

	/* verbose per-tick dump, gated by the timer message level */
	if (bp->msglevel & NETIF_MSG_TIMER) {
		struct bnx2x_fastpath *fp0_rx = bp->fp;
		struct bnx2x_fastpath *fp0_tx = &(bp->fp[bp->num_rx_queues]);
		struct tstorm_per_client_stats *old_tclient =
							&bp->fp->old_tclient;
		struct bnx2x_eth_q_stats *qstats = &bp->fp->eth_q_stats;
		struct bnx2x_eth_stats *estats = &bp->eth_stats;
		struct net_device_stats *nstats = &bp->dev->stats;
		int i;

		printk(KERN_DEBUG "%s:\n", bp->dev->name);
		printk(KERN_DEBUG " tx avail (%4x) tx hc idx (%x)"
				  " tx pkt (%lx)\n",
		       bnx2x_tx_avail(fp0_tx),
		       le16_to_cpu(*fp0_tx->tx_cons_sb), nstats->tx_packets);
		printk(KERN_DEBUG " rx usage (%4x) rx hc idx (%x)"
				  " rx pkt (%lx)\n",
		       (u16)(le16_to_cpu(*fp0_rx->rx_cons_sb) -
			     fp0_rx->rx_comp_cons),
		       le16_to_cpu(*fp0_rx->rx_cons_sb), nstats->rx_packets);
		printk(KERN_DEBUG " %s (Xoff events %u) brb drops %u "
				  "brb truncate %u\n",
		       (netif_queue_stopped(bp->dev) ? "Xoff" : "Xon"),
		       qstats->driver_xoff,
		       estats->brb_drop_lo, estats->brb_truncate_lo);
		printk(KERN_DEBUG "tstats: checksum_discard %u "
			"packets_too_big_discard %lu no_buff_discard %lu "
			"mac_discard %u mac_filter_discard %u "
			"xxovrflow_discard %u brb_truncate_discard %u "
			"ttl0_discard %u\n",
		       le32_to_cpu(old_tclient->checksum_discard),
		       bnx2x_hilo(&qstats->etherstatsoverrsizepkts_hi),
		       bnx2x_hilo(&qstats->no_buff_discard_hi),
		       estats->mac_discard, estats->mac_filter_discard,
		       estats->xxoverflow_discard, estats->brb_truncate_discard,
		       le32_to_cpu(old_tclient->ttl0_discard));

		for_each_queue(bp, i) {
			printk(KERN_DEBUG "[%d]: %lu\t%lu\t%lu\n", i,
			       bnx2x_fp(bp, i, tx_pkt),
			       bnx2x_fp(bp, i, rx_pkt),
			       bnx2x_fp(bp, i, rx_calls));
		}
	}

	/* request the next snapshot */
	bnx2x_hw_stats_post(bp);
	bnx2x_storm_stats_post(bp);
}
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004286
/* Build the DMAE descriptors that write the final port and/or function
 * statistics back to their shmem locations (used on the stop path).
 * When both port and function blocks must be written, the first DMAE
 * completes into GRC to chain-load the second via the loader channel;
 * only the last descriptor completes into the PCI stats_comp word.
 */
static void bnx2x_port_stats_stop(struct bnx2x *bp)
{
	struct dmae_command *dmae;
	u32 opcode;
	int loader_idx = PMF_DMAE_C(bp);
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	bp->executer_idx = 0;

	/* common opcode bits; the completion-destination bit is chosen
	 * per descriptor below */
	opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
		  DMAE_CMD_C_ENABLE |
		  DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
		  DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
		  DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
		  (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
		  (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));

	if (bp->port.port_stx) {

		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		if (bp->func_stx)
			/* a function-stats DMAE follows: complete to GRC */
			dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
		else
			/* last descriptor: complete to the PCI comp word */
			dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
		dmae->dst_addr_lo = bp->port.port_stx >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = sizeof(struct host_port_stats) >> 2;
		if (bp->func_stx) {
			dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
			dmae->comp_addr_hi = 0;
			dmae->comp_val = 1;
		} else {
			dmae->comp_addr_lo =
				U64_LO(bnx2x_sp_mapping(bp, stats_comp));
			dmae->comp_addr_hi =
				U64_HI(bnx2x_sp_mapping(bp, stats_comp));
			dmae->comp_val = DMAE_COMP_VAL;

			*stats_comp = 0;
		}
	}

	if (bp->func_stx) {

		/* function stats: always the final descriptor, so it
		 * completes into the PCI stats_comp word */
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
		dmae->dst_addr_lo = bp->func_stx >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = sizeof(struct host_func_stats) >> 2;
		dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
		dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
		dmae->comp_val = DMAE_COMP_VAL;

		*stats_comp = 0;
	}
}
4350
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07004351static void bnx2x_stats_stop(struct bnx2x *bp)
4352{
4353 int update = 0;
4354
4355 bnx2x_stats_comp(bp);
4356
4357 if (bp->port.pmf)
4358 update = (bnx2x_hw_stats_update(bp) == 0);
4359
4360 update |= (bnx2x_storm_stats_update(bp) == 0);
4361
4362 if (update) {
4363 bnx2x_net_stats_update(bp);
4364
4365 if (bp->port.pmf)
4366 bnx2x_port_stats_stop(bp);
4367
4368 bnx2x_hw_stats_post(bp);
4369 bnx2x_stats_comp(bp);
4370 }
4371}
4372
/* No-op action for state/event pairs in bnx2x_stats_stm that need no work */
static void bnx2x_stats_do_nothing(struct bnx2x *bp)
{
}
4376
/* Statistics state machine, indexed as [current state][event]: each entry
 * gives the action to run and the state to enter afterwards.  Events are
 * delivered through bnx2x_stats_handle() below.
 */
static const struct {
	void (*action)(struct bnx2x *bp);
	enum bnx2x_stats_state next_state;
} bnx2x_stats_stm[STATS_STATE_MAX][STATS_EVENT_MAX] = {
/* state	event	*/
{
/* DISABLED	PMF	*/ {bnx2x_stats_pmf_update, STATS_STATE_DISABLED},
/*		LINK_UP	*/ {bnx2x_stats_start,      STATS_STATE_ENABLED},
/*		UPDATE	*/ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED},
/*		STOP	*/ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED}
},
{
/* ENABLED	PMF	*/ {bnx2x_stats_pmf_start,   STATS_STATE_ENABLED},
/*		LINK_UP	*/ {bnx2x_stats_restart,     STATS_STATE_ENABLED},
/*		UPDATE	*/ {bnx2x_stats_update,      STATS_STATE_ENABLED},
/*		STOP	*/ {bnx2x_stats_stop,        STATS_STATE_DISABLED}
}
};
4395
4396static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event)
4397{
4398 enum bnx2x_stats_state state = bp->stats_state;
4399
4400 bnx2x_stats_stm[state][event].action(bp);
4401 bp->stats_state = bnx2x_stats_stm[state][event].next_state;
4402
4403 if ((event != STATS_EVENT_UPDATE) || (bp->msglevel & NETIF_MSG_TIMER))
4404 DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n",
4405 state, event, bp->stats_state);
4406}
4407
/* Write the current (baseline) port statistics block out to its shmem
 * location once, synchronously.  Only valid on the PMF with a port stats
 * address assigned — anything else is a caller bug.
 */
static void bnx2x_port_stats_base_init(struct bnx2x *bp)
{
	struct dmae_command *dmae;
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	/* sanity */
	if (!bp->port.pmf || !bp->port.port_stx) {
		BNX2X_ERR("BUG!\n");
		return;
	}

	bp->executer_idx = 0;

	/* single PCI->GRC copy, completing into the PCI stats_comp word */
	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
			DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
			DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
			(BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
			(BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
	dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
	dmae->dst_addr_lo = bp->port.port_stx >> 2;
	dmae->dst_addr_hi = 0;
	dmae->len = sizeof(struct host_port_stats) >> 2;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	*stats_comp = 0;
	/* execute and wait for completion */
	bnx2x_hw_stats_post(bp);
	bnx2x_stats_comp(bp);
}
4445
/* Zero the per-function stats area in shared memory for every vnic on this
 * port (all E1H MF functions, or just this function on E1/SF).  PMF only.
 * Temporarily points bp->func_stx at each function's fw_mb_param address
 * so the common init/post helpers operate on that function, then restores
 * the original value.
 */
static void bnx2x_func_stats_base_init(struct bnx2x *bp)
{
	int vn, vn_max = IS_E1HMF(bp) ? E1HVN_MAX : E1VN_MAX;
	int port = BP_PORT(bp);
	int func;
	u32 func_stx;

	/* sanity */
	if (!bp->port.pmf || !bp->func_stx) {
		BNX2X_ERR("BUG!\n");
		return;
	}

	/* save our func_stx */
	func_stx = bp->func_stx;

	for (vn = VN_0; vn < vn_max; vn++) {
		func = 2*vn + port;	/* absolute function number */

		bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param);
		bnx2x_func_stats_init(bp);
		bnx2x_hw_stats_post(bp);
		bnx2x_stats_comp(bp);	/* wait before reusing the DMAE */
	}

	/* restore our func_stx */
	bp->func_stx = func_stx;
}
4474
/* Read the current per-function stats base back from shared memory into
 * the host func_stats_base buffer (one DMAE, GRC -> PCI).  Used by a
 * non-PMF function to pick up the baseline the PMF established.
 */
static void bnx2x_func_stats_base_update(struct bnx2x *bp)
{
	struct dmae_command *dmae = &bp->stats_dmae;
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	/* sanity - a function stats address must have been read from shmem */
	if (!bp->func_stx) {
		BNX2X_ERR("BUG!\n");
		return;
	}

	bp->executer_idx = 0;
	memset(dmae, 0, sizeof(struct dmae_command));

	/* DMAE: GRC (shmem func_stx) -> PCI (host func_stats_base) */
	dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
			DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
			DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
			(BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
			(BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae->src_addr_lo = bp->func_stx >> 2;	/* GRC addr in dwords */
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats_base));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats_base));
	dmae->len = sizeof(struct host_func_stats) >> 2;	/* in dwords */
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	/* clear the completion word, fire the DMAE and wait for it */
	*stats_comp = 0;
	bnx2x_hw_stats_post(bp);
	bnx2x_stats_comp(bp);
}
4512
/* One-time statistics initialization: discover the shmem stats addresses,
 * latch the current NIG counters as the "old" baseline, zero all per-queue
 * and aggregate SW stats, and establish the shmem stats baseline according
 * to this function's PMF role.
 */
static void bnx2x_stats_init(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	int i;

	bp->stats_pending = 0;
	bp->executer_idx = 0;
	bp->stats_counter = 0;

	/* port and func stats for management */
	if (!BP_NOMCP(bp)) {
		bp->port.port_stx = SHMEM_RD(bp, port_mb[port].port_stx);
		bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param);

	} else {
		/* no MCP - no shmem stats areas to report into */
		bp->port.port_stx = 0;
		bp->func_stx = 0;
	}
	DP(BNX2X_MSG_STATS, "port_stx 0x%x func_stx 0x%x\n",
	   bp->port.port_stx, bp->func_stx);

	/* port stats: snapshot the NIG counters so later updates can
	 * compute deltas against this baseline
	 */
	memset(&(bp->port.old_nig_stats), 0, sizeof(struct nig_stats));
	bp->port.old_nig_stats.brb_discard =
			REG_RD(bp, NIG_REG_STAT0_BRB_DISCARD + port*0x38);
	bp->port.old_nig_stats.brb_truncate =
			REG_RD(bp, NIG_REG_STAT0_BRB_TRUNCATE + port*0x38);
	REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT0 + port*0x50,
		    &(bp->port.old_nig_stats.egress_mac_pkt0_lo), 2);
	REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT1 + port*0x50,
		    &(bp->port.old_nig_stats.egress_mac_pkt1_lo), 2);

	/* function stats: clear the per-queue storm baselines and counters */
	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		memset(&fp->old_tclient, 0,
		       sizeof(struct tstorm_per_client_stats));
		memset(&fp->old_uclient, 0,
		       sizeof(struct ustorm_per_client_stats));
		memset(&fp->old_xclient, 0,
		       sizeof(struct xstorm_per_client_stats));
		memset(&fp->eth_q_stats, 0, sizeof(struct bnx2x_eth_q_stats));
	}

	memset(&bp->dev->stats, 0, sizeof(struct net_device_stats));
	memset(&bp->eth_stats, 0, sizeof(struct bnx2x_eth_stats));

	bp->stats_state = STATS_STATE_DISABLED;

	if (bp->port.pmf) {
		/* PMF establishes the shmem baselines ... */
		if (bp->port.port_stx)
			bnx2x_port_stats_base_init(bp);

		if (bp->func_stx)
			bnx2x_func_stats_base_init(bp);

	} else if (bp->func_stx)
		/* ... non-PMF functions read the baseline back */
		bnx2x_func_stats_base_update(bp);
}
4574
/* Periodic driver timer: services the poll-mode rings (debug), maintains
 * the driver<->MCP heartbeat (pulse sequence numbers in shmem), and kicks
 * a statistics UPDATE event.  Always re-arms itself.
 */
static void bnx2x_timer(unsigned long data)
{
	struct bnx2x *bp = (struct bnx2x *) data;

	if (!netif_running(bp->dev))
		return;

	/* interrupts are disabled (e.g. during reset) - just re-arm */
	if (atomic_read(&bp->intr_sem) != 0)
		goto timer_restart;

	if (poll) {
		/* poll mode (module param): service queue 0 from the timer
		 * instead of from interrupts
		 */
		struct bnx2x_fastpath *fp = &bp->fp[0];
		int rc;

		bnx2x_tx_int(fp);
		rc = bnx2x_rx_int(fp, 1000);	/* NOTE: rc is not examined */
	}

	if (!BP_NOMCP(bp)) {
		int func = BP_FUNC(bp);
		u32 drv_pulse;
		u32 mcp_pulse;

		/* advance and publish our heartbeat sequence number */
		++bp->fw_drv_pulse_wr_seq;
		bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
		/* TBD - add SYSTEM_TIME */
		drv_pulse = bp->fw_drv_pulse_wr_seq;
		SHMEM_WR(bp, func_mb[func].drv_pulse_mb, drv_pulse);

		mcp_pulse = (SHMEM_RD(bp, func_mb[func].mcp_pulse_mb) &
			     MCP_PULSE_SEQ_MASK);
		/* The delta between driver pulse and mcp response
		 * should be 1 (before mcp response) or 0 (after mcp response)
		 */
		if ((drv_pulse != mcp_pulse) &&
		    (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
			/* someone lost a heartbeat... */
			BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
				  drv_pulse, mcp_pulse);
		}
	}

	/* statistics are only collected while the device is up */
	if ((bp->state == BNX2X_STATE_OPEN) ||
	    (bp->state == BNX2X_STATE_DISABLED))
		bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);

timer_restart:
	mod_timer(&bp->timer, jiffies + bp->current_interval);
}
4624
4625/* end of Statistics */
4626
4627/* nic init */
4628
4629/*
4630 * nic init service functions
4631 */
4632
/* Clear the USTORM and CSTORM halves of a fastpath status block in the
 * CSEM fast memory (both live under CSTORM on this FW) for the given
 * port/sb_id.
 */
static void bnx2x_zero_sb(struct bnx2x *bp, int sb_id)
{
	int port = BP_PORT(bp);

	/* "CSTORM" */
	bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
			CSTORM_SB_HOST_STATUS_BLOCK_U_OFFSET(port, sb_id), 0,
			CSTORM_SB_STATUS_BLOCK_U_SIZE / 4);
	bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
			CSTORM_SB_HOST_STATUS_BLOCK_C_OFFSET(port, sb_id), 0,
			CSTORM_SB_STATUS_BLOCK_C_SIZE / 4);
}
4645
/* Initialize one fastpath status block: program its host DMA address and
 * owning function into the CSTORM internal memory for both the USTORM and
 * CSTORM sections, disable host coalescing on every index, and ACK/enable
 * the IGU interrupt for this sb_id.
 */
static void bnx2x_init_sb(struct bnx2x *bp, struct host_status_block *sb,
			  dma_addr_t mapping, int sb_id)
{
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	int index;
	u64 section;

	/* USTORM */
	section = ((u64)mapping) + offsetof(struct host_status_block,
					    u_status_block);
	sb->u_status_block.status_block_id = sb_id;

	/* host address (lo/hi) and owning function of the U section */
	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       CSTORM_SB_HOST_SB_ADDR_U_OFFSET(port, sb_id), U64_LO(section));
	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       ((CSTORM_SB_HOST_SB_ADDR_U_OFFSET(port, sb_id)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_USB_FUNC_OFF +
		CSTORM_SB_HOST_STATUS_BLOCK_U_OFFSET(port, sb_id), func);

	/* start with host coalescing disabled on all U indices */
	for (index = 0; index < HC_USTORM_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_CSTRORM_INTMEM +
			 CSTORM_SB_HC_DISABLE_U_OFFSET(port, sb_id, index), 1);

	/* CSTORM */
	section = ((u64)mapping) + offsetof(struct host_status_block,
					    c_status_block);
	sb->c_status_block.status_block_id = sb_id;

	/* host address (lo/hi) and owning function of the C section */
	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       CSTORM_SB_HOST_SB_ADDR_C_OFFSET(port, sb_id), U64_LO(section));
	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       ((CSTORM_SB_HOST_SB_ADDR_C_OFFSET(port, sb_id)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_CSB_FUNC_OFF +
		CSTORM_SB_HOST_STATUS_BLOCK_C_OFFSET(port, sb_id), func);

	/* start with host coalescing disabled on all C indices */
	for (index = 0; index < HC_CSTORM_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_CSTRORM_INTMEM +
			 CSTORM_SB_HC_DISABLE_C_OFFSET(port, sb_id, index), 1);

	bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
}
4690
/* Clear this function's default status block sections in the TSTORM,
 * CSTORM (U and C halves) and XSTORM fast memories.
 */
static void bnx2x_zero_def_sb(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);

	bnx2x_init_fill(bp, TSEM_REG_FAST_MEMORY +
			TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
			sizeof(struct tstorm_def_status_block)/4);
	bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
			CSTORM_DEF_SB_HOST_STATUS_BLOCK_U_OFFSET(func), 0,
			sizeof(struct cstorm_def_status_block_u)/4);
	bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
			CSTORM_DEF_SB_HOST_STATUS_BLOCK_C_OFFSET(func), 0,
			sizeof(struct cstorm_def_status_block_c)/4);
	bnx2x_init_fill(bp, XSEM_REG_FAST_MEMORY +
			XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
			sizeof(struct xstorm_def_status_block)/4);
}
4708
/* Initialize the default (slowpath) status block: set up the attention
 * section (cache the AEU attention-group masks, program the attention
 * message address and number in the HC), then program the host addresses
 * and owning function of the U/C/T/X sections into the respective storm
 * internal memories with host coalescing disabled on every index, and
 * finally ACK/enable the IGU interrupt.
 */
static void bnx2x_init_def_sb(struct bnx2x *bp,
			      struct host_def_status_block *def_sb,
			      dma_addr_t mapping, int sb_id)
{
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	int index, val, reg_offset;
	u64 section;

	/* ATTN */
	section = ((u64)mapping) + offsetof(struct host_def_status_block,
					    atten_status_block);
	def_sb->atten_status_block.status_block_id = sb_id;

	bp->attn_state = 0;

	/* cache the four AEU enable masks of each dynamic attention group */
	reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
			     MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);

	for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
		bp->attn_group[index].sig[0] = REG_RD(bp,
						      reg_offset + 0x10*index);
		bp->attn_group[index].sig[1] = REG_RD(bp,
					       reg_offset + 0x4 + 0x10*index);
		bp->attn_group[index].sig[2] = REG_RD(bp,
					       reg_offset + 0x8 + 0x10*index);
		bp->attn_group[index].sig[3] = REG_RD(bp,
					       reg_offset + 0xc + 0x10*index);
	}

	/* tell the HC where to write attention messages */
	reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
			     HC_REG_ATTN_MSG0_ADDR_L);

	REG_WR(bp, reg_offset, U64_LO(section));
	REG_WR(bp, reg_offset + 4, U64_HI(section));

	/* set the attention status block number */
	reg_offset = (port ? HC_REG_ATTN_NUM_P1 : HC_REG_ATTN_NUM_P0);

	val = REG_RD(bp, reg_offset);
	val |= sb_id;
	REG_WR(bp, reg_offset, val);

	/* USTORM */
	section = ((u64)mapping) + offsetof(struct host_def_status_block,
					    u_def_status_block);
	def_sb->u_def_status_block.status_block_id = sb_id;

	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       CSTORM_DEF_SB_HOST_SB_ADDR_U_OFFSET(func), U64_LO(section));
	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       ((CSTORM_DEF_SB_HOST_SB_ADDR_U_OFFSET(func)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_USB_FUNC_OFF +
		CSTORM_DEF_SB_HOST_STATUS_BLOCK_U_OFFSET(func), func);

	for (index = 0; index < HC_USTORM_DEF_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_CSTRORM_INTMEM +
			 CSTORM_DEF_SB_HC_DISABLE_U_OFFSET(func, index), 1);

	/* CSTORM */
	section = ((u64)mapping) + offsetof(struct host_def_status_block,
					    c_def_status_block);
	def_sb->c_def_status_block.status_block_id = sb_id;

	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       CSTORM_DEF_SB_HOST_SB_ADDR_C_OFFSET(func), U64_LO(section));
	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       ((CSTORM_DEF_SB_HOST_SB_ADDR_C_OFFSET(func)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_CSB_FUNC_OFF +
		CSTORM_DEF_SB_HOST_STATUS_BLOCK_C_OFFSET(func), func);

	for (index = 0; index < HC_CSTORM_DEF_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_CSTRORM_INTMEM +
			 CSTORM_DEF_SB_HC_DISABLE_C_OFFSET(func, index), 1);

	/* TSTORM */
	section = ((u64)mapping) + offsetof(struct host_def_status_block,
					    t_def_status_block);
	def_sb->t_def_status_block.status_block_id = sb_id;

	REG_WR(bp, BAR_TSTRORM_INTMEM +
	       TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
	REG_WR(bp, BAR_TSTRORM_INTMEM +
	       ((TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_TSTRORM_INTMEM + DEF_TSB_FUNC_OFF +
		TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);

	for (index = 0; index < HC_TSTORM_DEF_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_TSTRORM_INTMEM +
			 TSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);

	/* XSTORM */
	section = ((u64)mapping) + offsetof(struct host_def_status_block,
					    x_def_status_block);
	def_sb->x_def_status_block.status_block_id = sb_id;

	REG_WR(bp, BAR_XSTRORM_INTMEM +
	       XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
	REG_WR(bp, BAR_XSTRORM_INTMEM +
	       ((XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_XSTRORM_INTMEM + DEF_XSB_FUNC_OFF +
		XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);

	for (index = 0; index < HC_XSTORM_DEF_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_XSTRORM_INTMEM +
			 XSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);

	bp->stats_pending = 0;
	bp->set_mac_pending = 0;

	bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
}
4824
/* Program the host-coalescing timeouts for every queue's Rx and Tx CQ
 * consumer index.  Tick values are converted to HW units (divided by 12);
 * a zero timeout also sets the per-index disable bit so coalescing is off.
 */
static void bnx2x_update_coalesce(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int i;

	for_each_queue(bp, i) {
		int sb_id = bp->fp[i].sb_id;

		/* HC_INDEX_U_ETH_RX_CQ_CONS */
		REG_WR8(bp, BAR_CSTRORM_INTMEM +
			CSTORM_SB_HC_TIMEOUT_U_OFFSET(port, sb_id,
						      U_SB_ETH_RX_CQ_INDEX),
			bp->rx_ticks/12);
		REG_WR16(bp, BAR_CSTRORM_INTMEM +
			 CSTORM_SB_HC_DISABLE_U_OFFSET(port, sb_id,
						       U_SB_ETH_RX_CQ_INDEX),
			 (bp->rx_ticks/12) ? 0 : 1);

		/* HC_INDEX_C_ETH_TX_CQ_CONS */
		REG_WR8(bp, BAR_CSTRORM_INTMEM +
			CSTORM_SB_HC_TIMEOUT_C_OFFSET(port, sb_id,
						      C_SB_ETH_TX_CQ_INDEX),
			bp->tx_ticks/12);
		REG_WR16(bp, BAR_CSTRORM_INTMEM +
			 CSTORM_SB_HC_DISABLE_C_OFFSET(port, sb_id,
						       C_SB_ETH_TX_CQ_INDEX),
			 (bp->tx_ticks/12) ? 0 : 1);
	}
}
4854
/* Release the first 'last' entries of a queue's TPA (LRO aggregation) skb
 * pool.  Bins still in the START state hold a live DMA mapping that must
 * be unmapped before the skb is freed; empty bins are only logged.
 */
static inline void bnx2x_free_tpa_pool(struct bnx2x *bp,
				       struct bnx2x_fastpath *fp, int last)
{
	int i;

	for (i = 0; i < last; i++) {
		struct sw_rx_bd *rx_buf = &(fp->tpa_pool[i]);
		struct sk_buff *skb = rx_buf->skb;

		if (skb == NULL) {
			DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
			continue;
		}

		/* only START-state bins own a DMA mapping */
		if (fp->tpa_state[i] == BNX2X_TPA_START)
			pci_unmap_single(bp->pdev,
					 pci_unmap_addr(rx_buf, mapping),
					 bp->rx_buf_size, PCI_DMA_FROMDEVICE);

		dev_kfree_skb(skb);
		rx_buf->skb = NULL;
	}
}
4878
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004879static void bnx2x_init_rx_rings(struct bnx2x *bp)
4880{
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07004881 int func = BP_FUNC(bp);
Eilon Greenstein32626232008-08-13 15:51:07 -07004882 int max_agg_queues = CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 :
4883 ETH_MAX_AGGREGATION_QUEUES_E1H;
4884 u16 ring_prod, cqe_ring_prod;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004885 int i, j;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004886
Eilon Greenstein87942b42009-02-12 08:36:49 +00004887 bp->rx_buf_size = bp->dev->mtu + ETH_OVREHEAD + BNX2X_RX_ALIGN;
Eilon Greenstein0f008462009-02-12 08:36:18 +00004888 DP(NETIF_MSG_IFUP,
4889 "mtu %d rx_buf_size %d\n", bp->dev->mtu, bp->rx_buf_size);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004890
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07004891 if (bp->flags & TPA_ENABLE_FLAG) {
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07004892
Eilon Greenstein555f6c72009-02-12 08:36:11 +00004893 for_each_rx_queue(bp, j) {
Eilon Greenstein32626232008-08-13 15:51:07 -07004894 struct bnx2x_fastpath *fp = &bp->fp[j];
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07004895
Eilon Greenstein32626232008-08-13 15:51:07 -07004896 for (i = 0; i < max_agg_queues; i++) {
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07004897 fp->tpa_pool[i].skb =
4898 netdev_alloc_skb(bp->dev, bp->rx_buf_size);
4899 if (!fp->tpa_pool[i].skb) {
4900 BNX2X_ERR("Failed to allocate TPA "
4901 "skb pool for queue[%d] - "
4902 "disabling TPA on this "
4903 "queue!\n", j);
4904 bnx2x_free_tpa_pool(bp, fp, i);
4905 fp->disable_tpa = 1;
4906 break;
4907 }
4908 pci_unmap_addr_set((struct sw_rx_bd *)
4909 &bp->fp->tpa_pool[i],
4910 mapping, 0);
4911 fp->tpa_state[i] = BNX2X_TPA_STOP;
4912 }
4913 }
4914 }
4915
Eilon Greenstein555f6c72009-02-12 08:36:11 +00004916 for_each_rx_queue(bp, j) {
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004917 struct bnx2x_fastpath *fp = &bp->fp[j];
4918
4919 fp->rx_bd_cons = 0;
4920 fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07004921 fp->rx_bd_cons_sb = BNX2X_RX_SB_BD_INDEX;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004922
Eilon Greensteinca003922009-08-12 22:53:28 -07004923 /* Mark queue as Rx */
4924 fp->is_rx_queue = 1;
4925
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07004926 /* "next page" elements initialization */
4927 /* SGE ring */
4928 for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
4929 struct eth_rx_sge *sge;
4930
4931 sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
4932 sge->addr_hi =
4933 cpu_to_le32(U64_HI(fp->rx_sge_mapping +
4934 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
4935 sge->addr_lo =
4936 cpu_to_le32(U64_LO(fp->rx_sge_mapping +
4937 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
4938 }
4939
4940 bnx2x_init_sge_ring_bit_mask(fp);
4941
4942 /* RX BD ring */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004943 for (i = 1; i <= NUM_RX_RINGS; i++) {
4944 struct eth_rx_bd *rx_bd;
4945
4946 rx_bd = &fp->rx_desc_ring[RX_DESC_CNT * i - 2];
4947 rx_bd->addr_hi =
4948 cpu_to_le32(U64_HI(fp->rx_desc_mapping +
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004949 BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004950 rx_bd->addr_lo =
4951 cpu_to_le32(U64_LO(fp->rx_desc_mapping +
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004952 BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004953 }
4954
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004955 /* CQ ring */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004956 for (i = 1; i <= NUM_RCQ_RINGS; i++) {
4957 struct eth_rx_cqe_next_page *nextpg;
4958
4959 nextpg = (struct eth_rx_cqe_next_page *)
4960 &fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
4961 nextpg->addr_hi =
4962 cpu_to_le32(U64_HI(fp->rx_comp_mapping +
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004963 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004964 nextpg->addr_lo =
4965 cpu_to_le32(U64_LO(fp->rx_comp_mapping +
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004966 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004967 }
4968
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07004969 /* Allocate SGEs and initialize the ring elements */
4970 for (i = 0, ring_prod = 0;
4971 i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004972
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07004973 if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
4974 BNX2X_ERR("was only able to allocate "
4975 "%d rx sges\n", i);
4976 BNX2X_ERR("disabling TPA for queue[%d]\n", j);
4977 /* Cleanup already allocated elements */
4978 bnx2x_free_rx_sge_range(bp, fp, ring_prod);
Eilon Greenstein32626232008-08-13 15:51:07 -07004979 bnx2x_free_tpa_pool(bp, fp, max_agg_queues);
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07004980 fp->disable_tpa = 1;
4981 ring_prod = 0;
4982 break;
4983 }
4984 ring_prod = NEXT_SGE_IDX(ring_prod);
4985 }
4986 fp->rx_sge_prod = ring_prod;
4987
4988 /* Allocate BDs and initialize BD ring */
Yitchak Gertner66e855f2008-08-13 15:49:05 -07004989 fp->rx_comp_cons = 0;
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07004990 cqe_ring_prod = ring_prod = 0;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004991 for (i = 0; i < bp->rx_ring_size; i++) {
4992 if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) {
4993 BNX2X_ERR("was only able to allocate "
Eilon Greensteinde832a52009-02-12 08:36:33 +00004994 "%d rx skbs on queue[%d]\n", i, j);
4995 fp->eth_q_stats.rx_skb_alloc_failed++;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004996 break;
4997 }
4998 ring_prod = NEXT_RX_IDX(ring_prod);
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07004999 cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
Ilpo Järvinen53e5e962008-07-25 21:40:45 -07005000 WARN_ON(ring_prod <= i);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005001 }
5002
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07005003 fp->rx_bd_prod = ring_prod;
5004 /* must not have more available CQEs than BDs */
5005 fp->rx_comp_prod = min((u16)(NUM_RCQ_RINGS*RCQ_DESC_CNT),
5006 cqe_ring_prod);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005007 fp->rx_pkt = fp->rx_calls = 0;
5008
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07005009 /* Warning!
5010 * this will generate an interrupt (to the TSTORM)
5011 * must only be done after chip is initialized
5012 */
5013 bnx2x_update_rx_prod(bp, fp, ring_prod, fp->rx_comp_prod,
5014 fp->rx_sge_prod);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005015 if (j != 0)
5016 continue;
5017
5018 REG_WR(bp, BAR_USTRORM_INTMEM +
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07005019 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005020 U64_LO(fp->rx_comp_mapping));
5021 REG_WR(bp, BAR_USTRORM_INTMEM +
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07005022 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005023 U64_HI(fp->rx_comp_mapping));
5024 }
5025}
5026
/* Initialize all Tx rings: chain the "next page" BD of every ring page,
 * reset the doorbell data and the producer/consumer indices, and clear the
 * per-queue Tx packet counters.
 */
static void bnx2x_init_tx_ring(struct bnx2x *bp)
{
	int i, j;

	for_each_tx_queue(bp, j) {
		struct bnx2x_fastpath *fp = &bp->fp[j];

		/* link each page's last BD to the start of the next page */
		for (i = 1; i <= NUM_TX_RINGS; i++) {
			struct eth_tx_next_bd *tx_next_bd =
				&fp->tx_desc_ring[TX_DESC_CNT * i - 1].next_bd;

			tx_next_bd->addr_hi =
				cpu_to_le32(U64_HI(fp->tx_desc_mapping +
					    BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
			tx_next_bd->addr_lo =
				cpu_to_le32(U64_LO(fp->tx_desc_mapping +
					    BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
		}

		fp->tx_db.data.header.header = DOORBELL_HDR_DB_TYPE;
		fp->tx_db.data.zero_fill1 = 0;
		fp->tx_db.data.prod = 0;

		fp->tx_pkt_prod = 0;
		fp->tx_pkt_cons = 0;
		fp->tx_bd_prod = 0;
		fp->tx_bd_cons = 0;
		fp->tx_cons_sb = BNX2X_TX_SB_INDEX;
		fp->tx_pkt = 0;
	}

	/* clean tx statistics */
	/* NOTE(review): iterates the Rx queues although it clears tx_pkt -
	 * presumably relies on the Rx/Tx queue numbering of this scheme;
	 * confirm against for_each_rx_queue/for_each_tx_queue definitions
	 */
	for_each_rx_queue(bp, i)
		bnx2x_fp(bp, i, tx_pkt) = 0;
}
5062
/* Initialize the slowpath (SPQ) ring: reset the host-side bookkeeping and
 * program the ring's base address and initial producer into the XSTORM
 * fast memory for this function.
 */
static void bnx2x_init_sp_ring(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);

	spin_lock_init(&bp->spq_lock);

	bp->spq_left = MAX_SPQ_PENDING;
	bp->spq_prod_idx = 0;
	bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
	bp->spq_prod_bd = bp->spq;
	bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;

	/* SPQ page base (lo/hi) ... */
	REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func),
	       U64_LO(bp->spq_mapping));
	REG_WR(bp,
	       XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func) + 4,
	       U64_HI(bp->spq_mapping));

	/* ... then the initial producer */
	REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PROD_OFFSET(func),
	       bp->spq_prod_idx);
}
5084
/* Program the per-connection ETH context for every fast-path queue.
 *
 * For each Rx queue: fill the USTORM part of the context (status-block /
 * client ids, BD buffer size and ring base, optional TPA/SGE setup) plus
 * the CDU reservation words.  For each Tx queue: fill the CSTORM and
 * XSTORM parts (Tx CQ index, BD ring base, statistics id).
 */
static void bnx2x_init_context(struct bnx2x *bp)
{
	int i;

	for_each_rx_queue(bp, i) {
		struct eth_context *context = bnx2x_sp(bp, context[i].eth);
		struct bnx2x_fastpath *fp = &bp->fp[i];
		u8 cl_id = fp->cl_id;

		/* Rx side: the USTORM context for this client */
		context->ustorm_st_context.common.sb_index_numbers =
						BNX2X_RX_SB_INDEX_NUM;
		context->ustorm_st_context.common.clientId = cl_id;
		context->ustorm_st_context.common.status_block_id = fp->sb_id;
		context->ustorm_st_context.common.flags =
			(USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_MC_ALIGNMENT |
			 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_STATISTICS);
		context->ustorm_st_context.common.statistics_counter_id =
						cl_id;
		context->ustorm_st_context.common.mc_alignment_log_size =
						BNX2X_RX_ALIGN_SHIFT;
		context->ustorm_st_context.common.bd_buff_size =
						bp->rx_buf_size;
		context->ustorm_st_context.common.bd_page_base_hi =
						U64_HI(fp->rx_desc_mapping);
		context->ustorm_st_context.common.bd_page_base_lo =
						U64_LO(fp->rx_desc_mapping);
		if (!fp->disable_tpa) {
			/* TPA enabled on this queue: program the SGE ring.
			   sge_buff_size is clamped to the 16-bit field max */
			context->ustorm_st_context.common.flags |=
				USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_TPA;
			context->ustorm_st_context.common.sge_buff_size =
				(u16)min((u32)SGE_PAGE_SIZE*PAGES_PER_SGE,
					 (u32)0xffff);
			context->ustorm_st_context.common.sge_page_base_hi =
						U64_HI(fp->rx_sge_mapping);
			context->ustorm_st_context.common.sge_page_base_lo =
						U64_LO(fp->rx_sge_mapping);

			/* SGEs needed for an MTU-sized packet, rounded up
			   to a whole number of SGE pages */
			context->ustorm_st_context.common.max_sges_for_packet =
				SGE_PAGE_ALIGN(bp->dev->mtu) >> SGE_PAGE_SHIFT;
			context->ustorm_st_context.common.max_sges_for_packet =
				((context->ustorm_st_context.common.
				  max_sges_for_packet + PAGES_PER_SGE - 1) &
				 (~(PAGES_PER_SGE - 1))) >> PAGES_PER_SGE_SHIFT;
		}

		/* CDU reservation words for the U and X aggregation
		   contexts of this connection */
		context->ustorm_ag_context.cdu_usage =
			CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
					       CDU_REGION_NUMBER_UCM_AG,
					       ETH_CONNECTION_TYPE);

		context->xstorm_ag_context.cdu_reserved =
			CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
					       CDU_REGION_NUMBER_XCM_AG,
					       ETH_CONNECTION_TYPE);
	}

	/* Tx side: Tx queue i shares the context of Rx queue
	   (i - num_rx_queues) - see the cl_id assignment in bnx2x_nic_init */
	for_each_tx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];
		struct eth_context *context =
			bnx2x_sp(bp, context[i - bp->num_rx_queues].eth);

		context->cstorm_st_context.sb_index_number =
						C_SB_ETH_TX_CQ_INDEX;
		context->cstorm_st_context.status_block_id = fp->sb_id;

		context->xstorm_st_context.tx_bd_page_base_hi =
						U64_HI(fp->tx_desc_mapping);
		context->xstorm_st_context.tx_bd_page_base_lo =
						U64_LO(fp->tx_desc_mapping);
		context->xstorm_st_context.statistics_data = (fp->cl_id |
				XSTORM_ETH_ST_CONTEXT_STATISTICS_ENABLE);
	}
}
5158
5159static void bnx2x_init_ind_table(struct bnx2x *bp)
5160{
Eilon Greenstein26c8fa42009-01-14 21:29:55 -08005161 int func = BP_FUNC(bp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005162 int i;
5163
Eilon Greenstein555f6c72009-02-12 08:36:11 +00005164 if (bp->multi_mode == ETH_RSS_MODE_DISABLED)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005165 return;
5166
Eilon Greenstein555f6c72009-02-12 08:36:11 +00005167 DP(NETIF_MSG_IFUP,
5168 "Initializing indirection table multi_mode %d\n", bp->multi_mode);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005169 for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005170 REG_WR8(bp, BAR_TSTRORM_INTMEM +
Eilon Greenstein26c8fa42009-01-14 21:29:55 -08005171 TSTORM_INDIRECTION_TABLE_OFFSET(func) + i,
Eilon Greenstein0626b892009-02-12 08:38:14 +00005172 bp->fp->cl_id + (i % bp->num_rx_queues));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005173}
5174
/* Write the TSTORM per-client configuration (MTU, statistics and
 * VLAN/E1HOV stripping flags) for every fast-path client of this port.
 * The two 32-bit words of the config struct are punned into register
 * writes; only statistics_counter_id differs between clients.
 */
static void bnx2x_set_client_config(struct bnx2x *bp)
{
	struct tstorm_eth_client_config tstorm_client = {0};
	int port = BP_PORT(bp);
	int i;

	tstorm_client.mtu = bp->dev->mtu;
	tstorm_client.config_flags =
				(TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE |
				 TSTORM_ETH_CLIENT_CONFIG_E1HOV_REM_ENABLE);
#ifdef BCM_VLAN
	/* HW VLAN stripping only when Rx is active and a vlan group exists */
	if (bp->rx_mode && bp->vlgrp && (bp->flags & HW_VLAN_RX_FLAG)) {
		tstorm_client.config_flags |=
				TSTORM_ETH_CLIENT_CONFIG_VLAN_REM_ENABLE;
		DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
	}
#endif

	for_each_queue(bp, i) {
		tstorm_client.statistics_counter_id = bp->fp[i].cl_id;

		/* the struct is exactly two u32s wide - write both words */
		REG_WR(bp, BAR_TSTRORM_INTMEM +
		       TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id),
		       ((u32 *)&tstorm_client)[0]);
		REG_WR(bp, BAR_TSTRORM_INTMEM +
		       TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id) + 4,
		       ((u32 *)&tstorm_client)[1]);
	}

	DP(BNX2X_MSG_OFF, "tstorm_client: 0x%08x 0x%08x\n",
	   ((u32 *)&tstorm_client)[0], ((u32 *)&tstorm_client)[1]);
}
5207
/* Translate the driver rx_mode into the TSTORM MAC filter configuration
 * and the NIG LLH drive mask, then write both to the chip.  Also
 * (re)programs the per-client config unless Rx is fully disabled.
 */
static void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
{
	struct tstorm_eth_mac_filter_config tstorm_mac_filter = {0};
	int mode = bp->rx_mode;
	int mask = (1 << BP_L_ID(bp));	/* per-function bit in the filter */
	int func = BP_FUNC(bp);
	int port = BP_PORT(bp);
	int i;
	/* All but management unicast packets should pass to the host as well */
	u32 llh_mask =
		NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_BRCST |
		NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_MLCST |
		NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_VLAN |
		NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_NO_VLAN;

	DP(NETIF_MSG_IFUP, "rx mode %d mask 0x%x\n", mode, mask);

	switch (mode) {
	case BNX2X_RX_MODE_NONE: /* no Rx */
		tstorm_mac_filter.ucast_drop_all = mask;
		tstorm_mac_filter.mcast_drop_all = mask;
		tstorm_mac_filter.bcast_drop_all = mask;
		break;

	case BNX2X_RX_MODE_NORMAL:
		/* unicast/multicast filtered by MAC; accept broadcast */
		tstorm_mac_filter.bcast_accept_all = mask;
		break;

	case BNX2X_RX_MODE_ALLMULTI:
		tstorm_mac_filter.mcast_accept_all = mask;
		tstorm_mac_filter.bcast_accept_all = mask;
		break;

	case BNX2X_RX_MODE_PROMISC:
		tstorm_mac_filter.ucast_accept_all = mask;
		tstorm_mac_filter.mcast_accept_all = mask;
		tstorm_mac_filter.bcast_accept_all = mask;
		/* pass management unicast packets as well */
		llh_mask |= NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_UNCST;
		break;

	default:
		/* leave the filter zeroed; log and fall through to writes */
		BNX2X_ERR("BAD rx mode (%d)\n", mode);
		break;
	}

	/* LLH1 register for port 1, LLH0 for port 0 */
	REG_WR(bp,
	       (port ? NIG_REG_LLH1_BRB1_DRV_MASK : NIG_REG_LLH0_BRB1_DRV_MASK),
	       llh_mask);

	for (i = 0; i < sizeof(struct tstorm_eth_mac_filter_config)/4; i++) {
		REG_WR(bp, BAR_TSTRORM_INTMEM +
		       TSTORM_MAC_FILTER_CONFIG_OFFSET(func) + i * 4,
		       ((u32 *)&tstorm_mac_filter)[i]);

/*		DP(NETIF_MSG_IFUP, "tstorm_mac_filter[%d]: 0x%08x\n", i,
		   ((u32 *)&tstorm_mac_filter)[i]); */
	}

	if (mode != BNX2X_RX_MODE_NONE)
		bnx2x_set_client_config(bp);
}
5270
Eilon Greenstein471de712008-08-13 15:49:35 -07005271static void bnx2x_init_internal_common(struct bnx2x *bp)
5272{
5273 int i;
5274
5275 /* Zero this manually as its initialization is
5276 currently missing in the initTool */
5277 for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
5278 REG_WR(bp, BAR_USTRORM_INTMEM +
5279 USTORM_AGG_DATA_OFFSET + i * 4, 0);
5280}
5281
5282static void bnx2x_init_internal_port(struct bnx2x *bp)
5283{
5284 int port = BP_PORT(bp);
5285
Eilon Greensteinca003922009-08-12 22:53:28 -07005286 REG_WR(bp,
5287 BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_U_OFFSET(port), BNX2X_BTR);
5288 REG_WR(bp,
5289 BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_C_OFFSET(port), BNX2X_BTR);
Eilon Greenstein471de712008-08-13 15:49:35 -07005290 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
5291 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
5292}
5293
/* Per-function internal-memory initialization: TSTORM common config
 * (RSS/TPA/E1HOV), storm Rx mode, per-client statistics reset, the
 * statistics query address for each storm, CQE page bases and max
 * aggregation size per Rx queue, dropless flow control thresholds
 * (E1H only) and the rate shaping / fairness (cmng) context.
 */
static void bnx2x_init_internal_func(struct bnx2x *bp)
{
	struct tstorm_eth_function_common_config tstorm_config = {0};
	struct stats_indication_flags stats_flags = {0};
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	int i, j;
	u32 offset;
	u16 max_agg_size;

	/* RSS configuration only when more than one queue is used */
	if (is_multi(bp)) {
		tstorm_config.config_flags = MULTI_FLAGS(bp);
		tstorm_config.rss_result_mask = MULTI_MASK;
	}

	/* Enable TPA if needed */
	if (bp->flags & TPA_ENABLE_FLAG)
		tstorm_config.config_flags |=
			TSTORM_ETH_FUNCTION_COMMON_CONFIG_ENABLE_TPA;

	if (IS_E1HMF(bp))
		tstorm_config.config_flags |=
				TSTORM_ETH_FUNCTION_COMMON_CONFIG_E1HOV_IN_CAM;

	tstorm_config.leading_client_id = BP_L_ID(bp);

	REG_WR(bp, BAR_TSTRORM_INTMEM +
	       TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(func),
	       (*(u32 *)&tstorm_config));

	bp->rx_mode = BNX2X_RX_MODE_NONE; /* no rx until link is up */
	bnx2x_set_storm_rx_mode(bp);

	/* Zero the per-client statistics areas of the X/T/U storms */
	for_each_queue(bp, i) {
		u8 cl_id = bp->fp[i].cl_id;

		/* reset xstorm per client statistics */
		offset = BAR_XSTRORM_INTMEM +
			 XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
		for (j = 0;
		     j < sizeof(struct xstorm_per_client_stats) / 4; j++)
			REG_WR(bp, offset + j*4, 0);

		/* reset tstorm per client statistics */
		offset = BAR_TSTRORM_INTMEM +
			 TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
		for (j = 0;
		     j < sizeof(struct tstorm_per_client_stats) / 4; j++)
			REG_WR(bp, offset + j*4, 0);

		/* reset ustorm per client statistics */
		offset = BAR_USTRORM_INTMEM +
			 USTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
		for (j = 0;
		     j < sizeof(struct ustorm_per_client_stats) / 4; j++)
			REG_WR(bp, offset + j*4, 0);
	}

	/* Init statistics related context */
	stats_flags.collect_eth = 1;

	/* the same two flag words go to all four storms */
	REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func),
	       ((u32 *)&stats_flags)[0]);
	REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func) + 4,
	       ((u32 *)&stats_flags)[1]);

	REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func),
	       ((u32 *)&stats_flags)[0]);
	REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func) + 4,
	       ((u32 *)&stats_flags)[1]);

	REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func),
	       ((u32 *)&stats_flags)[0]);
	REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func) + 4,
	       ((u32 *)&stats_flags)[1]);

	REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func),
	       ((u32 *)&stats_flags)[0]);
	REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func) + 4,
	       ((u32 *)&stats_flags)[1]);

	/* DMA address of the fw_stats buffer, split into lo/hi words,
	   for the X/T/U storm statistics queries */
	REG_WR(bp, BAR_XSTRORM_INTMEM +
	       XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
	       U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
	REG_WR(bp, BAR_XSTRORM_INTMEM +
	       XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
	       U64_HI(bnx2x_sp_mapping(bp, fw_stats)));

	REG_WR(bp, BAR_TSTRORM_INTMEM +
	       TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
	       U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
	REG_WR(bp, BAR_TSTRORM_INTMEM +
	       TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
	       U64_HI(bnx2x_sp_mapping(bp, fw_stats)));

	REG_WR(bp, BAR_USTRORM_INTMEM +
	       USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
	       U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
	REG_WR(bp, BAR_USTRORM_INTMEM +
	       USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
	       U64_HI(bnx2x_sp_mapping(bp, fw_stats)));

	/* E1H: tell each storm whether we run in multi-function mode,
	   and program the outer-VLAN tag of this function */
	if (CHIP_IS_E1H(bp)) {
		REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNCTION_MODE_OFFSET,
			IS_E1HMF(bp));
		REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNCTION_MODE_OFFSET,
			IS_E1HMF(bp));
		REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNCTION_MODE_OFFSET,
			IS_E1HMF(bp));
		REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNCTION_MODE_OFFSET,
			IS_E1HMF(bp));

		REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_E1HOV_OFFSET(func),
			 bp->e1hov);
	}

	/* Init CQ ring mapping and aggregation size, the FW limit is 8 frags */
	max_agg_size =
		min((u32)(min((u32)8, (u32)MAX_SKB_FRAGS) *
			  SGE_PAGE_SIZE * PAGES_PER_SGE),
		    (u32)0xffff);
	for_each_rx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		/* CQE page base (lo/hi) for this client */
		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id),
		       U64_LO(fp->rx_comp_mapping));
		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id) + 4,
		       U64_HI(fp->rx_comp_mapping));

		/* Next page */
		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_CQE_PAGE_NEXT_OFFSET(port, fp->cl_id),
		       U64_LO(fp->rx_comp_mapping + BCM_PAGE_SIZE));
		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_CQE_PAGE_NEXT_OFFSET(port, fp->cl_id) + 4,
		       U64_HI(fp->rx_comp_mapping + BCM_PAGE_SIZE));

		REG_WR16(bp, BAR_USTRORM_INTMEM +
			 USTORM_MAX_AGG_SIZE_OFFSET(port, fp->cl_id),
			 max_agg_size);
	}

	/* dropless flow control */
	if (CHIP_IS_E1H(bp)) {
		struct ustorm_eth_rx_pause_data_e1h rx_pause = {0};

		/* NOTE(review): threshold constants below are magic values
		   inherited from the original code - confirm against FW spec */
		rx_pause.bd_thr_low = 250;
		rx_pause.cqe_thr_low = 250;
		rx_pause.cos = 1;
		rx_pause.sge_thr_low = 0;
		rx_pause.bd_thr_high = 350;
		rx_pause.cqe_thr_high = 350;
		rx_pause.sge_thr_high = 0;

		for_each_rx_queue(bp, i) {
			struct bnx2x_fastpath *fp = &bp->fp[i];

			/* non-zero SGE thresholds only when TPA is active */
			if (!fp->disable_tpa) {
				rx_pause.sge_thr_low = 150;
				rx_pause.sge_thr_high = 250;
			}


			offset = BAR_USTRORM_INTMEM +
				 USTORM_ETH_RING_PAUSE_DATA_OFFSET(port,
								   fp->cl_id);
			for (j = 0;
			     j < sizeof(struct ustorm_eth_rx_pause_data_e1h)/4;
			     j++)
				REG_WR(bp, offset + j*4,
				       ((u32 *)&rx_pause)[j]);
		}
	}

	memset(&(bp->cmng), 0, sizeof(struct cmng_struct_per_port));

	/* Init rate shaping and fairness contexts */
	if (IS_E1HMF(bp)) {
		int vn;

		/* During init there is no active link
		   Until link is up, set link rate to 10Gbps */
		bp->link_vars.line_speed = SPEED_10000;
		bnx2x_init_port_minmax(bp);

		bnx2x_calc_vn_weight_sum(bp);

		for (vn = VN_0; vn < E1HVN_MAX; vn++)
			bnx2x_init_vn_minmax(bp, 2*vn + port);

		/* Enable rate shaping and fairness */
		bp->cmng.flags.cmng_enables =
					CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN;
		if (bp->vn_weight_sum)
			bp->cmng.flags.cmng_enables |=
					CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
		else
			DP(NETIF_MSG_IFUP, "All MIN values are zeroes"
			   " fairness will be disabled\n");
	} else {
		/* rate shaping and fairness are disabled */
		DP(NETIF_MSG_IFUP,
		   "single function mode minmax will be disabled\n");
	}


	/* Store it to internal memory */
	if (bp->port.pmf)
		for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
			REG_WR(bp, BAR_XSTRORM_INTMEM +
			       XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i * 4,
			       ((u32 *)(&bp->cmng))[i]);
}
5509
Eilon Greenstein471de712008-08-13 15:49:35 -07005510static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
5511{
5512 switch (load_code) {
5513 case FW_MSG_CODE_DRV_LOAD_COMMON:
5514 bnx2x_init_internal_common(bp);
5515 /* no break */
5516
5517 case FW_MSG_CODE_DRV_LOAD_PORT:
5518 bnx2x_init_internal_port(bp);
5519 /* no break */
5520
5521 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
5522 bnx2x_init_internal_func(bp);
5523 break;
5524
5525 default:
5526 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
5527 break;
5528 }
5529}
5530
/* Top-level NIC initialization after HW init: set up every fast-path
 * queue and its status block, the default status block, all rings and
 * contexts, internal memory, then enable interrupts.
 */
static void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
{
	int i;

	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		fp->bp = bp;
		fp->state = BNX2X_FP_STATE_CLOSED;
		fp->index = i;
		fp->cl_id = BP_L_ID(bp) + i;
		fp->sb_id = fp->cl_id;
		/* Suitable Rx and Tx SBs are served by the same client */
		if (i >= bp->num_rx_queues)
			fp->cl_id -= bp->num_rx_queues;
		DP(NETIF_MSG_IFUP,
		   "queue[%d]: bnx2x_init_sb(%p,%p) cl_id %d sb %d\n",
		   i, bp, fp->status_blk, fp->cl_id, fp->sb_id);
		bnx2x_init_sb(bp, fp->status_blk, fp->status_blk_mapping,
			      fp->sb_id);
		bnx2x_update_fpsb_idx(fp);
	}

	/* ensure status block indices were read */
	rmb();


	bnx2x_init_def_sb(bp, bp->def_status_blk, bp->def_status_blk_mapping,
			  DEF_SB_ID);
	bnx2x_update_dsb_idx(bp);
	bnx2x_update_coalesce(bp);
	bnx2x_init_rx_rings(bp);
	bnx2x_init_tx_ring(bp);
	bnx2x_init_sp_ring(bp);
	bnx2x_init_context(bp);
	bnx2x_init_internal(bp, load_code);
	bnx2x_init_ind_table(bp);
	bnx2x_stats_init(bp);

	/* At this point, we are ready for interrupts */
	atomic_set(&bp->intr_sem, 0);

	/* flush all before enabling interrupts */
	mb();
	mmiowb();

	bnx2x_int_enable(bp);

	/* Check for SPIO5 */
	bnx2x_attn_int_deasserted0(bp,
		REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + BP_PORT(bp)*4) &
		AEU_INPUTS_ATTN_BITS_SPIO5);
}
5584
5585/* end of nic init */
5586
5587/*
5588 * gzip service functions
5589 */
5590
5591static int bnx2x_gunzip_init(struct bnx2x *bp)
5592{
5593 bp->gunzip_buf = pci_alloc_consistent(bp->pdev, FW_BUF_SIZE,
5594 &bp->gunzip_mapping);
5595 if (bp->gunzip_buf == NULL)
5596 goto gunzip_nomem1;
5597
5598 bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
5599 if (bp->strm == NULL)
5600 goto gunzip_nomem2;
5601
5602 bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(),
5603 GFP_KERNEL);
5604 if (bp->strm->workspace == NULL)
5605 goto gunzip_nomem3;
5606
5607 return 0;
5608
5609gunzip_nomem3:
5610 kfree(bp->strm);
5611 bp->strm = NULL;
5612
5613gunzip_nomem2:
5614 pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
5615 bp->gunzip_mapping);
5616 bp->gunzip_buf = NULL;
5617
5618gunzip_nomem1:
5619 printk(KERN_ERR PFX "%s: Cannot allocate firmware buffer for"
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005620 " un-compression\n", bp->dev->name);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005621 return -ENOMEM;
5622}
5623
5624static void bnx2x_gunzip_end(struct bnx2x *bp)
5625{
5626 kfree(bp->strm->workspace);
5627
5628 kfree(bp->strm);
5629 bp->strm = NULL;
5630
5631 if (bp->gunzip_buf) {
5632 pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
5633 bp->gunzip_mapping);
5634 bp->gunzip_buf = NULL;
5635 }
5636}
5637
/* Inflate a gzip-compressed firmware image of 'len' bytes at 'zbuf'
 * into bp->gunzip_buf (FW_BUF_SIZE bytes).  On success returns 0 and
 * leaves the output length, in 32-bit words, in bp->gunzip_outlen;
 * otherwise returns -EINVAL for a bad header or the zlib error code.
 */
static int bnx2x_gunzip(struct bnx2x *bp, const u8 *zbuf, int len)
{
	int n, rc;

	/* check gzip header */
	if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED)) {
		BNX2X_ERR("Bad gzip header\n");
		return -EINVAL;
	}

	/* the fixed gzip header is 10 bytes */
	n = 10;

#define FNAME				0x8

	/* skip the NUL-terminated original file name, if present */
	if (zbuf[3] & FNAME)
		while ((zbuf[n++] != 0) && (n < len));

	/* raw deflate data starts right after the header */
	bp->strm->next_in = (typeof(bp->strm->next_in))zbuf + n;
	bp->strm->avail_in = len - n;
	bp->strm->next_out = bp->gunzip_buf;
	bp->strm->avail_out = FW_BUF_SIZE;

	/* negative windowBits: raw deflate stream, no zlib header */
	rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
	if (rc != Z_OK)
		return rc;

	rc = zlib_inflate(bp->strm, Z_FINISH);
	if ((rc != Z_OK) && (rc != Z_STREAM_END))
		printk(KERN_ERR PFX "%s: Firmware decompression error: %s\n",
		       bp->dev->name, bp->strm->msg);

	/* output length must be word-aligned; reported in words */
	bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
	if (bp->gunzip_outlen & 0x3)
		printk(KERN_ERR PFX "%s: Firmware decompression error:"
				    " gunzip_outlen (%d) not aligned\n",
		       bp->dev->name, bp->gunzip_outlen);
	bp->gunzip_outlen >>= 2;

	zlib_inflateEnd(bp->strm);

	if (rc == Z_STREAM_END)
		return 0;

	return rc;
}
5683
5684/* nic load/unload */
5685
5686/*
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005687 * General service functions
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005688 */
5689
5690/* send a NIG loopback debug packet */
5691static void bnx2x_lb_pckt(struct bnx2x *bp)
5692{
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005693 u32 wb_write[3];
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005694
5695 /* Ethernet source and destination addresses */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005696 wb_write[0] = 0x55555555;
5697 wb_write[1] = 0x55555555;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005698 wb_write[2] = 0x20; /* SOP */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005699 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005700
5701 /* NON-IP protocol */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005702 wb_write[0] = 0x09000000;
5703 wb_write[1] = 0x55555555;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005704 wb_write[2] = 0x10; /* EOP, eop_bvalid = 0 */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005705 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005706}
5707
5708/* some of the internal memories
5709 * are not directly readable from the driver
5710 * to test them we send debug packets
5711 */
/* Self-test of internal memories by pushing loopback debug packets
 * through the parser/BRB path and checking NIG/PRS packet counters.
 * Returns 0 on success or a negative step-specific code (-1..-4) on
 * timeout/failure.  Timeouts are scaled up for FPGA/emulation targets.
 */
static int bnx2x_int_mem_test(struct bnx2x *bp)
{
	int factor;
	int count, i;
	u32 val = 0;

	/* slow platforms need proportionally longer polling budgets */
	if (CHIP_REV_IS_FPGA(bp))
		factor = 120;
	else if (CHIP_REV_IS_EMUL(bp))
		factor = 200;
	else
		factor = 1;

	DP(NETIF_MSG_HW, "start part1\n");

	/* Disable inputs of parser neighbor blocks */
	REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
	REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
	REG_WR(bp, CFC_REG_DEBUG0, 0x1);
	REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);

	/* Write 0 to parser credits for CFC search request */
	REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);

	/* send Ethernet packet */
	bnx2x_lb_pckt(bp);

	/* TODO do i reset NIG statistic? */
	/* Wait until NIG register shows 1 packet of size 0x10 */
	count = 1000 * factor;
	while (count) {

		bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
		val = *bnx2x_sp(bp, wb_data[0]);
		if (val == 0x10)
			break;

		msleep(10);
		count--;
	}
	if (val != 0x10) {
		BNX2X_ERR("NIG timeout val = 0x%x\n", val);
		return -1;
	}

	/* Wait until PRS register shows 1 packet */
	count = 1000 * factor;
	while (count) {
		val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
		if (val == 1)
			break;

		msleep(10);
		count--;
	}
	if (val != 0x1) {
		BNX2X_ERR("PRS timeout val = 0x%x\n", val);
		return -2;
	}

	/* Reset and init BRB, PRS */
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
	msleep(50);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
	msleep(50);
	bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);

	DP(NETIF_MSG_HW, "part2\n");

	/* Disable inputs of parser neighbor blocks */
	REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
	REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
	REG_WR(bp, CFC_REG_DEBUG0, 0x1);
	REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);

	/* Write 0 to parser credits for CFC search request */
	REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);

	/* send 10 Ethernet packets */
	for (i = 0; i < 10; i++)
		bnx2x_lb_pckt(bp);

	/* Wait until NIG register shows 10 + 1
	   packets of size 11*0x10 = 0xb0 */
	count = 1000 * factor;
	while (count) {

		bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
		val = *bnx2x_sp(bp, wb_data[0]);
		if (val == 0xb0)
			break;

		msleep(10);
		count--;
	}
	if (val != 0xb0) {
		BNX2X_ERR("NIG timeout val = 0x%x\n", val);
		return -3;
	}

	/* Wait until PRS register shows 2 packets */
	val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
	if (val != 2)
		BNX2X_ERR("PRS timeout val = 0x%x\n", val);

	/* Write 1 to parser credits for CFC search request */
	REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);

	/* Wait until PRS register shows 3 packets */
	msleep(10 * factor);
	/* Wait until NIG register shows 1 packet of size 0x10 */
	val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
	if (val != 3)
		BNX2X_ERR("PRS timeout val = 0x%x\n", val);

	/* clear NIG EOP FIFO */
	for (i = 0; i < 11; i++)
		REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO);
	val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY);
	if (val != 1) {
		BNX2X_ERR("clear of NIG failed\n");
		return -4;
	}

	/* Reset and init BRB, PRS, NIG */
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
	msleep(50);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
	msleep(50);
	bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
#ifndef BCM_ISCSI
	/* set NIC mode */
	REG_WR(bp, PRS_REG_NIC_MODE, 1);
#endif

	/* Enable inputs of parser neighbor blocks */
	REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
	REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
	REG_WR(bp, CFC_REG_DEBUG0, 0x0);
	REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x1);

	DP(NETIF_MSG_HW, "done\n");

	return 0; /* OK */
}
5859
/*
 * enable_blocks_attention - unmask attention interrupt sources in the HW
 * blocks by writing 0 to each block's INT_MASK register.
 *
 * The commented-out SEM/MISC mask writes are intentionally left disabled.
 * PBF is the one exception: it is written 0x18, i.e. bits 3 and 4 stay
 * masked (see trailing comment).  PXP2 gets a different mask on FPGA
 * emulation than on real silicon.
 */
static void enable_blocks_attention(struct bnx2x *bp)
{
	REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
	REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
	REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
	REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
	REG_WR(bp, QM_REG_QM_INT_MASK, 0);
	REG_WR(bp, TM_REG_TM_INT_MASK, 0);
	REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
	REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0);
	REG_WR(bp, XCM_REG_XCM_INT_MASK, 0);
/*	REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */
/*	REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */
	REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0);
	REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0);
	REG_WR(bp, UCM_REG_UCM_INT_MASK, 0);
/*	REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */
/*	REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */
	REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
	REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0);
	REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0);
	REG_WR(bp, CCM_REG_CCM_INT_MASK, 0);
/*	REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */
/*	REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */
	/* FPGA emulation needs a different PXP2 attention mask than silicon */
	if (CHIP_REV_IS_FPGA(bp))
		REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x580000);
	else
		REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x480000);
	REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
	REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0);
	REG_WR(bp, TCM_REG_TCM_INT_MASK, 0);
/*	REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */
/*	REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0); */
	REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
	REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
/*	REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
	REG_WR(bp, PBF_REG_PBF_INT_MASK, 0X18);		/* bit 3,4 masked */
}
5898
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005899
/*
 * bnx2x_reset_common - put the shared (common, non-per-port) HW blocks
 * into reset via the MISC reset registers.
 *
 * NOTE(review): writing the _CLEAR register appears to assert reset and
 * _SET to release it (bnx2x_init_common follows this call with writes to
 * the _SET registers) -- confirm against the register documentation.
 * The masks 0xd3ffff7f / 0x1403 select which blocks are affected.
 */
static void bnx2x_reset_common(struct bnx2x *bp)
{
	/* reset_common */
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
	       0xd3ffff7f);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 0x1403);
}
5907
Eilon Greensteinfd4ef402009-07-21 05:47:27 +00005908
5909static void bnx2x_setup_fan_failure_detection(struct bnx2x *bp)
5910{
5911 u32 val;
5912 u8 port;
5913 u8 is_required = 0;
5914
5915 val = SHMEM_RD(bp, dev_info.shared_hw_config.config2) &
5916 SHARED_HW_CFG_FAN_FAILURE_MASK;
5917
5918 if (val == SHARED_HW_CFG_FAN_FAILURE_ENABLED)
5919 is_required = 1;
5920
5921 /*
5922 * The fan failure mechanism is usually related to the PHY type since
5923 * the power consumption of the board is affected by the PHY. Currently,
5924 * fan is required for most designs with SFX7101, BCM8727 and BCM8481.
5925 */
5926 else if (val == SHARED_HW_CFG_FAN_FAILURE_PHY_TYPE)
5927 for (port = PORT_0; port < PORT_MAX; port++) {
5928 u32 phy_type =
5929 SHMEM_RD(bp, dev_info.port_hw_config[port].
5930 external_phy_config) &
5931 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
5932 is_required |=
5933 ((phy_type ==
5934 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101) ||
5935 (phy_type ==
Eilon Greenstein4d295db2009-07-21 05:47:47 +00005936 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727) ||
5937 (phy_type ==
Eilon Greensteinfd4ef402009-07-21 05:47:27 +00005938 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481));
5939 }
5940
5941 DP(NETIF_MSG_HW, "fan detection setting: %d\n", is_required);
5942
5943 if (is_required == 0)
5944 return;
5945
5946 /* Fan failure is indicated by SPIO 5 */
5947 bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_5,
5948 MISC_REGISTERS_SPIO_INPUT_HI_Z);
5949
5950 /* set to active low mode */
5951 val = REG_RD(bp, MISC_REG_SPIO_INT);
5952 val |= ((1 << MISC_REGISTERS_SPIO_5) <<
5953 MISC_REGISTERS_SPIO_INT_OLD_SET_POS);
5954 REG_WR(bp, MISC_REG_SPIO_INT, val);
5955
5956 /* enable interrupt to signal the IGU */
5957 val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
5958 val |= (1 << MISC_REGISTERS_SPIO_5);
5959 REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
5960}
5961
/*
 * bnx2x_init_common - one-time initialization of the HW blocks shared by
 * both ports/functions.  Runs only for the function that received
 * FW_MSG_CODE_DRV_LOAD_COMMON (see bnx2x_init_hw).
 *
 * The sequence of resets, block inits and register writes below is
 * order-dependent hardware bring-up; do not reorder.
 *
 * Returns 0 on success, -EBUSY if PXP2/CFC configuration never completes
 * or the internal memory self test fails.
 */
static int bnx2x_init_common(struct bnx2x *bp)
{
	u32 val, i;

	DP(BNX2X_MSG_MCP, "starting common init func %d\n", BP_FUNC(bp));

	/* put common blocks into reset, then release them all */
	bnx2x_reset_common(bp);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 0xfffc);

	bnx2x_init_block(bp, MISC_BLOCK, COMMON_STAGE);
	if (CHIP_IS_E1H(bp))
		REG_WR(bp, MISC_REG_E1HMF_MODE, IS_E1HMF(bp));

	/* pulse the LCPLL control register */
	REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x100);
	msleep(30);
	REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x0);

	bnx2x_init_block(bp, PXP_BLOCK, COMMON_STAGE);
	if (CHIP_IS_E1(bp)) {
		/* enable HW interrupt from PXP on USDM overflow
		   bit 16 on INT_MASK_0 */
		REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
	}

	bnx2x_init_block(bp, PXP2_BLOCK, COMMON_STAGE);
	bnx2x_init_pxp(bp);

#ifdef __BIG_ENDIAN
	/* configure PXP2 endianness swapping for big-endian hosts */
	REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1);
	REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1);
	REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
	REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
	REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
	/* make sure this value is 0 */
	REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 0);

/*	REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */
	REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1);
	REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1);
	REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1);
	REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
#endif

	REG_WR(bp, PXP2_REG_RQ_CDU_P_SIZE, 2);
#ifdef BCM_ISCSI
	REG_WR(bp, PXP2_REG_RQ_TM_P_SIZE, 5);
	REG_WR(bp, PXP2_REG_RQ_QM_P_SIZE, 5);
	REG_WR(bp, PXP2_REG_RQ_SRC_P_SIZE, 5);
#endif

	if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp))
		REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1);

	/* let the HW do its magic ... */
	msleep(100);
	/* finish PXP init */
	val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE);
	if (val != 1) {
		BNX2X_ERR("PXP2 CFG failed\n");
		return -EBUSY;
	}
	val = REG_RD(bp, PXP2_REG_RD_INIT_DONE);
	if (val != 1) {
		BNX2X_ERR("PXP2 RD_INIT failed\n");
		return -EBUSY;
	}

	REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
	REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);

	bnx2x_init_block(bp, DMAE_BLOCK, COMMON_STAGE);

	/* clean the DMAE memory */
	bp->dmae_ready = 1;
	bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8);

	bnx2x_init_block(bp, TCM_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, UCM_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, CCM_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, XCM_BLOCK, COMMON_STAGE);

	bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3);
	bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3);
	bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3);
	bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);

	bnx2x_init_block(bp, QM_BLOCK, COMMON_STAGE);
	/* soft reset pulse */
	REG_WR(bp, QM_REG_SOFT_RESET, 1);
	REG_WR(bp, QM_REG_SOFT_RESET, 0);

#ifdef BCM_ISCSI
	bnx2x_init_block(bp, TIMERS_BLOCK, COMMON_STAGE);
#endif

	bnx2x_init_block(bp, DQ_BLOCK, COMMON_STAGE);
	REG_WR(bp, DORQ_REG_DPM_CID_OFST, BCM_PAGE_SHIFT);
	if (!CHIP_REV_IS_SLOW(bp)) {
		/* enable hw interrupt from doorbell Q */
		REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
	}

	bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
	REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
	/* set NIC mode */
	REG_WR(bp, PRS_REG_NIC_MODE, 1);
	if (CHIP_IS_E1H(bp))
		REG_WR(bp, PRS_REG_E1HOV_MODE, IS_E1HMF(bp));

	bnx2x_init_block(bp, TSDM_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, CSDM_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, USDM_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, XSDM_BLOCK, COMMON_STAGE);

	/* zero the storms' internal (fast) memories */
	bnx2x_init_fill(bp, TSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
	bnx2x_init_fill(bp, USEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
	bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
	bnx2x_init_fill(bp, XSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));

	bnx2x_init_block(bp, TSEM_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, USEM_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, CSEM_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, XSEM_BLOCK, COMMON_STAGE);

	/* sync semi rtc */
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
	       0x80000000);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
	       0x80000000);

	bnx2x_init_block(bp, UPB_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, XPB_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, PBF_BLOCK, COMMON_STAGE);

	/* load placeholder RSS keys into the searcher while it is in reset */
	REG_WR(bp, SRC_REG_SOFT_RST, 1);
	for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4) {
		REG_WR(bp, i, 0xc0cac01a);
		/* TODO: replace with something meaningful */
	}
	bnx2x_init_block(bp, SRCH_BLOCK, COMMON_STAGE);
	REG_WR(bp, SRC_REG_SOFT_RST, 0);

	if (sizeof(union cdu_context) != 1024)
		/* we currently assume that a context is 1024 bytes */
		printk(KERN_ALERT PFX "please adjust the size of"
		       " cdu_context(%ld)\n", (long)sizeof(union cdu_context));

	bnx2x_init_block(bp, CDU_BLOCK, COMMON_STAGE);
	val = (4 << 24) + (0 << 12) + 1024;
	REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val);

	bnx2x_init_block(bp, CFC_BLOCK, COMMON_STAGE);
	REG_WR(bp, CFC_REG_INIT_REG, 0x7FF);
	/* enable context validation interrupt from CFC */
	REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);

	/* set the thresholds to prevent CFC/CDU race */
	REG_WR(bp, CFC_REG_DEBUG0, 0x20020000);

	bnx2x_init_block(bp, HC_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, MISC_AEU_BLOCK, COMMON_STAGE);

	bnx2x_init_block(bp, PXPCS_BLOCK, COMMON_STAGE);
	/* Reset PCIE errors for debug */
	REG_WR(bp, 0x2814, 0xffffffff);
	REG_WR(bp, 0x3820, 0xffffffff);

	bnx2x_init_block(bp, EMAC0_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, EMAC1_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, DBU_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, DBG_BLOCK, COMMON_STAGE);

	bnx2x_init_block(bp, NIG_BLOCK, COMMON_STAGE);
	if (CHIP_IS_E1H(bp)) {
		REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_E1HMF(bp));
		REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_E1HMF(bp));
	}

	if (CHIP_REV_IS_SLOW(bp))
		msleep(200);

	/* finish CFC init */
	val = reg_poll(bp, CFC_REG_LL_INIT_DONE, 1, 100, 10);
	if (val != 1) {
		BNX2X_ERR("CFC LL_INIT failed\n");
		return -EBUSY;
	}
	val = reg_poll(bp, CFC_REG_AC_INIT_DONE, 1, 100, 10);
	if (val != 1) {
		BNX2X_ERR("CFC AC_INIT failed\n");
		return -EBUSY;
	}
	val = reg_poll(bp, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
	if (val != 1) {
		BNX2X_ERR("CFC CAM_INIT failed\n");
		return -EBUSY;
	}
	REG_WR(bp, CFC_REG_DEBUG0, 0);

	/* read NIG statistic
	   to see if this is our first up since powerup */
	bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
	val = *bnx2x_sp(bp, wb_data[0]);

	/* do internal memory self test */
	if ((CHIP_IS_E1(bp)) && (val == 0) && bnx2x_int_mem_test(bp)) {
		BNX2X_ERR("internal mem self test failed\n");
		return -EBUSY;
	}

	/* these external PHYs require the HW lock for MDIO access */
	switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
		bp->port.need_hw_lock = 1;
		break;

	default:
		break;
	}

	bnx2x_setup_fan_failure_detection(bp);

	/* clear PXP2 attentions */
	REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0);

	enable_blocks_attention(bp);

	/* common PHY init needs the MCP-provided shmem; skip without bootcode */
	if (!BP_NOMCP(bp)) {
		bnx2x_acquire_phy_lock(bp);
		bnx2x_common_init_phy(bp, bp->common.shmem_base);
		bnx2x_release_phy_lock(bp);
	} else
		BNX2X_ERR("Bootcode is missing - can not initialize link\n");

	return 0;
}
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006202
/*
 * bnx2x_init_port - per-port HW initialization (runs for LOAD_PORT and,
 * via fall-through, after LOAD_COMMON in bnx2x_init_hw).
 *
 * Initializes each HW block at the port stage, programs the BRB pause
 * thresholds based on MTU/port mode, sets up PBF credits and per-PHY
 * attention routing.  The block-init/register-write order is hardware
 * bring-up sequence; do not reorder.
 *
 * NOTE(review): the BCM_ISCSI sections reference i/wb_write/func which are
 * not declared in this scope -- this code only compiles with BCM_ISCSI
 * unset; confirm before enabling that config.
 *
 * Returns 0 (always succeeds).
 */
static int bnx2x_init_port(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int init_stage = port ? PORT1_STAGE : PORT0_STAGE;
	u32 low, high;
	u32 val;

	DP(BNX2X_MSG_MCP, "starting port init port %x\n", port);

	/* mask NIG attention for this port during init */
	REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);

	bnx2x_init_block(bp, PXP_BLOCK, init_stage);
	bnx2x_init_block(bp, PXP2_BLOCK, init_stage);

	bnx2x_init_block(bp, TCM_BLOCK, init_stage);
	bnx2x_init_block(bp, UCM_BLOCK, init_stage);
	bnx2x_init_block(bp, CCM_BLOCK, init_stage);
#ifdef BCM_ISCSI
	/* Port0 1
	 * Port1 385 */
	i++;
	wb_write[0] = ONCHIP_ADDR1(bp->timers_mapping);
	wb_write[1] = ONCHIP_ADDR2(bp->timers_mapping);
	REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
	REG_WR(bp, PXP2_REG_PSWRQ_TM0_L2P + func*4, PXP_ONE_ILT(i));

	/* Port0 2
	 * Port1 386 */
	i++;
	wb_write[0] = ONCHIP_ADDR1(bp->qm_mapping);
	wb_write[1] = ONCHIP_ADDR2(bp->qm_mapping);
	REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
	REG_WR(bp, PXP2_REG_PSWRQ_QM0_L2P + func*4, PXP_ONE_ILT(i));

	/* Port0 3
	 * Port1 387 */
	i++;
	wb_write[0] = ONCHIP_ADDR1(bp->t1_mapping);
	wb_write[1] = ONCHIP_ADDR2(bp->t1_mapping);
	REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
	REG_WR(bp, PXP2_REG_PSWRQ_SRC0_L2P + func*4, PXP_ONE_ILT(i));
#endif
	bnx2x_init_block(bp, XCM_BLOCK, init_stage);

#ifdef BCM_ISCSI
	REG_WR(bp, TM_REG_LIN0_SCAN_TIME + func*4, 1024/64*20);
	REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + func*4, 31);

	bnx2x_init_block(bp, TIMERS_BLOCK, init_stage);
#endif
	bnx2x_init_block(bp, DQ_BLOCK, init_stage);

	bnx2x_init_block(bp, BRB1_BLOCK, init_stage);
	/* compute BRB pause thresholds (in 256-byte units) from the
	   operating mode and MTU */
	if (CHIP_REV_IS_SLOW(bp) && !CHIP_IS_E1H(bp)) {
		/* no pause for emulation and FPGA */
		low = 0;
		high = 513;
	} else {
		if (IS_E1HMF(bp))
			low = ((bp->flags & ONE_PORT_FLAG) ? 160 : 246);
		else if (bp->dev->mtu > 4096) {
			if (bp->flags & ONE_PORT_FLAG)
				low = 160;
			else {
				val = bp->dev->mtu;
				/* (24*1024 + val*4)/256 */
				low = 96 + (val/64) + ((val % 64) ? 1 : 0);
			}
		} else
			low = ((bp->flags & ONE_PORT_FLAG) ? 80 : 160);
		high = low + 56;	/* 14*1024/256 */
	}
	REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0 + port*4, low);
	REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0 + port*4, high);


	bnx2x_init_block(bp, PRS_BLOCK, init_stage);

	bnx2x_init_block(bp, TSDM_BLOCK, init_stage);
	bnx2x_init_block(bp, CSDM_BLOCK, init_stage);
	bnx2x_init_block(bp, USDM_BLOCK, init_stage);
	bnx2x_init_block(bp, XSDM_BLOCK, init_stage);

	bnx2x_init_block(bp, TSEM_BLOCK, init_stage);
	bnx2x_init_block(bp, USEM_BLOCK, init_stage);
	bnx2x_init_block(bp, CSEM_BLOCK, init_stage);
	bnx2x_init_block(bp, XSEM_BLOCK, init_stage);

	bnx2x_init_block(bp, UPB_BLOCK, init_stage);
	bnx2x_init_block(bp, XPB_BLOCK, init_stage);

	bnx2x_init_block(bp, PBF_BLOCK, init_stage);

	/* configure PBF to work without PAUSE mtu 9000 */
	REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);

	/* update threshold */
	REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
	/* update init credit */
	REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);

	/* probe changes */
	REG_WR(bp, PBF_REG_INIT_P0 + port*4, 1);
	msleep(5);
	REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0);

#ifdef BCM_ISCSI
	/* tell the searcher where the T2 table is */
	REG_WR(bp, SRC_REG_COUNTFREE0 + func*4, 16*1024/64);

	wb_write[0] = U64_LO(bp->t2_mapping);
	wb_write[1] = U64_HI(bp->t2_mapping);
	REG_WR_DMAE(bp, SRC_REG_FIRSTFREE0 + func*4, wb_write, 2);
	wb_write[0] = U64_LO((u64)bp->t2_mapping + 16*1024 - 64);
	wb_write[1] = U64_HI((u64)bp->t2_mapping + 16*1024 - 64);
	REG_WR_DMAE(bp, SRC_REG_LASTFREE0 + func*4, wb_write, 2);

	REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + func*4, 10);
#endif
	bnx2x_init_block(bp, CDU_BLOCK, init_stage);
	bnx2x_init_block(bp, CFC_BLOCK, init_stage);

	if (CHIP_IS_E1(bp)) {
		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
	}
	bnx2x_init_block(bp, HC_BLOCK, init_stage);

	bnx2x_init_block(bp, MISC_AEU_BLOCK, init_stage);
	/* init aeu_mask_attn_func_0/1:
	 * - SF mode: bits 3-7 are masked. only bits 0-2 are in use
	 * - MF mode: bit 3 is masked. bits 0-2 are in use as in SF
	 *   bits 4-7 are used for "per vn group attention" */
	REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4,
	       (IS_E1HMF(bp) ? 0xF7 : 0x7));

	bnx2x_init_block(bp, PXPCS_BLOCK, init_stage);
	bnx2x_init_block(bp, EMAC0_BLOCK, init_stage);
	bnx2x_init_block(bp, EMAC1_BLOCK, init_stage);
	bnx2x_init_block(bp, DBU_BLOCK, init_stage);
	bnx2x_init_block(bp, DBG_BLOCK, init_stage);

	bnx2x_init_block(bp, NIG_BLOCK, init_stage);

	REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);

	if (CHIP_IS_E1H(bp)) {
		/* 0x2 disable e1hov, 0x1 enable */
		REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
		       (IS_E1HMF(bp) ? 0x1 : 0x2));

		/* support pause requests from USDM, TSDM and BRB */
		REG_WR(bp, NIG_REG_LLFC_EGRESS_SRC_ENABLE_0 + port*4, 0x7);

		{
			REG_WR(bp, NIG_REG_LLFC_ENABLE_0 + port*4, 0);
			REG_WR(bp, NIG_REG_LLFC_OUT_EN_0 + port*4, 0);
			REG_WR(bp, NIG_REG_PAUSE_ENABLE_0 + port*4, 1);
		}
	}

	bnx2x_init_block(bp, MCP_BLOCK, init_stage);
	bnx2x_init_block(bp, DMAE_BLOCK, init_stage);

	/* per-PHY attention (AEU) group routing */
	switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
		{
		u32 swap_val, swap_override, aeu_gpio_mask, offset;

		bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_3,
			       MISC_REGISTERS_GPIO_INPUT_HI_Z, port);

		/* The GPIO should be swapped if the swap register is
		   set and active */
		swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
		swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);

		/* Select function upon port-swap configuration */
		if (port == 0) {
			offset = MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0;
			aeu_gpio_mask = (swap_val && swap_override) ?
				AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1 :
				AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0;
		} else {
			offset = MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0;
			aeu_gpio_mask = (swap_val && swap_override) ?
				AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 :
				AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1;
		}
		val = REG_RD(bp, offset);
		/* add GPIO3 to group */
		val |= aeu_gpio_mask;
		REG_WR(bp, offset, val);
		}
		break;

	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
		/* add SPIO 5 to group 0 */
		{
		u32 reg_addr = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
				       MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
		val = REG_RD(bp, reg_addr);
		val |= AEU_INPUTS_ATTN_BITS_SPIO5;
		REG_WR(bp, reg_addr, val);
		}
		break;

	default:
		break;
	}

	bnx2x__link_reset(bp);

	return 0;
}
6419
/* ILT (Internal Lookup Table): 768 lines split evenly between the two
 * functions, so each function owns a contiguous range of 384 lines.
 * NOTE: all macro arguments are fully parenthesized so expression
 * arguments (e.g. FUNC_ILT_BASE(a + b)) expand correctly.
 */
#define ILT_PER_FUNC	(768/2)
#define FUNC_ILT_BASE(func)	((func) * ILT_PER_FUNC)
/* the phys address is shifted right 12 bits and has an added
   1=valid bit added to the 53rd bit
   then since this is a wide register(TM)
   we split it into two 32 bit writes
 */
#define ONCHIP_ADDR1(x)		((u32)(((u64)(x) >> 12) & 0xFFFFFFFF))
#define ONCHIP_ADDR2(x)		((u32)((1 << 20) | ((u64)(x) >> 44)))
/* encode an ILT line range as (last << 10) | first; the one-line form
   uses the same line for both ends */
#define PXP_ONE_ILT(x)		(((x) << 10) | (x))
#define PXP_ILT_RANGE(f, l)	(((l) << 10) | (f))

/* extra ILT lines reserved for CNIC (none here; value 0) */
#define CNIC_ILT_LINES	0
6433
6434static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr)
6435{
6436 int reg;
6437
6438 if (CHIP_IS_E1H(bp))
6439 reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;
6440 else /* E1 */
6441 reg = PXP2_REG_RQ_ONCHIP_AT + index*8;
6442
6443 bnx2x_wb_wr(bp, reg, ONCHIP_ADDR1(addr), ONCHIP_ADDR2(addr));
6444}
6445
/*
 * bnx2x_init_func - per-function HW initialization (final stage of
 * bnx2x_init_hw for every load code).
 *
 * Enables MSI reconfiguration in the HC, programs this function's ILT
 * range for the CDU context, and performs E1H-specific CM-block and
 * NIG/HC per-function setup.
 *
 * Returns 0 (always succeeds).
 */
static int bnx2x_init_func(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	u32 addr, val;
	int i;

	DP(BNX2X_MSG_MCP, "starting func init func %x\n", func);

	/* set MSI reconfigure capability */
	addr = (port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0);
	val = REG_RD(bp, addr);
	val |= HC_CONFIG_0_REG_MSI_ATTN_EN_0;
	REG_WR(bp, addr, val);

	/* first ILT line owned by this function */
	i = FUNC_ILT_BASE(func);

	bnx2x_ilt_wr(bp, i, bnx2x_sp_mapping(bp, context));
	if (CHIP_IS_E1H(bp)) {
		REG_WR(bp, PXP2_REG_RQ_CDU_FIRST_ILT, i);
		REG_WR(bp, PXP2_REG_RQ_CDU_LAST_ILT, i + CNIC_ILT_LINES);
	} else /* E1 */
		REG_WR(bp, PXP2_REG_PSWRQ_CDU0_L2P + func*4,
		       PXP_ILT_RANGE(i, i + CNIC_ILT_LINES));


	if (CHIP_IS_E1H(bp)) {
		/* init all 9 CM blocks at the function stage */
		for (i = 0; i < 9; i++)
			bnx2x_init_block(bp,
					 cm_blocks[i], FUNC0_STAGE + func);

		REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
		REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, bp->e1hov);
	}

	/* HC init per function */
	if (CHIP_IS_E1H(bp)) {
		REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);

		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
	}
	bnx2x_init_block(bp, HC_BLOCK, FUNC0_STAGE + func);

	/* Reset PCIE errors for debug */
	REG_WR(bp, 0x2114, 0xffffffff);
	REG_WR(bp, 0x2120, 0xffffffff);

	return 0;
}
6496
/*
 * Top-level HW init dispatcher.  The MCP load_code tells this driver
 * instance how much of the chip it must bring up; the switch falls
 * through on purpose so that COMMON also performs PORT and FUNCTION
 * init, and PORT also performs FUNCTION init.
 * Returns 0 on success or the error of the failed init stage.
 */
static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
{
	int i, rc = 0;

	DP(BNX2X_MSG_MCP, "function %d load_code %x\n",
	   BP_FUNC(bp), load_code);

	/* DMAE cannot be used until common/port init has run */
	bp->dmae_ready = 0;
	mutex_init(&bp->dmae_mutex);
	bnx2x_gunzip_init(bp);

	switch (load_code) {
	case FW_MSG_CODE_DRV_LOAD_COMMON:
		rc = bnx2x_init_common(bp);
		if (rc)
			goto init_hw_err;
		/* no break */

	case FW_MSG_CODE_DRV_LOAD_PORT:
		bp->dmae_ready = 1;
		rc = bnx2x_init_port(bp);
		if (rc)
			goto init_hw_err;
		/* no break */

	case FW_MSG_CODE_DRV_LOAD_FUNCTION:
		bp->dmae_ready = 1;
		rc = bnx2x_init_func(bp);
		if (rc)
			goto init_hw_err;
		break;

	default:
		BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
		break;
	}

	if (!BP_NOMCP(bp)) {
		int func = BP_FUNC(bp);

		/* seed the driver pulse sequence from shared memory so the
		 * MCP heartbeat continues from the current value */
		bp->fw_drv_pulse_wr_seq =
		       (SHMEM_RD(bp, func_mb[func].drv_pulse_mb) &
			DRV_PULSE_SEQ_MASK);
		DP(BNX2X_MSG_MCP, "drv_pulse 0x%x\n", bp->fw_drv_pulse_wr_seq);
	}

	/* this needs to be done before gunzip end */
	bnx2x_zero_def_sb(bp);
	for_each_queue(bp, i)
		bnx2x_zero_sb(bp, BP_L_ID(bp) + i);

init_hw_err:
	bnx2x_gunzip_end(bp);

	return rc;
}
6553
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006554static void bnx2x_free_mem(struct bnx2x *bp)
6555{
6556
6557#define BNX2X_PCI_FREE(x, y, size) \
6558 do { \
6559 if (x) { \
6560 pci_free_consistent(bp->pdev, size, x, y); \
6561 x = NULL; \
6562 y = 0; \
6563 } \
6564 } while (0)
6565
6566#define BNX2X_FREE(x) \
6567 do { \
6568 if (x) { \
6569 vfree(x); \
6570 x = NULL; \
6571 } \
6572 } while (0)
6573
6574 int i;
6575
6576 /* fastpath */
Eilon Greenstein555f6c72009-02-12 08:36:11 +00006577 /* Common */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006578 for_each_queue(bp, i) {
6579
Eilon Greenstein555f6c72009-02-12 08:36:11 +00006580 /* status blocks */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006581 BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk),
6582 bnx2x_fp(bp, i, status_blk_mapping),
Eilon Greensteinca003922009-08-12 22:53:28 -07006583 sizeof(struct host_status_block));
Eilon Greenstein555f6c72009-02-12 08:36:11 +00006584 }
6585 /* Rx */
6586 for_each_rx_queue(bp, i) {
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006587
Eilon Greenstein555f6c72009-02-12 08:36:11 +00006588 /* fastpath rx rings: rx_buf rx_desc rx_comp */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006589 BNX2X_FREE(bnx2x_fp(bp, i, rx_buf_ring));
6590 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_desc_ring),
6591 bnx2x_fp(bp, i, rx_desc_mapping),
6592 sizeof(struct eth_rx_bd) * NUM_RX_BD);
6593
6594 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_comp_ring),
6595 bnx2x_fp(bp, i, rx_comp_mapping),
6596 sizeof(struct eth_fast_path_rx_cqe) *
6597 NUM_RCQ_BD);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006598
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07006599 /* SGE ring */
Eilon Greenstein32626232008-08-13 15:51:07 -07006600 BNX2X_FREE(bnx2x_fp(bp, i, rx_page_ring));
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07006601 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_sge_ring),
6602 bnx2x_fp(bp, i, rx_sge_mapping),
6603 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
6604 }
Eilon Greenstein555f6c72009-02-12 08:36:11 +00006605 /* Tx */
6606 for_each_tx_queue(bp, i) {
6607
6608 /* fastpath tx rings: tx_buf tx_desc */
6609 BNX2X_FREE(bnx2x_fp(bp, i, tx_buf_ring));
6610 BNX2X_PCI_FREE(bnx2x_fp(bp, i, tx_desc_ring),
6611 bnx2x_fp(bp, i, tx_desc_mapping),
Eilon Greensteinca003922009-08-12 22:53:28 -07006612 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
Eilon Greenstein555f6c72009-02-12 08:36:11 +00006613 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006614 /* end of fastpath */
6615
6616 BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006617 sizeof(struct host_def_status_block));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006618
6619 BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006620 sizeof(struct bnx2x_slowpath));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006621
6622#ifdef BCM_ISCSI
6623 BNX2X_PCI_FREE(bp->t1, bp->t1_mapping, 64*1024);
6624 BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, 16*1024);
6625 BNX2X_PCI_FREE(bp->timers, bp->timers_mapping, 8*1024);
6626 BNX2X_PCI_FREE(bp->qm, bp->qm_mapping, 128*1024);
6627#endif
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07006628 BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006629
6630#undef BNX2X_PCI_FREE
6631#undef BNX2X_KFREE
6632}
6633
/*
 * Allocate all per-device memory: fastpath status blocks, Rx/Tx rings,
 * the default status block, slowpath scratch area, optional iSCSI
 * searcher/timer/QM tables and the slowpath (SPQ) ring.
 * Returns 0 on success, -ENOMEM on any failure; everything allocated so
 * far is released via bnx2x_free_mem() before returning.
 */
static int bnx2x_alloc_mem(struct bnx2x *bp)
{

/* DMA-coherent allocation, zero-filled; jumps to alloc_mem_err on failure */
#define BNX2X_PCI_ALLOC(x, y, size) \
	do { \
		x = pci_alloc_consistent(bp->pdev, size, y); \
		if (x == NULL) \
			goto alloc_mem_err; \
		memset(x, 0, size); \
	} while (0)

/* vmalloc'ed (CPU-only) allocation, zero-filled; same failure path */
#define BNX2X_ALLOC(x, size) \
	do { \
		x = vmalloc(size); \
		if (x == NULL) \
			goto alloc_mem_err; \
		memset(x, 0, size); \
	} while (0)

	int i;

	/* fastpath */
	/* Common */
	for_each_queue(bp, i) {
		bnx2x_fp(bp, i, bp) = bp;

		/* status blocks */
		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, status_blk),
				&bnx2x_fp(bp, i, status_blk_mapping),
				sizeof(struct host_status_block));
	}
	/* Rx */
	for_each_rx_queue(bp, i) {

		/* fastpath rx rings: rx_buf rx_desc rx_comp */
		BNX2X_ALLOC(bnx2x_fp(bp, i, rx_buf_ring),
				sizeof(struct sw_rx_bd) * NUM_RX_BD);
		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_desc_ring),
				&bnx2x_fp(bp, i, rx_desc_mapping),
				sizeof(struct eth_rx_bd) * NUM_RX_BD);

		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_comp_ring),
				&bnx2x_fp(bp, i, rx_comp_mapping),
				sizeof(struct eth_fast_path_rx_cqe) *
				NUM_RCQ_BD);

		/* SGE ring */
		BNX2X_ALLOC(bnx2x_fp(bp, i, rx_page_ring),
				sizeof(struct sw_rx_page) * NUM_RX_SGE);
		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_sge_ring),
				&bnx2x_fp(bp, i, rx_sge_mapping),
				BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
	}
	/* Tx */
	for_each_tx_queue(bp, i) {

		/* fastpath tx rings: tx_buf tx_desc */
		BNX2X_ALLOC(bnx2x_fp(bp, i, tx_buf_ring),
				sizeof(struct sw_tx_bd) * NUM_TX_BD);
		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, tx_desc_ring),
				&bnx2x_fp(bp, i, tx_desc_mapping),
				sizeof(union eth_tx_bd_types) * NUM_TX_BD);
	}
	/* end of fastpath */

	BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping,
			sizeof(struct host_def_status_block));

	BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping,
			sizeof(struct bnx2x_slowpath));

#ifdef BCM_ISCSI
	BNX2X_PCI_ALLOC(bp->t1, &bp->t1_mapping, 64*1024);

	/* Initialize T1 */
	/* NOTE(review): the second store below writes a u64 at offset
	 * i + 3, which is misaligned and straddles into the next byte
	 * range; it looks like a different quadword offset may have been
	 * intended - confirm against the searcher T1 entry layout */
	for (i = 0; i < 64*1024; i += 64) {
		*(u64 *)((char *)bp->t1 + i + 56) = 0x0UL;
		*(u64 *)((char *)bp->t1 + i + 3) = 0x0UL;
	}

	/* allocate searcher T2 table
	   we allocate 1/4 of alloc num for T2
	   (which is not entered into the ILT) */
	BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, 16*1024);

	/* Initialize T2: each 64-byte entry's last quadword points at the
	 * physical address of the next entry */
	for (i = 0; i < 16*1024; i += 64)
		* (u64 *)((char *)bp->t2 + i + 56) = bp->t2_mapping + i + 64;

	/* now fixup the last line in the block to point to the next block */
	*(u64 *)((char *)bp->t2 + 1024*16-8) = bp->t2_mapping;

	/* Timer block array (MAX_CONN*8) phys uncached for now 1024 conns */
	BNX2X_PCI_ALLOC(bp->timers, &bp->timers_mapping, 8*1024);

	/* QM queues (128*MAX_CONN) */
	BNX2X_PCI_ALLOC(bp->qm, &bp->qm_mapping, 128*1024);
#endif

	/* Slow path ring */
	BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE);

	return 0;

alloc_mem_err:
	bnx2x_free_mem(bp);
	return -ENOMEM;

#undef BNX2X_PCI_ALLOC
#undef BNX2X_ALLOC
}
6745
6746static void bnx2x_free_tx_skbs(struct bnx2x *bp)
6747{
6748 int i;
6749
Eilon Greenstein555f6c72009-02-12 08:36:11 +00006750 for_each_tx_queue(bp, i) {
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006751 struct bnx2x_fastpath *fp = &bp->fp[i];
6752
6753 u16 bd_cons = fp->tx_bd_cons;
6754 u16 sw_prod = fp->tx_pkt_prod;
6755 u16 sw_cons = fp->tx_pkt_cons;
6756
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006757 while (sw_cons != sw_prod) {
6758 bd_cons = bnx2x_free_tx_pkt(bp, fp, TX_BD(sw_cons));
6759 sw_cons++;
6760 }
6761 }
6762}
6763
6764static void bnx2x_free_rx_skbs(struct bnx2x *bp)
6765{
6766 int i, j;
6767
Eilon Greenstein555f6c72009-02-12 08:36:11 +00006768 for_each_rx_queue(bp, j) {
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006769 struct bnx2x_fastpath *fp = &bp->fp[j];
6770
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006771 for (i = 0; i < NUM_RX_BD; i++) {
6772 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
6773 struct sk_buff *skb = rx_buf->skb;
6774
6775 if (skb == NULL)
6776 continue;
6777
6778 pci_unmap_single(bp->pdev,
6779 pci_unmap_addr(rx_buf, mapping),
Eilon Greenstein356e2382009-02-12 08:38:32 +00006780 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006781
6782 rx_buf->skb = NULL;
6783 dev_kfree_skb(skb);
6784 }
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07006785 if (!fp->disable_tpa)
Eilon Greenstein32626232008-08-13 15:51:07 -07006786 bnx2x_free_tpa_pool(bp, fp, CHIP_IS_E1(bp) ?
6787 ETH_MAX_AGGREGATION_QUEUES_E1 :
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07006788 ETH_MAX_AGGREGATION_QUEUES_E1H);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006789 }
6790}
6791
/* Free all driver-owned skbs: Tx queues first, then Rx rings/TPA pool. */
static void bnx2x_free_skbs(struct bnx2x *bp)
{
	bnx2x_free_tx_skbs(bp);
	bnx2x_free_rx_skbs(bp);
}
6797
6798static void bnx2x_free_msix_irqs(struct bnx2x *bp)
6799{
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006800 int i, offset = 1;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006801
6802 free_irq(bp->msix_table[0].vector, bp->dev);
Eliezer Tamirc14423f2008-02-28 11:49:42 -08006803 DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006804 bp->msix_table[0].vector);
6805
6806 for_each_queue(bp, i) {
Eliezer Tamirc14423f2008-02-28 11:49:42 -08006807 DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq "
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006808 "state %x\n", i, bp->msix_table[i + offset].vector,
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006809 bnx2x_fp(bp, i, state));
6810
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006811 free_irq(bp->msix_table[i + offset].vector, &bp->fp[i]);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006812 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006813}
6814
6815static void bnx2x_free_irq(struct bnx2x *bp)
6816{
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006817 if (bp->flags & USING_MSIX_FLAG) {
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006818 bnx2x_free_msix_irqs(bp);
6819 pci_disable_msix(bp->pdev);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006820 bp->flags &= ~USING_MSIX_FLAG;
6821
Eilon Greenstein8badd272009-02-12 08:36:15 +00006822 } else if (bp->flags & USING_MSI_FLAG) {
6823 free_irq(bp->pdev->irq, bp->dev);
6824 pci_disable_msi(bp->pdev);
6825 bp->flags &= ~USING_MSI_FLAG;
6826
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006827 } else
6828 free_irq(bp->pdev->irq, bp->dev);
6829}
6830
6831static int bnx2x_enable_msix(struct bnx2x *bp)
6832{
Eilon Greenstein8badd272009-02-12 08:36:15 +00006833 int i, rc, offset = 1;
6834 int igu_vec = 0;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006835
Eilon Greenstein8badd272009-02-12 08:36:15 +00006836 bp->msix_table[0].entry = igu_vec;
6837 DP(NETIF_MSG_IFUP, "msix_table[0].entry = %d (slowpath)\n", igu_vec);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006838
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006839 for_each_queue(bp, i) {
Eilon Greenstein8badd272009-02-12 08:36:15 +00006840 igu_vec = BP_L_ID(bp) + offset + i;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006841 bp->msix_table[i + offset].entry = igu_vec;
6842 DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d "
6843 "(fastpath #%u)\n", i + offset, igu_vec, i);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006844 }
6845
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006846 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0],
Eilon Greenstein555f6c72009-02-12 08:36:11 +00006847 BNX2X_NUM_QUEUES(bp) + offset);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006848 if (rc) {
Eilon Greenstein8badd272009-02-12 08:36:15 +00006849 DP(NETIF_MSG_IFUP, "MSI-X is not attainable rc %d\n", rc);
6850 return rc;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006851 }
Eilon Greenstein8badd272009-02-12 08:36:15 +00006852
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006853 bp->flags |= USING_MSIX_FLAG;
6854
6855 return 0;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006856}
6857
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006858static int bnx2x_req_msix_irqs(struct bnx2x *bp)
6859{
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006860 int i, rc, offset = 1;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006861
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006862 rc = request_irq(bp->msix_table[0].vector, bnx2x_msix_sp_int, 0,
6863 bp->dev->name, bp->dev);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006864 if (rc) {
6865 BNX2X_ERR("request sp irq failed\n");
6866 return -EBUSY;
6867 }
6868
6869 for_each_queue(bp, i) {
Eilon Greenstein555f6c72009-02-12 08:36:11 +00006870 struct bnx2x_fastpath *fp = &bp->fp[i];
6871
Eilon Greensteinca003922009-08-12 22:53:28 -07006872 if (i < bp->num_rx_queues)
6873 sprintf(fp->name, "%s-rx-%d", bp->dev->name, i);
6874 else
6875 sprintf(fp->name, "%s-tx-%d",
6876 bp->dev->name, i - bp->num_rx_queues);
6877
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006878 rc = request_irq(bp->msix_table[i + offset].vector,
Eilon Greenstein555f6c72009-02-12 08:36:11 +00006879 bnx2x_msix_fp_int, 0, fp->name, fp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006880 if (rc) {
Eilon Greenstein555f6c72009-02-12 08:36:11 +00006881 BNX2X_ERR("request fp #%d irq failed rc %d\n", i, rc);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006882 bnx2x_free_msix_irqs(bp);
6883 return -EBUSY;
6884 }
6885
Eilon Greenstein555f6c72009-02-12 08:36:11 +00006886 fp->state = BNX2X_FP_STATE_IRQ;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006887 }
6888
Eilon Greenstein555f6c72009-02-12 08:36:11 +00006889 i = BNX2X_NUM_QUEUES(bp);
Eilon Greensteinca003922009-08-12 22:53:28 -07006890 printk(KERN_INFO PFX "%s: using MSI-X IRQs: sp %d fp[%d] %d"
6891 " ... fp[%d] %d\n",
6892 bp->dev->name, bp->msix_table[0].vector,
6893 0, bp->msix_table[offset].vector,
6894 i - 1, bp->msix_table[offset + i - 1].vector);
Eilon Greenstein555f6c72009-02-12 08:36:11 +00006895
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006896 return 0;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006897}
6898
Eilon Greenstein8badd272009-02-12 08:36:15 +00006899static int bnx2x_enable_msi(struct bnx2x *bp)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006900{
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006901 int rc;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006902
Eilon Greenstein8badd272009-02-12 08:36:15 +00006903 rc = pci_enable_msi(bp->pdev);
6904 if (rc) {
6905 DP(NETIF_MSG_IFUP, "MSI is not attainable\n");
6906 return -1;
6907 }
6908 bp->flags |= USING_MSI_FLAG;
6909
6910 return 0;
6911}
6912
6913static int bnx2x_req_irq(struct bnx2x *bp)
6914{
6915 unsigned long flags;
6916 int rc;
6917
6918 if (bp->flags & USING_MSI_FLAG)
6919 flags = 0;
6920 else
6921 flags = IRQF_SHARED;
6922
6923 rc = request_irq(bp->pdev->irq, bnx2x_interrupt, flags,
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006924 bp->dev->name, bp->dev);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006925 if (!rc)
6926 bnx2x_fp(bp, 0, state) = BNX2X_FP_STATE_IRQ;
6927
6928 return rc;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006929}
6930
/* Enable NAPI polling on every Rx queue. */
static void bnx2x_napi_enable(struct bnx2x *bp)
{
	int i;

	for_each_rx_queue(bp, i)
		napi_enable(&bnx2x_fp(bp, i, napi));
}
6938
/* Disable NAPI polling on every Rx queue (waits for in-flight polls). */
static void bnx2x_napi_disable(struct bnx2x *bp)
{
	int i;

	for_each_rx_queue(bp, i)
		napi_disable(&bnx2x_fp(bp, i, napi));
}
6946
/*
 * Re-enable traffic after bnx2x_netif_stop(): decrement the interrupt
 * disable semaphore and, only when it reaches zero (no other path still
 * holds interrupts off), re-enable NAPI, HW interrupts and - if the
 * device is fully OPEN - the Tx queues.
 */
static void bnx2x_netif_start(struct bnx2x *bp)
{
	int intr_sem;

	/* true only when this call released the last hold */
	intr_sem = atomic_dec_and_test(&bp->intr_sem);
	smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */

	if (intr_sem) {
		if (netif_running(bp->dev)) {
			bnx2x_napi_enable(bp);
			bnx2x_int_enable(bp);
			if (bp->state == BNX2X_STATE_OPEN)
				netif_tx_wake_all_queues(bp->dev);
		}
	}
}
6963
/*
 * Quiesce the interface: mask and synchronize interrupts (optionally at
 * the HW level per @disable_hw), stop NAPI and disable Tx.  trans_start
 * is refreshed so the stack's Tx watchdog does not fire while stopped.
 */
static void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
{
	bnx2x_int_disable_sync(bp, disable_hw);
	bnx2x_napi_disable(bp);
	netif_tx_disable(bp->dev);
	bp->dev->trans_start = jiffies; /* prevent tx timeout */
}
6971
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006972/*
6973 * Init service functions
6974 */
6975
/*
 * Post a SET_MAC ramrod programming the E1 CAM: entry 0 is the device's
 * primary MAC, entry 1 the broadcast address.  @set != 0 installs the
 * entries, @set == 0 invalidates them.  Completion is asynchronous via
 * the slowpath queue.
 */
static void bnx2x_set_mac_addr_e1(struct bnx2x *bp, int set)
{
	struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
	int port = BP_PORT(bp);

	/* CAM allocation
	 * unicasts 0-31:port0 32-63:port1
	 * multicast 64-127:port0 128-191:port1
	 */
	config->hdr.length = 2;
	config->hdr.offset = port ? 32 : 0;
	config->hdr.client_id = bp->fp->cl_id;
	config->hdr.reserved1 = 0;

	/* primary MAC - byte pairs are swab16'ed into the CAM format */
	config->config_table[0].cam_entry.msb_mac_addr =
					swab16(*(u16 *)&bp->dev->dev_addr[0]);
	config->config_table[0].cam_entry.middle_mac_addr =
					swab16(*(u16 *)&bp->dev->dev_addr[2]);
	config->config_table[0].cam_entry.lsb_mac_addr =
					swab16(*(u16 *)&bp->dev->dev_addr[4]);
	config->config_table[0].cam_entry.flags = cpu_to_le16(port);
	if (set)
		config->config_table[0].target_table_entry.flags = 0;
	else
		CAM_INVALIDATE(config->config_table[0]);
	config->config_table[0].target_table_entry.clients_bit_vector =
						cpu_to_le32(1 << BP_L_ID(bp));
	config->config_table[0].target_table_entry.vlan_id = 0;

	DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)\n",
	   (set ? "setting" : "clearing"),
	   config->config_table[0].cam_entry.msb_mac_addr,
	   config->config_table[0].cam_entry.middle_mac_addr,
	   config->config_table[0].cam_entry.lsb_mac_addr);

	/* broadcast */
	config->config_table[1].cam_entry.msb_mac_addr = cpu_to_le16(0xffff);
	config->config_table[1].cam_entry.middle_mac_addr = cpu_to_le16(0xffff);
	config->config_table[1].cam_entry.lsb_mac_addr = cpu_to_le16(0xffff);
	config->config_table[1].cam_entry.flags = cpu_to_le16(port);
	if (set)
		config->config_table[1].target_table_entry.flags =
				TSTORM_CAM_TARGET_TABLE_ENTRY_BROADCAST;
	else
		CAM_INVALIDATE(config->config_table[1]);
	config->config_table[1].target_table_entry.clients_bit_vector =
						cpu_to_le32(1 << BP_L_ID(bp));
	config->config_table[1].target_table_entry.vlan_id = 0;

	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
		      U64_HI(bnx2x_sp_mapping(bp, mac_config)),
		      U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
}
7030
/*
 * Post a SET_MAC ramrod for the E1H CAM: a single unicast entry per
 * function, tagged with the function's outer VLAN (e1hov).  @set selects
 * install vs invalidate.  Completion is asynchronous via the slowpath
 * queue.
 */
static void bnx2x_set_mac_addr_e1h(struct bnx2x *bp, int set)
{
	struct mac_configuration_cmd_e1h *config =
		(struct mac_configuration_cmd_e1h *)bnx2x_sp(bp, mac_config);

	/* CAM allocation for E1H
	 * unicasts: by func number
	 * multicast: 20+FUNC*20, 20 each
	 */
	config->hdr.length = 1;
	config->hdr.offset = BP_FUNC(bp);
	config->hdr.client_id = bp->fp->cl_id;
	config->hdr.reserved1 = 0;

	/* primary MAC - byte pairs are swab16'ed into the CAM format */
	config->config_table[0].msb_mac_addr =
					swab16(*(u16 *)&bp->dev->dev_addr[0]);
	config->config_table[0].middle_mac_addr =
					swab16(*(u16 *)&bp->dev->dev_addr[2]);
	config->config_table[0].lsb_mac_addr =
					swab16(*(u16 *)&bp->dev->dev_addr[4]);
	config->config_table[0].clients_bit_vector =
					cpu_to_le32(1 << BP_L_ID(bp));
	config->config_table[0].vlan_id = 0;
	config->config_table[0].e1hov_id = cpu_to_le16(bp->e1hov);
	if (set)
		config->config_table[0].flags = BP_PORT(bp);
	else
		config->config_table[0].flags =
				MAC_CONFIGURATION_ENTRY_E1H_ACTION_TYPE;

	DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x) E1HOV %d CLID %d\n",
	   (set ? "setting" : "clearing"),
	   config->config_table[0].msb_mac_addr,
	   config->config_table[0].middle_mac_addr,
	   config->config_table[0].lsb_mac_addr, bp->e1hov, BP_L_ID(bp));

	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
		      U64_HI(bnx2x_sp_mapping(bp, mac_config)),
		      U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
}
7072
/*
 * Wait for a slowpath ramrod completion: loop up to ~5000 iterations
 * (1 ms sleep each) until *state_p - updated by bnx2x_sp_event() -
 * reaches @state.  With @poll set, the Rx completion rings are serviced
 * manually (for use when interrupts are not active); @idx selects the
 * additional non-default queue to poll.
 * Returns 0 on success, -EBUSY on timeout (panics under
 * BNX2X_STOP_ON_ERROR).
 */
static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
			     int *state_p, int poll)
{
	/* can take a while if any port is running */
	int cnt = 5000;

	DP(NETIF_MSG_IFUP, "%s for state to become %x on IDX [%d]\n",
	   poll ? "polling" : "waiting", state, idx);

	might_sleep();
	while (cnt--) {
		if (poll) {
			bnx2x_rx_int(bp->fp, 10);
			/* if index is different from 0
			 * the reply for some commands will
			 * be on the non default queue
			 */
			if (idx)
				bnx2x_rx_int(&bp->fp[idx], 10);
		}

		mb(); /* state is changed by bnx2x_sp_event() */
		if (*state_p == state) {
#ifdef BNX2X_STOP_ON_ERROR
			DP(NETIF_MSG_IFUP, "exit (cnt %d)\n", 5000 - cnt);
#endif
			return 0;
		}

		msleep(1);
	}

	/* timeout! */
	BNX2X_ERR("timeout %s for state %x on IDX [%d]\n",
		  poll ? "polling" : "waiting", state, idx);
#ifdef BNX2X_STOP_ON_ERROR
	bnx2x_panic();
#endif

	return -EBUSY;
}
7114
/*
 * Bring up the leading (index 0) client: clear its IGU state, post the
 * PORT_SETUP ramrod and wait for bp->state to reach OPEN.
 * Returns 0 on success, -EBUSY on ramrod timeout.
 */
static int bnx2x_setup_leading(struct bnx2x *bp)
{
	int rc;

	/* reset IGU state */
	bnx2x_ack_sb(bp, bp->fp[0].sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);

	/* SETUP ramrod */
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_SETUP, 0, 0, 0, 0);

	/* Wait for completion */
	rc = bnx2x_wait_ramrod(bp, BNX2X_STATE_OPEN, 0, &(bp->state), 0);

	return rc;
}
7130
/*
 * Bring up a non-leading client @index: clear its IGU state, post the
 * CLIENT_SETUP ramrod and wait for the fastpath state to reach OPEN.
 * Returns 0 on success, -EBUSY on ramrod timeout.
 */
static int bnx2x_setup_multi(struct bnx2x *bp, int index)
{
	struct bnx2x_fastpath *fp = &bp->fp[index];

	/* reset IGU state */
	bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);

	/* SETUP ramrod */
	fp->state = BNX2X_FP_STATE_OPENING;
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_SETUP, index, 0,
		      fp->cl_id, 0);

	/* Wait for completion */
	return bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_OPEN, index,
				 &(fp->state), 0);
}
7147
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007148static int bnx2x_poll(struct napi_struct *napi, int budget);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007149
Eilon Greensteinca003922009-08-12 22:53:28 -07007150static void bnx2x_set_int_mode_msix(struct bnx2x *bp, int *num_rx_queues_out,
7151 int *num_tx_queues_out)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007152{
Eilon Greensteinca003922009-08-12 22:53:28 -07007153 int _num_rx_queues = 0, _num_tx_queues = 0;
7154
7155 switch (bp->multi_mode) {
7156 case ETH_RSS_MODE_DISABLED:
7157 _num_rx_queues = 1;
7158 _num_tx_queues = 1;
7159 break;
7160
7161 case ETH_RSS_MODE_REGULAR:
7162 if (num_rx_queues)
7163 _num_rx_queues = min_t(u32, num_rx_queues,
7164 BNX2X_MAX_QUEUES(bp));
7165 else
7166 _num_rx_queues = min_t(u32, num_online_cpus(),
7167 BNX2X_MAX_QUEUES(bp));
7168
7169 if (num_tx_queues)
7170 _num_tx_queues = min_t(u32, num_tx_queues,
7171 BNX2X_MAX_QUEUES(bp));
7172 else
7173 _num_tx_queues = min_t(u32, num_online_cpus(),
7174 BNX2X_MAX_QUEUES(bp));
7175
7176 /* There must be not more Tx queues than Rx queues */
7177 if (_num_tx_queues > _num_rx_queues) {
7178 BNX2X_ERR("number of tx queues (%d) > "
7179 "number of rx queues (%d)"
7180 " defaulting to %d\n",
7181 _num_tx_queues, _num_rx_queues,
7182 _num_rx_queues);
7183 _num_tx_queues = _num_rx_queues;
7184 }
7185 break;
7186
7187
7188 default:
7189 _num_rx_queues = 1;
7190 _num_tx_queues = 1;
7191 break;
7192 }
7193
7194 *num_rx_queues_out = _num_rx_queues;
7195 *num_tx_queues_out = _num_tx_queues;
7196}
7197
/*
 * Choose the interrupt scheme according to the int_mode module parameter
 * and set bp->num_rx_queues/num_tx_queues accordingly.  INTx/MSI force a
 * single queue pair; MSI-X (the default) computes the multi-queue counts
 * and falls back to a single queue pair when pci_enable_msix() fails.
 * Returns the bnx2x_enable_msix() result (0 when MSI-X is not tried).
 */
static int bnx2x_set_int_mode(struct bnx2x *bp)
{
	int rc = 0;

	switch (int_mode) {
	case INT_MODE_INTx:
	case INT_MODE_MSI:
		bp->num_rx_queues = 1;
		bp->num_tx_queues = 1;
		DP(NETIF_MSG_IFUP, "set number of queues to 1\n");
		break;

	case INT_MODE_MSIX:
	default:
		/* Set interrupt mode according to bp->multi_mode value */
		bnx2x_set_int_mode_msix(bp, &bp->num_rx_queues,
					&bp->num_tx_queues);

		DP(NETIF_MSG_IFUP, "set number of queues to: rx %d tx %d\n",
		   bp->num_rx_queues, bp->num_tx_queues);

		/* if we can't use MSI-X we only need one fp,
		 * so try to enable MSI-X with the requested number of fp's
		 * and fallback to MSI or legacy INTx with one fp
		 */
		rc = bnx2x_enable_msix(bp);
		if (rc) {
			/* failed to enable MSI-X */
			if (bp->multi_mode)
				BNX2X_ERR("Multi requested but failed to "
					  "enable MSI-X (rx %d tx %d), "
					  "set number of queues to 1\n",
					  bp->num_rx_queues, bp->num_tx_queues);
			bp->num_rx_queues = 1;
			bp->num_tx_queues = 1;
		}
		break;
	}
	/* keep the stack's Tx queue count in sync with what we will use */
	bp->dev->real_num_tx_queues = bp->num_tx_queues;
	return rc;
}
7239
Eilon Greenstein8badd272009-02-12 08:36:15 +00007240
/* bnx2x_nic_load - bring the NIC up (must be called with rtnl_lock held)
 *
 * @bp:        driver instance
 * @load_mode: LOAD_NORMAL / LOAD_OPEN / LOAD_DIAG - selects how the fast
 *             path is started at the end of the sequence
 *
 * Sequence: pick interrupt mode, allocate memory, enable NAPI, request
 * IRQs, perform the LOAD_REQ/LOAD_DONE handshake with the MCP (or emulate
 * it via load_count[] when there is no MCP), init HW and FW state, set up
 * the leading and non-default queues, and finally start the fast path.
 * On failure, unwinds via the load_error3/2/1 goto ladder in reverse
 * order of acquisition.
 *
 * Returns 0 on success or a negative errno on failure.
 */
static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
{
	u32 load_code;
	int i, rc;

#ifdef BNX2X_STOP_ON_ERROR
	/* Debug build: refuse to reload a paniced device so state is kept */
	if (unlikely(bp->panic))
		return -EPERM;
#endif

	bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;

	/* rc is reused below: if MSI-X setup failed with -ENOMEM here,
	 * the else branch will not try MSI either */
	rc = bnx2x_set_int_mode(bp);

	if (bnx2x_alloc_mem(bp))
		return -ENOMEM;

	/* TPA (LRO aggregation) is enabled/disabled globally via bp->flags */
	for_each_rx_queue(bp, i)
		bnx2x_fp(bp, i, disable_tpa) =
					((bp->flags & TPA_ENABLE_FLAG) == 0);

	for_each_rx_queue(bp, i)
		netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
			       bnx2x_poll, 128);

	bnx2x_napi_enable(bp);

	if (bp->flags & USING_MSIX_FLAG) {
		rc = bnx2x_req_msix_irqs(bp);
		if (rc) {
			pci_disable_msix(bp->pdev);
			goto load_error1;
		}
	} else {
		/* Fall to INTx if failed to enable MSI-X due to lack of
		   memory (in bnx2x_set_int_mode()) */
		if ((rc != -ENOMEM) && (int_mode != INT_MODE_INTx))
			bnx2x_enable_msi(bp);
		bnx2x_ack_int(bp);
		rc = bnx2x_req_irq(bp);
		if (rc) {
			BNX2X_ERR("IRQ request failed rc %d, aborting\n", rc);
			if (bp->flags & USING_MSI_FLAG)
				pci_disable_msi(bp->pdev);
			goto load_error1;
		}
		if (bp->flags & USING_MSI_FLAG) {
			bp->dev->irq = bp->pdev->irq;
			printk(KERN_INFO PFX "%s: using MSI IRQ %d\n",
			       bp->dev->name, bp->pdev->irq);
		}
	}

	/* Send LOAD_REQUEST command to MCP
	   Returns the type of LOAD command:
	   if it is the first port to be initialized
	   common blocks should be initialized, otherwise - not
	*/
	if (!BP_NOMCP(bp)) {
		load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ);
		if (!load_code) {
			BNX2X_ERR("MCP response failure, aborting\n");
			rc = -EBUSY;
			goto load_error2;
		}
		if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED) {
			rc = -EBUSY; /* other port in diagnostic mode */
			goto load_error2;
		}

	} else {
		int port = BP_PORT(bp);

		/* No MCP: emulate the handshake with the driver-global
		 * load_count[] so COMMON/PORT/FUNCTION init is still
		 * done exactly once per chip/port */
		DP(NETIF_MSG_IFUP, "NO MCP - load counts %d, %d, %d\n",
		   load_count[0], load_count[1], load_count[2]);
		load_count[0]++;
		load_count[1 + port]++;
		DP(NETIF_MSG_IFUP, "NO MCP - new load counts %d, %d, %d\n",
		   load_count[0], load_count[1], load_count[2]);
		if (load_count[0] == 1)
			load_code = FW_MSG_CODE_DRV_LOAD_COMMON;
		else if (load_count[1 + port] == 1)
			load_code = FW_MSG_CODE_DRV_LOAD_PORT;
		else
			load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION;
	}

	/* The function doing COMMON or PORT init becomes the PMF (port
	 * management function) - it owns link and PHY handling below */
	if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
	    (load_code == FW_MSG_CODE_DRV_LOAD_PORT))
		bp->port.pmf = 1;
	else
		bp->port.pmf = 0;
	DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);

	/* Initialize HW */
	rc = bnx2x_init_hw(bp, load_code);
	if (rc) {
		BNX2X_ERR("HW init failed, aborting\n");
		goto load_error2;
	}

	/* Setup NIC internals and enable interrupts */
	bnx2x_nic_init(bp, load_code);

	/* Advertise DCC (dynamic configuration change) support to the MCP
	 * via shmem2, only on the COMMON-init path and if shmem2 exists */
	if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) &&
	    (bp->common.shmem2_base))
		SHMEM2_WR(bp, dcc_support,
			  (SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV |
			   SHMEM_DCC_SUPPORT_BANDWIDTH_ALLOCATION_TLV));

	/* Send LOAD_DONE command to MCP */
	if (!BP_NOMCP(bp)) {
		load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE);
		if (!load_code) {
			BNX2X_ERR("MCP response failure, aborting\n");
			rc = -EBUSY;
			goto load_error3;
		}
	}

	bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;

	rc = bnx2x_setup_leading(bp);
	if (rc) {
		BNX2X_ERR("Setup leading failed!\n");
		goto load_error3;
	}

	/* In E1H multi-function mode the MCP may have disabled this
	 * function - keep the device administratively down if so */
	if (CHIP_IS_E1H(bp))
		if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
			DP(NETIF_MSG_IFUP, "mf_cfg function disabled\n");
			bp->state = BNX2X_STATE_DISABLED;
		}

	if (bp->state == BNX2X_STATE_OPEN) {
		for_each_nondefault_queue(bp, i) {
			rc = bnx2x_setup_multi(bp, i);
			if (rc)
				goto load_error3;
		}

		if (CHIP_IS_E1(bp))
			bnx2x_set_mac_addr_e1(bp, 1);
		else
			bnx2x_set_mac_addr_e1h(bp, 1);
	}

	/* Only the PMF initializes the PHY/link */
	if (bp->port.pmf)
		bnx2x_initial_phy_init(bp, load_mode);

	/* Start fast path */
	switch (load_mode) {
	case LOAD_NORMAL:
		if (bp->state == BNX2X_STATE_OPEN) {
			/* Tx queue should be only reenabled */
			netif_tx_wake_all_queues(bp->dev);
		}
		/* Initialize the receive filter. */
		bnx2x_set_rx_mode(bp->dev);
		break;

	case LOAD_OPEN:
		netif_tx_start_all_queues(bp->dev);
		/* keep Tx down if the function was disabled above */
		if (bp->state != BNX2X_STATE_OPEN)
			netif_tx_disable(bp->dev);
		/* Initialize the receive filter. */
		bnx2x_set_rx_mode(bp->dev);
		break;

	case LOAD_DIAG:
		/* Initialize the receive filter. */
		bnx2x_set_rx_mode(bp->dev);
		bp->state = BNX2X_STATE_DIAG;
		break;

	default:
		break;
	}

	/* Non-PMF functions learn the link state from shmem */
	if (!bp->port.pmf)
		bnx2x__link_status_update(bp);

	/* start the timer */
	mod_timer(&bp->timer, jiffies + bp->current_interval);


	return 0;

	/* Error unwind: each label releases everything acquired after the
	 * corresponding point in the success path, in reverse order */
load_error3:
	bnx2x_int_disable_sync(bp, 1);
	if (!BP_NOMCP(bp)) {
		bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP);
		bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
	}
	bp->port.pmf = 0;
	/* Free SKBs, SGEs, TPA pool and driver internals */
	bnx2x_free_skbs(bp);
	for_each_rx_queue(bp, i)
		bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
load_error2:
	/* Release IRQs */
	bnx2x_free_irq(bp);
load_error1:
	bnx2x_napi_disable(bp);
	for_each_rx_queue(bp, i)
		netif_napi_del(&bnx2x_fp(bp, i, napi));
	bnx2x_free_mem(bp);

	return rc;
}
7452
7453static int bnx2x_stop_multi(struct bnx2x *bp, int index)
7454{
Eilon Greenstein555f6c72009-02-12 08:36:11 +00007455 struct bnx2x_fastpath *fp = &bp->fp[index];
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007456 int rc;
7457
Eliezer Tamirc14423f2008-02-28 11:49:42 -08007458 /* halt the connection */
Eilon Greenstein555f6c72009-02-12 08:36:11 +00007459 fp->state = BNX2X_FP_STATE_HALTING;
7460 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, index, 0, fp->cl_id, 0);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007461
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007462 /* Wait for completion */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007463 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, index,
Eilon Greenstein555f6c72009-02-12 08:36:11 +00007464 &(fp->state), 1);
Eliezer Tamirc14423f2008-02-28 11:49:42 -08007465 if (rc) /* timeout */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007466 return rc;
7467
7468 /* delete cfc entry */
7469 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CFC_DEL, index, 0, 0, 1);
7470
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007471 /* Wait for completion */
7472 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_CLOSED, index,
Eilon Greenstein555f6c72009-02-12 08:36:11 +00007473 &(fp->state), 1);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007474 return rc;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007475}
7476
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07007477static int bnx2x_stop_leading(struct bnx2x *bp)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007478{
Eilon Greenstein4781bfa2009-02-12 08:38:17 +00007479 __le16 dsb_sp_prod_idx;
Eliezer Tamirc14423f2008-02-28 11:49:42 -08007480 /* if the other port is handling traffic,
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007481 this can take a lot of time */
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007482 int cnt = 500;
7483 int rc;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007484
7485 might_sleep();
7486
7487 /* Send HALT ramrod */
7488 bp->fp[0].state = BNX2X_FP_STATE_HALTING;
Eilon Greenstein0626b892009-02-12 08:38:14 +00007489 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, 0, 0, bp->fp->cl_id, 0);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007490
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007491 /* Wait for completion */
7492 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, 0,
7493 &(bp->fp[0].state), 1);
7494 if (rc) /* timeout */
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07007495 return rc;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007496
Eliezer Tamir49d66772008-02-28 11:53:13 -08007497 dsb_sp_prod_idx = *bp->dsb_sp_prod;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007498
Eliezer Tamir228241e2008-02-28 11:56:57 -08007499 /* Send PORT_DELETE ramrod */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007500 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_DEL, 0, 0, 0, 1);
7501
Eliezer Tamir49d66772008-02-28 11:53:13 -08007502 /* Wait for completion to arrive on default status block
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007503 we are going to reset the chip anyway
7504 so there is not much to do if this times out
7505 */
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007506 while (dsb_sp_prod_idx == *bp->dsb_sp_prod) {
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007507 if (!cnt) {
7508 DP(NETIF_MSG_IFDOWN, "timeout waiting for port del "
7509 "dsb_sp_prod 0x%x != dsb_sp_prod_idx 0x%x\n",
7510 *bp->dsb_sp_prod, dsb_sp_prod_idx);
7511#ifdef BNX2X_STOP_ON_ERROR
7512 bnx2x_panic();
7513#endif
Eilon Greenstein36e552a2009-02-12 08:37:21 +00007514 rc = -EBUSY;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007515 break;
7516 }
7517 cnt--;
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07007518 msleep(1);
Eilon Greenstein5650d9d2009-01-22 06:01:29 +00007519 rmb(); /* Refresh the dsb_sp_prod */
Eliezer Tamir49d66772008-02-28 11:53:13 -08007520 }
7521 bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD;
7522 bp->fp[0].state = BNX2X_FP_STATE_CLOSED;
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07007523
7524 return rc;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007525}
7526
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007527static void bnx2x_reset_func(struct bnx2x *bp)
7528{
7529 int port = BP_PORT(bp);
7530 int func = BP_FUNC(bp);
7531 int base, i;
Eliezer Tamir49d66772008-02-28 11:53:13 -08007532
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007533 /* Configure IGU */
7534 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
7535 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
7536
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007537 /* Clear ILT */
7538 base = FUNC_ILT_BASE(func);
7539 for (i = base; i < base + ILT_PER_FUNC; i++)
7540 bnx2x_ilt_wr(bp, i, 0);
7541}
7542
/* bnx2x_reset_port - reset the per-port HW state
 *
 * Masks port interrupts, blocks Rx traffic into the BRB (except MCP
 * traffic handling), clears the AEU attention mask, then waits 100ms and
 * warns if the BRB still holds blocks for this port.
 */
static void bnx2x_reset_port(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 val;

	/* Mask all NIG interrupts for this port */
	REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);

	/* Do not rcv packets to BRB */
	REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
	/* Do not direct rcv packets that are not for MCP to the BRB */
	REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
			   NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);

	/* Configure AEU */
	REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);

	/* give in-flight packets time to drain before checking the BRB */
	msleep(100);
	/* Check for BRB port occupancy */
	val = REG_RD(bp, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4);
	if (val)
		DP(NETIF_MSG_IFDOWN,
		   "BRB1 is not empty %d blocks are occupied\n", val);

	/* TODO: Close Doorbell port? */
}
7568
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007569static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code)
7570{
7571 DP(BNX2X_MSG_MCP, "function %d reset_code %x\n",
7572 BP_FUNC(bp), reset_code);
7573
7574 switch (reset_code) {
7575 case FW_MSG_CODE_DRV_UNLOAD_COMMON:
7576 bnx2x_reset_port(bp);
7577 bnx2x_reset_func(bp);
7578 bnx2x_reset_common(bp);
7579 break;
7580
7581 case FW_MSG_CODE_DRV_UNLOAD_PORT:
7582 bnx2x_reset_port(bp);
7583 bnx2x_reset_func(bp);
7584 break;
7585
7586 case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
7587 bnx2x_reset_func(bp);
7588 break;
7589
7590 default:
7591 BNX2X_ERR("Unknown reset_code (0x%x) from MCP\n", reset_code);
7592 break;
7593 }
7594}
7595
Eilon Greenstein33471622008-08-13 15:59:08 -07007596/* must be called with rtnl_lock */
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007597static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007598{
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07007599 int port = BP_PORT(bp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007600 u32 reset_code = 0;
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07007601 int i, cnt, rc;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007602
7603 bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
7604
Eliezer Tamir228241e2008-02-28 11:56:57 -08007605 bp->rx_mode = BNX2X_RX_MODE_NONE;
7606 bnx2x_set_storm_rx_mode(bp);
7607
Yitchak Gertnerf8ef6e42008-09-09 05:07:25 -07007608 bnx2x_netif_stop(bp, 1);
Eilon Greensteine94d8af2009-01-22 03:37:36 +00007609
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007610 del_timer_sync(&bp->timer);
7611 SHMEM_WR(bp, func_mb[BP_FUNC(bp)].drv_pulse_mb,
7612 (DRV_PULSE_ALWAYS_ALIVE | bp->fw_drv_pulse_wr_seq));
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07007613 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007614
Eilon Greenstein70b99862009-01-14 06:43:48 +00007615 /* Release IRQs */
7616 bnx2x_free_irq(bp);
7617
Eilon Greenstein555f6c72009-02-12 08:36:11 +00007618 /* Wait until tx fastpath tasks complete */
7619 for_each_tx_queue(bp, i) {
Eliezer Tamir228241e2008-02-28 11:56:57 -08007620 struct bnx2x_fastpath *fp = &bp->fp[i];
7621
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007622 cnt = 1000;
Vladislav Zolotarove8b5fc52009-01-26 12:36:42 -08007623 while (bnx2x_has_tx_work_unload(fp)) {
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07007624
Eilon Greenstein7961f792009-03-02 07:59:31 +00007625 bnx2x_tx_int(fp);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007626 if (!cnt) {
7627 BNX2X_ERR("timeout waiting for queue[%d]\n",
7628 i);
7629#ifdef BNX2X_STOP_ON_ERROR
7630 bnx2x_panic();
7631 return -EBUSY;
7632#else
7633 break;
7634#endif
7635 }
7636 cnt--;
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07007637 msleep(1);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007638 }
Eliezer Tamir228241e2008-02-28 11:56:57 -08007639 }
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07007640 /* Give HW time to discard old tx messages */
7641 msleep(1);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007642
Yitchak Gertner65abd742008-08-25 15:26:24 -07007643 if (CHIP_IS_E1(bp)) {
7644 struct mac_configuration_cmd *config =
7645 bnx2x_sp(bp, mcast_config);
7646
7647 bnx2x_set_mac_addr_e1(bp, 0);
7648
Eilon Greenstein8d9c5f32009-02-15 23:24:08 -08007649 for (i = 0; i < config->hdr.length; i++)
Yitchak Gertner65abd742008-08-25 15:26:24 -07007650 CAM_INVALIDATE(config->config_table[i]);
7651
Eilon Greenstein8d9c5f32009-02-15 23:24:08 -08007652 config->hdr.length = i;
Yitchak Gertner65abd742008-08-25 15:26:24 -07007653 if (CHIP_REV_IS_SLOW(bp))
7654 config->hdr.offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
7655 else
7656 config->hdr.offset = BNX2X_MAX_MULTICAST*(1 + port);
Eilon Greenstein0626b892009-02-12 08:38:14 +00007657 config->hdr.client_id = bp->fp->cl_id;
Yitchak Gertner65abd742008-08-25 15:26:24 -07007658 config->hdr.reserved1 = 0;
7659
7660 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
7661 U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
7662 U64_LO(bnx2x_sp_mapping(bp, mcast_config)), 0);
7663
7664 } else { /* E1H */
7665 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
7666
7667 bnx2x_set_mac_addr_e1h(bp, 0);
7668
7669 for (i = 0; i < MC_HASH_SIZE; i++)
7670 REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);
Eilon Greenstein7d0446c2009-07-29 00:20:10 +00007671
7672 REG_WR(bp, MISC_REG_E1HMF_MODE, 0);
Yitchak Gertner65abd742008-08-25 15:26:24 -07007673 }
7674
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07007675 if (unload_mode == UNLOAD_NORMAL)
7676 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
Eliezer Tamir228241e2008-02-28 11:56:57 -08007677
Eilon Greenstein7d0446c2009-07-29 00:20:10 +00007678 else if (bp->flags & NO_WOL_FLAG)
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07007679 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07007680
Eilon Greenstein7d0446c2009-07-29 00:20:10 +00007681 else if (bp->wol) {
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07007682 u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007683 u8 *mac_addr = bp->dev->dev_addr;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007684 u32 val;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007685 /* The mac address is written to entries 1-4 to
7686 preserve entry 0 which is used by the PMF */
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07007687 u8 entry = (BP_E1HVN(bp) + 1)*8;
7688
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007689 val = (mac_addr[0] << 8) | mac_addr[1];
Eilon Greenstein3196a882008-08-13 15:58:49 -07007690 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007691
7692 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
7693 (mac_addr[4] << 8) | mac_addr[5];
Eilon Greenstein3196a882008-08-13 15:58:49 -07007694 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007695
7696 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;
Eliezer Tamir228241e2008-02-28 11:56:57 -08007697
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007698 } else
7699 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
7700
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007701 /* Close multi and leading connections
7702 Completions for ramrods are collected in a synchronous way */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007703 for_each_nondefault_queue(bp, i)
7704 if (bnx2x_stop_multi(bp, i))
Eliezer Tamir228241e2008-02-28 11:56:57 -08007705 goto unload_error;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007706
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07007707 rc = bnx2x_stop_leading(bp);
7708 if (rc) {
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007709 BNX2X_ERR("Stop leading failed!\n");
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07007710#ifdef BNX2X_STOP_ON_ERROR
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007711 return -EBUSY;
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07007712#else
7713 goto unload_error;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007714#endif
Eliezer Tamir228241e2008-02-28 11:56:57 -08007715 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007716
Eliezer Tamir228241e2008-02-28 11:56:57 -08007717unload_error:
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007718 if (!BP_NOMCP(bp))
Eliezer Tamir228241e2008-02-28 11:56:57 -08007719 reset_code = bnx2x_fw_command(bp, reset_code);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007720 else {
Eilon Greensteinf5372252009-02-12 08:38:30 +00007721 DP(NETIF_MSG_IFDOWN, "NO MCP - load counts %d, %d, %d\n",
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007722 load_count[0], load_count[1], load_count[2]);
7723 load_count[0]--;
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07007724 load_count[1 + port]--;
Eilon Greensteinf5372252009-02-12 08:38:30 +00007725 DP(NETIF_MSG_IFDOWN, "NO MCP - new load counts %d, %d, %d\n",
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007726 load_count[0], load_count[1], load_count[2]);
7727 if (load_count[0] == 0)
7728 reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07007729 else if (load_count[1 + port] == 0)
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007730 reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT;
7731 else
7732 reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION;
7733 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007734
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007735 if ((reset_code == FW_MSG_CODE_DRV_UNLOAD_COMMON) ||
7736 (reset_code == FW_MSG_CODE_DRV_UNLOAD_PORT))
7737 bnx2x__link_reset(bp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007738
7739 /* Reset the chip */
Eliezer Tamir228241e2008-02-28 11:56:57 -08007740 bnx2x_reset_chip(bp, reset_code);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007741
7742 /* Report UNLOAD_DONE to MCP */
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007743 if (!BP_NOMCP(bp))
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007744 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
Eilon Greenstein356e2382009-02-12 08:38:32 +00007745
Eilon Greenstein9a035442008-11-03 16:45:55 -08007746 bp->port.pmf = 0;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007747
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07007748 /* Free SKBs, SGEs, TPA pool and driver internals */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007749 bnx2x_free_skbs(bp);
Eilon Greenstein555f6c72009-02-12 08:36:11 +00007750 for_each_rx_queue(bp, i)
Eilon Greenstein3196a882008-08-13 15:58:49 -07007751 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
Eilon Greenstein555f6c72009-02-12 08:36:11 +00007752 for_each_rx_queue(bp, i)
Eilon Greenstein7cde1c82009-01-22 06:01:25 +00007753 netif_napi_del(&bnx2x_fp(bp, i, napi));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007754 bnx2x_free_mem(bp);
7755
7756 bp->state = BNX2X_STATE_CLOSED;
Eliezer Tamir228241e2008-02-28 11:56:57 -08007757
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007758 netif_carrier_off(bp->dev);
7759
7760 return 0;
7761}
7762
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007763static void bnx2x_reset_task(struct work_struct *work)
7764{
7765 struct bnx2x *bp = container_of(work, struct bnx2x, reset_task);
7766
7767#ifdef BNX2X_STOP_ON_ERROR
7768 BNX2X_ERR("reset task called but STOP_ON_ERROR defined"
7769 " so reset not done to allow debug dump,\n"
Joe Perchesad361c92009-07-06 13:05:40 -07007770 " you will need to reboot when done\n");
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007771 return;
7772#endif
7773
7774 rtnl_lock();
7775
7776 if (!netif_running(bp->dev))
7777 goto reset_task_exit;
7778
7779 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
7780 bnx2x_nic_load(bp, LOAD_NORMAL);
7781
7782reset_task_exit:
7783 rtnl_unlock();
7784}
7785
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007786/* end of nic load/unload */
7787
7788/* ethtool_ops */
7789
7790/*
7791 * Init service functions
7792 */
7793
Eilon Greensteinf1ef27e2009-02-12 08:36:23 +00007794static inline u32 bnx2x_get_pretend_reg(struct bnx2x *bp, int func)
7795{
7796 switch (func) {
7797 case 0: return PXP2_REG_PGL_PRETEND_FUNC_F0;
7798 case 1: return PXP2_REG_PGL_PRETEND_FUNC_F1;
7799 case 2: return PXP2_REG_PGL_PRETEND_FUNC_F2;
7800 case 3: return PXP2_REG_PGL_PRETEND_FUNC_F3;
7801 case 4: return PXP2_REG_PGL_PRETEND_FUNC_F4;
7802 case 5: return PXP2_REG_PGL_PRETEND_FUNC_F5;
7803 case 6: return PXP2_REG_PGL_PRETEND_FUNC_F6;
7804 case 7: return PXP2_REG_PGL_PRETEND_FUNC_F7;
7805 default:
7806 BNX2X_ERR("Unsupported function index: %d\n", func);
7807 return (u32)(-1);
7808 }
7809}
7810
/* bnx2x_undi_int_disable_e1h - disable interrupts on an E1H chip
 *
 * Temporarily programs the PGL pretend register so GRC accesses appear to
 * come from function 0, disables interrupts in that "like-E1" mode, then
 * restores the pretend register to @orig_func.  Each write is verified by
 * a read-back; a mismatch is fatal (BUG).
 */
static void bnx2x_undi_int_disable_e1h(struct bnx2x *bp, int orig_func)
{
	u32 reg = bnx2x_get_pretend_reg(bp, orig_func), new_val;

	/* Flush all outstanding writes */
	mmiowb();

	/* Pretend to be function 0 */
	REG_WR(bp, reg, 0);
	/* Flush the GRC transaction (in the chip) */
	new_val = REG_RD(bp, reg);
	if (new_val != 0) {
		BNX2X_ERR("Hmmm... Pretend register wasn't updated: (0,%d)!\n",
			  new_val);
		BUG();
	}

	/* From now we are in the "like-E1" mode */
	bnx2x_int_disable(bp);

	/* Flush all outstanding writes */
	mmiowb();

	/* Restore the original function settings */
	REG_WR(bp, reg, orig_func);
	new_val = REG_RD(bp, reg);
	if (new_val != orig_func) {
		BNX2X_ERR("Hmmm... Pretend register wasn't updated: (%d,%d)!\n",
			  orig_func, new_val);
		BUG();
	}
}
7843
/* bnx2x_undi_int_disable - chip-variant dispatch for interrupt disable
 *
 * E1H needs the pretend-register sequence; E1 uses the plain path.
 */
static inline void bnx2x_undi_int_disable(struct bnx2x *bp, int func)
{
	if (!CHIP_IS_E1H(bp)) {
		bnx2x_int_disable(bp);
		return;
	}

	bnx2x_undi_int_disable_e1h(bp, func);
}
7851
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007852static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007853{
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007854 u32 val;
7855
7856 /* Check if there is any driver already loaded */
7857 val = REG_RD(bp, MISC_REG_UNPREPARED);
7858 if (val == 0x1) {
7859 /* Check if it is the UNDI driver
7860 * UNDI driver initializes CID offset for normal bell to 0x7
7861 */
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07007862 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007863 val = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
7864 if (val == 0x7) {
7865 u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07007866 /* save our func */
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007867 int func = BP_FUNC(bp);
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07007868 u32 swap_en;
7869 u32 swap_val;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007870
Eilon Greensteinb4661732009-01-14 06:43:56 +00007871 /* clear the UNDI indication */
7872 REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0);
7873
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007874 BNX2X_DEV_INFO("UNDI is active! reset device\n");
7875
7876 /* try unload UNDI on port 0 */
7877 bp->func = 0;
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07007878 bp->fw_seq =
7879 (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
7880 DRV_MSG_SEQ_NUMBER_MASK);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007881 reset_code = bnx2x_fw_command(bp, reset_code);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007882
7883 /* if UNDI is loaded on the other port */
7884 if (reset_code != FW_MSG_CODE_DRV_UNLOAD_COMMON) {
7885
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07007886 /* send "DONE" for previous unload */
7887 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
7888
7889 /* unload UNDI on port 1 */
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007890 bp->func = 1;
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07007891 bp->fw_seq =
7892 (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
7893 DRV_MSG_SEQ_NUMBER_MASK);
7894 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007895
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07007896 bnx2x_fw_command(bp, reset_code);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007897 }
7898
Eilon Greensteinb4661732009-01-14 06:43:56 +00007899 /* now it's safe to release the lock */
7900 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
7901
Eilon Greensteinf1ef27e2009-02-12 08:36:23 +00007902 bnx2x_undi_int_disable(bp, func);
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07007903
7904 /* close input traffic and wait for it */
7905 /* Do not rcv packets to BRB */
7906 REG_WR(bp,
7907 (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_DRV_MASK :
7908 NIG_REG_LLH0_BRB1_DRV_MASK), 0x0);
7909 /* Do not direct rcv packets that are not for MCP to
7910 * the BRB */
7911 REG_WR(bp,
7912 (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_NOT_MCP :
7913 NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
7914 /* clear AEU */
7915 REG_WR(bp,
7916 (BP_PORT(bp) ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
7917 MISC_REG_AEU_MASK_ATTN_FUNC_0), 0);
7918 msleep(10);
7919
7920 /* save NIG port swap info */
7921 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
7922 swap_en = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007923 /* reset device */
7924 REG_WR(bp,
7925 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07007926 0xd3ffffff);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007927 REG_WR(bp,
7928 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
7929 0x1403);
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07007930 /* take the NIG out of reset and restore swap values */
7931 REG_WR(bp,
7932 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
7933 MISC_REGISTERS_RESET_REG_1_RST_NIG);
7934 REG_WR(bp, NIG_REG_PORT_SWAP, swap_val);
7935 REG_WR(bp, NIG_REG_STRAP_OVERRIDE, swap_en);
7936
7937 /* send unload done to the MCP */
7938 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
7939
7940 /* restore our func and fw_seq */
7941 bp->func = func;
7942 bp->fw_seq =
7943 (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
7944 DRV_MSG_SEQ_NUMBER_MASK);
Eilon Greensteinb4661732009-01-14 06:43:56 +00007945
7946 } else
7947 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007948 }
7949}
7950
/* Read the chip-wide (port/function independent) hardware info:
 * chip id/revision, port count, flash size, shared-memory bases,
 * MCP (management CPU) validity, bootcode version and WoL capability.
 * Sets NO_MCP_FLAG / NO_WOL_FLAG / ONE_PORT_FLAG in bp->flags and fills
 * bp->common and parts of bp->link_params.
 */
static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
{
	u32 val, val2, val3, val4, id;
	u16 pmc;

	/* Get the chip revision id and number. */
	/* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
	val = REG_RD(bp, MISC_REG_CHIP_NUM);
	id = ((val & 0xffff) << 16);
	val = REG_RD(bp, MISC_REG_CHIP_REV);
	id |= ((val & 0xf) << 12);
	val = REG_RD(bp, MISC_REG_CHIP_METAL);
	id |= ((val & 0xff) << 4);
	val = REG_RD(bp, MISC_REG_BOND_ID);
	id |= (val & 0xf);
	bp->common.chip_id = id;
	/* link code keeps its own copy of the chip id */
	bp->link_params.chip_id = bp->common.chip_id;
	BNX2X_DEV_INFO("chip ID is 0x%x\n", id);

	/* Detect single-port boards: chip_id bit 0, or the 0x55 pattern in
	 * register 0x2874 (magic strap values per the HW spec — NOTE(review):
	 * exact register semantics not visible here, confirm against datasheet)
	 */
	val = (REG_RD(bp, 0x2874) & 0x55);
	if ((bp->common.chip_id & 0x1) ||
	    (CHIP_IS_E1(bp) && val) || (CHIP_IS_E1H(bp) && (val == 0x55))) {
		bp->flags |= ONE_PORT_FLAG;
		BNX2X_DEV_INFO("single port device\n");
	}

	/* flash size is encoded as a power-of-two multiple of 1MB */
	val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4);
	bp->common.flash_size = (NVRAM_1MB_SIZE <<
				 (val & MCPR_NVM_CFG4_FLASH_SIZE));
	BNX2X_DEV_INFO("flash_size 0x%x (%d)\n",
		       bp->common.flash_size, bp->common.flash_size);

	bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
	bp->common.shmem2_base = REG_RD(bp, MISC_REG_GENERIC_CR_0);
	bp->link_params.shmem_base = bp->common.shmem_base;
	BNX2X_DEV_INFO("shmem offset 0x%x shmem2 offset 0x%x\n",
		       bp->common.shmem_base, bp->common.shmem2_base);

	/* A shmem base outside [0xA0000, 0xC0000) means the MCP never ran;
	 * run without management firmware from here on
	 */
	if (!bp->common.shmem_base ||
	    (bp->common.shmem_base < 0xA0000) ||
	    (bp->common.shmem_base >= 0xC0000)) {
		BNX2X_DEV_INFO("MCP not active\n");
		bp->flags |= NO_MCP_FLAG;
		return;
	}

	val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
	if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		!= (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		BNX2X_ERR("BAD MCP validity signature\n");

	bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config);
	BNX2X_DEV_INFO("hw_config 0x%08x\n", bp->common.hw_config);

	bp->link_params.hw_led_mode = ((bp->common.hw_config &
					SHARED_HW_CFG_LED_MODE_MASK) >>
				       SHARED_HW_CFG_LED_MODE_SHIFT);

	/* pre-emphasis override is the only shared feature consumed here */
	bp->link_params.feature_config_flags = 0;
	val = SHMEM_RD(bp, dev_info.shared_feature_config.config);
	if (val & SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_ENABLED)
		bp->link_params.feature_config_flags |=
				FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
	else
		bp->link_params.feature_config_flags &=
				~FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;

	val = SHMEM_RD(bp, dev_info.bc_rev) >> 8;
	bp->common.bc_ver = val;
	BNX2X_DEV_INFO("bc_ver %X\n", val);
	if (val < BNX2X_BC_VER) {
		/* for now only warn
		 * later we might need to enforce this */
		BNX2X_ERR("This driver needs bc_ver %X but found %X,"
			  " please upgrade BC\n", BNX2X_BC_VER, val);
	}
	/* optical-module verification needs a new enough bootcode */
	bp->link_params.feature_config_flags |=
		(val >= REQ_BC_VER_4_VRFY_OPT_MDL) ?
		FEATURE_CONFIG_BC_SUPPORTS_OPT_MDL_VRFY : 0;

	/* WoL needs PME-from-D3cold; only E1H VN0 can use it at all */
	if (BP_E1HVN(bp) == 0) {
		pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_PMC, &pmc);
		bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG;
	} else {
		/* no WOL capability for E1HVN != 0 */
		bp->flags |= NO_WOL_FLAG;
	}
	BNX2X_DEV_INFO("%sWoL capable\n",
		       (bp->flags & NO_WOL_FLAG) ? "not " : "");

	/* board part number is 16 bytes, read as 4 dwords */
	val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num);
	val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]);
	val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]);
	val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]);

	printk(KERN_INFO PFX "part number %X-%X-%X-%X\n",
	       val, val2, val3, val4);
}
8049
/* Build bp->port.supported (ethtool SUPPORTED_* mask) and the PHY address
 * for this port from the switch configuration (1G SerDes vs 10G XGXS) and
 * the external PHY type encoded in the NVRAM ext_phy_config.  The mask is
 * then trimmed down by the NVRAM speed_capability_mask.
 * On a bad NVRAM configuration, logs an error and returns early without
 * setting phy_addr.
 */
static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp,
						    u32 switch_cfg)
{
	int port = BP_PORT(bp);
	u32 ext_phy_type;

	switch (switch_cfg) {
	case SWITCH_CFG_1G:
		/* 1G SerDes interface */
		BNX2X_DEV_INFO("switch_cfg 0x%x (1G)\n", switch_cfg);

		ext_phy_type =
			SERDES_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
		switch (ext_phy_type) {
		case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT:
			/* internal SerDes, no external PHY */
			BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10baseT_Half |
					       SUPPORTED_10baseT_Full |
					       SUPPORTED_100baseT_Half |
					       SUPPORTED_100baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_2500baseX_Full |
					       SUPPORTED_TP |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (5482)\n",
				       ext_phy_type);

			/* like Direct but without 2.5G */
			bp->port.supported |= (SUPPORTED_10baseT_Half |
					       SUPPORTED_10baseT_Full |
					       SUPPORTED_100baseT_Half |
					       SUPPORTED_100baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_TP |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		default:
			BNX2X_ERR("NVRAM config error. "
				  "BAD SerDes ext_phy_config 0x%x\n",
				  bp->link_params.ext_phy_config);
			return;
		}

		bp->port.phy_addr = REG_RD(bp, NIG_REG_SERDES0_CTRL_PHY_ADDR +
					   port*0x10);
		BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
		break;

	case SWITCH_CFG_10G:
		/* 10G XGXS interface */
		BNX2X_DEV_INFO("switch_cfg 0x%x (10G)\n", switch_cfg);

		ext_phy_type =
			XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
		switch (ext_phy_type) {
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10baseT_Half |
					       SUPPORTED_10baseT_Full |
					       SUPPORTED_100baseT_Half |
					       SUPPORTED_100baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_2500baseX_Full |
					       SUPPORTED_10000baseT_Full |
					       SUPPORTED_TP |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (8072)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (8073)\n",
				       ext_phy_type);

			/* 8073 adds 2.5G on top of the 8072 set */
			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_2500baseX_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
			/* 10G only, no autoneg */
			BNX2X_DEV_INFO("ext_phy_type 0x%x (8705)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
			/* 10G/1G fiber, no autoneg */
			BNX2X_DEV_INFO("ext_phy_type 0x%x (8706)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (8726)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_Autoneg |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (8727)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_Autoneg |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
			/* 10GBASE-T copper PHY */
			BNX2X_DEV_INFO("ext_phy_type 0x%x (SFX7101)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_TP |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
			/* multi-speed copper PHY: 10/100/1000/10000 */
			BNX2X_DEV_INFO("ext_phy_type 0x%x (BCM8481)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10baseT_Half |
					       SUPPORTED_10baseT_Full |
					       SUPPORTED_100baseT_Half |
					       SUPPORTED_100baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_10000baseT_Full |
					       SUPPORTED_TP |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
			/* note: falls through to phy_addr setup (break,
			 * not return) with an empty supported mask */
			BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
				  bp->link_params.ext_phy_config);
			break;

		default:
			BNX2X_ERR("NVRAM config error. "
				  "BAD XGXS ext_phy_config 0x%x\n",
				  bp->link_params.ext_phy_config);
			return;
		}

		bp->port.phy_addr = REG_RD(bp, NIG_REG_XGXS0_CTRL_PHY_ADDR +
					   port*0x18);
		BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);

		break;

	default:
		BNX2X_ERR("BAD switch_cfg link_config 0x%x\n",
			  bp->port.link_config);
		return;
	}
	bp->link_params.phy_addr = bp->port.phy_addr;

	/* mask what we support according to speed_cap_mask */
	if (!(bp->link_params.speed_cap_mask &
				PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF))
		bp->port.supported &= ~SUPPORTED_10baseT_Half;

	if (!(bp->link_params.speed_cap_mask &
				PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL))
		bp->port.supported &= ~SUPPORTED_10baseT_Full;

	if (!(bp->link_params.speed_cap_mask &
				PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))
		bp->port.supported &= ~SUPPORTED_100baseT_Half;

	if (!(bp->link_params.speed_cap_mask &
				PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL))
		bp->port.supported &= ~SUPPORTED_100baseT_Full;

	if (!(bp->link_params.speed_cap_mask &
					PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))
		bp->port.supported &= ~(SUPPORTED_1000baseT_Half |
					SUPPORTED_1000baseT_Full);

	if (!(bp->link_params.speed_cap_mask &
					PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
		bp->port.supported &= ~SUPPORTED_2500baseX_Full;

	if (!(bp->link_params.speed_cap_mask &
					PORT_HW_CFG_SPEED_CAPABILITY_D0_10G))
		bp->port.supported &= ~SUPPORTED_10000baseT_Full;

	BNX2X_DEV_INFO("supported 0x%x\n", bp->port.supported);
}
8286
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008287static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008288{
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07008289 bp->link_params.req_duplex = DUPLEX_FULL;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008290
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008291 switch (bp->port.link_config & PORT_FEATURE_LINK_SPEED_MASK) {
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008292 case PORT_FEATURE_LINK_SPEED_AUTO:
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008293 if (bp->port.supported & SUPPORTED_Autoneg) {
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07008294 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008295 bp->port.advertising = bp->port.supported;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008296 } else {
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07008297 u32 ext_phy_type =
8298 XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
8299
8300 if ((ext_phy_type ==
8301 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705) ||
8302 (ext_phy_type ==
8303 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706)) {
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008304 /* force 10G, no AN */
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07008305 bp->link_params.req_line_speed = SPEED_10000;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008306 bp->port.advertising =
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008307 (ADVERTISED_10000baseT_Full |
8308 ADVERTISED_FIBRE);
8309 break;
8310 }
8311 BNX2X_ERR("NVRAM config error. "
8312 "Invalid link_config 0x%x"
8313 " Autoneg not supported\n",
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008314 bp->port.link_config);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008315 return;
8316 }
8317 break;
8318
8319 case PORT_FEATURE_LINK_SPEED_10M_FULL:
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008320 if (bp->port.supported & SUPPORTED_10baseT_Full) {
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07008321 bp->link_params.req_line_speed = SPEED_10;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008322 bp->port.advertising = (ADVERTISED_10baseT_Full |
8323 ADVERTISED_TP);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008324 } else {
8325 BNX2X_ERR("NVRAM config error. "
8326 "Invalid link_config 0x%x"
8327 " speed_cap_mask 0x%x\n",
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008328 bp->port.link_config,
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07008329 bp->link_params.speed_cap_mask);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008330 return;
8331 }
8332 break;
8333
8334 case PORT_FEATURE_LINK_SPEED_10M_HALF:
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008335 if (bp->port.supported & SUPPORTED_10baseT_Half) {
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07008336 bp->link_params.req_line_speed = SPEED_10;
8337 bp->link_params.req_duplex = DUPLEX_HALF;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008338 bp->port.advertising = (ADVERTISED_10baseT_Half |
8339 ADVERTISED_TP);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008340 } else {
8341 BNX2X_ERR("NVRAM config error. "
8342 "Invalid link_config 0x%x"
8343 " speed_cap_mask 0x%x\n",
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008344 bp->port.link_config,
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07008345 bp->link_params.speed_cap_mask);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008346 return;
8347 }
8348 break;
8349
8350 case PORT_FEATURE_LINK_SPEED_100M_FULL:
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008351 if (bp->port.supported & SUPPORTED_100baseT_Full) {
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07008352 bp->link_params.req_line_speed = SPEED_100;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008353 bp->port.advertising = (ADVERTISED_100baseT_Full |
8354 ADVERTISED_TP);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008355 } else {
8356 BNX2X_ERR("NVRAM config error. "
8357 "Invalid link_config 0x%x"
8358 " speed_cap_mask 0x%x\n",
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008359 bp->port.link_config,
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07008360 bp->link_params.speed_cap_mask);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008361 return;
8362 }
8363 break;
8364
8365 case PORT_FEATURE_LINK_SPEED_100M_HALF:
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008366 if (bp->port.supported & SUPPORTED_100baseT_Half) {
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07008367 bp->link_params.req_line_speed = SPEED_100;
8368 bp->link_params.req_duplex = DUPLEX_HALF;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008369 bp->port.advertising = (ADVERTISED_100baseT_Half |
8370 ADVERTISED_TP);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008371 } else {
8372 BNX2X_ERR("NVRAM config error. "
8373 "Invalid link_config 0x%x"
8374 " speed_cap_mask 0x%x\n",
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008375 bp->port.link_config,
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07008376 bp->link_params.speed_cap_mask);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008377 return;
8378 }
8379 break;
8380
8381 case PORT_FEATURE_LINK_SPEED_1G:
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008382 if (bp->port.supported & SUPPORTED_1000baseT_Full) {
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07008383 bp->link_params.req_line_speed = SPEED_1000;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008384 bp->port.advertising = (ADVERTISED_1000baseT_Full |
8385 ADVERTISED_TP);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008386 } else {
8387 BNX2X_ERR("NVRAM config error. "
8388 "Invalid link_config 0x%x"
8389 " speed_cap_mask 0x%x\n",
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008390 bp->port.link_config,
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07008391 bp->link_params.speed_cap_mask);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008392 return;
8393 }
8394 break;
8395
8396 case PORT_FEATURE_LINK_SPEED_2_5G:
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008397 if (bp->port.supported & SUPPORTED_2500baseX_Full) {
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07008398 bp->link_params.req_line_speed = SPEED_2500;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008399 bp->port.advertising = (ADVERTISED_2500baseX_Full |
8400 ADVERTISED_TP);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008401 } else {
8402 BNX2X_ERR("NVRAM config error. "
8403 "Invalid link_config 0x%x"
8404 " speed_cap_mask 0x%x\n",
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008405 bp->port.link_config,
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07008406 bp->link_params.speed_cap_mask);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008407 return;
8408 }
8409 break;
8410
8411 case PORT_FEATURE_LINK_SPEED_10G_CX4:
8412 case PORT_FEATURE_LINK_SPEED_10G_KX4:
8413 case PORT_FEATURE_LINK_SPEED_10G_KR:
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008414 if (bp->port.supported & SUPPORTED_10000baseT_Full) {
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07008415 bp->link_params.req_line_speed = SPEED_10000;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008416 bp->port.advertising = (ADVERTISED_10000baseT_Full |
8417 ADVERTISED_FIBRE);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008418 } else {
8419 BNX2X_ERR("NVRAM config error. "
8420 "Invalid link_config 0x%x"
8421 " speed_cap_mask 0x%x\n",
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008422 bp->port.link_config,
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07008423 bp->link_params.speed_cap_mask);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008424 return;
8425 }
8426 break;
8427
8428 default:
8429 BNX2X_ERR("NVRAM config error. "
8430 "BAD link speed link_config 0x%x\n",
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008431 bp->port.link_config);
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07008432 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008433 bp->port.advertising = bp->port.supported;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008434 break;
8435 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008436
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008437 bp->link_params.req_flow_ctrl = (bp->port.link_config &
8438 PORT_FEATURE_FLOW_CONTROL_MASK);
David S. Millerc0700f92008-12-16 23:53:20 -08008439 if ((bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO) &&
Randy Dunlap4ab84d42008-08-07 20:33:19 -07008440 !(bp->port.supported & SUPPORTED_Autoneg))
David S. Millerc0700f92008-12-16 23:53:20 -08008441 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008442
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07008443 BNX2X_DEV_INFO("req_line_speed %d req_duplex %d req_flow_ctrl 0x%x"
Eliezer Tamirf1410642008-02-28 11:51:50 -08008444 " advertising 0x%x\n",
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07008445 bp->link_params.req_line_speed,
8446 bp->link_params.req_duplex,
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008447 bp->link_params.req_flow_ctrl, bp->port.advertising);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008448}
8449
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008450static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008451{
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008452 int port = BP_PORT(bp);
8453 u32 val, val2;
Eilon Greenstein589abe32009-02-12 08:36:55 +00008454 u32 config;
Eilon Greensteinc2c8b032009-02-12 08:37:14 +00008455 u16 i;
Eilon Greenstein01cd4522009-08-12 08:23:08 +00008456 u32 ext_phy_type;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008457
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07008458 bp->link_params.bp = bp;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008459 bp->link_params.port = port;
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07008460
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07008461 bp->link_params.lane_config =
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008462 SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config);
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07008463 bp->link_params.ext_phy_config =
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008464 SHMEM_RD(bp,
8465 dev_info.port_hw_config[port].external_phy_config);
Eilon Greenstein4d295db2009-07-21 05:47:47 +00008466 /* BCM8727_NOC => BCM8727 no over current */
8467 if (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config) ==
8468 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727_NOC) {
8469 bp->link_params.ext_phy_config &=
8470 ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
8471 bp->link_params.ext_phy_config |=
8472 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727;
8473 bp->link_params.feature_config_flags |=
8474 FEATURE_CONFIG_BCM8727_NOC;
8475 }
8476
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07008477 bp->link_params.speed_cap_mask =
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008478 SHMEM_RD(bp,
8479 dev_info.port_hw_config[port].speed_capability_mask);
8480
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008481 bp->port.link_config =
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008482 SHMEM_RD(bp, dev_info.port_feature_config[port].link_config);
8483
Eilon Greensteinc2c8b032009-02-12 08:37:14 +00008484 /* Get the 4 lanes xgxs config rx and tx */
8485 for (i = 0; i < 2; i++) {
8486 val = SHMEM_RD(bp,
8487 dev_info.port_hw_config[port].xgxs_config_rx[i<<1]);
8488 bp->link_params.xgxs_config_rx[i << 1] = ((val>>16) & 0xffff);
8489 bp->link_params.xgxs_config_rx[(i << 1) + 1] = (val & 0xffff);
8490
8491 val = SHMEM_RD(bp,
8492 dev_info.port_hw_config[port].xgxs_config_tx[i<<1]);
8493 bp->link_params.xgxs_config_tx[i << 1] = ((val>>16) & 0xffff);
8494 bp->link_params.xgxs_config_tx[(i << 1) + 1] = (val & 0xffff);
8495 }
8496
Eilon Greenstein3ce2c3f2009-02-12 08:37:52 +00008497 /* If the device is capable of WoL, set the default state according
8498 * to the HW
8499 */
Eilon Greenstein4d295db2009-07-21 05:47:47 +00008500 config = SHMEM_RD(bp, dev_info.port_feature_config[port].config);
Eilon Greenstein3ce2c3f2009-02-12 08:37:52 +00008501 bp->wol = (!(bp->flags & NO_WOL_FLAG) &&
8502 (config & PORT_FEATURE_WOL_ENABLED));
8503
Eilon Greensteinc2c8b032009-02-12 08:37:14 +00008504 BNX2X_DEV_INFO("lane_config 0x%08x ext_phy_config 0x%08x"
8505 " speed_cap_mask 0x%08x link_config 0x%08x\n",
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07008506 bp->link_params.lane_config,
8507 bp->link_params.ext_phy_config,
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008508 bp->link_params.speed_cap_mask, bp->port.link_config);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008509
Eilon Greenstein4d295db2009-07-21 05:47:47 +00008510 bp->link_params.switch_cfg |= (bp->port.link_config &
8511 PORT_FEATURE_CONNECTED_SWITCH_MASK);
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07008512 bnx2x_link_settings_supported(bp, bp->link_params.switch_cfg);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008513
8514 bnx2x_link_settings_requested(bp);
8515
Eilon Greenstein01cd4522009-08-12 08:23:08 +00008516 /*
8517 * If connected directly, work with the internal PHY, otherwise, work
8518 * with the external PHY
8519 */
8520 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
8521 if (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT)
8522 bp->mdio.prtad = bp->link_params.phy_addr;
8523
8524 else if ((ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE) &&
8525 (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN))
8526 bp->mdio.prtad =
8527 (bp->link_params.ext_phy_config &
8528 PORT_HW_CFG_XGXS_EXT_PHY_ADDR_MASK) >>
8529 PORT_HW_CFG_XGXS_EXT_PHY_ADDR_SHIFT;
8530
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008531 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
8532 val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
8533 bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
8534 bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
8535 bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
8536 bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
8537 bp->dev->dev_addr[4] = (u8)(val >> 8 & 0xff);
8538 bp->dev->dev_addr[5] = (u8)(val & 0xff);
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07008539 memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
8540 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008541}
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008542
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008543static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
8544{
8545 int func = BP_FUNC(bp);
8546 u32 val, val2;
8547 int rc = 0;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008548
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008549 bnx2x_get_common_hwinfo(bp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008550
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008551 bp->e1hov = 0;
8552 bp->e1hmf = 0;
8553 if (CHIP_IS_E1H(bp)) {
8554 bp->mf_config =
8555 SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008556
Eilon Greenstein2691d512009-08-12 08:22:08 +00008557 val = (SHMEM_RD(bp, mf_cfg.func_mf_config[FUNC_0].e1hov_tag) &
Eilon Greenstein3196a882008-08-13 15:58:49 -07008558 FUNC_MF_CFG_E1HOV_TAG_MASK);
Eilon Greenstein2691d512009-08-12 08:22:08 +00008559 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT)
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008560 bp->e1hmf = 1;
Eilon Greenstein2691d512009-08-12 08:22:08 +00008561 BNX2X_DEV_INFO("%s function mode\n",
8562 IS_E1HMF(bp) ? "multi" : "single");
8563
8564 if (IS_E1HMF(bp)) {
8565 val = (SHMEM_RD(bp, mf_cfg.func_mf_config[func].
8566 e1hov_tag) &
8567 FUNC_MF_CFG_E1HOV_TAG_MASK);
8568 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
8569 bp->e1hov = val;
8570 BNX2X_DEV_INFO("E1HOV for func %d is %d "
8571 "(0x%04x)\n",
8572 func, bp->e1hov, bp->e1hov);
8573 } else {
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008574 BNX2X_ERR("!!! No valid E1HOV for func %d,"
8575 " aborting\n", func);
8576 rc = -EPERM;
8577 }
Eilon Greenstein2691d512009-08-12 08:22:08 +00008578 } else {
8579 if (BP_E1HVN(bp)) {
8580 BNX2X_ERR("!!! VN %d in single function mode,"
8581 " aborting\n", BP_E1HVN(bp));
8582 rc = -EPERM;
8583 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008584 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008585 }
8586
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008587 if (!BP_NOMCP(bp)) {
8588 bnx2x_get_port_hwinfo(bp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008589
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008590 bp->fw_seq = (SHMEM_RD(bp, func_mb[func].drv_mb_header) &
8591 DRV_MSG_SEQ_NUMBER_MASK);
8592 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
8593 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008594
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008595 if (IS_E1HMF(bp)) {
8596 val2 = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_upper);
8597 val = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_lower);
8598 if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) &&
8599 (val != FUNC_MF_CFG_LOWERMAC_DEFAULT)) {
8600 bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
8601 bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
8602 bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
8603 bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
8604 bp->dev->dev_addr[4] = (u8)(val >> 8 & 0xff);
8605 bp->dev->dev_addr[5] = (u8)(val & 0xff);
8606 memcpy(bp->link_params.mac_addr, bp->dev->dev_addr,
8607 ETH_ALEN);
8608 memcpy(bp->dev->perm_addr, bp->dev->dev_addr,
8609 ETH_ALEN);
8610 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008611
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008612 return rc;
8613 }
8614
8615 if (BP_NOMCP(bp)) {
8616 /* only supposed to happen on emulation/FPGA */
Eilon Greenstein33471622008-08-13 15:59:08 -07008617 BNX2X_ERR("warning random MAC workaround active\n");
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008618 random_ether_addr(bp->dev->dev_addr);
8619 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
8620 }
8621
8622 return rc;
8623}
8624
/* One-time per-device software initialization: locks, deferred work,
 * module-parameter derived settings and the periodic driver timer.
 * Hardware is not touched except through bnx2x_get_hwinfo()/undi_unload().
 * Returns the status of bnx2x_get_hwinfo() (0 on success). */
static int __devinit bnx2x_init_bp(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);
	int timer_interval;
	int rc;

	/* Disable interrupt handling until HW is initialized */
	atomic_set(&bp->intr_sem, 1);
	smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */

	mutex_init(&bp->port.phy_mutex);

	INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
	INIT_WORK(&bp->reset_task, bnx2x_reset_task);

	rc = bnx2x_get_hwinfo(bp);

	/* need to reset chip if undi was active */
	if (!BP_NOMCP(bp))
		bnx2x_undi_unload(bp);

	if (CHIP_REV_IS_FPGA(bp))
		printk(KERN_ERR PFX "FPGA detected\n");

	/* without MCP there is no load-order arbitration between
	 * functions, so warn once (on function 0) */
	if (BP_NOMCP(bp) && (func == 0))
		printk(KERN_ERR PFX
		       "MCP disabled, must load devices in order!\n");

	/* Set multi queue mode */
	if ((multi_mode != ETH_RSS_MODE_DISABLED) &&
	    ((int_mode == INT_MODE_INTx) || (int_mode == INT_MODE_MSI))) {
		/* RSS/multi-queue requires MSI-X; fall back to single queue */
		printk(KERN_ERR PFX
		      "Multi disabled since int_mode requested is not MSI-X\n");
		multi_mode = ETH_RSS_MODE_DISABLED;
	}
	bp->multi_mode = multi_mode;


	/* Set TPA flags */
	if (disable_tpa) {
		bp->flags &= ~TPA_ENABLE_FLAG;
		bp->dev->features &= ~NETIF_F_LRO;
	} else {
		bp->flags |= TPA_ENABLE_FLAG;
		bp->dev->features |= NETIF_F_LRO;
	}

	bp->mrrs = mrrs;

	bp->tx_ring_size = MAX_TX_AVAIL;
	bp->rx_ring_size = MAX_RX_AVAIL;

	bp->rx_csum = 1;

	/* default interrupt coalescing intervals (usec) */
	bp->tx_ticks = 50;
	bp->rx_ticks = 25;

	/* slower timer on emulation/FPGA; 'poll' module param overrides */
	timer_interval = (CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ);
	bp->current_interval = (poll ? poll : timer_interval);

	init_timer(&bp->timer);
	bp->timer.expires = jiffies + bp->current_interval;
	bp->timer.data = (unsigned long) bp;
	bp->timer.function = bnx2x_timer;

	return rc;
}
8692
8693/*
8694 * ethtool service functions
8695 */
8696
8697/* All ethtool functions called with rtnl_lock */
8698
/* ethtool get_settings: report speed/duplex/port/autoneg.  When the
 * carrier is up the negotiated values are reported, otherwise the
 * requested ones.  Called under rtnl_lock.  Always returns 0. */
static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct bnx2x *bp = netdev_priv(dev);

	cmd->supported = bp->port.supported;
	cmd->advertising = bp->port.advertising;

	if (netif_carrier_ok(dev)) {
		/* link up - report actual negotiated values */
		cmd->speed = bp->link_vars.line_speed;
		cmd->duplex = bp->link_vars.duplex;
	} else {
		/* link down - report the configured request */
		cmd->speed = bp->link_params.req_line_speed;
		cmd->duplex = bp->link_params.req_duplex;
	}
	if (IS_E1HMF(bp)) {
		/* in multi-function mode this function may be capped
		 * below the physical line speed */
		u16 vn_max_rate;

		vn_max_rate = ((bp->mf_config & FUNC_MF_CFG_MAX_BW_MASK) >>
			       FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
		if (vn_max_rate < cmd->speed)
			cmd->speed = vn_max_rate;
	}

	if (bp->link_params.switch_cfg == SWITCH_CFG_10G) {
		u32 ext_phy_type =
			XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);

		/* map the external PHY type to the ethtool port type */
		switch (ext_phy_type) {
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
			cmd->port = PORT_FIBRE;
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
			cmd->port = PORT_TP;
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
			BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
				  bp->link_params.ext_phy_config);
			break;

		default:
			DP(NETIF_MSG_LINK, "BAD XGXS ext_phy_config 0x%x\n",
			   bp->link_params.ext_phy_config);
			break;
		}
	} else
		cmd->port = PORT_TP;

	cmd->phy_address = bp->mdio.prtad;
	cmd->transceiver = XCVR_INTERNAL;

	if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
		cmd->autoneg = AUTONEG_ENABLE;
	else
		cmd->autoneg = AUTONEG_DISABLE;

	/* interrupt coalescing by packet count is not supported */
	cmd->maxtxpkt = 0;
	cmd->maxrxpkt = 0;

	DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
	   DP_LEVEL "  supported 0x%x  advertising 0x%x  speed %d\n"
	   DP_LEVEL "  duplex %d  port %d  phy_address %d  transceiver %d\n"
	   DP_LEVEL "  autoneg %d  maxtxpkt %d  maxrxpkt %d\n",
	   cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
	   cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
	   cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);

	return 0;
}
8776
/* ethtool set_settings: validate and apply the requested speed, duplex
 * and autoneg configuration, then restart the link if the interface is
 * running.  In E1H multi-function mode the physical link is shared
 * between functions, so the request is accepted but ignored.
 * Returns 0 on success or -EINVAL for unsupported combinations. */
static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct bnx2x *bp = netdev_priv(dev);
	u32 advertising;

	if (IS_E1HMF(bp))
		return 0;

	DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
	   DP_LEVEL "  supported 0x%x  advertising 0x%x  speed %d\n"
	   DP_LEVEL "  duplex %d  port %d  phy_address %d  transceiver %d\n"
	   DP_LEVEL "  autoneg %d  maxtxpkt %d  maxrxpkt %d\n",
	   cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
	   cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
	   cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);

	if (cmd->autoneg == AUTONEG_ENABLE) {
		if (!(bp->port.supported & SUPPORTED_Autoneg)) {
			DP(NETIF_MSG_LINK, "Autoneg not supported\n");
			return -EINVAL;
		}

		/* advertise the requested speed and duplex if supported */
		cmd->advertising &= bp->port.supported;

		bp->link_params.req_line_speed = SPEED_AUTO_NEG;
		bp->link_params.req_duplex = DUPLEX_FULL;
		bp->port.advertising |= (ADVERTISED_Autoneg |
					 cmd->advertising);

	} else { /* forced speed */
		/* advertise the requested speed and duplex if supported */
		switch (cmd->speed) {
		case SPEED_10:
			if (cmd->duplex == DUPLEX_FULL) {
				if (!(bp->port.supported &
				      SUPPORTED_10baseT_Full)) {
					DP(NETIF_MSG_LINK,
					   "10M full not supported\n");
					return -EINVAL;
				}

				advertising = (ADVERTISED_10baseT_Full |
					       ADVERTISED_TP);
			} else {
				if (!(bp->port.supported &
				      SUPPORTED_10baseT_Half)) {
					DP(NETIF_MSG_LINK,
					   "10M half not supported\n");
					return -EINVAL;
				}

				advertising = (ADVERTISED_10baseT_Half |
					       ADVERTISED_TP);
			}
			break;

		case SPEED_100:
			if (cmd->duplex == DUPLEX_FULL) {
				if (!(bp->port.supported &
				      SUPPORTED_100baseT_Full)) {
					DP(NETIF_MSG_LINK,
					   "100M full not supported\n");
					return -EINVAL;
				}

				advertising = (ADVERTISED_100baseT_Full |
					       ADVERTISED_TP);
			} else {
				if (!(bp->port.supported &
				      SUPPORTED_100baseT_Half)) {
					DP(NETIF_MSG_LINK,
					   "100M half not supported\n");
					return -EINVAL;
				}

				advertising = (ADVERTISED_100baseT_Half |
					       ADVERTISED_TP);
			}
			break;

		case SPEED_1000:
			/* half duplex is not supported at 1G and above */
			if (cmd->duplex != DUPLEX_FULL) {
				DP(NETIF_MSG_LINK, "1G half not supported\n");
				return -EINVAL;
			}

			if (!(bp->port.supported & SUPPORTED_1000baseT_Full)) {
				DP(NETIF_MSG_LINK, "1G full not supported\n");
				return -EINVAL;
			}

			advertising = (ADVERTISED_1000baseT_Full |
				       ADVERTISED_TP);
			break;

		case SPEED_2500:
			if (cmd->duplex != DUPLEX_FULL) {
				DP(NETIF_MSG_LINK,
				   "2.5G half not supported\n");
				return -EINVAL;
			}

			if (!(bp->port.supported & SUPPORTED_2500baseX_Full)) {
				DP(NETIF_MSG_LINK,
				   "2.5G full not supported\n");
				return -EINVAL;
			}

			advertising = (ADVERTISED_2500baseX_Full |
				       ADVERTISED_TP);
			break;

		case SPEED_10000:
			if (cmd->duplex != DUPLEX_FULL) {
				DP(NETIF_MSG_LINK, "10G half not supported\n");
				return -EINVAL;
			}

			if (!(bp->port.supported & SUPPORTED_10000baseT_Full)) {
				DP(NETIF_MSG_LINK, "10G full not supported\n");
				return -EINVAL;
			}

			advertising = (ADVERTISED_10000baseT_Full |
				       ADVERTISED_FIBRE);
			break;

		default:
			DP(NETIF_MSG_LINK, "Unsupported speed\n");
			return -EINVAL;
		}

		bp->link_params.req_line_speed = cmd->speed;
		bp->link_params.req_duplex = cmd->duplex;
		bp->port.advertising = advertising;
	}

	DP(NETIF_MSG_LINK, "req_line_speed %d\n"
	   DP_LEVEL "  req_duplex %d  advertising 0x%x\n",
	   bp->link_params.req_line_speed, bp->link_params.req_duplex,
	   bp->port.advertising);

	if (netif_running(dev)) {
		/* statistics must be stopped before the link is restarted */
		bnx2x_stats_handle(bp, STATS_EVENT_STOP);
		bnx2x_link_set(bp);
	}

	return 0;
}
8927
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07008928#define PHY_FW_VER_LEN 10
8929
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008930static void bnx2x_get_drvinfo(struct net_device *dev,
8931 struct ethtool_drvinfo *info)
8932{
8933 struct bnx2x *bp = netdev_priv(dev);
Eilon Greensteinf0e53a82008-08-13 15:58:30 -07008934 u8 phy_fw_ver[PHY_FW_VER_LEN];
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008935
8936 strcpy(info->driver, DRV_MODULE_NAME);
8937 strcpy(info->version, DRV_MODULE_VERSION);
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07008938
8939 phy_fw_ver[0] = '\0';
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008940 if (bp->port.pmf) {
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07008941 bnx2x_acquire_phy_lock(bp);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008942 bnx2x_get_ext_phy_fw_version(&bp->link_params,
8943 (bp->state != BNX2X_STATE_CLOSED),
8944 phy_fw_ver, PHY_FW_VER_LEN);
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07008945 bnx2x_release_phy_lock(bp);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008946 }
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07008947
Eilon Greensteinf0e53a82008-08-13 15:58:30 -07008948 snprintf(info->fw_version, 32, "BC:%d.%d.%d%s%s",
8949 (bp->common.bc_ver & 0xff0000) >> 16,
8950 (bp->common.bc_ver & 0xff00) >> 8,
8951 (bp->common.bc_ver & 0xff),
8952 ((phy_fw_ver[0] != '\0') ? " PHY:" : ""), phy_fw_ver);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008953 strcpy(info->bus_info, pci_name(bp->pdev));
8954 info->n_stats = BNX2X_NUM_STATS;
8955 info->testinfo_len = BNX2X_NUM_TESTS;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008956 info->eedump_len = bp->common.flash_size;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008957 info->regdump_len = 0;
8958}
8959
Eilon Greenstein0a64ea52009-03-02 08:01:12 +00008960#define IS_E1_ONLINE(info) (((info) & RI_E1_ONLINE) == RI_E1_ONLINE)
8961#define IS_E1H_ONLINE(info) (((info) & RI_E1H_ONLINE) == RI_E1H_ONLINE)
8962
8963static int bnx2x_get_regs_len(struct net_device *dev)
8964{
8965 static u32 regdump_len;
8966 struct bnx2x *bp = netdev_priv(dev);
8967 int i;
8968
8969 if (regdump_len)
8970 return regdump_len;
8971
8972 if (CHIP_IS_E1(bp)) {
8973 for (i = 0; i < REGS_COUNT; i++)
8974 if (IS_E1_ONLINE(reg_addrs[i].info))
8975 regdump_len += reg_addrs[i].size;
8976
8977 for (i = 0; i < WREGS_COUNT_E1; i++)
8978 if (IS_E1_ONLINE(wreg_addrs_e1[i].info))
8979 regdump_len += wreg_addrs_e1[i].size *
8980 (1 + wreg_addrs_e1[i].read_regs_count);
8981
8982 } else { /* E1H */
8983 for (i = 0; i < REGS_COUNT; i++)
8984 if (IS_E1H_ONLINE(reg_addrs[i].info))
8985 regdump_len += reg_addrs[i].size;
8986
8987 for (i = 0; i < WREGS_COUNT_E1H; i++)
8988 if (IS_E1H_ONLINE(wreg_addrs_e1h[i].info))
8989 regdump_len += wreg_addrs_e1h[i].size *
8990 (1 + wreg_addrs_e1h[i].read_regs_count);
8991 }
8992 regdump_len *= 4;
8993 regdump_len += sizeof(struct dump_hdr);
8994
8995 return regdump_len;
8996}
8997
8998static void bnx2x_get_regs(struct net_device *dev,
8999 struct ethtool_regs *regs, void *_p)
9000{
9001 u32 *p = _p, i, j;
9002 struct bnx2x *bp = netdev_priv(dev);
9003 struct dump_hdr dump_hdr = {0};
9004
9005 regs->version = 0;
9006 memset(p, 0, regs->len);
9007
9008 if (!netif_running(bp->dev))
9009 return;
9010
9011 dump_hdr.hdr_size = (sizeof(struct dump_hdr) / 4) - 1;
9012 dump_hdr.dump_sign = dump_sign_all;
9013 dump_hdr.xstorm_waitp = REG_RD(bp, XSTORM_WAITP_ADDR);
9014 dump_hdr.tstorm_waitp = REG_RD(bp, TSTORM_WAITP_ADDR);
9015 dump_hdr.ustorm_waitp = REG_RD(bp, USTORM_WAITP_ADDR);
9016 dump_hdr.cstorm_waitp = REG_RD(bp, CSTORM_WAITP_ADDR);
9017 dump_hdr.info = CHIP_IS_E1(bp) ? RI_E1_ONLINE : RI_E1H_ONLINE;
9018
9019 memcpy(p, &dump_hdr, sizeof(struct dump_hdr));
9020 p += dump_hdr.hdr_size + 1;
9021
9022 if (CHIP_IS_E1(bp)) {
9023 for (i = 0; i < REGS_COUNT; i++)
9024 if (IS_E1_ONLINE(reg_addrs[i].info))
9025 for (j = 0; j < reg_addrs[i].size; j++)
9026 *p++ = REG_RD(bp,
9027 reg_addrs[i].addr + j*4);
9028
9029 } else { /* E1H */
9030 for (i = 0; i < REGS_COUNT; i++)
9031 if (IS_E1H_ONLINE(reg_addrs[i].info))
9032 for (j = 0; j < reg_addrs[i].size; j++)
9033 *p++ = REG_RD(bp,
9034 reg_addrs[i].addr + j*4);
9035 }
9036}
9037
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009038static void bnx2x_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
9039{
9040 struct bnx2x *bp = netdev_priv(dev);
9041
9042 if (bp->flags & NO_WOL_FLAG) {
9043 wol->supported = 0;
9044 wol->wolopts = 0;
9045 } else {
9046 wol->supported = WAKE_MAGIC;
9047 if (bp->wol)
9048 wol->wolopts = WAKE_MAGIC;
9049 else
9050 wol->wolopts = 0;
9051 }
9052 memset(&wol->sopass, 0, sizeof(wol->sopass));
9053}
9054
9055static int bnx2x_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
9056{
9057 struct bnx2x *bp = netdev_priv(dev);
9058
9059 if (wol->wolopts & ~WAKE_MAGIC)
9060 return -EINVAL;
9061
9062 if (wol->wolopts & WAKE_MAGIC) {
9063 if (bp->flags & NO_WOL_FLAG)
9064 return -EINVAL;
9065
9066 bp->wol = 1;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07009067 } else
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009068 bp->wol = 0;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07009069
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009070 return 0;
9071}
9072
9073static u32 bnx2x_get_msglevel(struct net_device *dev)
9074{
9075 struct bnx2x *bp = netdev_priv(dev);
9076
9077 return bp->msglevel;
9078}
9079
9080static void bnx2x_set_msglevel(struct net_device *dev, u32 level)
9081{
9082 struct bnx2x *bp = netdev_priv(dev);
9083
9084 if (capable(CAP_NET_ADMIN))
9085 bp->msglevel = level;
9086}
9087
9088static int bnx2x_nway_reset(struct net_device *dev)
9089{
9090 struct bnx2x *bp = netdev_priv(dev);
9091
Eilon Greenstein34f80b02008-06-23 20:33:01 -07009092 if (!bp->port.pmf)
9093 return 0;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009094
Eilon Greenstein34f80b02008-06-23 20:33:01 -07009095 if (netif_running(dev)) {
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07009096 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07009097 bnx2x_link_set(bp);
9098 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009099
9100 return 0;
9101}
9102
Naohiro Ooiwa01e53292009-06-30 12:44:19 -07009103static u32
9104bnx2x_get_link(struct net_device *dev)
9105{
9106 struct bnx2x *bp = netdev_priv(dev);
9107
9108 return bp->link_vars.link_up;
9109}
9110
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009111static int bnx2x_get_eeprom_len(struct net_device *dev)
9112{
9113 struct bnx2x *bp = netdev_priv(dev);
9114
Eilon Greenstein34f80b02008-06-23 20:33:01 -07009115 return bp->common.flash_size;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009116}
9117
/* Request the per-port NVRAM hardware arbitration lock and poll until it
 * is granted.  Must be paired with bnx2x_release_nvram_lock().
 * Returns 0 on success or -EBUSY if the grant never appears. */
static int bnx2x_acquire_nvram_lock(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int count, i;
	u32 val = 0;

	/* adjust timeout for emulation/FPGA */
	count = NVRAM_TIMEOUT_COUNT;
	if (CHIP_REV_IS_SLOW(bp))
		count *= 100;

	/* request access to nvram interface */
	REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
	       (MCPR_NVM_SW_ARB_ARB_REQ_SET1 << port));

	/* poll for the arbiter grant bit for this port */
	for (i = 0; i < count*10; i++) {
		val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
		if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))
			break;

		udelay(5);
	}

	if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) {
		DP(BNX2X_MSG_NVM, "cannot get access to nvram interface\n");
		return -EBUSY;
	}

	return 0;
}
9148
/* Release the per-port NVRAM hardware arbitration lock taken by
 * bnx2x_acquire_nvram_lock() and poll until the grant bit clears.
 * Returns 0 on success or -EBUSY if the bit never drops. */
static int bnx2x_release_nvram_lock(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int count, i;
	u32 val = 0;

	/* adjust timeout for emulation/FPGA */
	count = NVRAM_TIMEOUT_COUNT;
	if (CHIP_REV_IS_SLOW(bp))
		count *= 100;

	/* relinquish nvram interface */
	REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
	       (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << port));

	/* poll until the arbiter grant bit for this port is cleared */
	for (i = 0; i < count*10; i++) {
		val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
		if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)))
			break;

		udelay(5);
	}

	if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) {
		DP(BNX2X_MSG_NVM, "cannot free access to nvram interface\n");
		return -EBUSY;
	}

	return 0;
}
9179
/* Set the NVRAM access-enable bits (read-modify-write).  Both the
 * enable and write-enable bits are set regardless of the intended
 * direction so read and write paths share this helper. */
static void bnx2x_enable_nvram_access(struct bnx2x *bp)
{
	u32 val;

	val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);

	/* enable both bits, even on read */
	REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
	       (val | MCPR_NVM_ACCESS_ENABLE_EN |
		MCPR_NVM_ACCESS_ENABLE_WR_EN));
}
9191
/* Clear the NVRAM access-enable bits (read-modify-write), undoing
 * bnx2x_enable_nvram_access() once the transfer is finished. */
static void bnx2x_disable_nvram_access(struct bnx2x *bp)
{
	u32 val;

	val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);

	/* disable both bits, even after read */
	REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
	       (val & ~(MCPR_NVM_ACCESS_ENABLE_EN |
			MCPR_NVM_ACCESS_ENABLE_WR_EN)));
}
9203
/* Issue a single NVRAM dword read through the MCP register interface
 * and poll for completion.  'cmd_flags' may carry MCPR_NVM_COMMAND_FIRST/
 * LAST to mark sequence boundaries.  On success *ret_val holds the dword
 * in big-endian order - so a byte-wise copy to the caller's buffer
 * reproduces the NVRAM byte order - and 0 is returned; -EBUSY on
 * timeout.  Caller must hold the NVRAM lock with access enabled. */
static int bnx2x_nvram_read_dword(struct bnx2x *bp, u32 offset, __be32 *ret_val,
				  u32 cmd_flags)
{
	int count, i, rc;
	u32 val;

	/* build the command word */
	cmd_flags |= MCPR_NVM_COMMAND_DOIT;

	/* need to clear DONE bit separately */
	REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);

	/* address of the NVRAM to read from */
	REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
	       (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));

	/* issue a read command */
	REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);

	/* adjust timeout for emulation/FPGA */
	count = NVRAM_TIMEOUT_COUNT;
	if (CHIP_REV_IS_SLOW(bp))
		count *= 100;

	/* wait for completion */
	*ret_val = 0;
	rc = -EBUSY;
	for (i = 0; i < count; i++) {
		udelay(5);
		val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);

		if (val & MCPR_NVM_COMMAND_DONE) {
			val = REG_RD(bp, MCP_REG_MCPR_NVM_READ);
			/* we read nvram data in cpu order
			 * but ethtool sees it as an array of bytes
			 * converting to big-endian will do the work */
			*ret_val = cpu_to_be32(val);
			rc = 0;
			break;
		}
	}

	return rc;
}
9248
/* Read 'buf_size' bytes of NVRAM starting at 'offset' into 'ret_buf'.
 * Offset and size must be dword-aligned and lie within the flash.
 * Acquires/releases the NVRAM hardware lock around the transfer and
 * brackets the dword sequence with FIRST/LAST command flags.
 * Returns 0 on success or a negative errno. */
static int bnx2x_nvram_read(struct bnx2x *bp, u32 offset, u8 *ret_buf,
			    int buf_size)
{
	int rc;
	u32 cmd_flags;
	__be32 val;

	if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
		DP(BNX2X_MSG_NVM,
		   "Invalid parameter: offset 0x%x  buf_size 0x%x\n",
		   offset, buf_size);
		return -EINVAL;
	}

	if (offset + buf_size > bp->common.flash_size) {
		DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
				  " buf_size (0x%x) > flash_size (0x%x)\n",
		   offset, buf_size, bp->common.flash_size);
		return -EINVAL;
	}

	/* request access to nvram interface */
	rc = bnx2x_acquire_nvram_lock(bp);
	if (rc)
		return rc;

	/* enable access to nvram interface */
	bnx2x_enable_nvram_access(bp);

	/* read the first word(s) */
	cmd_flags = MCPR_NVM_COMMAND_FIRST;
	while ((buf_size > sizeof(u32)) && (rc == 0)) {
		rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
		/* val is big-endian, so a raw byte copy preserves the
		 * NVRAM byte order in ret_buf */
		memcpy(ret_buf, &val, 4);

		/* advance to the next dword */
		offset += sizeof(u32);
		ret_buf += sizeof(u32);
		buf_size -= sizeof(u32);
		cmd_flags = 0;
	}

	if (rc == 0) {
		/* final dword carries the LAST flag to close the sequence */
		cmd_flags |= MCPR_NVM_COMMAND_LAST;
		rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
		memcpy(ret_buf, &val, 4);
	}

	/* disable access to nvram interface */
	bnx2x_disable_nvram_access(bp);
	bnx2x_release_nvram_lock(bp);

	return rc;
}
9303
9304static int bnx2x_get_eeprom(struct net_device *dev,
9305 struct ethtool_eeprom *eeprom, u8 *eebuf)
9306{
9307 struct bnx2x *bp = netdev_priv(dev);
9308 int rc;
9309
Eilon Greenstein2add3ac2009-01-14 06:44:07 +00009310 if (!netif_running(dev))
9311 return -EAGAIN;
9312
Eilon Greenstein34f80b02008-06-23 20:33:01 -07009313 DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009314 DP_LEVEL " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n",
9315 eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
9316 eeprom->len, eeprom->len);
9317
9318 /* parameters already validated in ethtool_get_eeprom */
9319
9320 rc = bnx2x_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
9321
9322 return rc;
9323}
9324
/* Program a single NVRAM dword ('val', in the controller's raw byte
 * order) through the MCP register interface and poll for completion.
 * Returns 0 on success or -EBUSY on timeout.  Caller must hold the
 * NVRAM lock with write access enabled. */
static int bnx2x_nvram_write_dword(struct bnx2x *bp, u32 offset, u32 val,
				   u32 cmd_flags)
{
	int count, i, rc;

	/* build the command word */
	cmd_flags |= MCPR_NVM_COMMAND_DOIT | MCPR_NVM_COMMAND_WR;

	/* need to clear DONE bit separately */
	REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);

	/* write the data */
	REG_WR(bp, MCP_REG_MCPR_NVM_WRITE, val);

	/* address of the NVRAM to write to */
	REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
	       (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));

	/* issue the write command */
	REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);

	/* adjust timeout for emulation/FPGA */
	count = NVRAM_TIMEOUT_COUNT;
	if (CHIP_REV_IS_SLOW(bp))
		count *= 100;

	/* wait for completion */
	rc = -EBUSY;
	for (i = 0; i < count; i++) {
		udelay(5);
		/* 'val' is reused here as a scratch status read */
		val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
		if (val & MCPR_NVM_COMMAND_DONE) {
			rc = 0;
			break;
		}
	}

	return rc;
}
9364
Eliezer Tamirf1410642008-02-28 11:51:50 -08009365#define BYTE_OFFSET(offset) (8 * (offset & 0x03))
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009366
9367static int bnx2x_nvram_write1(struct bnx2x *bp, u32 offset, u8 *data_buf,
9368 int buf_size)
9369{
9370 int rc;
9371 u32 cmd_flags;
9372 u32 align_offset;
Eilon Greenstein4781bfa2009-02-12 08:38:17 +00009373 __be32 val;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009374
Eilon Greenstein34f80b02008-06-23 20:33:01 -07009375 if (offset + buf_size > bp->common.flash_size) {
9376 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009377 " buf_size (0x%x) > flash_size (0x%x)\n",
Eilon Greenstein34f80b02008-06-23 20:33:01 -07009378 offset, buf_size, bp->common.flash_size);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009379 return -EINVAL;
9380 }
9381
9382 /* request access to nvram interface */
9383 rc = bnx2x_acquire_nvram_lock(bp);
9384 if (rc)
9385 return rc;
9386
9387 /* enable access to nvram interface */
9388 bnx2x_enable_nvram_access(bp);
9389
9390 cmd_flags = (MCPR_NVM_COMMAND_FIRST | MCPR_NVM_COMMAND_LAST);
9391 align_offset = (offset & ~0x03);
9392 rc = bnx2x_nvram_read_dword(bp, align_offset, &val, cmd_flags);
9393
9394 if (rc == 0) {
9395 val &= ~(0xff << BYTE_OFFSET(offset));
9396 val |= (*data_buf << BYTE_OFFSET(offset));
9397
9398 /* nvram data is returned as an array of bytes
9399 * convert it back to cpu order */
9400 val = be32_to_cpu(val);
9401
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009402 rc = bnx2x_nvram_write_dword(bp, align_offset, val,
9403 cmd_flags);
9404 }
9405
9406 /* disable access to nvram interface */
9407 bnx2x_disable_nvram_access(bp);
9408 bnx2x_release_nvram_lock(bp);
9409
9410 return rc;
9411}
9412
/* Write 'buf_size' bytes from 'data_buf' to NVRAM at 'offset'.
 * A single-byte write (the ethtool case) is delegated to
 * bnx2x_nvram_write1(); otherwise offset and size must be dword-aligned
 * and within the flash.  FIRST/LAST command flags are inserted at NVRAM
 * page boundaries as the flash controller requires.
 * Returns 0 on success or a negative errno. */
static int bnx2x_nvram_write(struct bnx2x *bp, u32 offset, u8 *data_buf,
			     int buf_size)
{
	int rc;
	u32 cmd_flags;
	u32 val;
	u32 written_so_far;

	if (buf_size == 1)	/* ethtool */
		return bnx2x_nvram_write1(bp, offset, data_buf, buf_size);

	if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
		DP(BNX2X_MSG_NVM,
		   "Invalid parameter: offset 0x%x  buf_size 0x%x\n",
		   offset, buf_size);
		return -EINVAL;
	}

	if (offset + buf_size > bp->common.flash_size) {
		DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
				  " buf_size (0x%x) > flash_size (0x%x)\n",
		   offset, buf_size, bp->common.flash_size);
		return -EINVAL;
	}

	/* request access to nvram interface */
	rc = bnx2x_acquire_nvram_lock(bp);
	if (rc)
		return rc;

	/* enable access to nvram interface */
	bnx2x_enable_nvram_access(bp);

	written_so_far = 0;
	cmd_flags = MCPR_NVM_COMMAND_FIRST;
	while ((written_so_far < buf_size) && (rc == 0)) {
		/* LAST on the final dword or just before a page boundary;
		 * FIRST again right after crossing into a new page */
		if (written_so_far == (buf_size - sizeof(u32)))
			cmd_flags |= MCPR_NVM_COMMAND_LAST;
		else if (((offset + 4) % NVRAM_PAGE_SIZE) == 0)
			cmd_flags |= MCPR_NVM_COMMAND_LAST;
		else if ((offset % NVRAM_PAGE_SIZE) == 0)
			cmd_flags |= MCPR_NVM_COMMAND_FIRST;

		/* caller's buffer is already in NVRAM (big-endian) byte
		 * order, so copy it through raw */
		memcpy(&val, data_buf, 4);

		rc = bnx2x_nvram_write_dword(bp, offset, val, cmd_flags);

		/* advance to the next dword */
		offset += sizeof(u32);
		data_buf += sizeof(u32);
		written_so_far += sizeof(u32);
		cmd_flags = 0;
	}

	/* disable access to nvram interface */
	bnx2x_disable_nvram_access(bp);
	bnx2x_release_nvram_lock(bp);

	return rc;
}
9473
/* ethtool set_eeprom handler.  Besides plain NVRAM writes, three magic
 * values drive an external-PHY firmware upgrade sequence:
 *   0x50485950 'PHYP' - take the link down and prepare the PHY,
 *   0x50485952 'PHYR' - re-initialize the link after the upgrade,
 *   0x53985943        - upgrade completed, reset the SFX7101 PHY.
 * NOTE(review): 0x53985943 does not spell 'PHYC' (that would be
 * 0x50485943) and lies outside the 0x504859xx PMF-only range checked
 * below - looks like a long-standing typo; confirm before changing.
 * Returns 0 or a negative errno; link calls accumulate into rc.
 */
static int bnx2x_set_eeprom(struct net_device *dev,
			    struct ethtool_eeprom *eeprom, u8 *eebuf)
{
	struct bnx2x *bp = netdev_priv(dev);
	int port = BP_PORT(bp);
	int rc = 0;

	if (!netif_running(dev))
		return -EAGAIN;

	DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
	   DP_LEVEL " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n",
	   eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
	   eeprom->len, eeprom->len);

	/* parameters already validated in ethtool_set_eeprom */

	/* PHY eeprom can be accessed only by the PMF */
	if ((eeprom->magic >= 0x50485900) && (eeprom->magic <= 0x504859FF) &&
	    !bp->port.pmf)
		return -EINVAL;

	if (eeprom->magic == 0x50485950) {
		/* 'PHYP' (0x50485950): prepare phy for FW upgrade */
		bnx2x_stats_handle(bp, STATS_EVENT_STOP);

		bnx2x_acquire_phy_lock(bp);
		rc |= bnx2x_link_reset(&bp->link_params,
				       &bp->link_vars, 0);
		if (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config) ==
					PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101)
			bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0,
				       MISC_REGISTERS_GPIO_HIGH, port);
		bnx2x_release_phy_lock(bp);
		bnx2x_link_report(bp);

	} else if (eeprom->magic == 0x50485952) {
		/* 'PHYR' (0x50485952): re-init link after FW upgrade */
		if ((bp->state == BNX2X_STATE_OPEN) ||
		    (bp->state == BNX2X_STATE_DISABLED)) {
			bnx2x_acquire_phy_lock(bp);
			rc |= bnx2x_link_reset(&bp->link_params,
					       &bp->link_vars, 1);

			rc |= bnx2x_phy_init(&bp->link_params,
					     &bp->link_vars);
			bnx2x_release_phy_lock(bp);
			bnx2x_calc_fc_adv(bp);
		}
	} else if (eeprom->magic == 0x53985943) {
		/* 'PHYC' (0x53985943): PHY FW upgrade completed */
		if (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config) ==
				       PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101) {
			u8 ext_phy_addr =
				(bp->link_params.ext_phy_config &
				 PORT_HW_CFG_XGXS_EXT_PHY_ADDR_MASK) >>
					PORT_HW_CFG_XGXS_EXT_PHY_ADDR_SHIFT;

			/* DSP Remove Download Mode */
			bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0,
				       MISC_REGISTERS_GPIO_LOW, port);

			bnx2x_acquire_phy_lock(bp);

			bnx2x_sfx7101_sp_sw_reset(bp, port, ext_phy_addr);

			/* wait 0.5 sec to allow it to run */
			msleep(500);
			bnx2x_ext_phy_hw_reset(bp, port);
			msleep(500);
			bnx2x_release_phy_lock(bp);
		}
	} else
		rc = bnx2x_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);

	return rc;
}
9551
9552static int bnx2x_get_coalesce(struct net_device *dev,
9553 struct ethtool_coalesce *coal)
9554{
9555 struct bnx2x *bp = netdev_priv(dev);
9556
9557 memset(coal, 0, sizeof(struct ethtool_coalesce));
9558
9559 coal->rx_coalesce_usecs = bp->rx_ticks;
9560 coal->tx_coalesce_usecs = bp->tx_ticks;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009561
9562 return 0;
9563}
9564
Eilon Greensteinca003922009-08-12 22:53:28 -07009565#define BNX2X_MAX_COALES_TOUT (0xf0*12) /* Maximal coalescing timeout in us */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009566static int bnx2x_set_coalesce(struct net_device *dev,
9567 struct ethtool_coalesce *coal)
9568{
9569 struct bnx2x *bp = netdev_priv(dev);
9570
9571 bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
Eilon Greensteinca003922009-08-12 22:53:28 -07009572 if (bp->rx_ticks > BNX2X_MAX_COALES_TOUT)
9573 bp->rx_ticks = BNX2X_MAX_COALES_TOUT;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009574
9575 bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
Eilon Greensteinca003922009-08-12 22:53:28 -07009576 if (bp->tx_ticks > BNX2X_MAX_COALES_TOUT)
9577 bp->tx_ticks = BNX2X_MAX_COALES_TOUT;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009578
Eilon Greenstein34f80b02008-06-23 20:33:01 -07009579 if (netif_running(dev))
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009580 bnx2x_update_coalesce(bp);
9581
9582 return 0;
9583}
9584
9585static void bnx2x_get_ringparam(struct net_device *dev,
9586 struct ethtool_ringparam *ering)
9587{
9588 struct bnx2x *bp = netdev_priv(dev);
9589
9590 ering->rx_max_pending = MAX_RX_AVAIL;
9591 ering->rx_mini_max_pending = 0;
9592 ering->rx_jumbo_max_pending = 0;
9593
9594 ering->rx_pending = bp->rx_ring_size;
9595 ering->rx_mini_pending = 0;
9596 ering->rx_jumbo_pending = 0;
9597
9598 ering->tx_max_pending = MAX_TX_AVAIL;
9599 ering->tx_pending = bp->tx_ring_size;
9600}
9601
9602static int bnx2x_set_ringparam(struct net_device *dev,
9603 struct ethtool_ringparam *ering)
9604{
9605 struct bnx2x *bp = netdev_priv(dev);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07009606 int rc = 0;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009607
9608 if ((ering->rx_pending > MAX_RX_AVAIL) ||
9609 (ering->tx_pending > MAX_TX_AVAIL) ||
9610 (ering->tx_pending <= MAX_SKB_FRAGS + 4))
9611 return -EINVAL;
9612
9613 bp->rx_ring_size = ering->rx_pending;
9614 bp->tx_ring_size = ering->tx_pending;
9615
Eilon Greenstein34f80b02008-06-23 20:33:01 -07009616 if (netif_running(dev)) {
9617 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
9618 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009619 }
9620
Eilon Greenstein34f80b02008-06-23 20:33:01 -07009621 return rc;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009622}
9623
9624static void bnx2x_get_pauseparam(struct net_device *dev,
9625 struct ethtool_pauseparam *epause)
9626{
9627 struct bnx2x *bp = netdev_priv(dev);
9628
Eilon Greenstein356e2382009-02-12 08:38:32 +00009629 epause->autoneg = (bp->link_params.req_flow_ctrl ==
9630 BNX2X_FLOW_CTRL_AUTO) &&
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07009631 (bp->link_params.req_line_speed == SPEED_AUTO_NEG);
9632
David S. Millerc0700f92008-12-16 23:53:20 -08009633 epause->rx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) ==
9634 BNX2X_FLOW_CTRL_RX);
9635 epause->tx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX) ==
9636 BNX2X_FLOW_CTRL_TX);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009637
9638 DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
9639 DP_LEVEL " autoneg %d rx_pause %d tx_pause %d\n",
9640 epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
9641}
9642
9643static int bnx2x_set_pauseparam(struct net_device *dev,
9644 struct ethtool_pauseparam *epause)
9645{
9646 struct bnx2x *bp = netdev_priv(dev);
9647
Eilon Greenstein34f80b02008-06-23 20:33:01 -07009648 if (IS_E1HMF(bp))
9649 return 0;
9650
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009651 DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
9652 DP_LEVEL " autoneg %d rx_pause %d tx_pause %d\n",
9653 epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
9654
David S. Millerc0700f92008-12-16 23:53:20 -08009655 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07009656
9657 if (epause->rx_pause)
David S. Millerc0700f92008-12-16 23:53:20 -08009658 bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_RX;
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07009659
9660 if (epause->tx_pause)
David S. Millerc0700f92008-12-16 23:53:20 -08009661 bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_TX;
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07009662
David S. Millerc0700f92008-12-16 23:53:20 -08009663 if (bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO)
9664 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07009665
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009666 if (epause->autoneg) {
Eilon Greenstein34f80b02008-06-23 20:33:01 -07009667 if (!(bp->port.supported & SUPPORTED_Autoneg)) {
Eilon Greenstein3196a882008-08-13 15:58:49 -07009668 DP(NETIF_MSG_LINK, "autoneg not supported\n");
Eliezer Tamirf1410642008-02-28 11:51:50 -08009669 return -EINVAL;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009670 }
9671
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07009672 if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
David S. Millerc0700f92008-12-16 23:53:20 -08009673 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07009674 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009675
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07009676 DP(NETIF_MSG_LINK,
9677 "req_flow_ctrl 0x%x\n", bp->link_params.req_flow_ctrl);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07009678
9679 if (netif_running(dev)) {
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07009680 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07009681 bnx2x_link_set(bp);
9682 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009683
9684 return 0;
9685}
9686
Vladislav Zolotarovdf0f2342008-08-13 15:53:38 -07009687static int bnx2x_set_flags(struct net_device *dev, u32 data)
9688{
9689 struct bnx2x *bp = netdev_priv(dev);
9690 int changed = 0;
9691 int rc = 0;
9692
9693 /* TPA requires Rx CSUM offloading */
9694 if ((data & ETH_FLAG_LRO) && bp->rx_csum) {
9695 if (!(dev->features & NETIF_F_LRO)) {
9696 dev->features |= NETIF_F_LRO;
9697 bp->flags |= TPA_ENABLE_FLAG;
9698 changed = 1;
9699 }
9700
9701 } else if (dev->features & NETIF_F_LRO) {
9702 dev->features &= ~NETIF_F_LRO;
9703 bp->flags &= ~TPA_ENABLE_FLAG;
9704 changed = 1;
9705 }
9706
9707 if (changed && netif_running(dev)) {
9708 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
9709 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
9710 }
9711
9712 return rc;
9713}
9714
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009715static u32 bnx2x_get_rx_csum(struct net_device *dev)
9716{
9717 struct bnx2x *bp = netdev_priv(dev);
9718
9719 return bp->rx_csum;
9720}
9721
9722static int bnx2x_set_rx_csum(struct net_device *dev, u32 data)
9723{
9724 struct bnx2x *bp = netdev_priv(dev);
Vladislav Zolotarovdf0f2342008-08-13 15:53:38 -07009725 int rc = 0;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009726
9727 bp->rx_csum = data;
Vladislav Zolotarovdf0f2342008-08-13 15:53:38 -07009728
9729 /* Disable TPA, when Rx CSUM is disabled. Otherwise all
9730 TPA'ed packets will be discarded due to wrong TCP CSUM */
9731 if (!data) {
9732 u32 flags = ethtool_op_get_flags(dev);
9733
9734 rc = bnx2x_set_flags(dev, (flags & ~ETH_FLAG_LRO));
9735 }
9736
9737 return rc;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009738}
9739
9740static int bnx2x_set_tso(struct net_device *dev, u32 data)
9741{
Eilon Greenstein755735e2008-06-23 20:35:13 -07009742 if (data) {
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009743 dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
Eilon Greenstein755735e2008-06-23 20:35:13 -07009744 dev->features |= NETIF_F_TSO6;
9745 } else {
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009746 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO_ECN);
Eilon Greenstein755735e2008-06-23 20:35:13 -07009747 dev->features &= ~NETIF_F_TSO6;
9748 }
9749
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009750 return 0;
9751}
9752
/* Self-test names reported to ethtool, in the same order that
 * bnx2x_self_test() fills in its result array (buf[0..6]). */
static const struct {
	char string[ETH_GSTRING_LEN];
} bnx2x_tests_str_arr[BNX2X_NUM_TESTS] = {
	{ "register_test (offline)" },
	{ "memory_test (offline)" },
	{ "loopback_test (offline)" },
	{ "nvram_test (online)" },
	{ "interrupt_test (online)" },
	{ "link_test (online)" },
	{ "idle check (online)" }
};
9764
/* ethtool: number of self-test result slots exposed to user space */
static int bnx2x_self_test_count(struct net_device *dev)
{
	return BNX2X_NUM_TESTS;
}
9769
/* Register self-test: for every entry in reg_tbl, write 0x00000000 and
 * then 0xffffffff to the port-relative register and verify that the
 * bits covered by the mask read back as written.  The original value
 * is restored after each probe.  Returns 0 on success, -ENODEV on the
 * first mismatch or if the interface is down.
 * Only runs while the NIC is loaded in diag mode (see bnx2x_self_test).
 */
static int bnx2x_test_registers(struct bnx2x *bp)
{
	int idx, i, rc = -ENODEV;
	u32 wr_val = 0;
	int port = BP_PORT(bp);
	static const struct {
		u32 offset0;	/* register address for port 0 */
		u32 offset1;	/* per-port stride added for port 1 */
		u32 mask;	/* writable bits to verify */
	} reg_tbl[] = {
/* 0 */		{ BRB1_REG_PAUSE_LOW_THRESHOLD_0,      4, 0x000003ff },
		{ DORQ_REG_DB_ADDR0,                   4, 0xffffffff },
		{ HC_REG_AGG_INT_0,                    4, 0x000003ff },
		{ PBF_REG_MAC_IF0_ENABLE,              4, 0x00000001 },
		{ PBF_REG_P0_INIT_CRD,                 4, 0x000007ff },
		{ PRS_REG_CID_PORT_0,                  4, 0x00ffffff },
		{ PXP2_REG_PSWRQ_CDU0_L2P,             4, 0x000fffff },
		{ PXP2_REG_RQ_CDU0_EFIRST_MEM_ADDR,    8, 0x0003ffff },
		{ PXP2_REG_PSWRQ_TM0_L2P,              4, 0x000fffff },
		{ PXP2_REG_RQ_USDM0_EFIRST_MEM_ADDR,   8, 0x0003ffff },
/* 10 */	{ PXP2_REG_PSWRQ_TSDM0_L2P,            4, 0x000fffff },
		{ QM_REG_CONNNUM_0,                    4, 0x000fffff },
		{ TM_REG_LIN0_MAX_ACTIVE_CID,          4, 0x0003ffff },
		{ SRC_REG_KEYRSS0_0,                  40, 0xffffffff },
		{ SRC_REG_KEYRSS0_7,                  40, 0xffffffff },
		{ XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD00, 4, 0x00000001 },
		{ XCM_REG_WU_DA_CNT_CMD00,             4, 0x00000003 },
		{ XCM_REG_GLB_DEL_ACK_MAX_CNT_0,       4, 0x000000ff },
		{ NIG_REG_LLH0_T_BIT,                  4, 0x00000001 },
		{ NIG_REG_EMAC0_IN_EN,                 4, 0x00000001 },
/* 20 */	{ NIG_REG_BMAC0_IN_EN,                 4, 0x00000001 },
		{ NIG_REG_XCM0_OUT_EN,                 4, 0x00000001 },
		{ NIG_REG_BRB0_OUT_EN,                 4, 0x00000001 },
		{ NIG_REG_LLH0_XCM_MASK,               4, 0x00000007 },
		{ NIG_REG_LLH0_ACPI_PAT_6_LEN,        68, 0x000000ff },
		{ NIG_REG_LLH0_ACPI_PAT_0_CRC,        68, 0xffffffff },
		{ NIG_REG_LLH0_DEST_MAC_0_0,         160, 0xffffffff },
		{ NIG_REG_LLH0_DEST_IP_0_1,          160, 0xffffffff },
		{ NIG_REG_LLH0_IPV4_IPV6_0,          160, 0x00000001 },
		{ NIG_REG_LLH0_DEST_UDP_0,           160, 0x0000ffff },
/* 30 */	{ NIG_REG_LLH0_DEST_TCP_0,           160, 0x0000ffff },
		{ NIG_REG_LLH0_VLAN_ID_0,            160, 0x00000fff },
		{ NIG_REG_XGXS_SERDES0_MODE_SEL,       4, 0x00000001 },
		{ NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0, 4, 0x00000001 },
		{ NIG_REG_STATUS_INTERRUPT_PORT0,      4, 0x07ffffff },
		{ NIG_REG_XGXS0_CTRL_EXTREMOTEMDIOST, 24, 0x00000001 },
		{ NIG_REG_SERDES0_CTRL_PHY_ADDR,      16, 0x0000001f },

		{ 0xffffffff, 0, 0x00000000 }	/* sentinel */
	};

	if (!netif_running(bp->dev))
		return rc;

	/* Repeat the test twice:
	   First by writing 0x00000000, second by writing 0xffffffff */
	for (idx = 0; idx < 2; idx++) {

		switch (idx) {
		case 0:
			wr_val = 0;
			break;
		case 1:
			wr_val = 0xffffffff;
			break;
		}

		for (i = 0; reg_tbl[i].offset0 != 0xffffffff; i++) {
			u32 offset, mask, save_val, val;

			offset = reg_tbl[i].offset0 + port*reg_tbl[i].offset1;
			mask = reg_tbl[i].mask;

			save_val = REG_RD(bp, offset);

			REG_WR(bp, offset, wr_val);
			val = REG_RD(bp, offset);

			/* Restore the original register's value */
			REG_WR(bp, offset, save_val);

			/* verify that value is as expected value */
			if ((val & mask) != (wr_val & mask))
				goto test_reg_exit;
		}
	}

	rc = 0;

test_reg_exit:
	return rc;
}
9862
/* Memory self-test: read back every word of several internal block
 * memories, then check the blocks' parity status registers.  Per-chip
 * masks exclude bits that are not meaningful on that chip revision
 * (presumably known-benign parity indications - TODO confirm against
 * HW documentation).  Returns 0 on success, -ENODEV otherwise.
 */
static int bnx2x_test_memory(struct bnx2x *bp)
{
	int i, j, rc = -ENODEV;
	u32 val;
	static const struct {
		u32 offset;
		int size;
	} mem_tbl[] = {
		{ CCM_REG_XX_DESCR_TABLE,   CCM_REG_XX_DESCR_TABLE_SIZE },
		{ CFC_REG_ACTIVITY_COUNTER, CFC_REG_ACTIVITY_COUNTER_SIZE },
		{ CFC_REG_LINK_LIST,        CFC_REG_LINK_LIST_SIZE },
		{ DMAE_REG_CMD_MEM,         DMAE_REG_CMD_MEM_SIZE },
		{ TCM_REG_XX_DESCR_TABLE,   TCM_REG_XX_DESCR_TABLE_SIZE },
		{ UCM_REG_XX_DESCR_TABLE,   UCM_REG_XX_DESCR_TABLE_SIZE },
		{ XCM_REG_XX_DESCR_TABLE,   XCM_REG_XX_DESCR_TABLE_SIZE },

		{ 0xffffffff, 0 }	/* sentinel */
	};
	static const struct {
		char *name;
		u32 offset;
		u32 e1_mask;	/* bits ignored on E1 chips */
		u32 e1h_mask;	/* bits ignored on E1H chips */
	} prty_tbl[] = {
		{ "CCM_PRTY_STS",  CCM_REG_CCM_PRTY_STS,   0x3ffc0, 0 },
		{ "CFC_PRTY_STS",  CFC_REG_CFC_PRTY_STS,   0x2,     0x2 },
		{ "DMAE_PRTY_STS", DMAE_REG_DMAE_PRTY_STS, 0,       0 },
		{ "TCM_PRTY_STS",  TCM_REG_TCM_PRTY_STS,   0x3ffc0, 0 },
		{ "UCM_PRTY_STS",  UCM_REG_UCM_PRTY_STS,   0x3ffc0, 0 },
		{ "XCM_PRTY_STS",  XCM_REG_XCM_PRTY_STS,   0x3ffc1, 0 },

		{ NULL, 0xffffffff, 0, 0 }	/* sentinel */
	};

	if (!netif_running(bp->dev))
		return rc;

	/* Go through all the memories */
	for (i = 0; mem_tbl[i].offset != 0xffffffff; i++)
		for (j = 0; j < mem_tbl[i].size; j++)
			REG_RD(bp, mem_tbl[i].offset + j*4);

	/* Check the parity status */
	for (i = 0; prty_tbl[i].offset != 0xffffffff; i++) {
		val = REG_RD(bp, prty_tbl[i].offset);
		if ((CHIP_IS_E1(bp) && (val & ~(prty_tbl[i].e1_mask))) ||
		    (CHIP_IS_E1H(bp) && (val & ~(prty_tbl[i].e1h_mask)))) {
			DP(NETIF_MSG_HW,
			   "%s is 0x%x\n", prty_tbl[i].name, val);
			goto test_mem_exit;
		}
	}

	rc = 0;

test_mem_exit:
	return rc;
}
9921
Yitchak Gertnerf3c87cd2008-06-23 20:35:51 -07009922static void bnx2x_wait_for_link(struct bnx2x *bp, u8 link_up)
9923{
9924 int cnt = 1000;
9925
9926 if (link_up)
9927 while (bnx2x_link_test(bp) && cnt--)
9928 msleep(10);
9929}
9930
/* Send a single packet through the requested loopback path (MAC- or
 * PHY-level) on the diag Tx queue and verify it arrives intact on Rx
 * queue 0.  Returns 0 on success, -EINVAL for an unusable loopback
 * mode, -ENOMEM on skb allocation failure, -ENODEV on any data-path
 * failure.  Caller must have the NIC loaded in diag mode with
 * NAPI/interrupts stopped and hold the PHY lock (bnx2x_test_loopback).
 */
static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up)
{
	unsigned int pkt_size, num_pkts, i;
	struct sk_buff *skb;
	unsigned char *packet;
	struct bnx2x_fastpath *fp_rx = &bp->fp[0];
	/* Tx fastpaths follow the Rx fastpaths in the bp->fp[] array */
	struct bnx2x_fastpath *fp_tx = &bp->fp[bp->num_rx_queues];
	u16 tx_start_idx, tx_idx;
	u16 rx_start_idx, rx_idx;
	u16 pkt_prod, bd_prod;
	struct sw_tx_bd *tx_buf;
	struct eth_tx_start_bd *tx_start_bd;
	struct eth_tx_parse_bd *pbd = NULL;
	dma_addr_t mapping;
	union eth_rx_cqe *cqe;
	u8 cqe_fp_flags;
	struct sw_rx_bd *rx_buf;
	u16 len;
	int rc = -ENODEV;

	/* check the loopback mode */
	switch (loopback_mode) {
	case BNX2X_PHY_LOOPBACK:
		/* PHY loopback relies on the link having been brought up
		 * in LOOPBACK_XGXS_10 mode (done by the diag load) */
		if (bp->link_params.loopback_mode != LOOPBACK_XGXS_10)
			return -EINVAL;
		break;
	case BNX2X_MAC_LOOPBACK:
		bp->link_params.loopback_mode = LOOPBACK_BMAC;
		bnx2x_phy_init(&bp->link_params, &bp->link_vars);
		break;
	default:
		return -EINVAL;
	}

	/* prepare the loopback packet: dest MAC = own address, zeroed
	 * source, 0x77 filler in the header, i&0xff pattern payload */
	pkt_size = (((bp->dev->mtu < ETH_MAX_PACKET_SIZE) ?
		     bp->dev->mtu : ETH_MAX_PACKET_SIZE) + ETH_HLEN);
	skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
	if (!skb) {
		rc = -ENOMEM;
		goto test_loopback_exit;
	}
	packet = skb_put(skb, pkt_size);
	memcpy(packet, bp->dev->dev_addr, ETH_ALEN);
	memset(packet + ETH_ALEN, 0, ETH_ALEN);
	memset(packet + 2*ETH_ALEN, 0x77, (ETH_HLEN - 2*ETH_ALEN));
	for (i = ETH_HLEN; i < pkt_size; i++)
		packet[i] = (unsigned char) (i & 0xff);

	/* send the loopback packet */
	num_pkts = 0;
	/* snapshot the SB consumer indices to detect completion below */
	tx_start_idx = le16_to_cpu(*fp_tx->tx_cons_sb);
	rx_start_idx = le16_to_cpu(*fp_rx->rx_cons_sb);

	pkt_prod = fp_tx->tx_pkt_prod++;
	tx_buf = &fp_tx->tx_buf_ring[TX_BD(pkt_prod)];
	tx_buf->first_bd = fp_tx->tx_bd_prod;
	tx_buf->skb = skb;
	tx_buf->flags = 0;

	/* build the start BD (mirrors the bnx2x_start_xmit fast path) */
	bd_prod = TX_BD(fp_tx->tx_bd_prod);
	tx_start_bd = &fp_tx->tx_desc_ring[bd_prod].start_bd;
	mapping = pci_map_single(bp->pdev, skb->data,
				 skb_headlen(skb), PCI_DMA_TODEVICE);
	tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
	tx_start_bd->nbd = cpu_to_le16(2); /* start + pbd */
	tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
	tx_start_bd->vlan = cpu_to_le16(pkt_prod);
	tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
	tx_start_bd->general_data = ((UNICAST_ADDRESS <<
				ETH_TX_START_BD_ETH_ADDR_TYPE_SHIFT) | 1);

	/* turn on parsing and get a BD */
	bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
	pbd = &fp_tx->tx_desc_ring[bd_prod].parse_bd;

	memset(pbd, 0, sizeof(struct eth_tx_parse_bd));

	/* the BDs must be visible in memory before ringing the doorbell */
	wmb();

	fp_tx->tx_db.data.prod += 2;
	barrier();
	DOORBELL(bp, fp_tx->index - bp->num_rx_queues, fp_tx->tx_db.raw);

	mmiowb();

	num_pkts++;
	fp_tx->tx_bd_prod += 2; /* start + pbd */
	bp->dev->trans_start = jiffies;

	/* give the HW time to loop the packet back */
	udelay(100);

	/* the SB consumers must have advanced by exactly one packet */
	tx_idx = le16_to_cpu(*fp_tx->tx_cons_sb);
	if (tx_idx != tx_start_idx + num_pkts)
		goto test_loopback_exit;

	rx_idx = le16_to_cpu(*fp_rx->rx_cons_sb);
	if (rx_idx != rx_start_idx + num_pkts)
		goto test_loopback_exit;

	/* verify the CQE reports a clean fast-path completion */
	cqe = &fp_rx->rx_comp_ring[RCQ_BD(fp_rx->rx_comp_cons)];
	cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
	if (CQE_TYPE(cqe_fp_flags) || (cqe_fp_flags & ETH_RX_ERROR_FALGS))
		goto test_loopback_rx_exit;

	len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
	if (len != pkt_size)
		goto test_loopback_rx_exit;

	/* verify the payload pattern survived the round trip */
	rx_buf = &fp_rx->rx_buf_ring[RX_BD(fp_rx->rx_bd_cons)];
	skb = rx_buf->skb;
	skb_reserve(skb, cqe->fast_path_cqe.placement_offset);
	for (i = ETH_HLEN; i < pkt_size; i++)
		if (*(skb->data + i) != (unsigned char) (i & 0xff))
			goto test_loopback_rx_exit;

	rc = 0;

test_loopback_rx_exit:

	/* advance the Rx ring past the consumed BD/CQE */
	fp_rx->rx_bd_cons = NEXT_RX_IDX(fp_rx->rx_bd_cons);
	fp_rx->rx_bd_prod = NEXT_RX_IDX(fp_rx->rx_bd_prod);
	fp_rx->rx_comp_cons = NEXT_RCQ_IDX(fp_rx->rx_comp_cons);
	fp_rx->rx_comp_prod = NEXT_RCQ_IDX(fp_rx->rx_comp_prod);

	/* Update producers */
	bnx2x_update_rx_prod(bp, fp_rx, fp_rx->rx_bd_prod, fp_rx->rx_comp_prod,
			     fp_rx->rx_sge_prod);

test_loopback_exit:
	bp->link_params.loopback_mode = LOOPBACK_NONE;

	return rc;
}
10066
10067static int bnx2x_test_loopback(struct bnx2x *bp, u8 link_up)
10068{
Eilon Greensteinb5bf9062009-02-12 08:38:08 +000010069 int rc = 0, res;
Yitchak Gertnerf3c87cd2008-06-23 20:35:51 -070010070
10071 if (!netif_running(bp->dev))
10072 return BNX2X_LOOPBACK_FAILED;
10073
Yitchak Gertnerf8ef6e42008-09-09 05:07:25 -070010074 bnx2x_netif_stop(bp, 1);
Eilon Greenstein3910c8a2009-01-22 06:01:32 +000010075 bnx2x_acquire_phy_lock(bp);
Yitchak Gertnerf3c87cd2008-06-23 20:35:51 -070010076
Eilon Greensteinb5bf9062009-02-12 08:38:08 +000010077 res = bnx2x_run_loopback(bp, BNX2X_PHY_LOOPBACK, link_up);
10078 if (res) {
10079 DP(NETIF_MSG_PROBE, " PHY loopback failed (res %d)\n", res);
10080 rc |= BNX2X_PHY_LOOPBACK_FAILED;
Yitchak Gertnerf3c87cd2008-06-23 20:35:51 -070010081 }
10082
Eilon Greensteinb5bf9062009-02-12 08:38:08 +000010083 res = bnx2x_run_loopback(bp, BNX2X_MAC_LOOPBACK, link_up);
10084 if (res) {
10085 DP(NETIF_MSG_PROBE, " MAC loopback failed (res %d)\n", res);
10086 rc |= BNX2X_MAC_LOOPBACK_FAILED;
Yitchak Gertnerf3c87cd2008-06-23 20:35:51 -070010087 }
10088
Eilon Greenstein3910c8a2009-01-22 06:01:32 +000010089 bnx2x_release_phy_lock(bp);
Yitchak Gertnerf3c87cd2008-06-23 20:35:51 -070010090 bnx2x_netif_start(bp);
10091
10092 return rc;
10093}
10094
10095#define CRC32_RESIDUAL 0xdebb20e3
10096
10097static int bnx2x_test_nvram(struct bnx2x *bp)
10098{
10099 static const struct {
10100 int offset;
10101 int size;
10102 } nvram_tbl[] = {
10103 { 0, 0x14 }, /* bootstrap */
10104 { 0x14, 0xec }, /* dir */
10105 { 0x100, 0x350 }, /* manuf_info */
10106 { 0x450, 0xf0 }, /* feature_info */
10107 { 0x640, 0x64 }, /* upgrade_key_info */
10108 { 0x6a4, 0x64 },
10109 { 0x708, 0x70 }, /* manuf_key_info */
10110 { 0x778, 0x70 },
10111 { 0, 0 }
10112 };
Eilon Greenstein4781bfa2009-02-12 08:38:17 +000010113 __be32 buf[0x350 / 4];
Yitchak Gertnerf3c87cd2008-06-23 20:35:51 -070010114 u8 *data = (u8 *)buf;
10115 int i, rc;
10116 u32 magic, csum;
10117
10118 rc = bnx2x_nvram_read(bp, 0, data, 4);
10119 if (rc) {
Eilon Greensteinf5372252009-02-12 08:38:30 +000010120 DP(NETIF_MSG_PROBE, "magic value read (rc %d)\n", rc);
Yitchak Gertnerf3c87cd2008-06-23 20:35:51 -070010121 goto test_nvram_exit;
10122 }
10123
10124 magic = be32_to_cpu(buf[0]);
10125 if (magic != 0x669955aa) {
10126 DP(NETIF_MSG_PROBE, "magic value (0x%08x)\n", magic);
10127 rc = -ENODEV;
10128 goto test_nvram_exit;
10129 }
10130
10131 for (i = 0; nvram_tbl[i].size; i++) {
10132
10133 rc = bnx2x_nvram_read(bp, nvram_tbl[i].offset, data,
10134 nvram_tbl[i].size);
10135 if (rc) {
10136 DP(NETIF_MSG_PROBE,
Eilon Greensteinf5372252009-02-12 08:38:30 +000010137 "nvram_tbl[%d] read data (rc %d)\n", i, rc);
Yitchak Gertnerf3c87cd2008-06-23 20:35:51 -070010138 goto test_nvram_exit;
10139 }
10140
10141 csum = ether_crc_le(nvram_tbl[i].size, data);
10142 if (csum != CRC32_RESIDUAL) {
10143 DP(NETIF_MSG_PROBE,
10144 "nvram_tbl[%d] csum value (0x%08x)\n", i, csum);
10145 rc = -ENODEV;
10146 goto test_nvram_exit;
10147 }
10148 }
10149
10150test_nvram_exit:
10151 return rc;
10152}
10153
/* Interrupt/slowpath self-test: post a SET_MAC ramrod with an empty
 * MAC configuration (hdr.length == 0, so nothing actually changes)
 * and poll for up to ~100ms for its completion, which is signalled by
 * set_mac_pending dropping back to 0 (presumably cleared by the
 * slowpath event handler - confirm against bnx2x_sp_event).
 * Returns 0 on success, -ENODEV on timeout or if the device is down.
 */
static int bnx2x_test_intr(struct bnx2x *bp)
{
	struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
	int i, rc;

	if (!netif_running(bp->dev))
		return -ENODEV;

	config->hdr.length = 0;
	if (CHIP_IS_E1(bp))
		config->hdr.offset = (BP_PORT(bp) ? 32 : 0);
	else
		config->hdr.offset = BP_FUNC(bp);
	config->hdr.client_id = bp->fp->cl_id;
	config->hdr.reserved1 = 0;

	rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
			   U64_HI(bnx2x_sp_mapping(bp, mac_config)),
			   U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
	if (rc == 0) {
		bp->set_mac_pending++;
		/* 10 x 10ms poll for the completion interrupt */
		for (i = 0; i < 10; i++) {
			if (!bp->set_mac_pending)
				break;
			msleep_interruptible(10);
		}
		if (i == 10)
			rc = -ENODEV;
	}

	return rc;
}
10186
/* ethtool self-test entry point.  Result slot buf[i] != 0 means the
 * test named in bnx2x_tests_str_arr[i] failed.  Offline tests
 * (registers, memory, loopback) require reloading the NIC in diag
 * mode and are skipped in E1H multi-function mode; the online tests
 * (nvram, interrupt, link) run against the live configuration.
 * NOTE(review): buf[6] ("idle check") is never written here - it
 * always stays 0 from the memset.
 */
static void bnx2x_self_test(struct net_device *dev,
			    struct ethtool_test *etest, u64 *buf)
{
	struct bnx2x *bp = netdev_priv(dev);

	memset(buf, 0, sizeof(u64) * BNX2X_NUM_TESTS);

	if (!netif_running(dev))
		return;

	/* offline tests are not supported in MF mode */
	if (IS_E1HMF(bp))
		etest->flags &= ~ETH_TEST_FL_OFFLINE;

	if (etest->flags & ETH_TEST_FL_OFFLINE) {
		int port = BP_PORT(bp);
		u32 val;
		u8 link_up;

		/* save current value of input enable for TX port IF */
		val = REG_RD(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4);
		/* disable input for TX port IF */
		REG_WR(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4, 0);

		/* reload in diag mode for the offline tests */
		link_up = bp->link_vars.link_up;
		bnx2x_nic_unload(bp, UNLOAD_NORMAL);
		bnx2x_nic_load(bp, LOAD_DIAG);
		/* wait until link state is restored */
		bnx2x_wait_for_link(bp, link_up);

		if (bnx2x_test_registers(bp) != 0) {
			buf[0] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		if (bnx2x_test_memory(bp) != 0) {
			buf[1] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		/* buf[2] holds the loopback failure bitmask directly */
		buf[2] = bnx2x_test_loopback(bp, link_up);
		if (buf[2] != 0)
			etest->flags |= ETH_TEST_FL_FAILED;

		bnx2x_nic_unload(bp, UNLOAD_NORMAL);

		/* restore input for TX port IF */
		REG_WR(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4, val);

		bnx2x_nic_load(bp, LOAD_NORMAL);
		/* wait until link state is restored */
		bnx2x_wait_for_link(bp, link_up);
	}
	if (bnx2x_test_nvram(bp) != 0) {
		buf[3] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}
	if (bnx2x_test_intr(bp) != 0) {
		buf[4] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}
	/* only the PMF runs the link test */
	if (bp->port.pmf)
		if (bnx2x_link_test(bp) != 0) {
			buf[5] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}

#ifdef BNX2X_EXTRA_DEBUG
	bnx2x_panic_dump(bp);
#endif
}
10256
/* Per-RX-queue ethtool statistics descriptors.
 *
 * offset: 32-bit word offset of the counter inside the per-queue stats
 *         structure (via Q_STATS_OFFSET32)
 * size:   counter width in bytes - 8 means a hi/lo pair of consecutive
 *         u32 words, 4 means a single u32, 0 means not implemented
 *         (reported as zero)
 * string: ethtool name template; "%d" is substituted with the queue index
 */
static const struct {
	long offset;
	int size;
	u8 string[ETH_GSTRING_LEN];
} bnx2x_q_stats_arr[BNX2X_NUM_Q_STATS] = {
/* 1 */	{ Q_STATS_OFFSET32(total_bytes_received_hi), 8, "[%d]: rx_bytes" },
	{ Q_STATS_OFFSET32(error_bytes_received_hi),
						8, "[%d]: rx_error_bytes" },
	{ Q_STATS_OFFSET32(total_unicast_packets_received_hi),
						8, "[%d]: rx_ucast_packets" },
	{ Q_STATS_OFFSET32(total_multicast_packets_received_hi),
						8, "[%d]: rx_mcast_packets" },
	{ Q_STATS_OFFSET32(total_broadcast_packets_received_hi),
						8, "[%d]: rx_bcast_packets" },
	{ Q_STATS_OFFSET32(no_buff_discard_hi),	8, "[%d]: rx_discards" },
	{ Q_STATS_OFFSET32(rx_err_discard_pkt),
					 4, "[%d]: rx_phy_ip_err_discards"},
	{ Q_STATS_OFFSET32(rx_skb_alloc_failed),
					 4, "[%d]: rx_skb_alloc_discard" },
	{ Q_STATS_OFFSET32(hw_csum_err), 4, "[%d]: rx_csum_offload_errors" },

/* 10 */{ Q_STATS_OFFSET32(total_bytes_transmitted_hi),	8, "[%d]: tx_bytes" },
	{ Q_STATS_OFFSET32(total_unicast_packets_transmitted_hi),
							8, "[%d]: tx_packets" }
};
10282
/* Global (port/function) ethtool statistics descriptors.
 *
 * offset: 32-bit word offset of the counter inside bp->eth_stats
 *         (via STATS_OFFSET32)
 * size:   counter width in bytes - 8 means a hi/lo pair of consecutive
 *         u32 words, 4 means a single u32, 0 means not implemented
 * flags:  whether the counter is maintained per physical port, per PCI
 *         function, or both (used to filter stats in MF mode)
 * string: name reported to ethtool
 */
static const struct {
	long offset;
	int size;
	u32 flags;
#define STATS_FLAGS_PORT		1
#define STATS_FLAGS_FUNC		2
#define STATS_FLAGS_BOTH		(STATS_FLAGS_FUNC | STATS_FLAGS_PORT)
	u8 string[ETH_GSTRING_LEN];
} bnx2x_stats_arr[BNX2X_NUM_STATS] = {
/* 1 */	{ STATS_OFFSET32(total_bytes_received_hi),
				8, STATS_FLAGS_BOTH, "rx_bytes" },
	{ STATS_OFFSET32(error_bytes_received_hi),
				8, STATS_FLAGS_BOTH, "rx_error_bytes" },
	{ STATS_OFFSET32(total_unicast_packets_received_hi),
				8, STATS_FLAGS_BOTH, "rx_ucast_packets" },
	{ STATS_OFFSET32(total_multicast_packets_received_hi),
				8, STATS_FLAGS_BOTH, "rx_mcast_packets" },
	{ STATS_OFFSET32(total_broadcast_packets_received_hi),
				8, STATS_FLAGS_BOTH, "rx_bcast_packets" },
	{ STATS_OFFSET32(rx_stat_dot3statsfcserrors_hi),
				8, STATS_FLAGS_PORT, "rx_crc_errors" },
	{ STATS_OFFSET32(rx_stat_dot3statsalignmenterrors_hi),
				8, STATS_FLAGS_PORT, "rx_align_errors" },
	{ STATS_OFFSET32(rx_stat_etherstatsundersizepkts_hi),
				8, STATS_FLAGS_PORT, "rx_undersize_packets" },
	{ STATS_OFFSET32(etherstatsoverrsizepkts_hi),
				8, STATS_FLAGS_PORT, "rx_oversize_packets" },
/* 10 */{ STATS_OFFSET32(rx_stat_etherstatsfragments_hi),
				8, STATS_FLAGS_PORT, "rx_fragments" },
	{ STATS_OFFSET32(rx_stat_etherstatsjabbers_hi),
				8, STATS_FLAGS_PORT, "rx_jabbers" },
	{ STATS_OFFSET32(no_buff_discard_hi),
				8, STATS_FLAGS_BOTH, "rx_discards" },
	{ STATS_OFFSET32(mac_filter_discard),
				4, STATS_FLAGS_PORT, "rx_filtered_packets" },
	{ STATS_OFFSET32(xxoverflow_discard),
				4, STATS_FLAGS_PORT, "rx_fw_discards" },
	{ STATS_OFFSET32(brb_drop_hi),
				8, STATS_FLAGS_PORT, "rx_brb_discard" },
	{ STATS_OFFSET32(brb_truncate_hi),
				8, STATS_FLAGS_PORT, "rx_brb_truncate" },
	{ STATS_OFFSET32(pause_frames_received_hi),
				8, STATS_FLAGS_PORT, "rx_pause_frames" },
	{ STATS_OFFSET32(rx_stat_maccontrolframesreceived_hi),
				8, STATS_FLAGS_PORT, "rx_mac_ctrl_frames" },
	{ STATS_OFFSET32(nig_timer_max),
			4, STATS_FLAGS_PORT, "rx_constant_pause_events" },
/* 20 */{ STATS_OFFSET32(rx_err_discard_pkt),
				4, STATS_FLAGS_BOTH, "rx_phy_ip_err_discards"},
	{ STATS_OFFSET32(rx_skb_alloc_failed),
				4, STATS_FLAGS_BOTH, "rx_skb_alloc_discard" },
	{ STATS_OFFSET32(hw_csum_err),
				4, STATS_FLAGS_BOTH, "rx_csum_offload_errors" },

	{ STATS_OFFSET32(total_bytes_transmitted_hi),
				8, STATS_FLAGS_BOTH, "tx_bytes" },
	{ STATS_OFFSET32(tx_stat_ifhcoutbadoctets_hi),
				8, STATS_FLAGS_PORT, "tx_error_bytes" },
	{ STATS_OFFSET32(total_unicast_packets_transmitted_hi),
				8, STATS_FLAGS_BOTH, "tx_packets" },
	{ STATS_OFFSET32(tx_stat_dot3statsinternalmactransmiterrors_hi),
				8, STATS_FLAGS_PORT, "tx_mac_errors" },
	{ STATS_OFFSET32(rx_stat_dot3statscarriersenseerrors_hi),
				8, STATS_FLAGS_PORT, "tx_carrier_errors" },
	{ STATS_OFFSET32(tx_stat_dot3statssinglecollisionframes_hi),
				8, STATS_FLAGS_PORT, "tx_single_collisions" },
	{ STATS_OFFSET32(tx_stat_dot3statsmultiplecollisionframes_hi),
				8, STATS_FLAGS_PORT, "tx_multi_collisions" },
/* 30 */{ STATS_OFFSET32(tx_stat_dot3statsdeferredtransmissions_hi),
				8, STATS_FLAGS_PORT, "tx_deferred" },
	{ STATS_OFFSET32(tx_stat_dot3statsexcessivecollisions_hi),
				8, STATS_FLAGS_PORT, "tx_excess_collisions" },
	{ STATS_OFFSET32(tx_stat_dot3statslatecollisions_hi),
				8, STATS_FLAGS_PORT, "tx_late_collisions" },
	{ STATS_OFFSET32(tx_stat_etherstatscollisions_hi),
				8, STATS_FLAGS_PORT, "tx_total_collisions" },
	{ STATS_OFFSET32(tx_stat_etherstatspkts64octets_hi),
				8, STATS_FLAGS_PORT, "tx_64_byte_packets" },
	{ STATS_OFFSET32(tx_stat_etherstatspkts65octetsto127octets_hi),
			8, STATS_FLAGS_PORT, "tx_65_to_127_byte_packets" },
	{ STATS_OFFSET32(tx_stat_etherstatspkts128octetsto255octets_hi),
			8, STATS_FLAGS_PORT, "tx_128_to_255_byte_packets" },
	{ STATS_OFFSET32(tx_stat_etherstatspkts256octetsto511octets_hi),
			8, STATS_FLAGS_PORT, "tx_256_to_511_byte_packets" },
	{ STATS_OFFSET32(tx_stat_etherstatspkts512octetsto1023octets_hi),
			8, STATS_FLAGS_PORT, "tx_512_to_1023_byte_packets" },
	{ STATS_OFFSET32(etherstatspkts1024octetsto1522octets_hi),
			8, STATS_FLAGS_PORT, "tx_1024_to_1522_byte_packets" },
/* 40 */{ STATS_OFFSET32(etherstatspktsover1522octets_hi),
			8, STATS_FLAGS_PORT, "tx_1523_to_9022_byte_packets" },
	{ STATS_OFFSET32(pause_frames_sent_hi),
				8, STATS_FLAGS_PORT, "tx_pause_frames" }
};
10376
/* stat i is maintained per physical port only (hidden from non-PMF
 * functions in MF mode) */
#define IS_PORT_STAT(i) \
	((bnx2x_stats_arr[i].flags & STATS_FLAGS_BOTH) == STATS_FLAGS_PORT)
/* stat i is maintained per PCI function */
#define IS_FUNC_STAT(i)		(bnx2x_stats_arr[i].flags & STATS_FLAGS_FUNC)
/* E1H multi-function mode without stats debugging enabled: expose only
 * per-function statistics to ethtool */
#define IS_E1HMF_MODE_STAT(bp) \
			(IS_E1HMF(bp) && !(bp->msglevel & BNX2X_MSG_STATS))
Yitchak Gertner66e855f2008-08-13 15:49:05 -070010382
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010383static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
10384{
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -070010385 struct bnx2x *bp = netdev_priv(dev);
Eilon Greensteinde832a52009-02-12 08:36:33 +000010386 int i, j, k;
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -070010387
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010388 switch (stringset) {
10389 case ETH_SS_STATS:
Eilon Greensteinde832a52009-02-12 08:36:33 +000010390 if (is_multi(bp)) {
10391 k = 0;
Eilon Greensteinca003922009-08-12 22:53:28 -070010392 for_each_rx_queue(bp, i) {
Eilon Greensteinde832a52009-02-12 08:36:33 +000010393 for (j = 0; j < BNX2X_NUM_Q_STATS; j++)
10394 sprintf(buf + (k + j)*ETH_GSTRING_LEN,
10395 bnx2x_q_stats_arr[j].string, i);
10396 k += BNX2X_NUM_Q_STATS;
10397 }
10398 if (IS_E1HMF_MODE_STAT(bp))
10399 break;
10400 for (j = 0; j < BNX2X_NUM_STATS; j++)
10401 strcpy(buf + (k + j)*ETH_GSTRING_LEN,
10402 bnx2x_stats_arr[j].string);
10403 } else {
10404 for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
10405 if (IS_E1HMF_MODE_STAT(bp) && IS_PORT_STAT(i))
10406 continue;
10407 strcpy(buf + j*ETH_GSTRING_LEN,
10408 bnx2x_stats_arr[i].string);
10409 j++;
10410 }
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -070010411 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010412 break;
10413
10414 case ETH_SS_TEST:
10415 memcpy(buf, bnx2x_tests_str_arr, sizeof(bnx2x_tests_str_arr));
10416 break;
10417 }
10418}
10419
10420static int bnx2x_get_stats_count(struct net_device *dev)
10421{
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -070010422 struct bnx2x *bp = netdev_priv(dev);
Eilon Greensteinde832a52009-02-12 08:36:33 +000010423 int i, num_stats;
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -070010424
Eilon Greensteinde832a52009-02-12 08:36:33 +000010425 if (is_multi(bp)) {
Eilon Greensteinca003922009-08-12 22:53:28 -070010426 num_stats = BNX2X_NUM_Q_STATS * bp->num_rx_queues;
Eilon Greensteinde832a52009-02-12 08:36:33 +000010427 if (!IS_E1HMF_MODE_STAT(bp))
10428 num_stats += BNX2X_NUM_STATS;
10429 } else {
10430 if (IS_E1HMF_MODE_STAT(bp)) {
10431 num_stats = 0;
10432 for (i = 0; i < BNX2X_NUM_STATS; i++)
10433 if (IS_FUNC_STAT(i))
10434 num_stats++;
10435 } else
10436 num_stats = BNX2X_NUM_STATS;
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -070010437 }
Eilon Greensteinde832a52009-02-12 08:36:33 +000010438
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -070010439 return num_stats;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010440}
10441
10442static void bnx2x_get_ethtool_stats(struct net_device *dev,
10443 struct ethtool_stats *stats, u64 *buf)
10444{
10445 struct bnx2x *bp = netdev_priv(dev);
Eilon Greensteinde832a52009-02-12 08:36:33 +000010446 u32 *hw_stats, *offset;
10447 int i, j, k;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010448
Eilon Greensteinde832a52009-02-12 08:36:33 +000010449 if (is_multi(bp)) {
10450 k = 0;
Eilon Greensteinca003922009-08-12 22:53:28 -070010451 for_each_rx_queue(bp, i) {
Eilon Greensteinde832a52009-02-12 08:36:33 +000010452 hw_stats = (u32 *)&bp->fp[i].eth_q_stats;
10453 for (j = 0; j < BNX2X_NUM_Q_STATS; j++) {
10454 if (bnx2x_q_stats_arr[j].size == 0) {
10455 /* skip this counter */
10456 buf[k + j] = 0;
10457 continue;
10458 }
10459 offset = (hw_stats +
10460 bnx2x_q_stats_arr[j].offset);
10461 if (bnx2x_q_stats_arr[j].size == 4) {
10462 /* 4-byte counter */
10463 buf[k + j] = (u64) *offset;
10464 continue;
10465 }
10466 /* 8-byte counter */
10467 buf[k + j] = HILO_U64(*offset, *(offset + 1));
10468 }
10469 k += BNX2X_NUM_Q_STATS;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010470 }
Eilon Greensteinde832a52009-02-12 08:36:33 +000010471 if (IS_E1HMF_MODE_STAT(bp))
10472 return;
10473 hw_stats = (u32 *)&bp->eth_stats;
10474 for (j = 0; j < BNX2X_NUM_STATS; j++) {
10475 if (bnx2x_stats_arr[j].size == 0) {
10476 /* skip this counter */
10477 buf[k + j] = 0;
10478 continue;
10479 }
10480 offset = (hw_stats + bnx2x_stats_arr[j].offset);
10481 if (bnx2x_stats_arr[j].size == 4) {
10482 /* 4-byte counter */
10483 buf[k + j] = (u64) *offset;
10484 continue;
10485 }
10486 /* 8-byte counter */
10487 buf[k + j] = HILO_U64(*offset, *(offset + 1));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010488 }
Eilon Greensteinde832a52009-02-12 08:36:33 +000010489 } else {
10490 hw_stats = (u32 *)&bp->eth_stats;
10491 for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
10492 if (IS_E1HMF_MODE_STAT(bp) && IS_PORT_STAT(i))
10493 continue;
10494 if (bnx2x_stats_arr[i].size == 0) {
10495 /* skip this counter */
10496 buf[j] = 0;
10497 j++;
10498 continue;
10499 }
10500 offset = (hw_stats + bnx2x_stats_arr[i].offset);
10501 if (bnx2x_stats_arr[i].size == 4) {
10502 /* 4-byte counter */
10503 buf[j] = (u64) *offset;
10504 j++;
10505 continue;
10506 }
10507 /* 8-byte counter */
10508 buf[j] = HILO_U64(*offset, *(offset + 1));
10509 j++;
10510 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010511 }
10512}
10513
/* Ethtool "identify NIC" callback: blink the port LED.
 *
 * @data: number of seconds to blink; 0 selects the 2-second default.
 * Toggles the LED once per second (500 ms on at 1G indication, 500 ms
 * off), aborting early if a signal is pending, and finally restores the
 * LED to reflect the current link state.  Only the port-management
 * function (PMF) may drive the LEDs, and only while the device is up.
 * Always returns 0.
 */
static int bnx2x_phys_id(struct net_device *dev, u32 data)
{
	struct bnx2x *bp = netdev_priv(dev);
	int port = BP_PORT(bp);
	int i;

	if (!netif_running(dev))
		return 0;

	/* only the PMF controls the physical LEDs */
	if (!bp->port.pmf)
		return 0;

	if (data == 0)
		data = 2;

	/* two half-periods per second of blinking */
	for (i = 0; i < (data * 2); i++) {
		if ((i % 2) == 0)
			bnx2x_set_led(bp, port, LED_MODE_OPER, SPEED_1000,
				      bp->link_params.hw_led_mode,
				      bp->link_params.chip_id);
		else
			bnx2x_set_led(bp, port, LED_MODE_OFF, 0,
				      bp->link_params.hw_led_mode,
				      bp->link_params.chip_id);

		msleep_interruptible(500);
		/* let the user interrupt a long blink with e.g. ^C */
		if (signal_pending(current))
			break;
	}

	/* leave the LED showing the real operational state */
	if (bp->link_vars.link_up)
		bnx2x_set_led(bp, port, LED_MODE_OPER,
			      bp->link_vars.line_speed,
			      bp->link_params.hw_led_mode,
			      bp->link_params.chip_id);

	return 0;
}
10552
10553static struct ethtool_ops bnx2x_ethtool_ops = {
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -070010554 .get_settings = bnx2x_get_settings,
10555 .set_settings = bnx2x_set_settings,
10556 .get_drvinfo = bnx2x_get_drvinfo,
Eilon Greenstein0a64ea52009-03-02 08:01:12 +000010557 .get_regs_len = bnx2x_get_regs_len,
10558 .get_regs = bnx2x_get_regs,
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010559 .get_wol = bnx2x_get_wol,
10560 .set_wol = bnx2x_set_wol,
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -070010561 .get_msglevel = bnx2x_get_msglevel,
10562 .set_msglevel = bnx2x_set_msglevel,
10563 .nway_reset = bnx2x_nway_reset,
Naohiro Ooiwa01e53292009-06-30 12:44:19 -070010564 .get_link = bnx2x_get_link,
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -070010565 .get_eeprom_len = bnx2x_get_eeprom_len,
10566 .get_eeprom = bnx2x_get_eeprom,
10567 .set_eeprom = bnx2x_set_eeprom,
10568 .get_coalesce = bnx2x_get_coalesce,
10569 .set_coalesce = bnx2x_set_coalesce,
10570 .get_ringparam = bnx2x_get_ringparam,
10571 .set_ringparam = bnx2x_set_ringparam,
10572 .get_pauseparam = bnx2x_get_pauseparam,
10573 .set_pauseparam = bnx2x_set_pauseparam,
10574 .get_rx_csum = bnx2x_get_rx_csum,
10575 .set_rx_csum = bnx2x_set_rx_csum,
10576 .get_tx_csum = ethtool_op_get_tx_csum,
Eilon Greenstein755735e2008-06-23 20:35:13 -070010577 .set_tx_csum = ethtool_op_set_tx_hw_csum,
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -070010578 .set_flags = bnx2x_set_flags,
10579 .get_flags = ethtool_op_get_flags,
10580 .get_sg = ethtool_op_get_sg,
10581 .set_sg = ethtool_op_set_sg,
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010582 .get_tso = ethtool_op_get_tso,
10583 .set_tso = bnx2x_set_tso,
10584 .self_test_count = bnx2x_self_test_count,
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -070010585 .self_test = bnx2x_self_test,
10586 .get_strings = bnx2x_get_strings,
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010587 .phys_id = bnx2x_phys_id,
10588 .get_stats_count = bnx2x_get_stats_count,
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -070010589 .get_ethtool_stats = bnx2x_get_ethtool_stats,
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010590};
10591
10592/* end of ethtool_ops */
10593
10594/****************************************************************************
10595* General service functions
10596****************************************************************************/
10597
/* Program the PCI power-management CSR to move the device between D0 and
 * D3hot.  Returns 0 on success, -EINVAL for any other target state.
 */
static int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
{
	u16 pmcsr;

	pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);

	switch (state) {
	case PCI_D0:
		/* clear the power-state field and ack any pending PME */
		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
				      ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
				       PCI_PM_CTRL_PME_STATUS));

		if (pmcsr & PCI_PM_CTRL_STATE_MASK)
			/* delay required during transition out of D3hot */
			msleep(20);
		break;

	case PCI_D3hot:
		pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
		pmcsr |= 3;	/* 3 = D3hot encoding in the PM CSR */

		/* arm PME generation if Wake-on-LAN is enabled */
		if (bp->wol)
			pmcsr |= PCI_PM_CTRL_PME_ENABLE;

		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
				      pmcsr);

		/* No more memory access after this point until
		 * device is brought back to D0.
		 */
		break;

	default:
		return -EINVAL;
	}
	return 0;
}
10635
Eilon Greenstein237907c2009-01-14 06:42:44 +000010636static inline int bnx2x_has_rx_work(struct bnx2x_fastpath *fp)
10637{
10638 u16 rx_cons_sb;
10639
10640 /* Tell compiler that status block fields can change */
10641 barrier();
10642 rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb);
10643 if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
10644 rx_cons_sb++;
10645 return (fp->rx_comp_cons != rx_cons_sb);
10646}
10647
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010648/*
10649 * net_device service functions
10650 */
10651
/* NAPI poll callback for one RX fastpath.
 *
 * Processes up to @budget RX completions and returns the number handled.
 * If the budget was not exhausted and no work remains, NAPI is completed
 * and the fastpath interrupt is re-enabled through the IGU.
 */
static int bnx2x_poll(struct napi_struct *napi, int budget)
{
	struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
						 napi);
	struct bnx2x *bp = fp->bp;
	int work_done = 0;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		goto poll_panic;
#endif

	/* warm the cache lines of the next RX buffer's skb */
	prefetch(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb);
	prefetch((char *)(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb) + 256);

	/* snapshot the status block indices before checking for work */
	bnx2x_update_fpsb_idx(fp);

	if (bnx2x_has_rx_work(fp)) {
		work_done = bnx2x_rx_int(fp, budget);

		/* must not complete if we consumed full budget */
		if (work_done >= budget)
			goto poll_again;
	}

	/* bnx2x_has_rx_work() reads the status block, thus we need to
	 * ensure that status block indices have been actually read
	 * (bnx2x_update_fpsb_idx) prior to this check (bnx2x_has_rx_work)
	 * so that we won't write the "newer" value of the status block to IGU
	 * (if there was a DMA right after bnx2x_has_rx_work and
	 * if there is no rmb, the memory reading (bnx2x_update_fpsb_idx)
	 * may be postponed to right before bnx2x_ack_sb). In this case
	 * there will never be another interrupt until there is another update
	 * of the status block, while there is still unhandled work.
	 */
	rmb();

	if (!bnx2x_has_rx_work(fp)) {
#ifdef BNX2X_STOP_ON_ERROR
poll_panic:
#endif
		napi_complete(napi);

		/* ack both storm indices; the CSTORM ack re-enables the
		 * fastpath interrupt in the IGU */
		bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID,
			     le16_to_cpu(fp->fp_u_idx), IGU_INT_NOP, 1);
		bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID,
			     le16_to_cpu(fp->fp_c_idx), IGU_INT_ENABLE, 1);
	}

poll_again:
	return work_done;
}
10704
Eilon Greenstein755735e2008-06-23 20:35:13 -070010705
10706/* we split the first BD into headers and data BDs
Eilon Greenstein33471622008-08-13 15:59:08 -070010707 * to ease the pain of our fellow microcode engineers
Eilon Greenstein755735e2008-06-23 20:35:13 -070010708 * we use one mapping for both BDs
10709 * So far this has only been observed to happen
10710 * in Other Operating Systems(TM)
10711 */
/* we split the first BD into headers and data BDs
 * to ease the pain of our fellow microcode engineers
 * we use one mapping for both BDs
 * So far this has only been observed to happen
 * in Other Operating Systems(TM)
 *
 * The start BD is truncated to the @hlen header bytes; a new data BD is
 * allocated right after the parsing BD and pointed @hlen bytes into the
 * same DMA mapping to carry the remaining payload.  *tx_bd is advanced
 * to the new data BD and the updated BD producer index is returned.
 */
static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
				   struct bnx2x_fastpath *fp,
				   struct sw_tx_bd *tx_buf,
				   struct eth_tx_start_bd **tx_bd, u16 hlen,
				   u16 bd_prod, int nbd)
{
	struct eth_tx_start_bd *h_tx_bd = *tx_bd;
	struct eth_tx_bd *d_tx_bd;
	dma_addr_t mapping;
	int old_len = le16_to_cpu(h_tx_bd->nbytes);

	/* first fix first BD */
	h_tx_bd->nbd = cpu_to_le16(nbd);
	h_tx_bd->nbytes = cpu_to_le16(hlen);

	DP(NETIF_MSG_TX_QUEUED,	"TSO split header size is %d "
	   "(%x:%x) nbd %d\n", h_tx_bd->nbytes, h_tx_bd->addr_hi,
	   h_tx_bd->addr_lo, h_tx_bd->nbd);

	/* now get a new data BD
	 * (after the pbd) and fill it */
	bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
	d_tx_bd = &fp->tx_desc_ring[bd_prod].reg_bd;

	/* point into the original mapping, past the header bytes */
	mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
			   le32_to_cpu(h_tx_bd->addr_lo)) + hlen;

	d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
	d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);

	/* this marks the BD as one that has no individual mapping */
	tx_buf->flags |= BNX2X_TSO_SPLIT_BD;

	DP(NETIF_MSG_TX_QUEUED,
	   "TSO split data size is %d (%x:%x)\n",
	   d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);

	/* update tx_bd */
	*tx_bd = (struct eth_tx_start_bd *)d_tx_bd;

	return bd_prod;
}
10755
/* Adjust a pseudo-header checksum when the device's notion of where the
 * transport header starts differs from the stack's by @fix bytes:
 * fix > 0 - the csum covers @fix extra bytes before @t_header, subtract
 *           their partial sum; fix < 0 - @fix bytes are missing, add
 *           their partial sum in.  Result is folded, complemented and
 *           byte-swapped for the hardware.
 */
static inline u16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
{
	if (fix > 0)
		csum = (u16) ~csum_fold(csum_sub(csum,
				csum_partial(t_header - fix, fix, 0)));

	else if (fix < 0)
		csum = (u16) ~csum_fold(csum_add(csum,
				csum_partial(t_header, -fix, 0)));

	return swab16(csum);
}
10768
10769static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
10770{
10771 u32 rc;
10772
10773 if (skb->ip_summed != CHECKSUM_PARTIAL)
10774 rc = XMIT_PLAIN;
10775
10776 else {
Eilon Greenstein4781bfa2009-02-12 08:38:17 +000010777 if (skb->protocol == htons(ETH_P_IPV6)) {
Eilon Greenstein755735e2008-06-23 20:35:13 -070010778 rc = XMIT_CSUM_V6;
10779 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
10780 rc |= XMIT_CSUM_TCP;
10781
10782 } else {
10783 rc = XMIT_CSUM_V4;
10784 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
10785 rc |= XMIT_CSUM_TCP;
10786 }
10787 }
10788
10789 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
10790 rc |= XMIT_GSO_V4;
10791
10792 else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
10793 rc |= XMIT_GSO_V6;
10794
10795 return rc;
10796}
10797
#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
/* check if packet requires linearization (packet is too fragmented)
   no need to check fragmentation if page size > 8K (there will be no
   violation to FW restrictions) */
/* Returns non-zero when the skb must be linearized before transmit.
 * The firmware can fetch at most MAX_FETCH_BD BDs per transmitted frame;
 * for LSO this limit applies per generated segment, so a sliding window
 * of wnd_size fragments is checked: if any window sums to less than one
 * MSS, a single segment could span too many BDs.
 */
static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
			     u32 xmit_type)
{
	int to_copy = 0;
	int hlen = 0;
	int first_bd_sz = 0;

	/* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
	if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {

		if (xmit_type & XMIT_GSO) {
			unsigned short lso_mss = skb_shinfo(skb)->gso_size;
			/* Check if LSO packet needs to be copied:
			   3 = 1 (for headers BD) + 2 (for PBD and last BD) */
			int wnd_size = MAX_FETCH_BD - 3;
			/* Number of windows to check */
			int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
			int wnd_idx = 0;
			int frag_idx = 0;
			u32 wnd_sum = 0;

			/* Headers length */
			hlen = (int)(skb_transport_header(skb) - skb->data) +
				tcp_hdrlen(skb);

			/* Amount of data (w/o headers) on linear part of SKB*/
			first_bd_sz = skb_headlen(skb) - hlen;

			wnd_sum  = first_bd_sz;

			/* Calculate the first sum - it's special */
			for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
				wnd_sum +=
					skb_shinfo(skb)->frags[frag_idx].size;

			/* If there was data on linear skb data - check it */
			if (first_bd_sz > 0) {
				if (unlikely(wnd_sum < lso_mss)) {
					to_copy = 1;
					goto exit_lbl;
				}

				wnd_sum -= first_bd_sz;
			}

			/* Others are easier: run through the frag list and
			   check all windows */
			for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
				wnd_sum +=
			  skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1].size;

				if (unlikely(wnd_sum < lso_mss)) {
					to_copy = 1;
					break;
				}
				wnd_sum -=
					skb_shinfo(skb)->frags[wnd_idx].size;
			}
		} else {
			/* in non-LSO too fragmented packet should always
			   be linearized */
			to_copy = 1;
		}
	}

exit_lbl:
	if (unlikely(to_copy))
		DP(NETIF_MSG_TX_QUEUED,
		   "Linearization IS REQUIRED for %s packet. "
		   "num_frags %d  hlen %d  first_bd_sz %d\n",
		   (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
		   skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);

	return to_copy;
}
#endif
Eilon Greenstein755735e2008-06-23 20:35:13 -070010878
10879/* called with netif_tx_lock
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010880 * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
Eilon Greenstein755735e2008-06-23 20:35:13 -070010881 * netif_wake_queue()
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010882 */
10883static int bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
10884{
10885 struct bnx2x *bp = netdev_priv(dev);
Eilon Greensteinca003922009-08-12 22:53:28 -070010886 struct bnx2x_fastpath *fp, *fp_stat;
Eilon Greenstein555f6c72009-02-12 08:36:11 +000010887 struct netdev_queue *txq;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010888 struct sw_tx_bd *tx_buf;
Eilon Greensteinca003922009-08-12 22:53:28 -070010889 struct eth_tx_start_bd *tx_start_bd;
10890 struct eth_tx_bd *tx_data_bd, *total_pkt_bd = NULL;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010891 struct eth_tx_parse_bd *pbd = NULL;
10892 u16 pkt_prod, bd_prod;
Eilon Greenstein755735e2008-06-23 20:35:13 -070010893 int nbd, fp_index;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010894 dma_addr_t mapping;
Eilon Greenstein755735e2008-06-23 20:35:13 -070010895 u32 xmit_type = bnx2x_xmit_type(bp, skb);
Eilon Greenstein755735e2008-06-23 20:35:13 -070010896 int i;
10897 u8 hlen = 0;
Eilon Greensteinca003922009-08-12 22:53:28 -070010898 __le16 pkt_size = 0;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010899
10900#ifdef BNX2X_STOP_ON_ERROR
10901 if (unlikely(bp->panic))
10902 return NETDEV_TX_BUSY;
10903#endif
10904
Eilon Greenstein555f6c72009-02-12 08:36:11 +000010905 fp_index = skb_get_queue_mapping(skb);
10906 txq = netdev_get_tx_queue(dev, fp_index);
10907
Eilon Greensteinca003922009-08-12 22:53:28 -070010908 fp = &bp->fp[fp_index + bp->num_rx_queues];
10909 fp_stat = &bp->fp[fp_index];
Eilon Greenstein755735e2008-06-23 20:35:13 -070010910
Yitchak Gertner231fd582008-08-25 15:27:06 -070010911 if (unlikely(bnx2x_tx_avail(fp) < (skb_shinfo(skb)->nr_frags + 3))) {
Eilon Greensteinca003922009-08-12 22:53:28 -070010912 fp_stat->eth_q_stats.driver_xoff++;
Eilon Greenstein555f6c72009-02-12 08:36:11 +000010913 netif_tx_stop_queue(txq);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010914 BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
10915 return NETDEV_TX_BUSY;
10916 }
10917
Eilon Greenstein755735e2008-06-23 20:35:13 -070010918 DP(NETIF_MSG_TX_QUEUED, "SKB: summed %x protocol %x protocol(%x,%x)"
10919 " gso type %x xmit_type %x\n",
10920 skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
10921 ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);
10922
Eilon Greenstein632da4d2009-01-14 06:44:10 +000010923#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
Eilon Greensteinf5372252009-02-12 08:38:30 +000010924 /* First, check if we need to linearize the skb (due to FW
10925 restrictions). No need to check fragmentation if page size > 8K
10926 (there will be no violation to FW restrictions) */
Eilon Greenstein755735e2008-06-23 20:35:13 -070010927 if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
10928 /* Statistics of linearization */
10929 bp->lin_cnt++;
10930 if (skb_linearize(skb) != 0) {
10931 DP(NETIF_MSG_TX_QUEUED, "SKB linearization failed - "
10932 "silently dropping this SKB\n");
10933 dev_kfree_skb_any(skb);
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -070010934 return NETDEV_TX_OK;
Eilon Greenstein755735e2008-06-23 20:35:13 -070010935 }
10936 }
Eilon Greenstein632da4d2009-01-14 06:44:10 +000010937#endif
Eilon Greenstein755735e2008-06-23 20:35:13 -070010938
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010939 /*
Eilon Greenstein755735e2008-06-23 20:35:13 -070010940 Please read carefully. First we use one BD which we mark as start,
Eilon Greensteinca003922009-08-12 22:53:28 -070010941 then we have a parsing info BD (used for TSO or xsum),
Eilon Greenstein755735e2008-06-23 20:35:13 -070010942 and only then we have the rest of the TSO BDs.
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010943 (don't forget to mark the last one as last,
10944 and to unmap only AFTER you write to the BD ...)
Eilon Greenstein755735e2008-06-23 20:35:13 -070010945 And above all, all pdb sizes are in words - NOT DWORDS!
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010946 */
10947
10948 pkt_prod = fp->tx_pkt_prod++;
Eilon Greenstein755735e2008-06-23 20:35:13 -070010949 bd_prod = TX_BD(fp->tx_bd_prod);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010950
Eilon Greenstein755735e2008-06-23 20:35:13 -070010951 /* get a tx_buf and first BD */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010952 tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
Eilon Greensteinca003922009-08-12 22:53:28 -070010953 tx_start_bd = &fp->tx_desc_ring[bd_prod].start_bd;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010954
Eilon Greensteinca003922009-08-12 22:53:28 -070010955 tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
10956 tx_start_bd->general_data = (UNICAST_ADDRESS <<
10957 ETH_TX_START_BD_ETH_ADDR_TYPE_SHIFT);
Eilon Greenstein3196a882008-08-13 15:58:49 -070010958 /* header nbd */
Eilon Greensteinca003922009-08-12 22:53:28 -070010959 tx_start_bd->general_data |= (1 << ETH_TX_START_BD_HDR_NBDS_SHIFT);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010960
Eilon Greenstein755735e2008-06-23 20:35:13 -070010961 /* remember the first BD of the packet */
10962 tx_buf->first_bd = fp->tx_bd_prod;
10963 tx_buf->skb = skb;
Eilon Greensteinca003922009-08-12 22:53:28 -070010964 tx_buf->flags = 0;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010965
10966 DP(NETIF_MSG_TX_QUEUED,
10967 "sending pkt %u @%p next_idx %u bd %u @%p\n",
Eilon Greensteinca003922009-08-12 22:53:28 -070010968 pkt_prod, tx_buf, fp->tx_pkt_prod, bd_prod, tx_start_bd);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020010969
Eilon Greenstein0c6671b2009-01-14 21:26:51 -080010970#ifdef BCM_VLAN
10971 if ((bp->vlgrp != NULL) && vlan_tx_tag_present(skb) &&
10972 (bp->flags & HW_VLAN_TX_FLAG)) {
Eilon Greensteinca003922009-08-12 22:53:28 -070010973 tx_start_bd->vlan = cpu_to_le16(vlan_tx_tag_get(skb));
10974 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_VLAN_TAG;
Eilon Greenstein755735e2008-06-23 20:35:13 -070010975 } else
Eilon Greenstein0c6671b2009-01-14 21:26:51 -080010976#endif
Eilon Greensteinca003922009-08-12 22:53:28 -070010977 tx_start_bd->vlan = cpu_to_le16(pkt_prod);
Eilon Greenstein755735e2008-06-23 20:35:13 -070010978
Eilon Greensteinca003922009-08-12 22:53:28 -070010979 /* turn on parsing and get a BD */
10980 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
10981 pbd = &fp->tx_desc_ring[bd_prod].parse_bd;
Eilon Greenstein755735e2008-06-23 20:35:13 -070010982
Eilon Greensteinca003922009-08-12 22:53:28 -070010983 memset(pbd, 0, sizeof(struct eth_tx_parse_bd));
Eilon Greenstein755735e2008-06-23 20:35:13 -070010984
10985 if (xmit_type & XMIT_CSUM) {
Eilon Greensteinca003922009-08-12 22:53:28 -070010986 hlen = (skb_network_header(skb) - skb->data) / 2;
Eilon Greenstein755735e2008-06-23 20:35:13 -070010987
10988 /* for now NS flag is not used in Linux */
Eilon Greenstein4781bfa2009-02-12 08:38:17 +000010989 pbd->global_data =
10990 (hlen | ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
10991 ETH_TX_PARSE_BD_LLC_SNAP_EN_SHIFT));
Eilon Greenstein755735e2008-06-23 20:35:13 -070010992
10993 pbd->ip_hlen = (skb_transport_header(skb) -
10994 skb_network_header(skb)) / 2;
10995
10996 hlen += pbd->ip_hlen + tcp_hdrlen(skb) / 2;
10997
10998 pbd->total_hlen = cpu_to_le16(hlen);
Eilon Greensteinca003922009-08-12 22:53:28 -070010999 hlen = hlen*2;
Eilon Greenstein755735e2008-06-23 20:35:13 -070011000
Eilon Greensteinca003922009-08-12 22:53:28 -070011001 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM;
Eilon Greenstein755735e2008-06-23 20:35:13 -070011002
11003 if (xmit_type & XMIT_CSUM_V4)
Eilon Greensteinca003922009-08-12 22:53:28 -070011004 tx_start_bd->bd_flags.as_bitfield |=
Eilon Greenstein755735e2008-06-23 20:35:13 -070011005 ETH_TX_BD_FLAGS_IP_CSUM;
11006 else
Eilon Greensteinca003922009-08-12 22:53:28 -070011007 tx_start_bd->bd_flags.as_bitfield |=
11008 ETH_TX_BD_FLAGS_IPV6;
Eilon Greenstein755735e2008-06-23 20:35:13 -070011009
11010 if (xmit_type & XMIT_CSUM_TCP) {
11011 pbd->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check);
11012
11013 } else {
11014 s8 fix = SKB_CS_OFF(skb); /* signed! */
11015
Eilon Greensteinca003922009-08-12 22:53:28 -070011016 pbd->global_data |= ETH_TX_PARSE_BD_UDP_CS_FLG;
Eilon Greenstein755735e2008-06-23 20:35:13 -070011017
11018 DP(NETIF_MSG_TX_QUEUED,
Eilon Greensteinca003922009-08-12 22:53:28 -070011019 "hlen %d fix %d csum before fix %x\n",
11020 le16_to_cpu(pbd->total_hlen), fix, SKB_CS(skb));
Eilon Greenstein755735e2008-06-23 20:35:13 -070011021
11022 /* HW bug: fixup the CSUM */
11023 pbd->tcp_pseudo_csum =
11024 bnx2x_csum_fix(skb_transport_header(skb),
11025 SKB_CS(skb), fix);
11026
11027 DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
11028 pbd->tcp_pseudo_csum);
11029 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011030 }
11031
11032 mapping = pci_map_single(bp->pdev, skb->data,
Eilon Greenstein755735e2008-06-23 20:35:13 -070011033 skb_headlen(skb), PCI_DMA_TODEVICE);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011034
Eilon Greensteinca003922009-08-12 22:53:28 -070011035 tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
11036 tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
11037 nbd = skb_shinfo(skb)->nr_frags + 2; /* start_bd + pbd + frags */
11038 tx_start_bd->nbd = cpu_to_le16(nbd);
11039 tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
11040 pkt_size = tx_start_bd->nbytes;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011041
11042 DP(NETIF_MSG_TX_QUEUED, "first bd @%p addr (%x:%x) nbd %d"
Eilon Greenstein755735e2008-06-23 20:35:13 -070011043 " nbytes %d flags %x vlan %x\n",
Eilon Greensteinca003922009-08-12 22:53:28 -070011044 tx_start_bd, tx_start_bd->addr_hi, tx_start_bd->addr_lo,
11045 le16_to_cpu(tx_start_bd->nbd), le16_to_cpu(tx_start_bd->nbytes),
11046 tx_start_bd->bd_flags.as_bitfield, le16_to_cpu(tx_start_bd->vlan));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011047
Eilon Greenstein755735e2008-06-23 20:35:13 -070011048 if (xmit_type & XMIT_GSO) {
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011049
11050 DP(NETIF_MSG_TX_QUEUED,
11051 "TSO packet len %d hlen %d total len %d tso size %d\n",
11052 skb->len, hlen, skb_headlen(skb),
11053 skb_shinfo(skb)->gso_size);
11054
Eilon Greensteinca003922009-08-12 22:53:28 -070011055 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011056
Eilon Greenstein755735e2008-06-23 20:35:13 -070011057 if (unlikely(skb_headlen(skb) > hlen))
Eilon Greensteinca003922009-08-12 22:53:28 -070011058 bd_prod = bnx2x_tx_split(bp, fp, tx_buf, &tx_start_bd,
11059 hlen, bd_prod, ++nbd);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011060
11061 pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
11062 pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
Eilon Greenstein755735e2008-06-23 20:35:13 -070011063 pbd->tcp_flags = pbd_tcp_flags(skb);
11064
11065 if (xmit_type & XMIT_GSO_V4) {
11066 pbd->ip_id = swab16(ip_hdr(skb)->id);
11067 pbd->tcp_pseudo_csum =
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011068 swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
11069 ip_hdr(skb)->daddr,
11070 0, IPPROTO_TCP, 0));
Eilon Greenstein755735e2008-06-23 20:35:13 -070011071
11072 } else
11073 pbd->tcp_pseudo_csum =
11074 swab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
11075 &ipv6_hdr(skb)->daddr,
11076 0, IPPROTO_TCP, 0));
11077
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011078 pbd->global_data |= ETH_TX_PARSE_BD_PSEUDO_CS_WITHOUT_LEN;
11079 }
Eilon Greensteinca003922009-08-12 22:53:28 -070011080 tx_data_bd = (struct eth_tx_bd *)tx_start_bd;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011081
Eilon Greenstein755735e2008-06-23 20:35:13 -070011082 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
11083 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011084
Eilon Greenstein755735e2008-06-23 20:35:13 -070011085 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
Eilon Greensteinca003922009-08-12 22:53:28 -070011086 tx_data_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
11087 if (total_pkt_bd == NULL)
11088 total_pkt_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011089
Eilon Greenstein755735e2008-06-23 20:35:13 -070011090 mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
11091 frag->size, PCI_DMA_TODEVICE);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011092
Eilon Greensteinca003922009-08-12 22:53:28 -070011093 tx_data_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
11094 tx_data_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
11095 tx_data_bd->nbytes = cpu_to_le16(frag->size);
11096 le16_add_cpu(&pkt_size, frag->size);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011097
Eilon Greenstein755735e2008-06-23 20:35:13 -070011098 DP(NETIF_MSG_TX_QUEUED,
Eilon Greensteinca003922009-08-12 22:53:28 -070011099 "frag %d bd @%p addr (%x:%x) nbytes %d\n",
11100 i, tx_data_bd, tx_data_bd->addr_hi, tx_data_bd->addr_lo,
11101 le16_to_cpu(tx_data_bd->nbytes));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011102 }
11103
Eilon Greensteinca003922009-08-12 22:53:28 -070011104 DP(NETIF_MSG_TX_QUEUED, "last bd @%p\n", tx_data_bd);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011105
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011106 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
11107
Eilon Greenstein755735e2008-06-23 20:35:13 -070011108 /* now send a tx doorbell, counting the next BD
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011109 * if the packet contains or ends with it
11110 */
11111 if (TX_BD_POFF(bd_prod) < nbd)
11112 nbd++;
11113
Eilon Greensteinca003922009-08-12 22:53:28 -070011114 if (total_pkt_bd != NULL)
11115 total_pkt_bd->total_pkt_bytes = pkt_size;
11116
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011117 if (pbd)
11118 DP(NETIF_MSG_TX_QUEUED,
11119 "PBD @%p ip_data %x ip_hlen %u ip_id %u lso_mss %u"
11120 " tcp_flags %x xsum %x seq %u hlen %u\n",
11121 pbd, pbd->global_data, pbd->ip_hlen, pbd->ip_id,
11122 pbd->lso_mss, pbd->tcp_flags, pbd->tcp_pseudo_csum,
Eilon Greenstein755735e2008-06-23 20:35:13 -070011123 pbd->tcp_send_seq, le16_to_cpu(pbd->total_hlen));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011124
Eilon Greenstein755735e2008-06-23 20:35:13 -070011125 DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d bd %u\n", nbd, bd_prod);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011126
Eilon Greenstein58f4c4c2009-01-14 21:23:36 -080011127 /*
11128 * Make sure that the BD data is updated before updating the producer
11129 * since FW might read the BD right after the producer is updated.
11130 * This is only applicable for weak-ordered memory model archs such
11131 * as IA-64. The following barrier is also mandatory since FW will
11132 * assumes packets must have BDs.
11133 */
11134 wmb();
11135
Eilon Greensteinca003922009-08-12 22:53:28 -070011136 fp->tx_db.data.prod += nbd;
11137 barrier();
11138 DOORBELL(bp, fp->index - bp->num_rx_queues, fp->tx_db.raw);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011139
11140 mmiowb();
11141
Eilon Greenstein755735e2008-06-23 20:35:13 -070011142 fp->tx_bd_prod += nbd;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011143
11144 if (unlikely(bnx2x_tx_avail(fp) < MAX_SKB_FRAGS + 3)) {
Eilon Greensteinca003922009-08-12 22:53:28 -070011145 netif_tx_stop_queue(txq);
Eilon Greenstein58f4c4c2009-01-14 21:23:36 -080011146 /* We want bnx2x_tx_int to "see" the updated tx_bd_prod
11147 if we put Tx into XOFF state. */
11148 smp_mb();
Eilon Greensteinca003922009-08-12 22:53:28 -070011149 fp_stat->eth_q_stats.driver_xoff++;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011150 if (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3)
Eilon Greenstein555f6c72009-02-12 08:36:11 +000011151 netif_tx_wake_queue(txq);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011152 }
Eilon Greensteinca003922009-08-12 22:53:28 -070011153 fp_stat->tx_pkt++;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011154
11155 return NETDEV_TX_OK;
11156}
11157
/* called with rtnl_lock */
static int bnx2x_open(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	/* Report no-carrier until the chip is loaded and link is reported */
	netif_carrier_off(dev);

	/* Make sure the device is in full-power state before loading */
	bnx2x_set_power_state(bp, PCI_D0);

	return bnx2x_nic_load(bp, LOAD_OPEN);
}
11169
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -070011170/* called with rtnl_lock */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011171static int bnx2x_close(struct net_device *dev)
11172{
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011173 struct bnx2x *bp = netdev_priv(dev);
11174
11175 /* Unload the driver, release IRQs */
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -070011176 bnx2x_nic_unload(bp, UNLOAD_CLOSE);
11177 if (atomic_read(&bp->pdev->enable_cnt) == 1)
11178 if (!CHIP_REV_IS_SLOW(bp))
11179 bnx2x_set_power_state(bp, PCI_D3hot);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011180
11181 return 0;
11182}
11183
/* called with netif_tx_lock from dev_mcast.c */
/* Program the chip's Rx filtering (promiscuous / all-multi / selected
 * multicasts) from dev->flags and the device multicast list.  E1 chips
 * get their multicast MACs written into the CAM via a SET_MAC ramrod;
 * E1H chips use a 256-bit multicast hash filter in registers. */
static void bnx2x_set_rx_mode(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);
	u32 rx_mode = BNX2X_RX_MODE_NORMAL;
	int port = BP_PORT(bp);

	/* Filters can only be configured while the device is up */
	if (bp->state != BNX2X_STATE_OPEN) {
		DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
		return;
	}

	DP(NETIF_MSG_IFUP, "dev->flags = %x\n", dev->flags);

	if (dev->flags & IFF_PROMISC)
		rx_mode = BNX2X_RX_MODE_PROMISC;

	/* E1 CAM can hold only BNX2X_MAX_MULTICAST entries; beyond that
	 * fall back to accepting all multicasts */
	else if ((dev->flags & IFF_ALLMULTI) ||
		 ((dev->mc_count > BNX2X_MAX_MULTICAST) && CHIP_IS_E1(bp)))
		rx_mode = BNX2X_RX_MODE_ALLMULTI;

	else { /* some multicasts */
		if (CHIP_IS_E1(bp)) {
			int i, old, offset;
			struct dev_mc_list *mclist;
			struct mac_configuration_cmd *config =
						bnx2x_sp(bp, mcast_config);

			/* Fill one CAM entry per multicast address; the
			 * MAC bytes are written as three swabbed u16s */
			for (i = 0, mclist = dev->mc_list;
			     mclist && (i < dev->mc_count);
			     i++, mclist = mclist->next) {

				config->config_table[i].
					cam_entry.msb_mac_addr =
					swab16(*(u16 *)&mclist->dmi_addr[0]);
				config->config_table[i].
					cam_entry.middle_mac_addr =
					swab16(*(u16 *)&mclist->dmi_addr[2]);
				config->config_table[i].
					cam_entry.lsb_mac_addr =
					swab16(*(u16 *)&mclist->dmi_addr[4]);
				config->config_table[i].cam_entry.flags =
							cpu_to_le16(port);
				config->config_table[i].
					target_table_entry.flags = 0;
				config->config_table[i].target_table_entry.
					clients_bit_vector =
						cpu_to_le32(1 << BP_L_ID(bp));
				config->config_table[i].
					target_table_entry.vlan_id = 0;

				DP(NETIF_MSG_IFUP,
				   "setting MCAST[%d] (%04x:%04x:%04x)\n", i,
				   config->config_table[i].
						cam_entry.msb_mac_addr,
				   config->config_table[i].
						cam_entry.middle_mac_addr,
				   config->config_table[i].
						cam_entry.lsb_mac_addr);
			}
			/* Invalidate CAM entries left over from a previous,
			 * longer multicast list */
			old = config->hdr.length;
			if (old > i) {
				for (; i < old; i++) {
					if (CAM_IS_INVALID(config->
							   config_table[i])) {
						/* already invalidated */
						break;
					}
					/* invalidate */
					CAM_INVALIDATE(config->
						       config_table[i]);
				}
			}

			/* Per-port base offset of the multicast CAM region */
			if (CHIP_REV_IS_SLOW(bp))
				offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
			else
				offset = BNX2X_MAX_MULTICAST*(1 + port);

			config->hdr.length = i;
			config->hdr.offset = offset;
			config->hdr.client_id = bp->fp->cl_id;
			config->hdr.reserved1 = 0;

			/* Hand the CAM update to FW via a slow-path ramrod */
			bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
				   U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
				   U64_LO(bnx2x_sp_mapping(bp, mcast_config)),
				      0);
		} else { /* E1H */
			/* Accept one or more multicasts */
			struct dev_mc_list *mclist;
			u32 mc_filter[MC_HASH_SIZE];
			u32 crc, bit, regidx;
			int i;

			memset(mc_filter, 0, 4 * MC_HASH_SIZE);

			/* Each MAC selects one bit of the 256-bit hash
			 * filter via the top byte of its CRC32c */
			for (i = 0, mclist = dev->mc_list;
			     mclist && (i < dev->mc_count);
			     i++, mclist = mclist->next) {

				DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n",
				   mclist->dmi_addr);

				crc = crc32c_le(0, mclist->dmi_addr, ETH_ALEN);
				bit = (crc >> 24) & 0xff;
				regidx = bit >> 5;
				bit &= 0x1f;
				mc_filter[regidx] |= (1 << bit);
			}

			for (i = 0; i < MC_HASH_SIZE; i++)
				REG_WR(bp, MC_HASH_OFFSET(bp, i),
				       mc_filter[i]);
		}
	}

	/* Commit the chosen mode to the storm firmware */
	bp->rx_mode = rx_mode;
	bnx2x_set_storm_rx_mode(bp);
}
11304
11305/* called with rtnl_lock */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011306static int bnx2x_change_mac_addr(struct net_device *dev, void *p)
11307{
11308 struct sockaddr *addr = p;
11309 struct bnx2x *bp = netdev_priv(dev);
11310
Eilon Greenstein34f80b02008-06-23 20:33:01 -070011311 if (!is_valid_ether_addr((u8 *)(addr->sa_data)))
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011312 return -EINVAL;
11313
11314 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
Eilon Greenstein34f80b02008-06-23 20:33:01 -070011315 if (netif_running(dev)) {
11316 if (CHIP_IS_E1(bp))
Yitchak Gertner3101c2b2008-08-13 15:52:28 -070011317 bnx2x_set_mac_addr_e1(bp, 1);
Eilon Greenstein34f80b02008-06-23 20:33:01 -070011318 else
Yitchak Gertner3101c2b2008-08-13 15:52:28 -070011319 bnx2x_set_mac_addr_e1h(bp, 1);
Eilon Greenstein34f80b02008-06-23 20:33:01 -070011320 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011321
11322 return 0;
11323}
11324
Yaniv Rosnerc18487e2008-06-23 20:27:52 -070011325/* called with rtnl_lock */
Eilon Greenstein01cd4522009-08-12 08:23:08 +000011326static int bnx2x_mdio_read(struct net_device *netdev, int prtad,
11327 int devad, u16 addr)
11328{
11329 struct bnx2x *bp = netdev_priv(netdev);
11330 u16 value;
11331 int rc;
11332 u32 phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
11333
11334 DP(NETIF_MSG_LINK, "mdio_read: prtad 0x%x, devad 0x%x, addr 0x%x\n",
11335 prtad, devad, addr);
11336
11337 if (prtad != bp->mdio.prtad) {
11338 DP(NETIF_MSG_LINK, "prtad missmatch (cmd:0x%x != bp:0x%x)\n",
11339 prtad, bp->mdio.prtad);
11340 return -EINVAL;
11341 }
11342
11343 /* The HW expects different devad if CL22 is used */
11344 devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;
11345
11346 bnx2x_acquire_phy_lock(bp);
11347 rc = bnx2x_cl45_read(bp, BP_PORT(bp), phy_type, prtad,
11348 devad, addr, &value);
11349 bnx2x_release_phy_lock(bp);
11350 DP(NETIF_MSG_LINK, "mdio_read_val 0x%x rc = 0x%x\n", value, rc);
11351
11352 if (!rc)
11353 rc = value;
11354 return rc;
11355}
11356
11357/* called with rtnl_lock */
11358static int bnx2x_mdio_write(struct net_device *netdev, int prtad, int devad,
11359 u16 addr, u16 value)
11360{
11361 struct bnx2x *bp = netdev_priv(netdev);
11362 u32 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
11363 int rc;
11364
11365 DP(NETIF_MSG_LINK, "mdio_write: prtad 0x%x, devad 0x%x, addr 0x%x,"
11366 " value 0x%x\n", prtad, devad, addr, value);
11367
11368 if (prtad != bp->mdio.prtad) {
11369 DP(NETIF_MSG_LINK, "prtad missmatch (cmd:0x%x != bp:0x%x)\n",
11370 prtad, bp->mdio.prtad);
11371 return -EINVAL;
11372 }
11373
11374 /* The HW expects different devad if CL22 is used */
11375 devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;
11376
11377 bnx2x_acquire_phy_lock(bp);
11378 rc = bnx2x_cl45_write(bp, BP_PORT(bp), ext_phy_type, prtad,
11379 devad, addr, value);
11380 bnx2x_release_phy_lock(bp);
11381 return rc;
11382}
11383
11384/* called with rtnl_lock */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011385static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
11386{
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011387 struct bnx2x *bp = netdev_priv(dev);
Eilon Greenstein01cd4522009-08-12 08:23:08 +000011388 struct mii_ioctl_data *mdio = if_mii(ifr);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011389
Eilon Greenstein01cd4522009-08-12 08:23:08 +000011390 DP(NETIF_MSG_LINK, "ioctl: phy id 0x%x, reg 0x%x, val_in 0x%x\n",
11391 mdio->phy_id, mdio->reg_num, mdio->val_in);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011392
Eilon Greenstein01cd4522009-08-12 08:23:08 +000011393 if (!netif_running(dev))
11394 return -EAGAIN;
Yaniv Rosnerc18487e2008-06-23 20:27:52 -070011395
Eilon Greenstein01cd4522009-08-12 08:23:08 +000011396 return mdio_mii_ioctl(&bp->mdio, mdio, cmd);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011397}
11398
Eilon Greenstein34f80b02008-06-23 20:33:01 -070011399/* called with rtnl_lock */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011400static int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
11401{
11402 struct bnx2x *bp = netdev_priv(dev);
Eilon Greenstein34f80b02008-06-23 20:33:01 -070011403 int rc = 0;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011404
11405 if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
11406 ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE))
11407 return -EINVAL;
11408
11409 /* This does not race with packet allocation
Eliezer Tamirc14423f2008-02-28 11:49:42 -080011410 * because the actual alloc size is
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011411 * only updated as part of load
11412 */
11413 dev->mtu = new_mtu;
11414
11415 if (netif_running(dev)) {
Eilon Greenstein34f80b02008-06-23 20:33:01 -070011416 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
11417 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011418 }
Eilon Greenstein34f80b02008-06-23 20:33:01 -070011419
11420 return rc;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011421}
11422
/* Watchdog callback: the stack detected a stalled Tx queue
 * (dev->watchdog_timeo elapsed); schedule a full device reset. */
static void bnx2x_tx_timeout(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

#ifdef BNX2X_STOP_ON_ERROR
	if (!bp->panic)
		bnx2x_panic();
#endif
	/* This allows the netif to be shutdown gracefully before resetting */
	schedule_work(&bp->reset_task);
}
11434
11435#ifdef BCM_VLAN
Eilon Greenstein34f80b02008-06-23 20:33:01 -070011436/* called with rtnl_lock */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011437static void bnx2x_vlan_rx_register(struct net_device *dev,
11438 struct vlan_group *vlgrp)
11439{
11440 struct bnx2x *bp = netdev_priv(dev);
11441
11442 bp->vlgrp = vlgrp;
Eilon Greenstein0c6671b2009-01-14 21:26:51 -080011443
11444 /* Set flags according to the required capabilities */
11445 bp->flags &= ~(HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);
11446
11447 if (dev->features & NETIF_F_HW_VLAN_TX)
11448 bp->flags |= HW_VLAN_TX_FLAG;
11449
11450 if (dev->features & NETIF_F_HW_VLAN_RX)
11451 bp->flags |= HW_VLAN_RX_FLAG;
11452
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011453 if (netif_running(dev))
Eliezer Tamir49d66772008-02-28 11:53:13 -080011454 bnx2x_set_client_config(bp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011455}
Eilon Greenstein34f80b02008-06-23 20:33:01 -070011456
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011457#endif
11458
11459#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
/* netpoll/netconsole hook: invoke the interrupt handler directly with
 * the device IRQ masked, so it is safe in contexts where interrupt
 * delivery is not available. */
static void poll_bnx2x(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	disable_irq(bp->pdev->irq);
	bnx2x_interrupt(bp->pdev->irq, dev);
	enable_irq(bp->pdev->irq);
}
11468#endif
11469
/* net_device callbacks; the slow-path entries (open/stop/MTU/MAC/ioctl)
 * are serialized by rtnl_lock, as noted on each handler. */
static const struct net_device_ops bnx2x_netdev_ops = {
	.ndo_open		= bnx2x_open,
	.ndo_stop		= bnx2x_close,
	.ndo_start_xmit		= bnx2x_start_xmit,
	.ndo_set_multicast_list	= bnx2x_set_rx_mode,
	.ndo_set_mac_address	= bnx2x_change_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_do_ioctl		= bnx2x_ioctl,
	.ndo_change_mtu		= bnx2x_change_mtu,
	.ndo_tx_timeout		= bnx2x_tx_timeout,
#ifdef BCM_VLAN
	.ndo_vlan_rx_register	= bnx2x_vlan_rx_register,
#endif
#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
	.ndo_poll_controller	= poll_bnx2x,
#endif
};
11487
/* One-time PCI/netdev setup at probe time: enable the PCI device, map
 * BAR0 (registers) and BAR2 (doorbells), configure DMA masks, and wire
 * up the net_device ops/features and MDIO interface.  On failure the
 * goto chain unwinds exactly what was acquired.  Returns 0 or -errno. */
static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
				    struct net_device *dev)
{
	struct bnx2x *bp;
	int rc;

	SET_NETDEV_DEV(dev, &pdev->dev);
	bp = netdev_priv(dev);

	bp->dev = dev;
	bp->pdev = pdev;
	bp->flags = 0;
	/* PCI function number distinguishes the ports of a multi-function
	 * device */
	bp->func = PCI_FUNC(pdev->devfn);

	rc = pci_enable_device(pdev);
	if (rc) {
		printk(KERN_ERR PFX "Cannot enable PCI device, aborting\n");
		goto err_out;
	}

	/* BAR0 (registers) and BAR2 (doorbells) must both be MMIO */
	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		printk(KERN_ERR PFX "Cannot find PCI device base address,"
		       " aborting\n");
		rc = -ENODEV;
		goto err_out_disable;
	}

	if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
		printk(KERN_ERR PFX "Cannot find second PCI device"
		       " base address, aborting\n");
		rc = -ENODEV;
		goto err_out_disable;
	}

	/* Claim the regions only for the first function to enable the
	 * device; other functions share them */
	if (atomic_read(&pdev->enable_cnt) == 1) {
		rc = pci_request_regions(pdev, DRV_MODULE_NAME);
		if (rc) {
			printk(KERN_ERR PFX "Cannot obtain PCI resources,"
			       " aborting\n");
			goto err_out_disable;
		}

		pci_set_master(pdev);
		pci_save_state(pdev);
	}

	bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
	if (bp->pm_cap == 0) {
		printk(KERN_ERR PFX "Cannot find power management"
		       " capability, aborting\n");
		rc = -EIO;
		goto err_out_release;
	}

	bp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
	if (bp->pcie_cap == 0) {
		printk(KERN_ERR PFX "Cannot find PCI Express capability,"
		       " aborting\n");
		rc = -EIO;
		goto err_out_release;
	}

	/* Prefer 64-bit DMA; fall back to 32-bit, else fail the probe */
	if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) == 0) {
		bp->flags |= USING_DAC_FLAG;
		if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) {
			printk(KERN_ERR PFX "pci_set_consistent_dma_mask"
			       " failed, aborting\n");
			rc = -EIO;
			goto err_out_release;
		}

	} else if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) {
		printk(KERN_ERR PFX "System does not support DMA,"
		       " aborting\n");
		rc = -EIO;
		goto err_out_release;
	}

	dev->mem_start = pci_resource_start(pdev, 0);
	dev->base_addr = dev->mem_start;
	dev->mem_end = pci_resource_end(pdev, 0);

	dev->irq = pdev->irq;

	/* Map the register window (BAR0) */
	bp->regview = pci_ioremap_bar(pdev, 0);
	if (!bp->regview) {
		printk(KERN_ERR PFX "Cannot map register space, aborting\n");
		rc = -ENOMEM;
		goto err_out_release;
	}

	/* Map the doorbell window (BAR2), capped at BNX2X_DB_SIZE */
	bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
					min_t(u64, BNX2X_DB_SIZE,
					      pci_resource_len(pdev, 2)));
	if (!bp->doorbells) {
		printk(KERN_ERR PFX "Cannot map doorbell space, aborting\n");
		rc = -ENOMEM;
		goto err_out_unmap;
	}

	bnx2x_set_power_state(bp, PCI_D0);

	/* clean indirect addresses */
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);
	REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0 + BP_PORT(bp)*16, 0);
	REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0 + BP_PORT(bp)*16, 0);
	REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0 + BP_PORT(bp)*16, 0);
	REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0 + BP_PORT(bp)*16, 0);

	dev->watchdog_timeo = TX_TIMEOUT;

	/* Advertise offload capabilities; HIGHDMA only with a 64-bit mask */
	dev->netdev_ops = &bnx2x_netdev_ops;
	dev->ethtool_ops = &bnx2x_ethtool_ops;
	dev->features |= NETIF_F_SG;
	dev->features |= NETIF_F_HW_CSUM;
	if (bp->flags & USING_DAC_FLAG)
		dev->features |= NETIF_F_HIGHDMA;
	dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
	dev->features |= NETIF_F_TSO6;
#ifdef BCM_VLAN
	dev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX);
	bp->flags |= (HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);

	dev->vlan_features |= NETIF_F_SG;
	dev->vlan_features |= NETIF_F_HW_CSUM;
	if (bp->flags & USING_DAC_FLAG)
		dev->vlan_features |= NETIF_F_HIGHDMA;
	dev->vlan_features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
	dev->vlan_features |= NETIF_F_TSO6;
#endif

	/* get_port_hwinfo() will set prtad and mmds properly */
	bp->mdio.prtad = MDIO_PRTAD_NONE;
	bp->mdio.mmds = 0;
	bp->mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
	bp->mdio.dev = dev;
	bp->mdio.mdio_read = bnx2x_mdio_read;
	bp->mdio.mdio_write = bnx2x_mdio_write;

	return 0;

err_out_unmap:
	if (bp->regview) {
		iounmap(bp->regview);
		bp->regview = NULL;
	}
	if (bp->doorbells) {
		iounmap(bp->doorbells);
		bp->doorbells = NULL;
	}

err_out_release:
	/* Release regions only if this function was the one that claimed
	 * them (see the matching pci_request_regions above) */
	if (atomic_read(&pdev->enable_cnt) == 1)
		pci_release_regions(pdev);

err_out_disable:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);

err_out:
	return rc;
}
11651
Eliezer Tamir25047952008-02-28 11:50:16 -080011652static int __devinit bnx2x_get_pcie_width(struct bnx2x *bp)
11653{
11654 u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);
11655
11656 val = (val & PCICFG_LINK_WIDTH) >> PCICFG_LINK_WIDTH_SHIFT;
11657 return val;
11658}
11659
11660/* return value of 1=2.5GHz 2=5GHz */
11661static int __devinit bnx2x_get_pcie_speed(struct bnx2x *bp)
11662{
11663 u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);
11664
11665 val = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT;
11666 return val;
11667}
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -070011668static int __devinit bnx2x_check_firmware(struct bnx2x *bp)
11669{
11670 struct bnx2x_fw_file_hdr *fw_hdr;
11671 struct bnx2x_fw_file_section *sections;
11672 u16 *ops_offsets;
11673 u32 offset, len, num_ops;
11674 int i;
11675 const struct firmware *firmware = bp->firmware;
11676 const u8 * fw_ver;
11677
11678 if (firmware->size < sizeof(struct bnx2x_fw_file_hdr))
11679 return -EINVAL;
11680
11681 fw_hdr = (struct bnx2x_fw_file_hdr *)firmware->data;
11682 sections = (struct bnx2x_fw_file_section *)fw_hdr;
11683
11684 /* Make sure none of the offsets and sizes make us read beyond
11685 * the end of the firmware data */
11686 for (i = 0; i < sizeof(*fw_hdr) / sizeof(*sections); i++) {
11687 offset = be32_to_cpu(sections[i].offset);
11688 len = be32_to_cpu(sections[i].len);
11689 if (offset + len > firmware->size) {
11690 printk(KERN_ERR PFX "Section %d length is out of bounds\n", i);
11691 return -EINVAL;
11692 }
11693 }
11694
11695 /* Likewise for the init_ops offsets */
11696 offset = be32_to_cpu(fw_hdr->init_ops_offsets.offset);
11697 ops_offsets = (u16 *)(firmware->data + offset);
11698 num_ops = be32_to_cpu(fw_hdr->init_ops.len) / sizeof(struct raw_op);
11699
11700 for (i = 0; i < be32_to_cpu(fw_hdr->init_ops_offsets.len) / 2; i++) {
11701 if (be16_to_cpu(ops_offsets[i]) > num_ops) {
11702 printk(KERN_ERR PFX "Section offset %d is out of bounds\n", i);
11703 return -EINVAL;
11704 }
11705 }
11706
11707 /* Check FW version */
11708 offset = be32_to_cpu(fw_hdr->fw_version.offset);
11709 fw_ver = firmware->data + offset;
11710 if ((fw_ver[0] != BCM_5710_FW_MAJOR_VERSION) ||
11711 (fw_ver[1] != BCM_5710_FW_MINOR_VERSION) ||
11712 (fw_ver[2] != BCM_5710_FW_REVISION_VERSION) ||
11713 (fw_ver[3] != BCM_5710_FW_ENGINEERING_VERSION)) {
11714 printk(KERN_ERR PFX "Bad FW version:%d.%d.%d.%d."
11715 " Should be %d.%d.%d.%d\n",
11716 fw_ver[0], fw_ver[1], fw_ver[2],
11717 fw_ver[3], BCM_5710_FW_MAJOR_VERSION,
11718 BCM_5710_FW_MINOR_VERSION,
11719 BCM_5710_FW_REVISION_VERSION,
11720 BCM_5710_FW_ENGINEERING_VERSION);
11721 return -EINVAL;
11722 }
11723
11724 return 0;
11725}
11726
11727static void inline be32_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
11728{
11729 u32 i;
11730 const __be32 *source = (const __be32*)_source;
11731 u32 *target = (u32*)_target;
11732
11733 for (i = 0; i < n/4; i++)
11734 target[i] = be32_to_cpu(source[i]);
11735}
11736
11737/*
11738 Ops array is stored in the following format:
11739 {op(8bit), offset(24bit, big endian), data(32bit, big endian)}
11740 */
11741static void inline bnx2x_prep_ops(const u8 *_source, u8 *_target, u32 n)
11742{
11743 u32 i, j, tmp;
11744 const __be32 *source = (const __be32*)_source;
11745 struct raw_op *target = (struct raw_op*)_target;
11746
11747 for (i = 0, j = 0; i < n/8; i++, j+=2) {
11748 tmp = be32_to_cpu(source[j]);
11749 target[i].op = (tmp >> 24) & 0xff;
11750 target[i].offset = tmp & 0xffffff;
11751 target[i].raw_data = be32_to_cpu(source[j+1]);
11752 }
11753}
11754static void inline be16_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
11755{
11756 u32 i;
11757 u16 *target = (u16*)_target;
11758 const __be16 *source = (const __be16*)_source;
11759
11760 for (i = 0; i < n/2; i++)
11761 target[i] = be16_to_cpu(source[i]);
11762}
11763
/*
 * Allocate bp->arr and fill it with the byte-swapped contents of the
 * firmware section described by fw_hdr->arr, using the given unpack
 * helper (be32_to_cpu_n / be16_to_cpu_n / bnx2x_prep_ops).
 * Jumps to the supplied cleanup label on allocation failure.
 * NOTE: relies on 'bp' and 'fw_hdr' being in scope at the expansion
 * site (used only inside bnx2x_init_firmware()).
 */
#define BNX2X_ALLOC_AND_SET(arr, lbl, func) \
	do { \
		u32 len = be32_to_cpu(fw_hdr->arr.len); \
		bp->arr = kmalloc(len, GFP_KERNEL); \
		if (!bp->arr) { \
			printk(KERN_ERR PFX "Failed to allocate %d bytes for "#arr"\n", len); \
			goto lbl; \
		} \
		func(bp->firmware->data + \
		     be32_to_cpu(fw_hdr->arr.offset), \
		     (u8*)bp->arr, len); \
	} while (0)
11776
11777
11778static int __devinit bnx2x_init_firmware(struct bnx2x *bp, struct device *dev)
11779{
11780 char fw_file_name[40] = {0};
11781 int rc, offset;
11782 struct bnx2x_fw_file_hdr *fw_hdr;
11783
11784 /* Create a FW file name */
11785 if (CHIP_IS_E1(bp))
11786 offset = sprintf(fw_file_name, FW_FILE_PREFIX_E1);
11787 else
11788 offset = sprintf(fw_file_name, FW_FILE_PREFIX_E1H);
11789
11790 sprintf(fw_file_name + offset, "%d.%d.%d.%d.fw",
11791 BCM_5710_FW_MAJOR_VERSION,
11792 BCM_5710_FW_MINOR_VERSION,
11793 BCM_5710_FW_REVISION_VERSION,
11794 BCM_5710_FW_ENGINEERING_VERSION);
11795
11796 printk(KERN_INFO PFX "Loading %s\n", fw_file_name);
11797
11798 rc = request_firmware(&bp->firmware, fw_file_name, dev);
11799 if (rc) {
11800 printk(KERN_ERR PFX "Can't load firmware file %s\n", fw_file_name);
11801 goto request_firmware_exit;
11802 }
11803
11804 rc = bnx2x_check_firmware(bp);
11805 if (rc) {
11806 printk(KERN_ERR PFX "Corrupt firmware file %s\n", fw_file_name);
11807 goto request_firmware_exit;
11808 }
11809
11810 fw_hdr = (struct bnx2x_fw_file_hdr *)bp->firmware->data;
11811
11812 /* Initialize the pointers to the init arrays */
11813 /* Blob */
11814 BNX2X_ALLOC_AND_SET(init_data, request_firmware_exit, be32_to_cpu_n);
11815
11816 /* Opcodes */
11817 BNX2X_ALLOC_AND_SET(init_ops, init_ops_alloc_err, bnx2x_prep_ops);
11818
11819 /* Offsets */
11820 BNX2X_ALLOC_AND_SET(init_ops_offsets, init_offsets_alloc_err, be16_to_cpu_n);
11821
11822 /* STORMs firmware */
11823 bp->tsem_int_table_data = bp->firmware->data +
11824 be32_to_cpu(fw_hdr->tsem_int_table_data.offset);
11825 bp->tsem_pram_data = bp->firmware->data +
11826 be32_to_cpu(fw_hdr->tsem_pram_data.offset);
11827 bp->usem_int_table_data = bp->firmware->data +
11828 be32_to_cpu(fw_hdr->usem_int_table_data.offset);
11829 bp->usem_pram_data = bp->firmware->data +
11830 be32_to_cpu(fw_hdr->usem_pram_data.offset);
11831 bp->xsem_int_table_data = bp->firmware->data +
11832 be32_to_cpu(fw_hdr->xsem_int_table_data.offset);
11833 bp->xsem_pram_data = bp->firmware->data +
11834 be32_to_cpu(fw_hdr->xsem_pram_data.offset);
11835 bp->csem_int_table_data = bp->firmware->data +
11836 be32_to_cpu(fw_hdr->csem_int_table_data.offset);
11837 bp->csem_pram_data = bp->firmware->data +
11838 be32_to_cpu(fw_hdr->csem_pram_data.offset);
11839
11840 return 0;
11841init_offsets_alloc_err:
11842 kfree(bp->init_ops);
11843init_ops_alloc_err:
11844 kfree(bp->init_data);
11845request_firmware_exit:
11846 release_firmware(bp->firmware);
11847
11848 return rc;
11849}
11850
11851
Eliezer Tamir25047952008-02-28 11:50:16 -080011852
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011853static int __devinit bnx2x_init_one(struct pci_dev *pdev,
11854 const struct pci_device_id *ent)
11855{
11856 static int version_printed;
11857 struct net_device *dev = NULL;
11858 struct bnx2x *bp;
Eliezer Tamir25047952008-02-28 11:50:16 -080011859 int rc;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011860
11861 if (version_printed++ == 0)
11862 printk(KERN_INFO "%s", version);
11863
11864 /* dev zeroed in init_etherdev */
Eilon Greenstein555f6c72009-02-12 08:36:11 +000011865 dev = alloc_etherdev_mq(sizeof(*bp), MAX_CONTEXT);
Eilon Greenstein34f80b02008-06-23 20:33:01 -070011866 if (!dev) {
11867 printk(KERN_ERR PFX "Cannot allocate net device\n");
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011868 return -ENOMEM;
Eilon Greenstein34f80b02008-06-23 20:33:01 -070011869 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011870
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011871 bp = netdev_priv(dev);
11872 bp->msglevel = debug;
11873
Eilon Greenstein34f80b02008-06-23 20:33:01 -070011874 rc = bnx2x_init_dev(pdev, dev);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011875 if (rc < 0) {
11876 free_netdev(dev);
11877 return rc;
11878 }
11879
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011880 pci_set_drvdata(pdev, dev);
11881
Eilon Greenstein34f80b02008-06-23 20:33:01 -070011882 rc = bnx2x_init_bp(bp);
Eilon Greenstein693fc0d2009-01-14 06:43:52 +000011883 if (rc)
11884 goto init_one_exit;
11885
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -070011886 /* Set init arrays */
11887 rc = bnx2x_init_firmware(bp, &pdev->dev);
11888 if (rc) {
11889 printk(KERN_ERR PFX "Error loading firmware\n");
11890 goto init_one_exit;
11891 }
11892
Eilon Greenstein693fc0d2009-01-14 06:43:52 +000011893 rc = register_netdev(dev);
Eilon Greenstein34f80b02008-06-23 20:33:01 -070011894 if (rc) {
Eilon Greenstein693fc0d2009-01-14 06:43:52 +000011895 dev_err(&pdev->dev, "Cannot register net device\n");
Eilon Greenstein34f80b02008-06-23 20:33:01 -070011896 goto init_one_exit;
11897 }
11898
Eliezer Tamir25047952008-02-28 11:50:16 -080011899 printk(KERN_INFO "%s: %s (%c%d) PCI-E x%d %s found at mem %lx,"
Eilon Greenstein87942b42009-02-12 08:36:49 +000011900 " IRQ %d, ", dev->name, board_info[ent->driver_data].name,
Eilon Greenstein34f80b02008-06-23 20:33:01 -070011901 (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
Eliezer Tamir25047952008-02-28 11:50:16 -080011902 bnx2x_get_pcie_width(bp),
11903 (bnx2x_get_pcie_speed(bp) == 2) ? "5GHz (Gen2)" : "2.5GHz",
11904 dev->base_addr, bp->pdev->irq);
Johannes Berge1749612008-10-27 15:59:26 -070011905 printk(KERN_CONT "node addr %pM\n", dev->dev_addr);
Eilon Greensteinc0162012009-03-02 08:01:05 +000011906
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011907 return 0;
Eilon Greenstein34f80b02008-06-23 20:33:01 -070011908
11909init_one_exit:
11910 if (bp->regview)
11911 iounmap(bp->regview);
11912
11913 if (bp->doorbells)
11914 iounmap(bp->doorbells);
11915
11916 free_netdev(dev);
11917
11918 if (atomic_read(&pdev->enable_cnt) == 1)
11919 pci_release_regions(pdev);
11920
11921 pci_disable_device(pdev);
11922 pci_set_drvdata(pdev, NULL);
11923
11924 return rc;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011925}
11926
/* PCI remove callback: undo everything bnx2x_init_one() did.
 * Unregisters the netdev first (so no new traffic arrives), then frees
 * the firmware-derived init arrays, unmaps the BARs, frees the netdev
 * and finally releases the PCI resources.
 */
static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;

	if (!dev) {
		printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
		return;
	}
	bp = netdev_priv(dev);

	unregister_netdev(dev);

	/* Free the init arrays allocated by bnx2x_init_firmware() and
	   drop the reference to the firmware blob */
	kfree(bp->init_ops_offsets);
	kfree(bp->init_ops);
	kfree(bp->init_data);
	release_firmware(bp->firmware);

	if (bp->regview)
		iounmap(bp->regview);

	if (bp->doorbells)
		iounmap(bp->doorbells);

	free_netdev(dev);

	/* Release the regions only if we are the last enabler */
	if (atomic_read(&pdev->enable_cnt) == 1)
		pci_release_regions(pdev);

	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
}
11959
11960static int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
11961{
11962 struct net_device *dev = pci_get_drvdata(pdev);
Eliezer Tamir228241e2008-02-28 11:56:57 -080011963 struct bnx2x *bp;
11964
Eilon Greenstein34f80b02008-06-23 20:33:01 -070011965 if (!dev) {
11966 printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
11967 return -ENODEV;
11968 }
Eliezer Tamir228241e2008-02-28 11:56:57 -080011969 bp = netdev_priv(dev);
11970
Eilon Greenstein34f80b02008-06-23 20:33:01 -070011971 rtnl_lock();
11972
11973 pci_save_state(pdev);
11974
11975 if (!netif_running(dev)) {
11976 rtnl_unlock();
11977 return 0;
11978 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011979
11980 netif_device_detach(dev);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011981
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -070011982 bnx2x_nic_unload(bp, UNLOAD_CLOSE);
Eilon Greenstein34f80b02008-06-23 20:33:01 -070011983
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011984 bnx2x_set_power_state(bp, pci_choose_state(pdev, state));
Eliezer Tamir228241e2008-02-28 11:56:57 -080011985
Eilon Greenstein34f80b02008-06-23 20:33:01 -070011986 rtnl_unlock();
11987
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011988 return 0;
11989}
11990
11991static int bnx2x_resume(struct pci_dev *pdev)
11992{
11993 struct net_device *dev = pci_get_drvdata(pdev);
Eliezer Tamir228241e2008-02-28 11:56:57 -080011994 struct bnx2x *bp;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011995 int rc;
11996
Eliezer Tamir228241e2008-02-28 11:56:57 -080011997 if (!dev) {
11998 printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
11999 return -ENODEV;
12000 }
Eliezer Tamir228241e2008-02-28 11:56:57 -080012001 bp = netdev_priv(dev);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020012002
Eilon Greenstein34f80b02008-06-23 20:33:01 -070012003 rtnl_lock();
12004
Eliezer Tamir228241e2008-02-28 11:56:57 -080012005 pci_restore_state(pdev);
Eilon Greenstein34f80b02008-06-23 20:33:01 -070012006
12007 if (!netif_running(dev)) {
12008 rtnl_unlock();
12009 return 0;
12010 }
12011
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020012012 bnx2x_set_power_state(bp, PCI_D0);
12013 netif_device_attach(dev);
12014
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -070012015 rc = bnx2x_nic_load(bp, LOAD_OPEN);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020012016
Eilon Greenstein34f80b02008-06-23 20:33:01 -070012017 rtnl_unlock();
12018
12019 return rc;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020012020}
12021
Yitchak Gertnerf8ef6e42008-09-09 05:07:25 -070012022static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
12023{
12024 int i;
12025
12026 bp->state = BNX2X_STATE_ERROR;
12027
12028 bp->rx_mode = BNX2X_RX_MODE_NONE;
12029
12030 bnx2x_netif_stop(bp, 0);
12031
12032 del_timer_sync(&bp->timer);
12033 bp->stats_state = STATS_STATE_DISABLED;
12034 DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");
12035
12036 /* Release IRQs */
12037 bnx2x_free_irq(bp);
12038
12039 if (CHIP_IS_E1(bp)) {
12040 struct mac_configuration_cmd *config =
12041 bnx2x_sp(bp, mcast_config);
12042
Eilon Greenstein8d9c5f32009-02-15 23:24:08 -080012043 for (i = 0; i < config->hdr.length; i++)
Yitchak Gertnerf8ef6e42008-09-09 05:07:25 -070012044 CAM_INVALIDATE(config->config_table[i]);
12045 }
12046
12047 /* Free SKBs, SGEs, TPA pool and driver internals */
12048 bnx2x_free_skbs(bp);
Eilon Greenstein555f6c72009-02-12 08:36:11 +000012049 for_each_rx_queue(bp, i)
Yitchak Gertnerf8ef6e42008-09-09 05:07:25 -070012050 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
Eilon Greenstein555f6c72009-02-12 08:36:11 +000012051 for_each_rx_queue(bp, i)
Eilon Greenstein7cde1c82009-01-22 06:01:25 +000012052 netif_napi_del(&bnx2x_fp(bp, i, napi));
Yitchak Gertnerf8ef6e42008-09-09 05:07:25 -070012053 bnx2x_free_mem(bp);
12054
12055 bp->state = BNX2X_STATE_CLOSED;
12056
12057 netif_carrier_off(bp->dev);
12058
12059 return 0;
12060}
12061
/* Re-establish shared-memory (MCP) related state after a PCI reset:
 * re-read the shmem base, verify MCP presence/validity and refresh
 * the driver<->MCP mailbox sequence number.
 */
static void bnx2x_eeh_recover(struct bnx2x *bp)
{
	u32 val;

	mutex_init(&bp->port.phy_mutex);

	bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
	bp->link_params.shmem_base = bp->common.shmem_base;
	BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);

	/* A shmem base outside [0xA0000, 0xC0000) means the MCP is not
	   running; fall back to NO_MCP operation */
	if (!bp->common.shmem_base ||
	    (bp->common.shmem_base < 0xA0000) ||
	    (bp->common.shmem_base >= 0xC0000)) {
		BNX2X_DEV_INFO("MCP not active\n");
		bp->flags |= NO_MCP_FLAG;
		return;
	}

	/* Both DEV_INFO and MB validity bits must be set; log (but do
	   not fail) if they are not */
	val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
	if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		!= (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		BNX2X_ERR("BAD MCP validity signature\n");

	if (!BP_NOMCP(bp)) {
		/* Resync the driver mailbox sequence with the MCP */
		bp->fw_seq = (SHMEM_RD(bp, func_mb[BP_FUNC(bp)].drv_mb_header)
			      & DRV_MSG_SEQ_NUMBER_MASK);
		BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
	}
}
12091
Wendy Xiong493adb12008-06-23 20:36:22 -070012092/**
12093 * bnx2x_io_error_detected - called when PCI error is detected
12094 * @pdev: Pointer to PCI device
12095 * @state: The current pci connection state
12096 *
12097 * This function is called after a PCI bus error affecting
12098 * this device has been detected.
12099 */
12100static pci_ers_result_t bnx2x_io_error_detected(struct pci_dev *pdev,
12101 pci_channel_state_t state)
12102{
12103 struct net_device *dev = pci_get_drvdata(pdev);
12104 struct bnx2x *bp = netdev_priv(dev);
12105
12106 rtnl_lock();
12107
12108 netif_device_detach(dev);
12109
Dean Nelson07ce50e2009-07-31 09:13:25 +000012110 if (state == pci_channel_io_perm_failure) {
12111 rtnl_unlock();
12112 return PCI_ERS_RESULT_DISCONNECT;
12113 }
12114
Wendy Xiong493adb12008-06-23 20:36:22 -070012115 if (netif_running(dev))
Yitchak Gertnerf8ef6e42008-09-09 05:07:25 -070012116 bnx2x_eeh_nic_unload(bp);
Wendy Xiong493adb12008-06-23 20:36:22 -070012117
12118 pci_disable_device(pdev);
12119
12120 rtnl_unlock();
12121
12122 /* Request a slot reset */
12123 return PCI_ERS_RESULT_NEED_RESET;
12124}
12125
12126/**
12127 * bnx2x_io_slot_reset - called after the PCI bus has been reset
12128 * @pdev: Pointer to PCI device
12129 *
12130 * Restart the card from scratch, as if from a cold-boot.
12131 */
12132static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
12133{
12134 struct net_device *dev = pci_get_drvdata(pdev);
12135 struct bnx2x *bp = netdev_priv(dev);
12136
12137 rtnl_lock();
12138
12139 if (pci_enable_device(pdev)) {
12140 dev_err(&pdev->dev,
12141 "Cannot re-enable PCI device after reset\n");
12142 rtnl_unlock();
12143 return PCI_ERS_RESULT_DISCONNECT;
12144 }
12145
12146 pci_set_master(pdev);
12147 pci_restore_state(pdev);
12148
12149 if (netif_running(dev))
12150 bnx2x_set_power_state(bp, PCI_D0);
12151
12152 rtnl_unlock();
12153
12154 return PCI_ERS_RESULT_RECOVERED;
12155}
12156
12157/**
12158 * bnx2x_io_resume - called when traffic can start flowing again
12159 * @pdev: Pointer to PCI device
12160 *
12161 * This callback is called when the error recovery driver tells us that
12162 * its OK to resume normal operation.
12163 */
12164static void bnx2x_io_resume(struct pci_dev *pdev)
12165{
12166 struct net_device *dev = pci_get_drvdata(pdev);
12167 struct bnx2x *bp = netdev_priv(dev);
12168
12169 rtnl_lock();
12170
Yitchak Gertnerf8ef6e42008-09-09 05:07:25 -070012171 bnx2x_eeh_recover(bp);
12172
Wendy Xiong493adb12008-06-23 20:36:22 -070012173 if (netif_running(dev))
Yitchak Gertnerf8ef6e42008-09-09 05:07:25 -070012174 bnx2x_nic_load(bp, LOAD_NORMAL);
Wendy Xiong493adb12008-06-23 20:36:22 -070012175
12176 netif_device_attach(dev);
12177
12178 rtnl_unlock();
12179}
12180
/* PCI error recovery (AER/EEH) callbacks */
static struct pci_error_handlers bnx2x_err_handler = {
	.error_detected = bnx2x_io_error_detected,
	.slot_reset = bnx2x_io_slot_reset,
	.resume = bnx2x_io_resume,
};
12186
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020012187static struct pci_driver bnx2x_pci_driver = {
Wendy Xiong493adb12008-06-23 20:36:22 -070012188 .name = DRV_MODULE_NAME,
12189 .id_table = bnx2x_pci_tbl,
12190 .probe = bnx2x_init_one,
12191 .remove = __devexit_p(bnx2x_remove_one),
12192 .suspend = bnx2x_suspend,
12193 .resume = bnx2x_resume,
12194 .err_handler = &bnx2x_err_handler,
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020012195};
12196
12197static int __init bnx2x_init(void)
12198{
Stanislaw Gruszkadd21ca62009-05-05 23:22:01 +000012199 int ret;
12200
Eilon Greenstein1cf167f2009-01-14 21:22:18 -080012201 bnx2x_wq = create_singlethread_workqueue("bnx2x");
12202 if (bnx2x_wq == NULL) {
12203 printk(KERN_ERR PFX "Cannot create workqueue\n");
12204 return -ENOMEM;
12205 }
12206
Stanislaw Gruszkadd21ca62009-05-05 23:22:01 +000012207 ret = pci_register_driver(&bnx2x_pci_driver);
12208 if (ret) {
12209 printk(KERN_ERR PFX "Cannot register driver\n");
12210 destroy_workqueue(bnx2x_wq);
12211 }
12212 return ret;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020012213}
12214
/* Module exit point: unregister the PCI driver (which removes all
 * devices) and destroy the slow-path workqueue.
 */
static void __exit bnx2x_cleanup(void)
{
	pci_unregister_driver(&bnx2x_pci_driver);

	destroy_workqueue(bnx2x_wq);
}
12221
12222module_init(bnx2x_init);
12223module_exit(bnx2x_cleanup);
12224
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -070012225