blob: 012c093cb4322e20afb3bd1013f7b454a62054ce [file] [log] [blame]
Eilon Greenstein34f80b02008-06-23 20:33:01 -07001/* bnx2x_main.c: Broadcom Everest network driver.
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002 *
Vladislav Zolotarov3359fce2010-02-17 13:35:01 -08003 * Copyright (c) 2007-2010 Broadcom Corporation
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation.
8 *
Eilon Greenstein24e3fce2008-06-12 14:30:28 -07009 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
10 * Written by: Eliezer Tamir
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011 * Based on code from Michael Chan's bnx2 driver
12 * UDP CSUM errata workaround by Arik Gendelman
Eilon Greensteinca003922009-08-12 22:53:28 -070013 * Slowpath and fastpath rework by Vladislav Zolotarov
Eliezer Tamirc14423f2008-02-28 11:49:42 -080014 * Statistics and Link management by Yitchak Gertner
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020015 *
16 */
17
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020018#include <linux/module.h>
19#include <linux/moduleparam.h>
20#include <linux/kernel.h>
21#include <linux/device.h> /* for dev_info() */
22#include <linux/timer.h>
23#include <linux/errno.h>
24#include <linux/ioport.h>
25#include <linux/slab.h>
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020026#include <linux/interrupt.h>
27#include <linux/pci.h>
28#include <linux/init.h>
29#include <linux/netdevice.h>
30#include <linux/etherdevice.h>
31#include <linux/skbuff.h>
32#include <linux/dma-mapping.h>
33#include <linux/bitops.h>
34#include <linux/irq.h>
35#include <linux/delay.h>
36#include <asm/byteorder.h>
37#include <linux/time.h>
38#include <linux/ethtool.h>
39#include <linux/mii.h>
Eilon Greenstein0c6671b2009-01-14 21:26:51 -080040#include <linux/if_vlan.h>
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020041#include <net/ip.h>
42#include <net/tcp.h>
43#include <net/checksum.h>
Eilon Greenstein34f80b02008-06-23 20:33:01 -070044#include <net/ip6_checksum.h>
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020045#include <linux/workqueue.h>
46#include <linux/crc32.h>
Eilon Greenstein34f80b02008-06-23 20:33:01 -070047#include <linux/crc32c.h>
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020048#include <linux/prefetch.h>
49#include <linux/zlib.h>
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020050#include <linux/io.h>
Ben Hutchings45229b42009-11-07 11:53:39 +000051#include <linux/stringify.h>
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020052
Dmitry Kravkovb0efbb92010-07-27 12:33:43 +000053#define BNX2X_MAIN
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020054#include "bnx2x.h"
55#include "bnx2x_init.h"
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -070056#include "bnx2x_init_ops.h"
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +000057#include "bnx2x_cmn.h"
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020058
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -070059#include <linux/firmware.h>
60#include "bnx2x_fw_file_hdr.h"
61/* FW files */
Ben Hutchings45229b42009-11-07 11:53:39 +000062#define FW_FILE_VERSION \
63 __stringify(BCM_5710_FW_MAJOR_VERSION) "." \
64 __stringify(BCM_5710_FW_MINOR_VERSION) "." \
65 __stringify(BCM_5710_FW_REVISION_VERSION) "." \
66 __stringify(BCM_5710_FW_ENGINEERING_VERSION)
Dmitry Kravkov560131f2010-10-06 03:18:47 +000067#define FW_FILE_NAME_E1 "bnx2x/bnx2x-e1-" FW_FILE_VERSION ".fw"
68#define FW_FILE_NAME_E1H "bnx2x/bnx2x-e1h-" FW_FILE_VERSION ".fw"
Dmitry Kravkovf2e08992010-10-06 03:28:26 +000069#define FW_FILE_NAME_E2 "bnx2x/bnx2x-e2-" FW_FILE_VERSION ".fw"
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -070070
Eilon Greenstein34f80b02008-06-23 20:33:01 -070071/* Time in jiffies before concluding the transmitter is hung */
72#define TX_TIMEOUT (5*HZ)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020073
Andrew Morton53a10562008-02-09 23:16:41 -080074static char version[] __devinitdata =
Eilon Greenstein34f80b02008-06-23 20:33:01 -070075 "Broadcom NetXtreme II 5771x 10Gigabit Ethernet Driver "
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020076 DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
77
Eilon Greenstein24e3fce2008-06-12 14:30:28 -070078MODULE_AUTHOR("Eliezer Tamir");
Dmitry Kravkovf2e08992010-10-06 03:28:26 +000079MODULE_DESCRIPTION("Broadcom NetXtreme II "
80 "BCM57710/57711/57711E/57712/57712E Driver");
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020081MODULE_LICENSE("GPL");
82MODULE_VERSION(DRV_MODULE_VERSION);
Ben Hutchings45229b42009-11-07 11:53:39 +000083MODULE_FIRMWARE(FW_FILE_NAME_E1);
84MODULE_FIRMWARE(FW_FILE_NAME_E1H);
Dmitry Kravkovf2e08992010-10-06 03:28:26 +000085MODULE_FIRMWARE(FW_FILE_NAME_E2);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020086
/* Module parameters.  All use permissions 0: settable only at load time,
 * not visible/writable via sysfs afterwards.
 */
static int multi_mode = 1;
module_param(multi_mode, int, 0);
MODULE_PARM_DESC(multi_mode, " Multi queue mode "
			     "(0 Disable; 1 Enable (default))");

/* Non-static: also consumed by the common code (bnx2x_cmn). */
int num_queues;
module_param(num_queues, int, 0);
MODULE_PARM_DESC(num_queues, " Number of queues for multi_mode=1"
				" (default is as a number of CPUs)");

static int disable_tpa;
module_param(disable_tpa, int, 0);
MODULE_PARM_DESC(disable_tpa, " Disable the TPA (LRO) feature");

/* 0 selects MSI-X when available; see description for the fallbacks. */
static int int_mode;
module_param(int_mode, int, 0);
MODULE_PARM_DESC(int_mode, " Force interrupt mode other then MSI-X "
				"(1 INT#x; 2 MSI)");

static int dropless_fc;
module_param(dropless_fc, int, 0);
MODULE_PARM_DESC(dropless_fc, " Pause on exhausted host ring");

static int poll;
module_param(poll, int, 0);
MODULE_PARM_DESC(poll, " Use polling (for debug)");

static int mrrs = -1;	/* -1: leave the PCIe Max Read Request Size alone */
module_param(mrrs, int, 0);
MODULE_PARM_DESC(mrrs, " Force Max Read Req Size (0..3) (for debug)");

static int debug;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, " Default debug msglevel");

/* Driver-private workqueue for slow-path task deferral. */
static struct workqueue_struct *bnx2x_wq;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200123
/* Supported board types; the enumerator value indexes board_info[] below. */
enum bnx2x_board_type {
	BCM57710 = 0,
	BCM57711 = 1,
	BCM57711E = 2,
	BCM57712 = 3,
	BCM57712E = 4
};

/* indexed by board_type, above */
static struct {
	char *name;
} board_info[] __devinitdata = {
	{ "Broadcom NetXtreme II BCM57710 XGb" },
	{ "Broadcom NetXtreme II BCM57711 XGb" },
	{ "Broadcom NetXtreme II BCM57711E XGb" },
	{ "Broadcom NetXtreme II BCM57712 XGb" },
	{ "Broadcom NetXtreme II BCM57712E XGb" }
};

/* 57712 ids may not yet exist in pci_ids.h on older trees. */
#ifndef PCI_DEVICE_ID_NX2_57712
#define PCI_DEVICE_ID_NX2_57712		0x1662
#endif
#ifndef PCI_DEVICE_ID_NX2_57712E
#define PCI_DEVICE_ID_NX2_57712E	0x1663
#endif

static DEFINE_PCI_DEVICE_TABLE(bnx2x_pci_tbl) = {
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57710), BCM57710 },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711), BCM57711 },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711E), BCM57711E },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57712), BCM57712 },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57712E), BCM57712E },
	{ 0 }
};

MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);
160
161/****************************************************************************
162* General service functions
163****************************************************************************/
164
Dmitry Kravkov523224a2010-10-06 03:23:26 +0000165static inline void __storm_memset_dma_mapping(struct bnx2x *bp,
166 u32 addr, dma_addr_t mapping)
167{
168 REG_WR(bp, addr, U64_LO(mapping));
169 REG_WR(bp, addr + 4, U64_HI(mapping));
170}
171
172static inline void __storm_memset_fill(struct bnx2x *bp,
173 u32 addr, size_t size, u32 val)
174{
175 int i;
176 for (i = 0; i < size/4; i++)
177 REG_WR(bp, addr + (i * 4), val);
178}
179
180static inline void storm_memset_ustats_zero(struct bnx2x *bp,
181 u8 port, u16 stat_id)
182{
183 size_t size = sizeof(struct ustorm_per_client_stats);
184
185 u32 addr = BAR_USTRORM_INTMEM +
186 USTORM_PER_COUNTER_ID_STATS_OFFSET(port, stat_id);
187
188 __storm_memset_fill(bp, addr, size, 0);
189}
190
191static inline void storm_memset_tstats_zero(struct bnx2x *bp,
192 u8 port, u16 stat_id)
193{
194 size_t size = sizeof(struct tstorm_per_client_stats);
195
196 u32 addr = BAR_TSTRORM_INTMEM +
197 TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, stat_id);
198
199 __storm_memset_fill(bp, addr, size, 0);
200}
201
202static inline void storm_memset_xstats_zero(struct bnx2x *bp,
203 u8 port, u16 stat_id)
204{
205 size_t size = sizeof(struct xstorm_per_client_stats);
206
207 u32 addr = BAR_XSTRORM_INTMEM +
208 XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, stat_id);
209
210 __storm_memset_fill(bp, addr, size, 0);
211}
212
213
214static inline void storm_memset_spq_addr(struct bnx2x *bp,
215 dma_addr_t mapping, u16 abs_fid)
216{
217 u32 addr = XSEM_REG_FAST_MEMORY +
218 XSTORM_SPQ_PAGE_BASE_OFFSET(abs_fid);
219
220 __storm_memset_dma_mapping(bp, addr, mapping);
221}
222
223static inline void storm_memset_ov(struct bnx2x *bp, u16 ov, u16 abs_fid)
224{
225 REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_E1HOV_OFFSET(abs_fid), ov);
226}
227
228static inline void storm_memset_func_cfg(struct bnx2x *bp,
229 struct tstorm_eth_function_common_config *tcfg,
230 u16 abs_fid)
231{
232 size_t size = sizeof(struct tstorm_eth_function_common_config);
233
234 u32 addr = BAR_TSTRORM_INTMEM +
235 TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(abs_fid);
236
237 __storm_memset_struct(bp, addr, size, (u32 *)tcfg);
238}
239
240static inline void storm_memset_xstats_flags(struct bnx2x *bp,
241 struct stats_indication_flags *flags,
242 u16 abs_fid)
243{
244 size_t size = sizeof(struct stats_indication_flags);
245
246 u32 addr = BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(abs_fid);
247
248 __storm_memset_struct(bp, addr, size, (u32 *)flags);
249}
250
251static inline void storm_memset_tstats_flags(struct bnx2x *bp,
252 struct stats_indication_flags *flags,
253 u16 abs_fid)
254{
255 size_t size = sizeof(struct stats_indication_flags);
256
257 u32 addr = BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(abs_fid);
258
259 __storm_memset_struct(bp, addr, size, (u32 *)flags);
260}
261
262static inline void storm_memset_ustats_flags(struct bnx2x *bp,
263 struct stats_indication_flags *flags,
264 u16 abs_fid)
265{
266 size_t size = sizeof(struct stats_indication_flags);
267
268 u32 addr = BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(abs_fid);
269
270 __storm_memset_struct(bp, addr, size, (u32 *)flags);
271}
272
273static inline void storm_memset_cstats_flags(struct bnx2x *bp,
274 struct stats_indication_flags *flags,
275 u16 abs_fid)
276{
277 size_t size = sizeof(struct stats_indication_flags);
278
279 u32 addr = BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(abs_fid);
280
281 __storm_memset_struct(bp, addr, size, (u32 *)flags);
282}
283
284static inline void storm_memset_xstats_addr(struct bnx2x *bp,
285 dma_addr_t mapping, u16 abs_fid)
286{
287 u32 addr = BAR_XSTRORM_INTMEM +
288 XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(abs_fid);
289
290 __storm_memset_dma_mapping(bp, addr, mapping);
291}
292
293static inline void storm_memset_tstats_addr(struct bnx2x *bp,
294 dma_addr_t mapping, u16 abs_fid)
295{
296 u32 addr = BAR_TSTRORM_INTMEM +
297 TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(abs_fid);
298
299 __storm_memset_dma_mapping(bp, addr, mapping);
300}
301
302static inline void storm_memset_ustats_addr(struct bnx2x *bp,
303 dma_addr_t mapping, u16 abs_fid)
304{
305 u32 addr = BAR_USTRORM_INTMEM +
306 USTORM_ETH_STATS_QUERY_ADDR_OFFSET(abs_fid);
307
308 __storm_memset_dma_mapping(bp, addr, mapping);
309}
310
311static inline void storm_memset_cstats_addr(struct bnx2x *bp,
312 dma_addr_t mapping, u16 abs_fid)
313{
314 u32 addr = BAR_CSTRORM_INTMEM +
315 CSTORM_ETH_STATS_QUERY_ADDR_OFFSET(abs_fid);
316
317 __storm_memset_dma_mapping(bp, addr, mapping);
318}
319
320static inline void storm_memset_vf_to_pf(struct bnx2x *bp, u16 abs_fid,
321 u16 pf_id)
322{
323 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_VF_TO_PF_OFFSET(abs_fid),
324 pf_id);
325 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_VF_TO_PF_OFFSET(abs_fid),
326 pf_id);
327 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_VF_TO_PF_OFFSET(abs_fid),
328 pf_id);
329 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_VF_TO_PF_OFFSET(abs_fid),
330 pf_id);
331}
332
333static inline void storm_memset_func_en(struct bnx2x *bp, u16 abs_fid,
334 u8 enable)
335{
336 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(abs_fid),
337 enable);
338 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNC_EN_OFFSET(abs_fid),
339 enable);
340 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNC_EN_OFFSET(abs_fid),
341 enable);
342 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNC_EN_OFFSET(abs_fid),
343 enable);
344}
345
346static inline void storm_memset_eq_data(struct bnx2x *bp,
347 struct event_ring_data *eq_data,
348 u16 pfid)
349{
350 size_t size = sizeof(struct event_ring_data);
351
352 u32 addr = BAR_CSTRORM_INTMEM + CSTORM_EVENT_RING_DATA_OFFSET(pfid);
353
354 __storm_memset_struct(bp, addr, size, (u32 *)eq_data);
355}
356
357static inline void storm_memset_eq_prod(struct bnx2x *bp, u16 eq_prod,
358 u16 pfid)
359{
360 u32 addr = BAR_CSTRORM_INTMEM + CSTORM_EVENT_RING_PROD_OFFSET(pfid);
361 REG_WR16(bp, addr, eq_prod);
362}
363
/* Program the host-coalescing timeout (in 'ticks') of one status-block
 * index.  The index_data array sits at a chip-dependent offset inside the
 * status block data (E2 vs E1x layouts differ).
 */
static inline void storm_memset_hc_timeout(struct bnx2x *bp, u8 port,
					   u16 fw_sb_id, u8 sb_index,
					   u8 ticks)
{

	int index_offset = CHIP_IS_E2(bp) ?
		offsetof(struct hc_status_block_data_e2, index_data) :
		offsetof(struct hc_status_block_data_e1x, index_data);
	/* address of the 'timeout' byte of entry sb_index */
	u32 addr = BAR_CSTRORM_INTMEM +
			CSTORM_STATUS_BLOCK_DATA_OFFSET(fw_sb_id) +
			index_offset +
			sizeof(struct hc_index_data)*sb_index +
			offsetof(struct hc_index_data, timeout);
	REG_WR8(bp, addr, ticks);
	DP(NETIF_MSG_HW, "port %x fw_sb_id %d sb_index %d ticks %d\n",
			  port, fw_sb_id, sb_index, ticks);
}
/* Enable or disable host coalescing for one status-block index by
 * read-modify-writing the HC_ENABLED bit in that index's flags word.
 */
static inline void storm_memset_hc_disable(struct bnx2x *bp, u8 port,
					   u16 fw_sb_id, u8 sb_index,
					   u8 disable)
{
	u32 enable_flag = disable ? 0 : (1 << HC_INDEX_DATA_HC_ENABLED_SHIFT);
	int index_offset = CHIP_IS_E2(bp) ?
		offsetof(struct hc_status_block_data_e2, index_data) :
		offsetof(struct hc_status_block_data_e1x, index_data);
	/* address of the 'flags' word of entry sb_index */
	u32 addr = BAR_CSTRORM_INTMEM +
			CSTORM_STATUS_BLOCK_DATA_OFFSET(fw_sb_id) +
			index_offset +
			sizeof(struct hc_index_data)*sb_index +
			offsetof(struct hc_index_data, flags);
	u16 flags = REG_RD16(bp, addr);
	/* clear and set */
	flags &= ~HC_INDEX_DATA_HC_ENABLED;
	flags |= enable_flag;
	REG_WR16(bp, addr, flags);
	DP(NETIF_MSG_HW, "port %x fw_sb_id %d sb_index %d disable %d\n",
			  port, fw_sb_id, sb_index, disable);
}
402
/* used only at init
 * locking is done by mcp
 */
void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
{
	/* Indirect register write through the PCI config GRC window. */
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
	/* restore the window so normal config accesses work again */
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);
}
413
/* Indirect register read through the PCI config GRC window; the window is
 * restored to the vendor-ID offset afterwards.  Same locking rules as
 * bnx2x_reg_wr_ind().
 */
static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
{
	u32 val;

	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
	pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);

	return val;
}
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200425
#define DMAE_DP_SRC_GRC		"grc src_addr [%08x]"
#define DMAE_DP_SRC_PCI		"pci src_addr [%x:%08x]"
#define DMAE_DP_DST_GRC		"grc dst_addr [%08x]"
#define DMAE_DP_DST_PCI		"pci dst_addr [%x:%08x]"
#define DMAE_DP_DST_NONE	"dst_addr [none]"

/* Dump a DMAE command in human-readable form at debug level 'msglvl'.
 * The format depends on the source/destination types encoded in the
 * opcode: PCI addresses are printed as 64-bit hi:lo pairs, GRC addresses
 * as dword offsets (hence the '>> 2').
 */
void bnx2x_dp_dmae(struct bnx2x *bp, struct dmae_command *dmae, int msglvl)
{
	u32 src_type = dmae->opcode & DMAE_COMMAND_SRC;

	switch (dmae->opcode & DMAE_COMMAND_DST) {
	case DMAE_CMD_DST_PCI:
		if (src_type == DMAE_CMD_SRC_PCI)
			DP(msglvl, "DMAE: opcode 0x%08x\n"
			   "src [%x:%08x], len [%d*4], dst [%x:%08x]\n"
			   "comp_addr [%x:%08x], comp_val 0x%08x\n",
			   dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
			   dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo,
			   dmae->comp_addr_hi, dmae->comp_addr_lo,
			   dmae->comp_val);
		else
			DP(msglvl, "DMAE: opcode 0x%08x\n"
			   "src [%08x], len [%d*4], dst [%x:%08x]\n"
			   "comp_addr [%x:%08x], comp_val 0x%08x\n",
			   dmae->opcode, dmae->src_addr_lo >> 2,
			   dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo,
			   dmae->comp_addr_hi, dmae->comp_addr_lo,
			   dmae->comp_val);
		break;
	case DMAE_CMD_DST_GRC:
		if (src_type == DMAE_CMD_SRC_PCI)
			DP(msglvl, "DMAE: opcode 0x%08x\n"
			   "src [%x:%08x], len [%d*4], dst_addr [%08x]\n"
			   "comp_addr [%x:%08x], comp_val 0x%08x\n",
			   dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
			   dmae->len, dmae->dst_addr_lo >> 2,
			   dmae->comp_addr_hi, dmae->comp_addr_lo,
			   dmae->comp_val);
		else
			DP(msglvl, "DMAE: opcode 0x%08x\n"
			   "src [%08x], len [%d*4], dst [%08x]\n"
			   "comp_addr [%x:%08x], comp_val 0x%08x\n",
			   dmae->opcode, dmae->src_addr_lo >> 2,
			   dmae->len, dmae->dst_addr_lo >> 2,
			   dmae->comp_addr_hi, dmae->comp_addr_lo,
			   dmae->comp_val);
		break;
	default:
		/* no destination (completion-only command) */
		if (src_type == DMAE_CMD_SRC_PCI)
			DP(msglvl, "DMAE: opcode 0x%08x\n"
			   DP_LEVEL "src_addr [%x:%08x] len [%d * 4] "
				    "dst_addr [none]\n"
			   DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
			   dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
			   dmae->len, dmae->comp_addr_hi, dmae->comp_addr_lo,
			   dmae->comp_val);
		else
			DP(msglvl, "DMAE: opcode 0x%08x\n"
			   DP_LEVEL "src_addr [%08x] len [%d * 4] "
				    "dst_addr [none]\n"
			   DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
			   dmae->opcode, dmae->src_addr_lo >> 2,
			   dmae->len, dmae->comp_addr_hi, dmae->comp_addr_lo,
			   dmae->comp_val);
		break;
	}

}
494
Dmitry Kravkov6c719d02010-07-27 12:36:15 +0000495const u32 dmae_reg_go_c[] = {
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200496 DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
497 DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
498 DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11,
499 DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
500};
501
502/* copy command into DMAE command memory and set DMAE command go */
Dmitry Kravkov6c719d02010-07-27 12:36:15 +0000503void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae, int idx)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200504{
505 u32 cmd_offset;
506 int i;
507
508 cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
509 for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
510 REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));
511
Eilon Greensteinad8d3942008-06-23 20:29:02 -0700512 DP(BNX2X_MSG_OFF, "DMAE cmd[%d].%d (0x%08x) : 0x%08x\n",
513 idx, i, cmd_offset + i*4, *(((u32 *)dmae) + i));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200514 }
515 REG_WR(bp, dmae_reg_go_c[idx], 1);
516}
517
Dmitry Kravkovf2e08992010-10-06 03:28:26 +0000518u32 bnx2x_dmae_opcode_add_comp(u32 opcode, u8 comp_type)
519{
520 return opcode | ((comp_type << DMAE_COMMAND_C_DST_SHIFT) |
521 DMAE_CMD_C_ENABLE);
522}
523
524u32 bnx2x_dmae_opcode_clr_src_reset(u32 opcode)
525{
526 return opcode & ~DMAE_CMD_SRC_RESET;
527}
528
529u32 bnx2x_dmae_opcode(struct bnx2x *bp, u8 src_type, u8 dst_type,
530 bool with_comp, u8 comp_type)
531{
532 u32 opcode = 0;
533
534 opcode |= ((src_type << DMAE_COMMAND_SRC_SHIFT) |
535 (dst_type << DMAE_COMMAND_DST_SHIFT));
536
537 opcode |= (DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET);
538
539 opcode |= (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0);
540 opcode |= ((BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT) |
541 (BP_E1HVN(bp) << DMAE_COMMAND_DST_VN_SHIFT));
542 opcode |= (DMAE_COM_SET_ERR << DMAE_COMMAND_ERR_POLICY_SHIFT);
543
544#ifdef __BIG_ENDIAN
545 opcode |= DMAE_CMD_ENDIANITY_B_DW_SWAP;
546#else
547 opcode |= DMAE_CMD_ENDIANITY_DW_SWAP;
548#endif
549 if (with_comp)
550 opcode = bnx2x_dmae_opcode_add_comp(opcode, comp_type);
551 return opcode;
552}
553
/* Initialize a DMAE command whose completion is signalled by the hardware
 * writing DMAE_COMP_VAL into the slow-path wb_comp word.
 */
void bnx2x_prep_dmae_with_comp(struct bnx2x *bp, struct dmae_command *dmae,
			       u8 src_type, u8 dst_type)
{
	memset(dmae, 0, sizeof(struct dmae_command));

	/* set the opcode */
	dmae->opcode = bnx2x_dmae_opcode(bp, src_type, dst_type,
					 true, DMAE_COMP_PCI);

	/* fill in the completion parameters */
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_val = DMAE_COMP_VAL;
}
568
/* issue a dmae command over the init-channel and wait for completion */
int bnx2x_issue_dmae_with_comp(struct bnx2x *bp, struct dmae_command *dmae)
{
	u32 *wb_comp = bnx2x_sp(bp, wb_comp);
	/* emulation/FPGA platforms are much slower; allow far more polls */
	int cnt = CHIP_REV_IS_SLOW(bp) ? (400000) : 40;
	int rc = 0;

	DP(BNX2X_MSG_OFF, "data before [0x%08x 0x%08x 0x%08x 0x%08x]\n",
	   bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
	   bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

	/* lock the dmae channel */
	mutex_lock(&bp->dmae_mutex);

	/* reset completion */
	*wb_comp = 0;

	/* post the command on the channel used for initializations */
	bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));

	/* wait for completion */
	udelay(5);
	while ((*wb_comp & ~DMAE_PCI_ERR_FLAG) != DMAE_COMP_VAL) {
		DP(BNX2X_MSG_OFF, "wb_comp 0x%08x\n", *wb_comp);

		if (!cnt) {
			BNX2X_ERR("DMAE timeout!\n");
			rc = DMAE_TIMEOUT;
			goto unlock;
		}
		cnt--;
		udelay(50);
	}
	/* completion arrived; check whether it flags a PCI error */
	if (*wb_comp & DMAE_PCI_ERR_FLAG) {
		BNX2X_ERR("DMAE PCI error!\n");
		rc = DMAE_PCI_ERROR;
	}

	DP(BNX2X_MSG_OFF, "data after [0x%08x 0x%08x 0x%08x 0x%08x]\n",
	   bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
	   bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

unlock:
	mutex_unlock(&bp->dmae_mutex);
	return rc;
}
615
/* DMA 'len32' dwords from host memory (dma_addr) to GRC address dst_addr.
 * Before DMAE is operational, falls back to indirect register writes of
 * the data already staged in the slow-path wb_data buffer.
 */
void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
		      u32 len32)
{
	struct dmae_command dmae;

	if (!bp->dmae_ready) {
		u32 *data = bnx2x_sp(bp, wb_data[0]);

		DP(BNX2X_MSG_OFF, "DMAE is not ready (dst_addr %08x len32 %d)"
		   " using indirect\n", dst_addr, len32);
		bnx2x_init_ind_wr(bp, dst_addr, data, len32);
		return;
	}

	/* set opcode and fixed command fields */
	bnx2x_prep_dmae_with_comp(bp, &dmae, DMAE_SRC_PCI, DMAE_DST_GRC);

	/* fill in addresses and len */
	dmae.src_addr_lo = U64_LO(dma_addr);
	dmae.src_addr_hi = U64_HI(dma_addr);
	dmae.dst_addr_lo = dst_addr >> 2;	/* GRC addresses are dwords */
	dmae.dst_addr_hi = 0;
	dmae.len = len32;

	bnx2x_dp_dmae(bp, &dmae, BNX2X_MSG_OFF);

	/* issue the command and wait for completion */
	bnx2x_issue_dmae_with_comp(bp, &dmae);
}
645
/* DMA 'len32' dwords from GRC address src_addr into the slow-path wb_data
 * buffer.  Before DMAE is operational, falls back to indirect register
 * reads.
 */
void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
{
	struct dmae_command dmae;

	if (!bp->dmae_ready) {
		u32 *data = bnx2x_sp(bp, wb_data[0]);
		int i;

		DP(BNX2X_MSG_OFF, "DMAE is not ready (src_addr %08x len32 %d)"
		   " using indirect\n", src_addr, len32);
		for (i = 0; i < len32; i++)
			data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
		return;
	}

	/* set opcode and fixed command fields */
	bnx2x_prep_dmae_with_comp(bp, &dmae, DMAE_SRC_GRC, DMAE_DST_PCI);

	/* fill in addresses and len */
	dmae.src_addr_lo = src_addr >> 2;	/* GRC addresses are dwords */
	dmae.src_addr_hi = 0;
	dmae.dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
	dmae.dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
	dmae.len = len32;

	bnx2x_dp_dmae(bp, &dmae, BNX2X_MSG_OFF);

	/* issue the command and wait for completion */
	bnx2x_issue_dmae_with_comp(bp, &dmae);
}
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200676
/* DMA a buffer that may exceed the per-command DMAE length limit by
 * issuing successive maximum-sized writes followed by one final partial
 * write.  'len' is in dwords; 'offset' advances in bytes (hence the *4).
 */
void bnx2x_write_dmae_phys_len(struct bnx2x *bp, dma_addr_t phys_addr,
			       u32 addr, u32 len)
{
	int dmae_wr_max = DMAE_LEN32_WR_MAX(bp);
	int offset = 0;

	while (len > dmae_wr_max) {
		bnx2x_write_dmae(bp, phys_addr + offset,
				 addr + offset, dmae_wr_max);
		offset += dmae_wr_max * 4;
		len -= dmae_wr_max;
	}

	bnx2x_write_dmae(bp, phys_addr + offset, addr + offset, len);
}
692
Eilon Greensteinad8d3942008-06-23 20:29:02 -0700693/* used only for slowpath so not inlined */
694static void bnx2x_wb_wr(struct bnx2x *bp, int reg, u32 val_hi, u32 val_lo)
695{
696 u32 wb_write[2];
697
698 wb_write[0] = val_hi;
699 wb_write[1] = val_lo;
700 REG_WR_DMAE(bp, reg, wb_write, 2);
701}
702
#ifdef USE_WB_RD
/* Read a 64-bit wide-bus register ({hi, lo} dword pair) via DMAE. */
static u64 bnx2x_wb_rd(struct bnx2x *bp, int reg)
{
	u32 wb_data[2];

	REG_RD_DMAE(bp, reg, wb_data, 2);

	return HILO_U64(wb_data[0], wb_data[1]);
}
#endif
713
/* Scan the assert lists of all four storm processors (X/T/C/U) and print
 * every valid entry; scanning of each list stops at the first invalid
 * opcode.  Returns the total number of asserts found.
 */
static int bnx2x_mc_assert(struct bnx2x *bp)
{
	char last_idx;
	int i, rc = 0;
	u32 row0, row1, row2, row3;

	/* XSTORM */
	last_idx = REG_RD8(bp, BAR_XSTRORM_INTMEM +
			   XSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		/* each assert entry is four consecutive dwords */
		row0 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* TSTORM */
	last_idx = REG_RD8(bp, BAR_TSTRORM_INTMEM +
			   TSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* CSTORM */
	last_idx = REG_RD8(bp, BAR_CSTRORM_INTMEM +
			   CSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* USTORM */
	last_idx = REG_RD8(bp, BAR_USTRORM_INTMEM +
			   USTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	return rc;
}
Eliezer Tamirc14423f2008-02-28 11:49:42 -0800834
/*
 * bnx2x_fw_dump - print the MCP firmware trace buffer to the kernel log.
 *
 * Reads the trace "mark" from just below the path's shmem base (path 0
 * uses the local shmem base, path 1 reads the other path's base from
 * shmem2), converts it to an MCP scratchpad address, then prints the
 * trace in two passes (mark..end, then start..mark — apparently a
 * circular buffer) as NUL-terminated 32-byte text chunks.
 * Bails out when there is no managing MCP (BP_NOMCP).
 */
static void bnx2x_fw_dump(struct bnx2x *bp)
{
	u32 addr;
	u32 mark, offset;
	__be32 data[9];		/* 8 trace words + one zero word as terminator */
	int word;
	u32 trace_shmem_base;
	if (BP_NOMCP(bp)) {
		BNX2X_ERR("NO MCP - can not dump\n");
		return;
	}

	/* the trace area sits 0x800 bytes below this path's shmem base */
	if (BP_PATH(bp) == 0)
		trace_shmem_base = bp->common.shmem_base;
	else
		trace_shmem_base = SHMEM2_RD(bp, other_shmem_base_addr);
	addr = trace_shmem_base - 0x0800 + 4;
	mark = REG_RD(bp, addr);
	/* translate the mark into a dword-aligned MCP scratchpad address */
	mark = (CHIP_IS_E1x(bp) ? MCP_REG_MCPR_SCRATCH : MCP_A_REG_MCPR_SCRATCH)
		+ ((mark + 0x3) & ~0x3) - 0x08000000;
	pr_err("begin fw dump (mark 0x%x)\n", mark);

	pr_err("");
	/* first pass: from the mark to the end of the trace area */
	for (offset = mark; offset <= trace_shmem_base; offset += 0x8*4) {
		for (word = 0; word < 8; word++)
			data[word] = htonl(REG_RD(bp, offset + 4*word));
		data[8] = 0x0;	/* ensure the chunk prints as a C string */
		pr_cont("%s", (char *)data);
	}
	/* second pass: from the start of the trace area up to the mark */
	for (offset = addr + 4; offset <= mark; offset += 0x8*4) {
		for (word = 0; word < 8; word++)
			data[word] = htonl(REG_RD(bp, offset + 4*word));
		data[8] = 0x0;
		pr_cont("%s", (char *)data);
	}
	pr_err("end of fw dump\n");
}
872
/*
 * bnx2x_panic_dump - dump driver and chip state to the log after a fatal
 * error.
 *
 * Disables statistics, then prints: the common/default status block
 * indices, the slowpath status block data (read back from CSTORM
 * internal memory), and per-queue Rx/Tx indices plus the firmware-side
 * status block data for each fastpath.  When BNX2X_STOP_ON_ERROR is set
 * it additionally walks the Rx/Tx rings around the current consumers.
 * Finishes by dumping the MCP firmware trace and storm asserts.
 */
void bnx2x_panic_dump(struct bnx2x *bp)
{
	int i;
	u16 j;
	struct hc_sp_status_block_data sp_sb_data;
	int func = BP_FUNC(bp);
#ifdef BNX2X_STOP_ON_ERROR
	u16 start = 0, end = 0;
#endif

	/* stop statistics so they do not race with the dump */
	bp->stats_state = STATS_STATE_DISABLED;
	DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");

	BNX2X_ERR("begin crash dump -----------------\n");

	/* Indices */
	/* Common */
	BNX2X_ERR("def_idx(0x%x) def_att_idx(0x%x) attn_state(0x%x)"
		  " spq_prod_idx(0x%x)\n",
		  bp->def_idx, bp->def_att_idx,
		  bp->attn_state, bp->spq_prod_idx);
	BNX2X_ERR("DSB: attn bits(0x%x) ack(0x%x) id(0x%x) idx(0x%x)\n",
		  bp->def_status_blk->atten_status_block.attn_bits,
		  bp->def_status_blk->atten_status_block.attn_bits_ack,
		  bp->def_status_blk->atten_status_block.status_block_id,
		  bp->def_status_blk->atten_status_block.attn_bits_index);
	BNX2X_ERR(" def (");
	for (i = 0; i < HC_SP_SB_MAX_INDICES; i++)
		pr_cont("0x%x%s",
			bp->def_status_blk->sp_sb.index_values[i],
			(i == HC_SP_SB_MAX_INDICES - 1) ? ") " : " ");

	/* read the slowpath SB data back from CSTORM internal memory,
	 * one dword at a time */
	for (i = 0; i < sizeof(struct hc_sp_status_block_data)/sizeof(u32); i++)
		*((u32 *)&sp_sb_data + i) = REG_RD(bp, BAR_CSTRORM_INTMEM +
			CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(func) +
			i*sizeof(u32));

	pr_cont("igu_sb_id(0x%x) igu_seg_id (0x%x) "
		"pf_id(0x%x) vnic_id(0x%x) "
		"vf_id(0x%x) vf_valid (0x%x)\n",
		sp_sb_data.igu_sb_id,
		sp_sb_data.igu_seg_id,
		sp_sb_data.p_func.pf_id,
		sp_sb_data.p_func.vnic_id,
		sp_sb_data.p_func.vf_id,
		sp_sb_data.p_func.vf_valid);


	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];
		int loop;
		struct hc_status_block_data_e2 sb_data_e2;
		struct hc_status_block_data_e1x sb_data_e1x;
		/* chip-dependent views into the (not yet filled) SB data;
		 * the copy loop below populates the selected struct */
		struct hc_status_block_sm *hc_sm_p =
			CHIP_IS_E2(bp) ?
			sb_data_e2.common.state_machine :
			sb_data_e1x.common.state_machine;
		struct hc_index_data *hc_index_p =
			CHIP_IS_E2(bp) ?
			sb_data_e2.index_data :
			sb_data_e1x.index_data;
		int data_size;
		u32 *sb_data_p;

		/* Rx */
		BNX2X_ERR("fp%d: rx_bd_prod(0x%x) rx_bd_cons(0x%x)"
			  " rx_comp_prod(0x%x)"
			  " rx_comp_cons(0x%x) *rx_cons_sb(0x%x)\n",
			  i, fp->rx_bd_prod, fp->rx_bd_cons,
			  fp->rx_comp_prod,
			  fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
		BNX2X_ERR(" rx_sge_prod(0x%x) last_max_sge(0x%x)"
			  " fp_hc_idx(0x%x)\n",
			  fp->rx_sge_prod, fp->last_max_sge,
			  le16_to_cpu(fp->fp_hc_idx));

		/* Tx */
		BNX2X_ERR("fp%d: tx_pkt_prod(0x%x) tx_pkt_cons(0x%x)"
			  " tx_bd_prod(0x%x) tx_bd_cons(0x%x)"
			  " *tx_cons_sb(0x%x)\n",
			  i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
			  fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb));

		loop = CHIP_IS_E2(bp) ?
			HC_SB_MAX_INDICES_E2 : HC_SB_MAX_INDICES_E1X;

		/* host sb data */

		BNX2X_ERR(" run indexes (");
		for (j = 0; j < HC_SB_MAX_SM; j++)
			pr_cont("0x%x%s",
				fp->sb_running_index[j],
				(j == HC_SB_MAX_SM - 1) ? ")" : " ");

		BNX2X_ERR(" indexes (");
		for (j = 0; j < loop; j++)
			pr_cont("0x%x%s",
				fp->sb_index_values[j],
				(j == loop - 1) ? ")" : " ");
		/* fw sb data */
		data_size = CHIP_IS_E2(bp) ?
			sizeof(struct hc_status_block_data_e2) :
			sizeof(struct hc_status_block_data_e1x);
		data_size /= sizeof(u32);
		sb_data_p = CHIP_IS_E2(bp) ?
			(u32 *)&sb_data_e2 :
			(u32 *)&sb_data_e1x;
		/* copy sb data in here */
		for (j = 0; j < data_size; j++)
			*(sb_data_p + j) = REG_RD(bp, BAR_CSTRORM_INTMEM +
				CSTORM_STATUS_BLOCK_DATA_OFFSET(fp->fw_sb_id) +
				j * sizeof(u32));

		if (CHIP_IS_E2(bp)) {
			pr_cont("pf_id(0x%x) vf_id (0x%x) vf_valid(0x%x) "
				"vnic_id(0x%x) same_igu_sb_1b(0x%x)\n",
				sb_data_e2.common.p_func.pf_id,
				sb_data_e2.common.p_func.vf_id,
				sb_data_e2.common.p_func.vf_valid,
				sb_data_e2.common.p_func.vnic_id,
				sb_data_e2.common.same_igu_sb_1b);
		} else {
			pr_cont("pf_id(0x%x) vf_id (0x%x) vf_valid(0x%x) "
				"vnic_id(0x%x) same_igu_sb_1b(0x%x)\n",
				sb_data_e1x.common.p_func.pf_id,
				sb_data_e1x.common.p_func.vf_id,
				sb_data_e1x.common.p_func.vf_valid,
				sb_data_e1x.common.p_func.vnic_id,
				sb_data_e1x.common.same_igu_sb_1b);
		}

		/* SB_SMs data */
		for (j = 0; j < HC_SB_MAX_SM; j++) {
			pr_cont("SM[%d] __flags (0x%x) "
				"igu_sb_id (0x%x) igu_seg_id(0x%x) "
				"time_to_expire (0x%x) "
				"timer_value(0x%x)\n", j,
				hc_sm_p[j].__flags,
				hc_sm_p[j].igu_sb_id,
				hc_sm_p[j].igu_seg_id,
				hc_sm_p[j].time_to_expire,
				hc_sm_p[j].timer_value);
		}

		/* Indecies data */
		for (j = 0; j < loop; j++) {
			pr_cont("INDEX[%d] flags (0x%x) "
				"timeout (0x%x)\n", j,
				hc_index_p[j].flags,
				hc_index_p[j].timeout);
		}
	}

#ifdef BNX2X_STOP_ON_ERROR
	/* Rings */
	/* Rx */
	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		/* dump a window of BDs around the current consumer */
		start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
		end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
		for (j = start; j != end; j = RX_BD(j + 1)) {
			u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
			struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];

			BNX2X_ERR("fp%d: rx_bd[%x]=[%x:%x] sw_bd=[%p]\n",
				  i, j, rx_bd[1], rx_bd[0], sw_bd->skb);
		}

		start = RX_SGE(fp->rx_sge_prod);
		end = RX_SGE(fp->last_max_sge);
		for (j = start; j != end; j = RX_SGE(j + 1)) {
			u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j];
			struct sw_rx_page *sw_page = &fp->rx_page_ring[j];

			BNX2X_ERR("fp%d: rx_sge[%x]=[%x:%x] sw_page=[%p]\n",
				  i, j, rx_sge[1], rx_sge[0], sw_page->page);
		}

		start = RCQ_BD(fp->rx_comp_cons - 10);
		end = RCQ_BD(fp->rx_comp_cons + 503);
		for (j = start; j != end; j = RCQ_BD(j + 1)) {
			u32 *cqe = (u32 *)&fp->rx_comp_ring[j];

			BNX2X_ERR("fp%d: cqe[%x]=[%x:%x:%x:%x]\n",
				  i, j, cqe[0], cqe[1], cqe[2], cqe[3]);
		}
	}

	/* Tx */
	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10);
		end = TX_BD(le16_to_cpu(*fp->tx_cons_sb) + 245);
		for (j = start; j != end; j = TX_BD(j + 1)) {
			struct sw_tx_bd *sw_bd = &fp->tx_buf_ring[j];

			BNX2X_ERR("fp%d: packet[%x]=[%p,%x]\n",
				  i, j, sw_bd->skb, sw_bd->first_bd);
		}

		start = TX_BD(fp->tx_bd_cons - 10);
		end = TX_BD(fp->tx_bd_cons + 254);
		for (j = start; j != end; j = TX_BD(j + 1)) {
			u32 *tx_bd = (u32 *)&fp->tx_desc_ring[j];

			BNX2X_ERR("fp%d: tx_bd[%x]=[%x:%x:%x:%x]\n",
				  i, j, tx_bd[0], tx_bd[1], tx_bd[2], tx_bd[3]);
		}
	}
#endif
	bnx2x_fw_dump(bp);
	bnx2x_mc_assert(bp);
	BNX2X_ERR("end crash dump -----------------\n");
}
1089
/*
 * bnx2x_hc_int_enable - enable interrupt generation through the HC block.
 *
 * Programs the per-port HC config register for the interrupt mode in use
 * (MSI-X, MSI or INTx), then (on non-E1 chips) opens the leading/trailing
 * edge attention masks, with barriers enforcing the write ordering.
 */
static void bnx2x_hc_int_enable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	u32 val = REG_RD(bp, addr);
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
	int msi = (bp->flags & USING_MSI_FLAG) ? 1 : 0;

	if (msix) {
		/* MSI-X: no single-ISR mode, no INTx line */
		val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			 HC_CONFIG_0_REG_INT_LINE_EN_0);
		val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);
	} else if (msi) {
		/* MSI: single ISR, no INTx line */
		val &= ~HC_CONFIG_0_REG_INT_LINE_EN_0;
		val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);
	} else {
		/*
		 * INTx: deliberate two-step sequence - first write the
		 * config with the MSI/MSI-X bit also set, then clear that
		 * bit for the final write below.  NOTE(review): presumably
		 * a HW requirement; confirm before changing this order.
		 */
		val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_INT_LINE_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);

		DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
		   val, port, addr);

		REG_WR(bp, addr, val);

		val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
	}

	DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x) mode %s\n",
	   val, port, addr, (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));

	REG_WR(bp, addr, val);
	/*
	 * Ensure that HC_CONFIG is written before leading/trailing edge config
	 */
	mmiowb();
	barrier();

	if (!CHIP_IS_E1(bp)) {
		/* init leading/trailing edge */
		if (IS_MF(bp)) {
			val = (0xee0f | (1 << (BP_E1HVN(bp) + 4)));
			if (bp->port.pmf)
				/* enable nig and gpio3 attention */
				val |= 0x1100;
		} else
			val = 0xffff;

		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
	}

	/* Make sure that interrupts are indeed enabled from here on */
	mmiowb();
}
1149
/*
 * bnx2x_igu_int_enable - enable interrupt generation through the IGU block.
 *
 * Counterpart of bnx2x_hc_int_enable() for chips whose interrupt block
 * is the IGU: programs IGU_REG_PF_CONFIGURATION for the interrupt mode
 * in use, then opens the leading/trailing edge attention latches.
 */
static void bnx2x_igu_int_enable(struct bnx2x *bp)
{
	u32 val;
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
	int msi = (bp->flags & USING_MSI_FLAG) ? 1 : 0;

	val = REG_RD(bp, IGU_REG_PF_CONFIGURATION);

	if (msix) {
		/* MSI-X: no INTx line, no single-ISR mode */
		val &= ~(IGU_PF_CONF_INT_LINE_EN |
			 IGU_PF_CONF_SINGLE_ISR_EN);
		val |= (IGU_PF_CONF_FUNC_EN |
			IGU_PF_CONF_MSI_MSIX_EN |
			IGU_PF_CONF_ATTN_BIT_EN);
	} else if (msi) {
		/* MSI: single ISR, no INTx line */
		val &= ~IGU_PF_CONF_INT_LINE_EN;
		val |= (IGU_PF_CONF_FUNC_EN |
			IGU_PF_CONF_MSI_MSIX_EN |
			IGU_PF_CONF_ATTN_BIT_EN |
			IGU_PF_CONF_SINGLE_ISR_EN);
	} else {
		/* INTx: single ISR on the interrupt line */
		val &= ~IGU_PF_CONF_MSI_MSIX_EN;
		val |= (IGU_PF_CONF_FUNC_EN |
			IGU_PF_CONF_INT_LINE_EN |
			IGU_PF_CONF_ATTN_BIT_EN |
			IGU_PF_CONF_SINGLE_ISR_EN);
	}

	DP(NETIF_MSG_INTR, "write 0x%x to IGU mode %s\n",
	   val, (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));

	REG_WR(bp, IGU_REG_PF_CONFIGURATION, val);

	/* order the config write before the edge latch writes below */
	barrier();

	/* init leading/trailing edge */
	if (IS_MF(bp)) {
		val = (0xee0f | (1 << (BP_E1HVN(bp) + 4)));
		if (bp->port.pmf)
			/* enable nig and gpio3 attention */
			val |= 0x1100;
	} else
		val = 0xffff;

	REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, val);
	REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, val);

	/* Make sure that interrupts are indeed enabled from here on */
	mmiowb();
}
1200
1201void bnx2x_int_enable(struct bnx2x *bp)
1202{
1203 if (bp->common.int_block == INT_BLOCK_HC)
1204 bnx2x_hc_int_enable(bp);
1205 else
1206 bnx2x_igu_int_enable(bp);
1207}
1208
/*
 * bnx2x_hc_int_disable - mask all interrupt sources in the HC block.
 *
 * Clears the single-ISR, MSI/MSI-X, INTx and attention enable bits of
 * the per-port HC config register and verifies the write by reading it
 * back.
 */
static void bnx2x_hc_int_disable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	u32 val = REG_RD(bp, addr);

	val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
		 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
		 HC_CONFIG_0_REG_INT_LINE_EN_0 |
		 HC_CONFIG_0_REG_ATTN_BIT_EN_0);

	DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
	   val, port, addr);

	/* flush all outstanding writes */
	mmiowb();

	REG_WR(bp, addr, val);
	/* read back to confirm the disable actually took effect */
	if (REG_RD(bp, addr) != val)
		BNX2X_ERR("BUG! proper val not read from IGU!\n");
}
1230
/*
 * bnx2x_igu_int_disable - mask all interrupt sources in the IGU block.
 *
 * Clears the MSI/MSI-X, INTx and attention enable bits of
 * IGU_REG_PF_CONFIGURATION and verifies the write by reading it back.
 */
static void bnx2x_igu_int_disable(struct bnx2x *bp)
{
	u32 val = REG_RD(bp, IGU_REG_PF_CONFIGURATION);

	val &= ~(IGU_PF_CONF_MSI_MSIX_EN |
		 IGU_PF_CONF_INT_LINE_EN |
		 IGU_PF_CONF_ATTN_BIT_EN);

	DP(NETIF_MSG_INTR, "write %x to IGU\n", val);

	/* flush all outstanding writes */
	mmiowb();

	REG_WR(bp, IGU_REG_PF_CONFIGURATION, val);
	/* read back to confirm the disable actually took effect */
	if (REG_RD(bp, IGU_REG_PF_CONFIGURATION) != val)
		BNX2X_ERR("BUG! proper val not read from IGU!\n");
}
1248
1249void bnx2x_int_disable(struct bnx2x *bp)
1250{
1251 if (bp->common.int_block == INT_BLOCK_HC)
1252 bnx2x_hc_int_disable(bp);
1253 else
1254 bnx2x_igu_int_disable(bp);
1255}
1256
/*
 * bnx2x_int_disable_sync - disable interrupts and wait for in-flight
 * handlers to finish.
 *
 * @disable_hw: when set, also mask interrupts at the chip level via
 *		bnx2x_int_disable().
 *
 * Raising intr_sem makes bnx2x_interrupt() bail out early; the smp_wmb()
 * publishes that update before the ISRs are synchronized.  Finally the
 * slowpath work item is cancelled/flushed so no deferred handling is
 * left running.
 */
void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
{
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
	int i, offset;

	/* disable interrupt handling */
	atomic_inc(&bp->intr_sem);
	smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */

	if (disable_hw)
		/* prevent the HW from sending interrupts */
		bnx2x_int_disable(bp);

	/* make sure all ISRs are done */
	if (msix) {
		synchronize_irq(bp->msix_table[0].vector);
		offset = 1;
#ifdef BCM_CNIC
		/* skip the CNIC vector as well */
		offset++;
#endif
		for_each_queue(bp, i)
			synchronize_irq(bp->msix_table[i + offset].vector);
	} else
		synchronize_irq(bp->pdev->irq);

	/* make sure sp_task is not running */
	cancel_delayed_work(&bp->sp_task);
	flush_workqueue(bnx2x_wq);
}
1286
Eilon Greenstein34f80b02008-06-23 20:33:01 -07001287/* fast path */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001288
1289/*
Eilon Greenstein34f80b02008-06-23 20:33:01 -07001290 * General service functions
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001291 */
1292
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00001293/* Return true if succeeded to acquire the lock */
1294static bool bnx2x_trylock_hw_lock(struct bnx2x *bp, u32 resource)
1295{
1296 u32 lock_status;
1297 u32 resource_bit = (1 << resource);
1298 int func = BP_FUNC(bp);
1299 u32 hw_lock_control_reg;
1300
1301 DP(NETIF_MSG_HW, "Trying to take a lock on resource %d\n", resource);
1302
1303 /* Validating that the resource is within range */
1304 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1305 DP(NETIF_MSG_HW,
1306 "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1307 resource, HW_LOCK_MAX_RESOURCE_VALUE);
Eric Dumazet0fdf4d02010-08-26 22:03:53 -07001308 return false;
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00001309 }
1310
1311 if (func <= 5)
1312 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1313 else
1314 hw_lock_control_reg =
1315 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1316
1317 /* Try to acquire the lock */
1318 REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
1319 lock_status = REG_RD(bp, hw_lock_control_reg);
1320 if (lock_status & resource_bit)
1321 return true;
1322
1323 DP(NETIF_MSG_HW, "Failed to get a lock on resource %d\n", resource);
1324 return false;
1325}
1326
Michael Chan993ac7b2009-10-10 13:46:56 +00001327#ifdef BCM_CNIC
1328static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid);
1329#endif
Eilon Greenstein3196a882008-08-13 15:58:49 -07001330
/*
 * bnx2x_sp_event - handle a ramrod completion CQE on a fastpath queue.
 *
 * Advances the fastpath state machine (OPENING -> OPEN,
 * HALTING -> HALTED, TERMINATING -> TERMINATED) according to the ramrod
 * command carried in the CQE, returns the SPQ credit, and publishes the
 * state change with a write barrier.
 */
void bnx2x_sp_event(struct bnx2x_fastpath *fp,
			   union eth_rx_cqe *rr_cqe)
{
	struct bnx2x *bp = fp->bp;
	int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
	int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);

	DP(BNX2X_MSG_SP,
	   "fp %d cid %d got ramrod #%d state is %x type is %d\n",
	   fp->index, cid, command, bp->state,
	   rr_cqe->ramrod_cqe.ramrod_type);

	/* the switch key combines the ramrod command with the current
	 * fastpath state, so only the expected transitions match */
	switch (command | fp->state) {
	case (RAMROD_CMD_ID_ETH_CLIENT_SETUP | BNX2X_FP_STATE_OPENING):
		DP(NETIF_MSG_IFUP, "got MULTI[%d] setup ramrod\n", cid);
		fp->state = BNX2X_FP_STATE_OPEN;
		break;

	case (RAMROD_CMD_ID_ETH_HALT | BNX2X_FP_STATE_HALTING):
		DP(NETIF_MSG_IFDOWN, "got MULTI[%d] halt ramrod\n", cid);
		fp->state = BNX2X_FP_STATE_HALTED;
		break;

	case (RAMROD_CMD_ID_ETH_TERMINATE | BNX2X_FP_STATE_TERMINATING):
		DP(NETIF_MSG_IFDOWN, "got MULTI[%d] teminate ramrod\n", cid);
		fp->state = BNX2X_FP_STATE_TERMINATED;
		break;

	default:
		BNX2X_ERR("unexpected MC reply (%d) "
			  "fp[%d] state is %x\n",
			  command, fp->index, fp->state);
		break;
	}

	/* return the SPQ credit consumed by this ramrod */
	smp_mb__before_atomic_inc();
	atomic_inc(&bp->spq_left);
	/* push the change in fp->state and towards the memory */
	smp_wmb();

	return;
}
1373
/*
 * bnx2x_interrupt - main (non-MSI-X) interrupt service routine.
 *
 * Acks the chip to get the interrupt status, then: schedules NAPI for
 * every fastpath queue whose status bit is set, forwards CNIC
 * interrupts to the registered cnic handler (BCM_CNIC builds), and
 * queues the slowpath task for bit 0.  Returns IRQ_NONE when the
 * (possibly shared) interrupt was not ours.
 */
irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
{
	struct bnx2x *bp = netdev_priv(dev_instance);
	u16 status = bnx2x_ack_int(bp);
	u16 mask;
	int i;

	/* Return here if interrupt is shared and it's not for us */
	if (unlikely(status == 0)) {
		DP(NETIF_MSG_INTR, "not our interrupt!\n");
		return IRQ_NONE;
	}
	DP(NETIF_MSG_INTR, "got an interrupt status 0x%x\n", status);

	/* Return here if interrupt is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
		return IRQ_HANDLED;
	}

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif

	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		/* each queue's status bit sits above the CNIC ones */
		mask = 0x2 << (fp->index + CNIC_CONTEXT_USE);
		if (status & mask) {
			/* Handle Rx and Tx according to SB id */
			prefetch(fp->rx_cons_sb);
			prefetch(fp->tx_cons_sb);
			prefetch(&fp->sb_running_index[SM_RX_ID]);
			napi_schedule(&bnx2x_fp(bp, fp->index, napi));
			status &= ~mask;
		}
	}

#ifdef BCM_CNIC
	mask = 0x2;
	if (status & (mask | 0x1)) {
		struct cnic_ops *c_ops = NULL;

		rcu_read_lock();
		c_ops = rcu_dereference(bp->cnic_ops);
		if (c_ops)
			c_ops->cnic_handler(bp->cnic_data, NULL);
		rcu_read_unlock();

		status &= ~mask;
	}
#endif

	/* bit 0: slowpath events, handled in process context */
	if (unlikely(status & 0x1)) {
		queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);

		status &= ~0x1;
		if (!status)
			return IRQ_HANDLED;
	}

	/* anything still set here is a status bit we do not know about */
	if (unlikely(status))
		DP(NETIF_MSG_INTR, "got an unknown interrupt! (status 0x%x)\n",
		   status);

	return IRQ_HANDLED;
}
1442
1443/* end of fast path */
1444
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07001445
1446/* Link */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001447
1448/*
1449 * General service functions
1450 */
1451
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001452int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
Eliezer Tamirf1410642008-02-28 11:51:50 -08001453{
Eliezer Tamirf1410642008-02-28 11:51:50 -08001454 u32 lock_status;
1455 u32 resource_bit = (1 << resource);
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07001456 int func = BP_FUNC(bp);
1457 u32 hw_lock_control_reg;
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07001458 int cnt;
Eliezer Tamirf1410642008-02-28 11:51:50 -08001459
1460 /* Validating that the resource is within range */
1461 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1462 DP(NETIF_MSG_HW,
1463 "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1464 resource, HW_LOCK_MAX_RESOURCE_VALUE);
1465 return -EINVAL;
1466 }
1467
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07001468 if (func <= 5) {
1469 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1470 } else {
1471 hw_lock_control_reg =
1472 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1473 }
1474
Eliezer Tamirf1410642008-02-28 11:51:50 -08001475 /* Validating that the resource is not already taken */
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07001476 lock_status = REG_RD(bp, hw_lock_control_reg);
Eliezer Tamirf1410642008-02-28 11:51:50 -08001477 if (lock_status & resource_bit) {
1478 DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
1479 lock_status, resource_bit);
1480 return -EEXIST;
1481 }
1482
Eilon Greenstein46230476b2008-08-25 15:23:30 -07001483 /* Try for 5 second every 5ms */
1484 for (cnt = 0; cnt < 1000; cnt++) {
Eliezer Tamirf1410642008-02-28 11:51:50 -08001485 /* Try to acquire the lock */
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07001486 REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
1487 lock_status = REG_RD(bp, hw_lock_control_reg);
Eliezer Tamirf1410642008-02-28 11:51:50 -08001488 if (lock_status & resource_bit)
1489 return 0;
1490
1491 msleep(5);
1492 }
1493 DP(NETIF_MSG_HW, "Timeout\n");
1494 return -EAGAIN;
1495}
1496
/* Release a previously acquired HW resource lock.
 *
 * @bp:       driver handle
 * @resource: HW_LOCK_RESOURCE_* identifier (one bit per resource)
 *
 * Returns 0 on success, -EINVAL if @resource is out of range, and
 * -EFAULT if the lock was not actually held (nothing to release).
 */
int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
{
	u32 lock_status;
	u32 resource_bit = (1 << resource);
	int func = BP_FUNC(bp);
	u32 hw_lock_control_reg;

	DP(NETIF_MSG_HW, "Releasing a lock on resource %d\n", resource);

	/* Validating that the resource is within range */
	if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
		DP(NETIF_MSG_HW,
		   "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
		   resource, HW_LOCK_MAX_RESOURCE_VALUE);
		return -EINVAL;
	}

	/* Functions 0-5 use DRIVER_CONTROL_1..6; 6-7 use DRIVER_CONTROL_7..8 */
	if (func <= 5) {
		hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
	} else {
		hw_lock_control_reg =
				(MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
	}

	/* Validating that the resource is currently taken */
	lock_status = REG_RD(bp, hw_lock_control_reg);
	if (!(lock_status & resource_bit)) {
		DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
		   lock_status, resource_bit);
		return -EFAULT;
	}

	/* Writing the resource bit to the base register releases the lock */
	REG_WR(bp, hw_lock_control_reg, resource_bit);
	return 0;
}
1532
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07001533
/* Read the current value of one GPIO pin.
 *
 * @bp:       driver handle
 * @gpio_num: GPIO pin number (0..MISC_REGISTERS_GPIO_3)
 * @port:     port the pin logically belongs to; XORed with the NIG
 *            swap straps below so the correct physical pin is read
 *
 * Returns 1 if the pin is high, 0 if low, -EINVAL on a bad pin number.
 */
int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port)
{
	/* The GPIO should be swapped if swap register is set and active */
	int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
			 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
	int gpio_shift = gpio_num +
			(gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
	u32 gpio_mask = (1 << gpio_shift);
	u32 gpio_reg;
	int value;

	if (gpio_num > MISC_REGISTERS_GPIO_3) {
		BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
		return -EINVAL;
	}

	/* read GPIO value */
	gpio_reg = REG_RD(bp, MISC_REG_GPIO);

	/* get the requested pin value */
	if ((gpio_reg & gpio_mask) == gpio_mask)
		value = 1;
	else
		value = 0;

	DP(NETIF_MSG_LINK, "pin %d value 0x%x\n", gpio_num, value);

	return value;
}
1563
/* Drive one GPIO pin: output low, output high, or float (input).
 *
 * @bp:       driver handle
 * @gpio_num: GPIO pin number (0..MISC_REGISTERS_GPIO_3)
 * @mode:     MISC_REGISTERS_GPIO_OUTPUT_LOW / OUTPUT_HIGH / INPUT_HI_Z
 * @port:     port the pin logically belongs to (swap-corrected below)
 *
 * MISC_REG_GPIO is shared between ports, so access is serialized with
 * the GPIO HW lock.  Returns 0 on success, -EINVAL on a bad pin number.
 */
int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
{
	/* The GPIO should be swapped if swap register is set and active */
	int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
			 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
	int gpio_shift = gpio_num +
			(gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
	u32 gpio_mask = (1 << gpio_shift);
	u32 gpio_reg;

	if (gpio_num > MISC_REGISTERS_GPIO_3) {
		BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
		return -EINVAL;
	}

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
	/* read GPIO and mask except the float bits */
	gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);

	switch (mode) {
	case MISC_REGISTERS_GPIO_OUTPUT_LOW:
		DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output low\n",
		   gpio_num, gpio_shift);
		/* clear FLOAT and set CLR */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
		gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
		break;

	case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
		DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output high\n",
		   gpio_num, gpio_shift);
		/* clear FLOAT and set SET */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
		gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
		break;

	case MISC_REGISTERS_GPIO_INPUT_HI_Z:
		DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n",
		   gpio_num, gpio_shift);
		/* set FLOAT */
		gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
		break;

	default:
		/* unknown mode: leave the pin configuration untouched */
		break;
	}

	REG_WR(bp, MISC_REG_GPIO, gpio_reg);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);

	return 0;
}
1616
/* Arm or clear the interrupt "event" latch of one GPIO pin.
 *
 * @bp:       driver handle
 * @gpio_num: GPIO pin number (0..MISC_REGISTERS_GPIO_3)
 * @mode:     MISC_REGISTERS_GPIO_INT_OUTPUT_CLR or _SET
 * @port:     port the pin logically belongs to (swap-corrected below)
 *
 * Serialized with the GPIO HW lock since MISC_REG_GPIO_INT is shared.
 * Returns 0 on success, -EINVAL on a bad pin number.
 */
int bnx2x_set_gpio_int(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
{
	/* The GPIO should be swapped if swap register is set and active */
	int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
			 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
	int gpio_shift = gpio_num +
			(gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
	u32 gpio_mask = (1 << gpio_shift);
	u32 gpio_reg;

	if (gpio_num > MISC_REGISTERS_GPIO_3) {
		BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
		return -EINVAL;
	}

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
	/* read GPIO int */
	gpio_reg = REG_RD(bp, MISC_REG_GPIO_INT);

	switch (mode) {
	case MISC_REGISTERS_GPIO_INT_OUTPUT_CLR:
		DP(NETIF_MSG_LINK, "Clear GPIO INT %d (shift %d) -> "
				   "output low\n", gpio_num, gpio_shift);
		/* clear SET and set CLR */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
		gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
		break;

	case MISC_REGISTERS_GPIO_INT_OUTPUT_SET:
		DP(NETIF_MSG_LINK, "Set GPIO INT %d (shift %d) -> "
				   "output high\n", gpio_num, gpio_shift);
		/* clear CLR and set SET */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
		gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
		break;

	default:
		/* unknown mode: leave the latch untouched */
		break;
	}

	REG_WR(bp, MISC_REG_GPIO_INT, gpio_reg);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);

	return 0;
}
1662
/* Drive one shared (chip-global) SPIO pin: output low/high or float.
 *
 * @bp:       driver handle
 * @spio_num: SPIO pin number; only MISC_REGISTERS_SPIO_4..7 are
 *            software-controllable
 * @mode:     MISC_REGISTERS_SPIO_OUTPUT_LOW / OUTPUT_HIGH / INPUT_HI_Z
 *
 * Serialized with the SPIO HW lock since MISC_REG_SPIO is chip-global.
 * Returns 0 on success, -EINVAL on a bad pin number.
 */
static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
{
	u32 spio_mask = (1 << spio_num);
	u32 spio_reg;

	if ((spio_num < MISC_REGISTERS_SPIO_4) ||
	    (spio_num > MISC_REGISTERS_SPIO_7)) {
		BNX2X_ERR("Invalid SPIO %d\n", spio_num);
		return -EINVAL;
	}

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
	/* read SPIO and mask except the float bits */
	spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT);

	switch (mode) {
	case MISC_REGISTERS_SPIO_OUTPUT_LOW:
		DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num);
		/* clear FLOAT and set CLR */
		spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
		spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_CLR_POS);
		break;

	case MISC_REGISTERS_SPIO_OUTPUT_HIGH:
		DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num);
		/* clear FLOAT and set SET */
		spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
		spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_SET_POS);
		break;

	case MISC_REGISTERS_SPIO_INPUT_HI_Z:
		DP(NETIF_MSG_LINK, "Set SPIO %d -> input\n", spio_num);
		/* set FLOAT */
		spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
		break;

	default:
		/* unknown mode: leave the pin untouched */
		break;
	}

	REG_WR(bp, MISC_REG_SPIO, spio_reg);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);

	return 0;
}
1708
Yaniv Rosnera22f0782010-09-07 11:41:20 +00001709int bnx2x_get_link_cfg_idx(struct bnx2x *bp)
1710{
1711 u32 sel_phy_idx = 0;
1712 if (bp->link_vars.link_up) {
1713 sel_phy_idx = EXT_PHY1;
1714 /* In case link is SERDES, check if the EXT_PHY2 is the one */
1715 if ((bp->link_vars.link_status & LINK_STATUS_SERDES_LINK) &&
1716 (bp->link_params.phy[EXT_PHY2].supported & SUPPORTED_FIBRE))
1717 sel_phy_idx = EXT_PHY2;
1718 } else {
1719
1720 switch (bnx2x_phy_selection(&bp->link_params)) {
1721 case PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT:
1722 case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY:
1723 case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY:
1724 sel_phy_idx = EXT_PHY1;
1725 break;
1726 case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY:
1727 case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY:
1728 sel_phy_idx = EXT_PHY2;
1729 break;
1730 }
1731 }
1732 /*
1733 * The selected actived PHY is always after swapping (in case PHY
1734 * swapping is enabled). So when swapping is enabled, we need to reverse
1735 * the configuration
1736 */
1737
1738 if (bp->link_params.multi_phy_config &
1739 PORT_HW_CFG_PHY_SWAPPED_ENABLED) {
1740 if (sel_phy_idx == EXT_PHY1)
1741 sel_phy_idx = EXT_PHY2;
1742 else if (sel_phy_idx == EXT_PHY2)
1743 sel_phy_idx = EXT_PHY1;
1744 }
1745 return LINK_CONFIG_IDX(sel_phy_idx);
1746}
1747
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001748void bnx2x_calc_fc_adv(struct bnx2x *bp)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001749{
Yaniv Rosnera22f0782010-09-07 11:41:20 +00001750 u8 cfg_idx = bnx2x_get_link_cfg_idx(bp);
Eilon Greensteinad33ea32009-01-14 21:24:57 -08001751 switch (bp->link_vars.ieee_fc &
1752 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) {
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07001753 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
Yaniv Rosnera22f0782010-09-07 11:41:20 +00001754 bp->port.advertising[cfg_idx] &= ~(ADVERTISED_Asym_Pause |
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00001755 ADVERTISED_Pause);
Eliezer Tamirf1410642008-02-28 11:51:50 -08001756 break;
Eilon Greenstein356e2382009-02-12 08:38:32 +00001757
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07001758 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
Yaniv Rosnera22f0782010-09-07 11:41:20 +00001759 bp->port.advertising[cfg_idx] |= (ADVERTISED_Asym_Pause |
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00001760 ADVERTISED_Pause);
Eliezer Tamirf1410642008-02-28 11:51:50 -08001761 break;
Eilon Greenstein356e2382009-02-12 08:38:32 +00001762
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07001763 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
Yaniv Rosnera22f0782010-09-07 11:41:20 +00001764 bp->port.advertising[cfg_idx] |= ADVERTISED_Asym_Pause;
Eliezer Tamirf1410642008-02-28 11:51:50 -08001765 break;
Eilon Greenstein356e2382009-02-12 08:38:32 +00001766
Eliezer Tamirf1410642008-02-28 11:51:50 -08001767 default:
Yaniv Rosnera22f0782010-09-07 11:41:20 +00001768 bp->port.advertising[cfg_idx] &= ~(ADVERTISED_Asym_Pause |
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00001769 ADVERTISED_Pause);
Eliezer Tamirf1410642008-02-28 11:51:50 -08001770 break;
1771 }
1772}
1773
/* Bring the PHY/link up for the first time after load.
 *
 * @bp:        driver handle
 * @load_mode: LOAD_* mode; LOAD_DIAG forces 10G XGXS loopback
 *
 * Returns the bnx2x_phy_init() result, or -EINVAL when the bootcode
 * (MCP) is absent, in which case the link cannot be initialized.
 */
u8 bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode)
{
	if (!BP_NOMCP(bp)) {
		u8 rc;
		int cfx_idx = bnx2x_get_link_cfg_idx(bp);
		u16 req_line_speed = bp->link_params.req_line_speed[cfx_idx];
		/* Initialize link parameters structure variables */
		/* It is recommended to turn off RX FC for jumbo frames
		   for better performance */
		if ((CHIP_IS_E1x(bp)) && (bp->dev->mtu > 5000))
			bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_TX;
		else
			bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;

		bnx2x_acquire_phy_lock(bp);

		if (load_mode == LOAD_DIAG) {
			bp->link_params.loopback_mode = LOOPBACK_XGXS;
			bp->link_params.req_line_speed[cfx_idx] = SPEED_10000;
		}

		rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);

		bnx2x_release_phy_lock(bp);

		bnx2x_calc_fc_adv(bp);

		if (CHIP_REV_IS_SLOW(bp) && bp->link_vars.link_up) {
			bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
			bnx2x_link_report(bp);
		}
		/* restore the requested speed that LOAD_DIAG may have
		   overridden above */
		bp->link_params.req_line_speed[cfx_idx] = req_line_speed;
		return rc;
	}
	BNX2X_ERR("Bootcode is missing - can not initialize link\n");
	return -EINVAL;
}
1811
/* Reconfigure the link (e.g. after a settings change): reset the PHY,
 * re-initialize it with the current link_params and refresh the
 * advertised flow-control bits.  Logs an error and does nothing when
 * the bootcode (MCP) is absent.
 */
void bnx2x_link_set(struct bnx2x *bp)
{
	if (!BP_NOMCP(bp)) {
		bnx2x_acquire_phy_lock(bp);
		bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1);
		bnx2x_phy_init(&bp->link_params, &bp->link_vars);
		bnx2x_release_phy_lock(bp);

		bnx2x_calc_fc_adv(bp);
	} else
		BNX2X_ERR("Bootcode is missing - can not set link\n");
}
1824
/* Take the link down via a PHY reset (under the PHY lock).  Logs an
 * error and does nothing when the bootcode (MCP) is absent.
 */
static void bnx2x__link_reset(struct bnx2x *bp)
{
	if (!BP_NOMCP(bp)) {
		bnx2x_acquire_phy_lock(bp);
		bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1);
		bnx2x_release_phy_lock(bp);
	} else
		BNX2X_ERR("Bootcode is missing - can not reset link\n");
}
1834
/* Run the link self-test under the PHY lock.
 *
 * @is_serdes: non-zero to test the SERDES link
 *
 * Returns the bnx2x_test_link() result, or 0 when the bootcode (MCP)
 * is absent (the test cannot run).
 */
u8 bnx2x_link_test(struct bnx2x *bp, u8 is_serdes)
{
	u8 rc = 0;

	if (!BP_NOMCP(bp)) {
		bnx2x_acquire_phy_lock(bp);
		rc = bnx2x_test_link(&bp->link_params, &bp->link_vars,
				     is_serdes);
		bnx2x_release_phy_lock(bp);
	} else
		BNX2X_ERR("Bootcode is missing - can not test link\n");

	return rc;
}
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001849
/* Initialize the per-port rate-shaping and fairness parameters
 * (bp->cmng.rs_vars / bp->cmng.fair_vars) from the current line speed.
 */
static void bnx2x_init_port_minmax(struct bnx2x *bp)
{
	u32 r_param = bp->link_vars.line_speed / 8;
	u32 fair_periodic_timeout_usec;
	u32 t_fair;

	memset(&(bp->cmng.rs_vars), 0,
	       sizeof(struct rate_shaping_vars_per_port));
	memset(&(bp->cmng.fair_vars), 0, sizeof(struct fairness_vars_per_port));

	/* 100 usec in SDM ticks = 25 since each tick is 4 usec */
	bp->cmng.rs_vars.rs_periodic_timeout = RS_PERIODIC_TIMEOUT_USEC / 4;

	/* this is the threshold below which no timer arming will occur
	   1.25 coefficient is for the threshold to be a little bigger
	   than the real time, to compensate for timer in-accuracy */
	bp->cmng.rs_vars.rs_threshold =
				(RS_PERIODIC_TIMEOUT_USEC * r_param * 5) / 4;

	/* resolution of fairness timer */
	fair_periodic_timeout_usec = QM_ARB_BYTES / r_param;
	/* for 10G it is 1000usec. for 1G it is 10000usec. */
	t_fair = T_FAIR_COEF / bp->link_vars.line_speed;

	/* this is the threshold below which we won't arm the timer anymore */
	bp->cmng.fair_vars.fair_threshold = QM_ARB_BYTES;

	/* we multiply by 1e3/8 to get bytes/msec.
	   We don't want the credits to pass a credit
	   of the t_fair*FAIR_MEM (algorithm resolution) */
	bp->cmng.fair_vars.upper_bound = r_param * t_fair * FAIR_MEM;
	/* since each tick is 4 usec */
	bp->cmng.fair_vars.fairness_timeout = fair_periodic_timeout_usec / 4;
}
1884
Eilon Greenstein2691d512009-08-12 08:22:08 +00001885/* Calculates the sum of vn_min_rates.
1886 It's needed for further normalizing of the min_rates.
1887 Returns:
1888 sum of vn_min_rates.
1889 or
1890 0 - if all the min_rates are 0.
1891 In the later case fainess algorithm should be deactivated.
1892 If not all min_rates are zero then those that are zeroes will be set to 1.
1893 */
1894static void bnx2x_calc_vn_weight_sum(struct bnx2x *bp)
1895{
1896 int all_zero = 1;
Eilon Greenstein2691d512009-08-12 08:22:08 +00001897 int vn;
1898
1899 bp->vn_weight_sum = 0;
1900 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00001901 u32 vn_cfg = bp->mf_config[vn];
Eilon Greenstein2691d512009-08-12 08:22:08 +00001902 u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
1903 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
1904
1905 /* Skip hidden vns */
1906 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE)
1907 continue;
1908
1909 /* If min rate is zero - set it to 1 */
1910 if (!vn_min_rate)
1911 vn_min_rate = DEF_MIN_RATE;
1912 else
1913 all_zero = 0;
1914
1915 bp->vn_weight_sum += vn_min_rate;
1916 }
1917
1918 /* ... only if all min rates are zeros - disable fairness */
Eilon Greensteinb015e3d2009-10-15 00:17:20 -07001919 if (all_zero) {
1920 bp->cmng.flags.cmng_enables &=
1921 ~CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
1922 DP(NETIF_MSG_IFUP, "All MIN values are zeroes"
1923 " fairness will be disabled\n");
1924 } else
1925 bp->cmng.flags.cmng_enables |=
1926 CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
Eilon Greenstein2691d512009-08-12 08:22:08 +00001927}
1928
/* Program the per-VN rate-shaping (max rate) and fairness (min rate)
 * parameters into XSTORM internal memory for VN @vn, based on the
 * cached MF configuration in bp->mf_config[vn].
 */
static void bnx2x_init_vn_minmax(struct bnx2x *bp, int vn)
{
	struct rate_shaping_vars_per_vn m_rs_vn;
	struct fairness_vars_per_vn m_fair_vn;
	u32 vn_cfg = bp->mf_config[vn];
	int func = 2*vn + BP_PORT(bp);
	u16 vn_min_rate, vn_max_rate;
	int i;

	/* If function is hidden - set min and max to zeroes */
	if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
		vn_min_rate = 0;
		vn_max_rate = 0;

	} else {
		vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
				FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
		/* If min rate is zero - set it to 1 */
		if (bp->vn_weight_sum && (vn_min_rate == 0))
			vn_min_rate = DEF_MIN_RATE;
		vn_max_rate = ((vn_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
				FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
	}

	DP(NETIF_MSG_IFUP,
	   "func %d: vn_min_rate %d vn_max_rate %d vn_weight_sum %d\n",
	   func, vn_min_rate, vn_max_rate, bp->vn_weight_sum);

	memset(&m_rs_vn, 0, sizeof(struct rate_shaping_vars_per_vn));
	memset(&m_fair_vn, 0, sizeof(struct fairness_vars_per_vn));

	/* global vn counter - maximal Mbps for this vn */
	m_rs_vn.vn_counter.rate = vn_max_rate;

	/* quota - number of bytes transmitted in this period */
	m_rs_vn.vn_counter.quota =
				(vn_max_rate * RS_PERIODIC_TIMEOUT_USEC) / 8;

	if (bp->vn_weight_sum) {
		/* credit for each period of the fairness algorithm:
		   number of bytes in T_FAIR (the vn share the port rate).
		   vn_weight_sum should not be larger than 10000, thus
		   T_FAIR_COEF / (8 * vn_weight_sum) will always be greater
		   than zero */
		m_fair_vn.vn_credit_delta =
			max_t(u32, (vn_min_rate * (T_FAIR_COEF /
						   (8 * bp->vn_weight_sum))),
			      (bp->cmng.fair_vars.fair_threshold * 2));
		DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta %d\n",
		   m_fair_vn.vn_credit_delta);
	}

	/* Store it to internal memory (copied word by word) */
	for (i = 0; i < sizeof(struct rate_shaping_vars_per_vn)/4; i++)
		REG_WR(bp, BAR_XSTRORM_INTMEM +
		       XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func) + i * 4,
		       ((u32 *)(&m_rs_vn))[i]);

	for (i = 0; i < sizeof(struct fairness_vars_per_vn)/4; i++)
		REG_WR(bp, BAR_XSTRORM_INTMEM +
		       XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func) + i * 4,
		       ((u32 *)(&m_fair_vn))[i]);
}
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00001992
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001993static int bnx2x_get_cmng_fns_mode(struct bnx2x *bp)
1994{
1995 if (CHIP_REV_IS_SLOW(bp))
1996 return CMNG_FNS_NONE;
Dmitry Kravkovfb3bff12010-10-06 03:26:40 +00001997 if (IS_MF(bp))
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001998 return CMNG_FNS_MINMAX;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07001999
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002000 return CMNG_FNS_NONE;
2001}
2002
/* Cache each VN's multi-function configuration word from the shmem
 * MF config area into bp->mf_config[].  When the MCP is absent
 * nothing is read and bp->mf_config[] is left as-is.
 */
static void bnx2x_read_mf_cfg(struct bnx2x *bp)
{
	int vn;

	if (BP_NOMCP(bp))
		return; /* what should be the default value in this case */

	for (vn = VN_0; vn < E1HVN_MAX; vn++) {
		/* absolute function number of this vn on our port */
		int /*abs*/func = 2*vn + BP_PORT(bp);

		bp->mf_config[vn] =
			MF_CFG_RD(bp, func_mf_config[func].config);
	}
}
2016
/* (Re)initialize congestion management.
 *
 * @read_cfg:  re-read the MF configuration from shmem first
 * @cmng_type: CMNG_FNS_MINMAX enables rate shaping + fairness;
 *             any other value leaves both disabled
 */
static void bnx2x_cmng_fns_init(struct bnx2x *bp, u8 read_cfg, u8 cmng_type)
{

	if (cmng_type == CMNG_FNS_MINMAX) {
		int vn;

		/* clear cmng_enables */
		bp->cmng.flags.cmng_enables = 0;

		/* read mf conf from shmem */
		if (read_cfg)
			bnx2x_read_mf_cfg(bp);

		/* Init rate shaping and fairness contexts */
		bnx2x_init_port_minmax(bp);

		/* vn_weight_sum and enable fairness if not 0 */
		bnx2x_calc_vn_weight_sum(bp);

		/* calculate and set min-max rate for each vn */
		for (vn = VN_0; vn < E1HVN_MAX; vn++)
			bnx2x_init_vn_minmax(bp, vn);

		/* always enable rate shaping and fairness */
		bp->cmng.flags.cmng_enables |=
					CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN;
		if (!bp->vn_weight_sum)
			DP(NETIF_MSG_IFUP, "All MIN values are zeroes"
			   " fairness will be disabled\n");
		return;
	}

	/* rate shaping and fairness are disabled */
	DP(NETIF_MSG_IFUP,
	   "rate shaping and fairness are disabled\n");
}
2053
/* Raise a "link changed" general attention towards the other driver
 * instances (VNs) sharing this port so they refresh their link state;
 * our own VN is skipped.
 */
static inline void bnx2x_link_sync_notify(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int func;
	int vn;

	/* Set the attention towards other drivers on the same port */
	for (vn = VN_0; vn < E1HVN_MAX; vn++) {
		if (vn == BP_E1HVN(bp))
			continue;

		func = ((vn << 1) | port);
		REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
		       (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
	}
}
Eilon Greenstein8a1c38d2009-02-12 08:36:40 +00002070
/* This function is called upon link interrupt: re-evaluate the link,
 * update dropless-FC and BMAC statistics state, report a change to the
 * stack, notify sibling VNs (MF mode) and re-program congestion
 * management for the new line speed.
 */
static void bnx2x_link_attn(struct bnx2x *bp)
{
	u32 prev_link_status = bp->link_vars.link_status;
	/* Make sure that we are synced with the current statistics */
	bnx2x_stats_handle(bp, STATS_EVENT_STOP);

	bnx2x_link_update(&bp->link_params, &bp->link_vars);

	if (bp->link_vars.link_up) {

		/* dropless flow control */
		if (!CHIP_IS_E1(bp) && bp->dropless_fc) {
			int port = BP_PORT(bp);
			u32 pause_enabled = 0;

			if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
				pause_enabled = 1;

			/* tell the USTORM firmware whether TX pause is on */
			REG_WR(bp, BAR_USTRORM_INTMEM +
			       USTORM_ETH_PAUSE_ENABLED_OFFSET(port),
			       pause_enabled);
		}

		if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
			struct host_port_stats *pstats;

			pstats = bnx2x_sp(bp, port_stats);
			/* reset old bmac stats */
			memset(&(pstats->mac_stx[0]), 0,
			       sizeof(struct mac_stx));
		}
		if (bp->state == BNX2X_STATE_OPEN)
			bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
	}

	/* indicate link status only if link status actually changed */
	if (prev_link_status != bp->link_vars.link_status)
		bnx2x_link_report(bp);

	/* in multi-function mode let the sibling VNs on this port know */
	if (IS_MF(bp))
		bnx2x_link_sync_notify(bp);

	if (bp->link_vars.link_up && bp->link_vars.line_speed) {
		int cmng_fns = bnx2x_get_cmng_fns_mode(bp);

		/* re-init congestion management for the new line speed */
		if (cmng_fns != CMNG_FNS_NONE) {
			bnx2x_cmng_fns_init(bp, false, cmng_fns);
			storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp));
		} else
			/* rate shaping and fairness are disabled */
			DP(NETIF_MSG_IFUP,
			   "single function mode without fairness\n");
	}
}
2126
/* Refresh the cached link state and report it.  No-op unless the
 * device is fully up and the function is not disabled in MF mode.
 */
void bnx2x__link_status_update(struct bnx2x *bp)
{
	if ((bp->state != BNX2X_STATE_OPEN) || (bp->flags & MF_FUNC_DIS))
		return;

	bnx2x_link_status_update(&bp->link_params, &bp->link_vars);

	if (bp->link_vars.link_up)
		bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
	else
		bnx2x_stats_handle(bp, STATS_EVENT_STOP);

	/* the link status update could be the result of a DCC event
	   hence re-read the shmem mf configuration */
	bnx2x_read_mf_cfg(bp);

	/* indicate link status */
	bnx2x_link_report(bp);
}
2146
/* Take over the Port Management Function (PMF) role: mark ourselves
 * PMF, enable NIG link attentions for our VN and kick the statistics
 * state machine.
 */
static void bnx2x_pmf_update(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 val;

	bp->port.pmf = 1;
	DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);

	/* enable nig attention */
	val = (0xff0f | (1 << (BP_E1HVN(bp) + 4)));
	if (bp->common.int_block == INT_BLOCK_HC) {
		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
	} else if (CHIP_IS_E2(bp)) {
		REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, val);
		REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, val);
	}

	bnx2x_stats_handle(bp, STATS_EVENT_PMF);
}
2167
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07002168/* end of Link */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002169
2170/* slow path */
2171
2172/*
2173 * General service functions
2174 */
2175
/* send the MCP a request, block until there is a reply.
 *
 * A sequence number embedded in the mailbox header matches replies to
 * requests.  Returns the FW response code (FW_MSG_CODE_MASK bits) or
 * 0 when the FW fails to respond within ~5 seconds.
 */
u32 bnx2x_fw_command(struct bnx2x *bp, u32 command, u32 param)
{
	int mb_idx = BP_FW_MB_IDX(bp);
	u32 seq = ++bp->fw_seq;
	u32 rc = 0;
	u32 cnt = 1;
	/* emulation/FPGA (slow) chips get a longer polling interval */
	u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;

	mutex_lock(&bp->fw_mb_mutex);
	SHMEM_WR(bp, func_mb[mb_idx].drv_mb_param, param);
	SHMEM_WR(bp, func_mb[mb_idx].drv_mb_header, (command | seq));

	DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq));

	do {
		/* let the FW do it's magic ... */
		msleep(delay);

		rc = SHMEM_RD(bp, func_mb[mb_idx].fw_mb_header);

		/* Give the FW up to 5 second (500*10ms) */
	} while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 500));

	DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n",
	   cnt*delay, rc, seq);

	/* is this a reply to our command? */
	if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK))
		rc &= FW_MSG_CODE_MASK;
	else {
		/* FW BUG! */
		BNX2X_ERR("FW failed to respond!\n");
		bnx2x_fw_dump(bp);
		rc = 0;
	}
	mutex_unlock(&bp->fw_mb_mutex);

	return rc;
}
2216
/* must be called under rtnl_lock.
 *
 * Translate the BNX2X_ACCEPT_* / BNX2X_PROMISCUOUS_MODE bits in
 * @filters into the per-client drop/accept bitmasks kept in
 * bp->mac_filters, setting or clearing client @cl_id's bit in each.
 */
void bnx2x_rxq_set_mac_filters(struct bnx2x *bp, u16 cl_id, u32 filters)
{
	u32 mask = (1 << cl_id);

	/* initial setting is BNX2X_ACCEPT_NONE */
	u8 drop_all_ucast = 1, drop_all_bcast = 1, drop_all_mcast = 1;
	u8 accp_all_ucast = 0, accp_all_bcast = 0, accp_all_mcast = 0;
	u8 unmatched_unicast = 0;

	if (filters & BNX2X_PROMISCUOUS_MODE) {
		/* promiscuous - accept all, drop none */
		drop_all_ucast = drop_all_bcast = drop_all_mcast = 0;
		accp_all_ucast = accp_all_bcast = accp_all_mcast = 1;
	}
	if (filters & BNX2X_ACCEPT_UNICAST) {
		/* accept matched ucast */
		drop_all_ucast = 0;
	}
	if (filters & BNX2X_ACCEPT_MULTICAST) {
		/* accept matched mcast */
		drop_all_mcast = 0;
	}
	if (filters & BNX2X_ACCEPT_ALL_UNICAST) {
		/* accept all ucast */
		drop_all_ucast = 0;
		accp_all_ucast = 1;
	}
	if (filters & BNX2X_ACCEPT_ALL_MULTICAST) {
		/* accept all mcast */
		drop_all_mcast = 0;
		accp_all_mcast = 1;
	}
	if (filters & BNX2X_ACCEPT_BROADCAST) {
		/* accept (all) bcast */
		drop_all_bcast = 0;
		accp_all_bcast = 1;
	}

	/* for each mask, set or clear this client's bit per the policy
	   resolved above */
	bp->mac_filters.ucast_drop_all = drop_all_ucast ?
		bp->mac_filters.ucast_drop_all | mask :
		bp->mac_filters.ucast_drop_all & ~mask;

	bp->mac_filters.mcast_drop_all = drop_all_mcast ?
		bp->mac_filters.mcast_drop_all | mask :
		bp->mac_filters.mcast_drop_all & ~mask;

	bp->mac_filters.bcast_drop_all = drop_all_bcast ?
		bp->mac_filters.bcast_drop_all | mask :
		bp->mac_filters.bcast_drop_all & ~mask;

	bp->mac_filters.ucast_accept_all = accp_all_ucast ?
		bp->mac_filters.ucast_accept_all | mask :
		bp->mac_filters.ucast_accept_all & ~mask;

	bp->mac_filters.mcast_accept_all = accp_all_mcast ?
		bp->mac_filters.mcast_accept_all | mask :
		bp->mac_filters.mcast_accept_all & ~mask;

	bp->mac_filters.bcast_accept_all = accp_all_bcast ?
		bp->mac_filters.bcast_accept_all | mask :
		bp->mac_filters.bcast_accept_all & ~mask;

	/* never set by any filter flag above; always clears the bit */
	bp->mac_filters.unmatched_unicast = unmatched_unicast ?
		bp->mac_filters.unmatched_unicast | mask :
		bp->mac_filters.unmatched_unicast & ~mask;
}
2284
2285void bnx2x_func_init(struct bnx2x *bp, struct bnx2x_func_init_params *p)
2286{
Dmitry Kravkov030f3352010-10-17 23:08:53 +00002287 struct tstorm_eth_function_common_config tcfg = {0};
2288 u16 rss_flgs;
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002289
Dmitry Kravkov030f3352010-10-17 23:08:53 +00002290 /* tpa */
2291 if (p->func_flgs & FUNC_FLG_TPA)
2292 tcfg.config_flags |=
2293 TSTORM_ETH_FUNCTION_COMMON_CONFIG_ENABLE_TPA;
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002294
Dmitry Kravkov030f3352010-10-17 23:08:53 +00002295 /* set rss flags */
2296 rss_flgs = (p->rss->mode <<
2297 TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_MODE_SHIFT);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002298
Dmitry Kravkov030f3352010-10-17 23:08:53 +00002299 if (p->rss->cap & RSS_IPV4_CAP)
2300 rss_flgs |= RSS_IPV4_CAP_MASK;
2301 if (p->rss->cap & RSS_IPV4_TCP_CAP)
2302 rss_flgs |= RSS_IPV4_TCP_CAP_MASK;
2303 if (p->rss->cap & RSS_IPV6_CAP)
2304 rss_flgs |= RSS_IPV6_CAP_MASK;
2305 if (p->rss->cap & RSS_IPV6_TCP_CAP)
2306 rss_flgs |= RSS_IPV6_TCP_CAP_MASK;
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002307
Dmitry Kravkov030f3352010-10-17 23:08:53 +00002308 tcfg.config_flags |= rss_flgs;
2309 tcfg.rss_result_mask = p->rss->result_mask;
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002310
Dmitry Kravkov030f3352010-10-17 23:08:53 +00002311 storm_memset_func_cfg(bp, &tcfg, p->func_id);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002312
2313 /* Enable the function in the FW */
2314 storm_memset_vf_to_pf(bp, p->func_id, p->pf_id);
2315 storm_memset_func_en(bp, p->func_id, 1);
2316
2317 /* statistics */
2318 if (p->func_flgs & FUNC_FLG_STATS) {
2319 struct stats_indication_flags stats_flags = {0};
2320 stats_flags.collect_eth = 1;
2321
2322 storm_memset_xstats_flags(bp, &stats_flags, p->func_id);
2323 storm_memset_xstats_addr(bp, p->fw_stat_map, p->func_id);
2324
2325 storm_memset_tstats_flags(bp, &stats_flags, p->func_id);
2326 storm_memset_tstats_addr(bp, p->fw_stat_map, p->func_id);
2327
2328 storm_memset_ustats_flags(bp, &stats_flags, p->func_id);
2329 storm_memset_ustats_addr(bp, p->fw_stat_map, p->func_id);
2330
2331 storm_memset_cstats_flags(bp, &stats_flags, p->func_id);
2332 storm_memset_cstats_addr(bp, p->fw_stat_map, p->func_id);
2333 }
2334
2335 /* spq */
2336 if (p->func_flgs & FUNC_FLG_SPQ) {
2337 storm_memset_spq_addr(bp, p->spq_map, p->func_id);
2338 REG_WR(bp, XSEM_REG_FAST_MEMORY +
2339 XSTORM_SPQ_PROD_OFFSET(p->func_id), p->spq_prod);
2340 }
2341}
2342
2343static inline u16 bnx2x_get_cl_flags(struct bnx2x *bp,
2344 struct bnx2x_fastpath *fp)
2345{
2346 u16 flags = 0;
2347
2348 /* calculate queue flags */
2349 flags |= QUEUE_FLG_CACHE_ALIGN;
2350 flags |= QUEUE_FLG_HC;
Dmitry Kravkovfb3bff12010-10-06 03:26:40 +00002351 flags |= IS_MF(bp) ? QUEUE_FLG_OV : 0;
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002352
2353#ifdef BCM_VLAN
2354 flags |= QUEUE_FLG_VLAN;
2355 DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
2356#endif
2357
2358 if (!fp->disable_tpa)
2359 flags |= QUEUE_FLG_TPA;
2360
2361 flags |= QUEUE_FLG_STATS;
2362
2363 return flags;
2364}
2365
2366static void bnx2x_pf_rx_cl_prep(struct bnx2x *bp,
2367 struct bnx2x_fastpath *fp, struct rxq_pause_params *pause,
2368 struct bnx2x_rxq_init_params *rxq_init)
2369{
2370 u16 max_sge = 0;
2371 u16 sge_sz = 0;
2372 u16 tpa_agg_size = 0;
2373
2374 /* calculate queue flags */
2375 u16 flags = bnx2x_get_cl_flags(bp, fp);
2376
2377 if (!fp->disable_tpa) {
2378 pause->sge_th_hi = 250;
2379 pause->sge_th_lo = 150;
2380 tpa_agg_size = min_t(u32,
2381 (min_t(u32, 8, MAX_SKB_FRAGS) *
2382 SGE_PAGE_SIZE * PAGES_PER_SGE), 0xffff);
2383 max_sge = SGE_PAGE_ALIGN(bp->dev->mtu) >>
2384 SGE_PAGE_SHIFT;
2385 max_sge = ((max_sge + PAGES_PER_SGE - 1) &
2386 (~(PAGES_PER_SGE-1))) >> PAGES_PER_SGE_SHIFT;
2387 sge_sz = (u16)min_t(u32, SGE_PAGE_SIZE * PAGES_PER_SGE,
2388 0xffff);
2389 }
2390
2391 /* pause - not for e1 */
2392 if (!CHIP_IS_E1(bp)) {
2393 pause->bd_th_hi = 350;
2394 pause->bd_th_lo = 250;
2395 pause->rcq_th_hi = 350;
2396 pause->rcq_th_lo = 250;
2397 pause->sge_th_hi = 0;
2398 pause->sge_th_lo = 0;
2399 pause->pri_map = 1;
2400 }
2401
2402 /* rxq setup */
2403 rxq_init->flags = flags;
2404 rxq_init->cxt = &bp->context.vcxt[fp->cid].eth;
2405 rxq_init->dscr_map = fp->rx_desc_mapping;
2406 rxq_init->sge_map = fp->rx_sge_mapping;
2407 rxq_init->rcq_map = fp->rx_comp_mapping;
2408 rxq_init->rcq_np_map = fp->rx_comp_mapping + BCM_PAGE_SIZE;
2409 rxq_init->mtu = bp->dev->mtu;
2410 rxq_init->buf_sz = bp->rx_buf_size;
2411 rxq_init->cl_qzone_id = fp->cl_qzone_id;
2412 rxq_init->cl_id = fp->cl_id;
2413 rxq_init->spcl_id = fp->cl_id;
2414 rxq_init->stat_id = fp->cl_id;
2415 rxq_init->tpa_agg_sz = tpa_agg_size;
2416 rxq_init->sge_buf_sz = sge_sz;
2417 rxq_init->max_sges_pkt = max_sge;
2418 rxq_init->cache_line_log = BNX2X_RX_ALIGN_SHIFT;
2419 rxq_init->fw_sb_id = fp->fw_sb_id;
2420
2421 rxq_init->sb_cq_index = U_SB_ETH_RX_CQ_INDEX;
2422
2423 rxq_init->cid = HW_CID(bp, fp->cid);
2424
2425 rxq_init->hc_rate = bp->rx_ticks ? (1000000 / bp->rx_ticks) : 0;
2426}
2427
2428static void bnx2x_pf_tx_cl_prep(struct bnx2x *bp,
2429 struct bnx2x_fastpath *fp, struct bnx2x_txq_init_params *txq_init)
2430{
2431 u16 flags = bnx2x_get_cl_flags(bp, fp);
2432
2433 txq_init->flags = flags;
2434 txq_init->cxt = &bp->context.vcxt[fp->cid].eth;
2435 txq_init->dscr_map = fp->tx_desc_mapping;
2436 txq_init->stat_id = fp->cl_id;
2437 txq_init->cid = HW_CID(bp, fp->cid);
2438 txq_init->sb_cq_index = C_SB_ETH_TX_CQ_INDEX;
2439 txq_init->traffic_type = LLFC_TRAFFIC_TYPE_NW;
2440 txq_init->fw_sb_id = fp->fw_sb_id;
2441 txq_init->hc_rate = bp->tx_ticks ? (1000000 / bp->tx_ticks) : 0;
2442}
2443
/* PF-level FW initialization: OV, IGU statistics, function config (RSS/TPA/
 * stats/SPQ), congestion management defaults, initial Rx mode and the event
 * queue. Runs with no active link yet.
 */
void bnx2x_pf_init(struct bnx2x *bp)
{
	struct bnx2x_func_init_params func_init = {0};
	struct bnx2x_rss_params rss = {0};
	struct event_ring_data eq_data = { {0} };
	u16 flags;

	/* pf specific setups */
	if (!CHIP_IS_E1(bp))
		storm_memset_ov(bp, bp->mf_ov, BP_FUNC(bp));

	if (CHIP_IS_E2(bp)) {
		/* reset IGU PF statistics: MSIX + ATTN */
		/* PF */
		REG_WR(bp, IGU_REG_STATISTIC_NUM_MESSAGE_SENT +
			   BNX2X_IGU_STAS_MSG_VF_CNT*4 +
			   (CHIP_MODE_IS_4_PORT(bp) ?
				BP_FUNC(bp) : BP_VN(bp))*4, 0);
		/* ATTN */
		REG_WR(bp, IGU_REG_STATISTIC_NUM_MESSAGE_SENT +
			   BNX2X_IGU_STAS_MSG_VF_CNT*4 +
			   BNX2X_IGU_STAS_MSG_PF_CNT*4 +
			   (CHIP_MODE_IS_4_PORT(bp) ?
				BP_FUNC(bp) : BP_VN(bp))*4, 0);
	}

	/* function setup flags */
	flags = (FUNC_FLG_STATS | FUNC_FLG_LEADING | FUNC_FLG_SPQ);

	/* on E1x TPA depends on the driver flag; E2 always enables it */
	if (CHIP_IS_E1x(bp))
		flags |= (bp->flags & TPA_ENABLE_FLAG) ? FUNC_FLG_TPA : 0;
	else
		flags |= FUNC_FLG_TPA;

	/* function setup */

	/**
	 * Although RSS is meaningless when there is a single HW queue we
	 * still need it enabled in order to have HW Rx hash generated.
	 */
	rss.cap = (RSS_IPV4_CAP | RSS_IPV4_TCP_CAP |
		   RSS_IPV6_CAP | RSS_IPV6_TCP_CAP);
	rss.mode = bp->multi_mode;
	rss.result_mask = MULTI_MASK;
	func_init.rss = &rss;

	func_init.func_flgs = flags;
	func_init.pf_id = BP_FUNC(bp);
	func_init.func_id = BP_FUNC(bp);
	func_init.fw_stat_map = bnx2x_sp_mapping(bp, fw_stats);
	func_init.spq_map = bp->spq_mapping;
	func_init.spq_prod = bp->spq_prod_idx;

	bnx2x_func_init(bp, &func_init);

	memset(&(bp->cmng), 0, sizeof(struct cmng_struct_per_port));

	/*
	 * Congestion management values depend on the link rate.
	 * There is no active link so the initial link rate is set to 10 Gbps.
	 * When the link comes up the congestion management values are
	 * re-calculated according to the actual link rate.
	 */
	bp->link_vars.line_speed = SPEED_10000;
	bnx2x_cmng_fns_init(bp, true, bnx2x_get_cmng_fns_mode(bp));

	/* Only the PMF sets the HW */
	if (bp->port.pmf)
		storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp));

	/* no rx until link is up */
	bp->rx_mode = BNX2X_RX_MODE_NONE;
	bnx2x_set_storm_rx_mode(bp);

	/* init Event Queue */
	eq_data.base_addr.hi = U64_HI(bp->eq_mapping);
	eq_data.base_addr.lo = U64_LO(bp->eq_mapping);
	eq_data.producer = bp->eq_prod;
	eq_data.index_id = HC_SP_INDEX_EQ_CONS;
	eq_data.sb_id = DEF_SB_ID;
	storm_memset_eq_data(bp, &eq_data, BP_FUNC(bp));
}
2526
2527
/* Quiesce an E1H function: stop the Tx queues, disable the function in the
 * NIG and report the carrier as down.
 */
static void bnx2x_e1h_disable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);

	/* stop the stack from queueing more frames first */
	netif_tx_disable(bp->dev);

	/* disable this function in the NIG */
	REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);

	netif_carrier_off(bp->dev);
}
2538
/* Re-enable an E1H function: turn the function back on in the NIG and wake
 * the Tx queues. The carrier state is left to the link-state check.
 */
static void bnx2x_e1h_enable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);

	REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);

	/* Tx queue should be only reenabled */
	netif_tx_wake_all_queues(bp->dev);

	/*
	 * Should not call netif_carrier_on since it will be called if the link
	 * is up when checking for link state
	 */
}
2553
/* Handle a DCC (Dynamic Configuration Change) notification from the MCP:
 * enable/disable this PF and/or re-do min-max bandwidth allocation, then
 * acknowledge the event back to the MCP. Bits handled are cleared from
 * dcc_event; any leftover bits cause a DCC_FAILURE report.
 */
static void bnx2x_dcc_event(struct bnx2x *bp, u32 dcc_event)
{
	DP(BNX2X_MSG_MCP, "dcc_event 0x%x\n", dcc_event);

	if (dcc_event & DRV_STATUS_DCC_DISABLE_ENABLE_PF) {

		/*
		 * This is the only place besides the function initialization
		 * where the bp->flags can change so it is done without any
		 * locks
		 */
		if (bp->mf_config[BP_VN(bp)] & FUNC_MF_CFG_FUNC_DISABLED) {
			DP(NETIF_MSG_IFDOWN, "mf_cfg function disabled\n");
			bp->flags |= MF_FUNC_DIS;

			bnx2x_e1h_disable(bp);
		} else {
			DP(NETIF_MSG_IFUP, "mf_cfg function enabled\n");
			bp->flags &= ~MF_FUNC_DIS;

			bnx2x_e1h_enable(bp);
		}
		dcc_event &= ~DRV_STATUS_DCC_DISABLE_ENABLE_PF;
	}
	if (dcc_event & DRV_STATUS_DCC_BANDWIDTH_ALLOCATION) {

		/* re-run min-max fairness and propagate to the other VNs */
		bnx2x_cmng_fns_init(bp, true, CMNG_FNS_MINMAX);
		bnx2x_link_sync_notify(bp);
		storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp));
		dcc_event &= ~DRV_STATUS_DCC_BANDWIDTH_ALLOCATION;
	}

	/* Report results to MCP */
	if (dcc_event)
		bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_FAILURE, 0);
	else
		bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_OK, 0);
}
2592
Michael Chan28912902009-10-10 13:46:53 +00002593/* must be called under the spq lock */
2594static inline struct eth_spe *bnx2x_sp_get_next(struct bnx2x *bp)
2595{
2596 struct eth_spe *next_spe = bp->spq_prod_bd;
2597
2598 if (bp->spq_prod_bd == bp->spq_last_bd) {
2599 bp->spq_prod_bd = bp->spq;
2600 bp->spq_prod_idx = 0;
2601 DP(NETIF_MSG_TIMER, "end of spq\n");
2602 } else {
2603 bp->spq_prod_bd++;
2604 bp->spq_prod_idx++;
2605 }
2606 return next_spe;
2607}
2608
/* must be called under the spq lock */
/* Publish the new SPQ producer index to the XSTORM. The wmb() orders the
 * BD writes before the producer update; mmiowb() orders the MMIO write
 * before the subsequent spin-unlock.
 */
static inline void bnx2x_sp_prod_update(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);

	/* Make sure that BD data is updated before writing the producer */
	wmb();

	REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
		 bp->spq_prod_idx);
	mmiowb();
}
2621
/* the slow path queue is odd since completions arrive on the fastpath ring */
/* Post one slow-path element (ramrod). @common selects NONE vs ETH
 * connection type; @data_hi/@data_lo form the 64-bit ramrod data address.
 * Returns 0 on success, -EBUSY when the SPQ is full (also panics),
 * -EIO when a previous panic is pending.
 */
int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
		  u32 data_hi, u32 data_lo, int common)
{
	struct eth_spe *spe;
	u16 type;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return -EIO;
#endif

	spin_lock_bh(&bp->spq_lock);

	if (!atomic_read(&bp->spq_left)) {
		BNX2X_ERR("BUG! SPQ ring full!\n");
		spin_unlock_bh(&bp->spq_lock);
		bnx2x_panic();
		return -EBUSY;
	}

	spe = bnx2x_sp_get_next(bp);

	/* CID needs the port number to be encoded into it */
	spe->hdr.conn_and_cmd_data =
			cpu_to_le32((command << SPE_HDR_CMD_ID_SHIFT) |
				    HW_CID(bp, cid));

	if (common)
		/* Common ramrods:
		 * FUNC_START, FUNC_STOP, CFC_DEL, STATS, SET_MAC
		 * TRAFFIC_STOP, TRAFFIC_START
		 */
		type = (NONE_CONNECTION_TYPE << SPE_HDR_CONN_TYPE_SHIFT)
			& SPE_HDR_CONN_TYPE;
	else
		/* ETH ramrods: SETUP, HALT */
		type = (ETH_CONNECTION_TYPE << SPE_HDR_CONN_TYPE_SHIFT)
			& SPE_HDR_CONN_TYPE;

	type |= ((BP_FUNC(bp) << SPE_HDR_FUNCTION_ID_SHIFT) &
		 SPE_HDR_FUNCTION_ID);

	spe->hdr.type = cpu_to_le16(type);

	spe->data.update_data_addr.hi = cpu_to_le32(data_hi);
	spe->data.update_data_addr.lo = cpu_to_le32(data_lo);

	/* stats ramrod has its own slot on the spq */
	if (command != RAMROD_CMD_ID_COMMON_STAT_QUERY)
		/* It's ok if the actual decrement is issued towards the memory
		 * somewhere between the spin_lock and spin_unlock. Thus no
		 * more explicit memory barrier is needed.
		 */
		atomic_dec(&bp->spq_left);

	DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
	   "SPQE[%x] (%x:%x) command %d hw_cid %x data (%x:%x) "
	   "type(0x%x) left %x\n",
	   bp->spq_prod_idx, (u32)U64_HI(bp->spq_mapping),
	   (u32)(U64_LO(bp->spq_mapping) +
	   (void *)bp->spq_prod_bd - (void *)bp->spq), command,
	   HW_CID(bp, cid), data_hi, data_lo, type, atomic_read(&bp->spq_left));

	bnx2x_sp_prod_update(bp);
	spin_unlock_bh(&bp->spq_lock);
	return 0;
}
2690
2691/* acquire split MCP access lock register */
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07002692static int bnx2x_acquire_alr(struct bnx2x *bp)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002693{
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00002694 u32 j, val;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002695 int rc = 0;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002696
2697 might_sleep();
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00002698 for (j = 0; j < 1000; j++) {
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002699 val = (1UL << 31);
2700 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2701 val = REG_RD(bp, GRCBASE_MCP + 0x9c);
2702 if (val & (1L << 31))
2703 break;
2704
2705 msleep(5);
2706 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002707 if (!(val & (1L << 31))) {
Eilon Greenstein19680c42008-08-13 15:47:33 -07002708 BNX2X_ERR("Cannot acquire MCP access lock register\n");
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002709 rc = -EBUSY;
2710 }
2711
2712 return rc;
2713}
2714
/* release split MCP access lock register */
static void bnx2x_release_alr(struct bnx2x *bp)
{
	/* clearing the register (bit 31 included) drops the lock */
	REG_WR(bp, GRCBASE_MCP + 0x9c, 0);
}
2720
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002721#define BNX2X_DEF_SB_ATT_IDX 0x0001
2722#define BNX2X_DEF_SB_IDX 0x0002
2723
/* Sample the default status block indices and return a bitmask of which
 * ones changed (BNX2X_DEF_SB_ATT_IDX and/or BNX2X_DEF_SB_IDX). The saved
 * copies in bp are updated as a side effect.
 */
static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
{
	struct host_sp_status_block *def_sb = bp->def_status_blk;
	u16 rc = 0;

	barrier(); /* status block is written to by the chip */
	if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
		bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
		rc |= BNX2X_DEF_SB_ATT_IDX;
	}

	if (bp->def_idx != def_sb->sp_sb.running_index) {
		bp->def_idx = def_sb->sp_sb.running_index;
		rc |= BNX2X_DEF_SB_IDX;
	}

	/* Do not reorder: indices reading should complete before handling */
	barrier();
	return rc;
}
2744
2745/*
2746 * slow path service functions
2747 */
2748
/* Handle newly asserted attention bits: mask them in the AEU, record them
 * in bp->attn_state, service the hard-wired sources (NIG/link, GPIOs,
 * general attentions) and finally write the asserted bits to the HC or IGU
 * command register.
 */
static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
{
	int port = BP_PORT(bp);
	u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
			      MISC_REG_AEU_MASK_ATTN_FUNC_0;
	u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
				       NIG_REG_MASK_INTERRUPT_PORT0;
	u32 aeu_mask;
	u32 nig_mask = 0;
	u32 reg_addr;

	/* a bit should not be asserted while it is already recorded */
	if (bp->attn_state & asserted)
		BNX2X_ERR("IGU ERROR\n");

	/* mask the newly asserted bits in the AEU under the HW lock */
	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
	aeu_mask = REG_RD(bp, aeu_addr);

	DP(NETIF_MSG_HW, "aeu_mask %x newly asserted %x\n",
	   aeu_mask, asserted);
	aeu_mask &= ~(asserted & 0x3ff);
	DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);

	REG_WR(bp, aeu_addr, aeu_mask);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);

	DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
	bp->attn_state |= asserted;
	DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);

	if (asserted & ATTN_HARD_WIRED_MASK) {
		if (asserted & ATTN_NIG_FOR_FUNC) {

			bnx2x_acquire_phy_lock(bp);

			/* save nig interrupt mask */
			nig_mask = REG_RD(bp, nig_int_mask_addr);
			REG_WR(bp, nig_int_mask_addr, 0);

			bnx2x_link_attn(bp);

			/* handle unicore attn? */
		}
		if (asserted & ATTN_SW_TIMER_4_FUNC)
			DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");

		if (asserted & GPIO_2_FUNC)
			DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");

		if (asserted & GPIO_3_FUNC)
			DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");

		if (asserted & GPIO_4_FUNC)
			DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");

		/* general attentions 1-3 belong to port 0, 4-6 to port 1;
		 * clear each one that fired
		 */
		if (port == 0) {
			if (asserted & ATTN_GENERAL_ATTN_1) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
			}
			if (asserted & ATTN_GENERAL_ATTN_2) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
			}
			if (asserted & ATTN_GENERAL_ATTN_3) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
			}
		} else {
			if (asserted & ATTN_GENERAL_ATTN_4) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
			}
			if (asserted & ATTN_GENERAL_ATTN_5) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
			}
			if (asserted & ATTN_GENERAL_ATTN_6) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
			}
		}

	} /* if hardwired */

	/* the "set" command register lives in the HC or the IGU depending
	 * on the interrupt block in use
	 */
	if (bp->common.int_block == INT_BLOCK_HC)
		reg_addr = (HC_REG_COMMAND_REG + port*32 +
			    COMMAND_REG_ATTN_BITS_SET);
	else
		reg_addr = (BAR_IGU_INTMEM + IGU_CMD_ATTN_BIT_SET_UPPER*8);

	DP(NETIF_MSG_HW, "about to mask 0x%08x at %s addr 0x%x\n", asserted,
	   (bp->common.int_block == INT_BLOCK_HC) ? "HC" : "IGU", reg_addr);
	REG_WR(bp, reg_addr, asserted);

	/* now set back the mask */
	if (asserted & ATTN_NIG_FOR_FUNC) {
		REG_WR(bp, nig_int_mask_addr, nig_mask);
		bnx2x_release_phy_lock(bp);
	}
}
2849
Eilon Greensteinfd4ef402009-07-21 05:47:27 +00002850static inline void bnx2x_fan_failure(struct bnx2x *bp)
2851{
2852 int port = BP_PORT(bp);
Yaniv Rosnerb7737c92010-09-07 11:40:54 +00002853 u32 ext_phy_config;
Eilon Greensteinfd4ef402009-07-21 05:47:27 +00002854 /* mark the failure */
Yaniv Rosnerb7737c92010-09-07 11:40:54 +00002855 ext_phy_config =
2856 SHMEM_RD(bp,
2857 dev_info.port_hw_config[port].external_phy_config);
2858
2859 ext_phy_config &= ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
2860 ext_phy_config |= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
Eilon Greensteinfd4ef402009-07-21 05:47:27 +00002861 SHMEM_WR(bp, dev_info.port_hw_config[port].external_phy_config,
Yaniv Rosnerb7737c92010-09-07 11:40:54 +00002862 ext_phy_config);
Eilon Greensteinfd4ef402009-07-21 05:47:27 +00002863
2864 /* log the failure */
Vladislav Zolotarovcdaa7cb2010-04-19 01:13:57 +00002865 netdev_err(bp->dev, "Fan Failure on Network Controller has caused"
2866 " the driver to shutdown the card to prevent permanent"
2867 " damage. Please contact OEM Support for assistance\n");
Eilon Greensteinfd4ef402009-07-21 05:47:27 +00002868}
Eilon Greensteinab6ad5a2009-08-12 08:24:29 +00002869
/* Service group-0 deasserted attentions: SPIO5 (fan failure), GPIO3
 * (module detect) and fatal HW block attentions of set 0.
 */
static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
{
	int port = BP_PORT(bp);
	int reg_offset;
	u32 val;

	reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
			     MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);

	if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {

		/* disable the SPIO5 attention in the AEU enable register */
		val = REG_RD(bp, reg_offset);
		val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
		REG_WR(bp, reg_offset, val);

		BNX2X_ERR("SPIO5 hw attention\n");

		/* Fan failure attention */
		bnx2x_hw_reset_phy(&bp->link_params);
		bnx2x_fan_failure(bp);
	}

	if (attn & (AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 |
		    AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1)) {
		/* optic module insertion/removal - PHY access needs lock */
		bnx2x_acquire_phy_lock(bp);
		bnx2x_handle_module_detect_int(&bp->link_params);
		bnx2x_release_phy_lock(bp);
	}

	if (attn & HW_INTERRUT_ASSERT_SET_0) {

		/* mask the fatal bits, report and stop */
		val = REG_RD(bp, reg_offset);
		val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
		REG_WR(bp, reg_offset, val);

		BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
			  (u32)(attn & HW_INTERRUT_ASSERT_SET_0));
		bnx2x_panic();
	}
}
2910
/* Service group-1 deasserted attentions: doorbell queue (DORQ) errors and
 * fatal HW block attentions of set 1.
 */
static inline void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
{
	u32 val;

	if (attn & AEU_INPUTS_ATTN_BITS_DOORBELLQ_HW_INTERRUPT) {

		/* reading the STS_CLR register also clears the interrupt */
		val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
		BNX2X_ERR("DB hw attention 0x%x\n", val);
		/* DORQ discard attention */
		if (val & 0x2)
			BNX2X_ERR("FATAL error from DORQ\n");
	}

	if (attn & HW_INTERRUT_ASSERT_SET_1) {

		int port = BP_PORT(bp);
		int reg_offset;

		reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
				     MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);

		/* mask the fatal bits, report and stop */
		val = REG_RD(bp, reg_offset);
		val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
		REG_WR(bp, reg_offset, val);

		BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
			  (u32)(attn & HW_INTERRUT_ASSERT_SET_1));
		bnx2x_panic();
	}
}
2941
/* Service group-2 deasserted attentions: CFC and PXP errors and fatal HW
 * block attentions of set 2.
 */
static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
{
	u32 val;

	if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {

		/* reading the STS_CLR register also clears the interrupt */
		val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
		BNX2X_ERR("CFC hw attention 0x%x\n", val);
		/* CFC error attention */
		if (val & 0x2)
			BNX2X_ERR("FATAL error from CFC\n");
	}

	if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {

		val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
		BNX2X_ERR("PXP hw attention 0x%x\n", val);
		/* RQ_USDMDP_FIFO_OVERFLOW */
		if (val & 0x18000)
			BNX2X_ERR("FATAL error from PXP\n");
		if (CHIP_IS_E2(bp)) {
			/* E2 has a second PXP interrupt status register */
			val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_1);
			BNX2X_ERR("PXP hw attention-1 0x%x\n", val);
		}
	}

	if (attn & HW_INTERRUT_ASSERT_SET_2) {

		int port = BP_PORT(bp);
		int reg_offset;

		reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
				     MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);

		/* mask the fatal bits, report and stop */
		val = REG_RD(bp, reg_offset);
		val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
		REG_WR(bp, reg_offset, val);

		BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
			  (u32)(attn & HW_INTERRUT_ASSERT_SET_2));
		bnx2x_panic();
	}
}
2985
/* Handle the deasserted attention bits routed to group 3: general
 * attentions (PMF link events, MC/MCP asserts) and latched attentions
 * (GRC timeouts / reserved accesses). 'attn' is already masked with the
 * group's signal mask by the caller.
 */
static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
{
	u32 val;

	if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {

		if (attn & BNX2X_PMF_LINK_ASSERT) {
			int func = BP_FUNC(bp);

			/* ack the general attention before re-reading state */
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
			/* refresh the MF configuration - the MCP may have
			 * changed it (e.g. on a DCC event) */
			bp->mf_config[BP_VN(bp)] = MF_CFG_RD(bp,
					func_mf_config[BP_ABS_FUNC(bp)].config);
			val = SHMEM_RD(bp,
				       func_mb[BP_FW_MB_IDX(bp)].drv_status);
			if (val & DRV_STATUS_DCC_EVENT_MASK)
				bnx2x_dcc_event(bp,
					    (val & DRV_STATUS_DCC_EVENT_MASK));
			bnx2x__link_status_update(bp);
			/* take over PMF role if the MCP handed it to us */
			if ((bp->port.pmf == 0) && (val & DRV_STATUS_PMF))
				bnx2x_pmf_update(bp);

		} else if (attn & BNX2X_MC_ASSERT_BITS) {

			/* microcode assert - ack all 4 attn bits and die */
			BNX2X_ERR("MC assert!\n");
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);
			bnx2x_panic();

		} else if (attn & BNX2X_MCP_ASSERT) {

			/* management CPU assert - dump its firmware state */
			BNX2X_ERR("MCP assert!\n");
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);
			bnx2x_fw_dump(bp);

		} else
			BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn);
	}

	if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
		BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn);
		if (attn & BNX2X_GRC_TIMEOUT) {
			/* the timeout-info register only exists past E1 */
			val = CHIP_IS_E1(bp) ? 0 :
					REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN);
			BNX2X_ERR("GRC time-out 0x%08x\n", val);
		}
		if (attn & BNX2X_GRC_RSV) {
			val = CHIP_IS_E1(bp) ? 0 :
					REG_RD(bp, MISC_REG_GRC_RSV_ATTN);
			BNX2X_ERR("GRC reserved 0x%08x\n", val);
		}
		/* clear all latched attention signals */
		REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
	}
}
3041
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00003042#define BNX2X_MISC_GEN_REG MISC_REG_GENERIC_POR_1
3043#define LOAD_COUNTER_BITS 16 /* Number of bits for load counter */
3044#define LOAD_COUNTER_MASK (((u32)0x1 << LOAD_COUNTER_BITS) - 1)
3045#define RESET_DONE_FLAG_MASK (~LOAD_COUNTER_MASK)
3046#define RESET_DONE_FLAG_SHIFT LOAD_COUNTER_BITS
3047#define CHIP_PARITY_SUPPORTED(bp) (CHIP_IS_E1(bp) || CHIP_IS_E1H(bp))
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00003048
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00003049/*
3050 * should be run under rtnl lock
3051 */
static inline void bnx2x_set_reset_done(struct bnx2x *bp)
{
	/* RMW of the shared MISC "generic" register: clear the
	 * RESET_IN_PROGRESS flag while preserving the load counter held
	 * in the low LOAD_COUNTER_BITS bits.
	 */
	u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
	val &= ~(1 << RESET_DONE_FLAG_SHIFT);
	REG_WR(bp, BNX2X_MISC_GEN_REG, val);
	barrier();
	/* flush the posted write so other functions observe the flag */
	mmiowb();
}
3060
3061/*
3062 * should be run under rtnl lock
3063 */
3064static inline void bnx2x_set_reset_in_progress(struct bnx2x *bp)
3065{
3066 u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
3067 val |= (1 << 16);
3068 REG_WR(bp, BNX2X_MISC_GEN_REG, val);
3069 barrier();
3070 mmiowb();
3071}
3072
3073/*
3074 * should be run under rtnl lock
3075 */
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003076bool bnx2x_reset_is_done(struct bnx2x *bp)
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00003077{
3078 u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
3079 DP(NETIF_MSG_HW, "GEN_REG_VAL=0x%08x\n", val);
3080 return (val & RESET_DONE_FLAG_MASK) ? false : true;
3081}
3082
3083/*
3084 * should be run under rtnl lock
3085 */
inline void bnx2x_inc_load_cnt(struct bnx2x *bp)
{
	/* bump the shared load counter (low LOAD_COUNTER_BITS bits of the
	 * MISC generic register) while preserving the reset-flag bits */
	u32 val1, val = REG_RD(bp, BNX2X_MISC_GEN_REG);

	DP(NETIF_MSG_HW, "Old GEN_REG_VAL=0x%08x\n", val);

	/* increment modulo the counter width */
	val1 = ((val & LOAD_COUNTER_MASK) + 1) & LOAD_COUNTER_MASK;
	REG_WR(bp, BNX2X_MISC_GEN_REG, (val & RESET_DONE_FLAG_MASK) | val1);
	barrier();
	/* flush the posted write so other functions observe the new count */
	mmiowb();
}
3097
3098/*
3099 * should be run under rtnl lock
3100 */
/* Decrement the shared load counter; returns the new counter value so the
 * caller can tell whether it was the last loaded function.
 */
u32 bnx2x_dec_load_cnt(struct bnx2x *bp)
{
	u32 val1, val = REG_RD(bp, BNX2X_MISC_GEN_REG);

	DP(NETIF_MSG_HW, "Old GEN_REG_VAL=0x%08x\n", val);

	/* decrement modulo the counter width, keep the reset-flag bits */
	val1 = ((val & LOAD_COUNTER_MASK) - 1) & LOAD_COUNTER_MASK;
	REG_WR(bp, BNX2X_MISC_GEN_REG, (val & RESET_DONE_FLAG_MASK) | val1);
	barrier();
	/* flush the posted write so other functions observe the new count */
	mmiowb();

	return val1;
}
3114
3115/*
3116 * should be run under rtnl lock
3117 */
3118static inline u32 bnx2x_get_load_cnt(struct bnx2x *bp)
3119{
3120 return REG_RD(bp, BNX2X_MISC_GEN_REG) & LOAD_COUNTER_MASK;
3121}
3122
3123static inline void bnx2x_clear_load_cnt(struct bnx2x *bp)
3124{
3125 u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
3126 REG_WR(bp, BNX2X_MISC_GEN_REG, val & (~LOAD_COUNTER_MASK));
3127}
3128
/* Continuation-print one parity-block name; prefix a comma separator for
 * every name after the first (idx != 0).
 */
static inline void _print_next_block(int idx, const char *blk)
{
	if (idx)
		pr_cont(", ");
	pr_cont("%s", blk);
}
3135
3136static inline int bnx2x_print_blocks_with_parity0(u32 sig, int par_num)
3137{
3138 int i = 0;
3139 u32 cur_bit = 0;
3140 for (i = 0; sig; i++) {
3141 cur_bit = ((u32)0x1 << i);
3142 if (sig & cur_bit) {
3143 switch (cur_bit) {
3144 case AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR:
3145 _print_next_block(par_num++, "BRB");
3146 break;
3147 case AEU_INPUTS_ATTN_BITS_PARSER_PARITY_ERROR:
3148 _print_next_block(par_num++, "PARSER");
3149 break;
3150 case AEU_INPUTS_ATTN_BITS_TSDM_PARITY_ERROR:
3151 _print_next_block(par_num++, "TSDM");
3152 break;
3153 case AEU_INPUTS_ATTN_BITS_SEARCHER_PARITY_ERROR:
3154 _print_next_block(par_num++, "SEARCHER");
3155 break;
3156 case AEU_INPUTS_ATTN_BITS_TSEMI_PARITY_ERROR:
3157 _print_next_block(par_num++, "TSEMI");
3158 break;
3159 }
3160
3161 /* Clear the bit */
3162 sig &= ~cur_bit;
3163 }
3164 }
3165
3166 return par_num;
3167}
3168
/* Print the names of the blocks in attention vector 1 whose parity-error
 * bits are set in 'sig'; returns the updated running count of printed
 * blocks (used by _print_next_block() for comma separation).
 */
static inline int bnx2x_print_blocks_with_parity1(u32 sig, int par_num)
{
	int i = 0;
	u32 cur_bit = 0;
	/* scan set bits from LSB up; loop ends once all are cleared */
	for (i = 0; sig; i++) {
		cur_bit = ((u32)0x1 << i);
		if (sig & cur_bit) {
			switch (cur_bit) {
			case AEU_INPUTS_ATTN_BITS_PBCLIENT_PARITY_ERROR:
				_print_next_block(par_num++, "PBCLIENT");
				break;
			case AEU_INPUTS_ATTN_BITS_QM_PARITY_ERROR:
				_print_next_block(par_num++, "QM");
				break;
			case AEU_INPUTS_ATTN_BITS_XSDM_PARITY_ERROR:
				_print_next_block(par_num++, "XSDM");
				break;
			case AEU_INPUTS_ATTN_BITS_XSEMI_PARITY_ERROR:
				_print_next_block(par_num++, "XSEMI");
				break;
			case AEU_INPUTS_ATTN_BITS_DOORBELLQ_PARITY_ERROR:
				_print_next_block(par_num++, "DOORBELLQ");
				break;
			case AEU_INPUTS_ATTN_BITS_VAUX_PCI_CORE_PARITY_ERROR:
				_print_next_block(par_num++, "VAUX PCI CORE");
				break;
			case AEU_INPUTS_ATTN_BITS_DEBUG_PARITY_ERROR:
				_print_next_block(par_num++, "DEBUG");
				break;
			case AEU_INPUTS_ATTN_BITS_USDM_PARITY_ERROR:
				_print_next_block(par_num++, "USDM");
				break;
			case AEU_INPUTS_ATTN_BITS_USEMI_PARITY_ERROR:
				_print_next_block(par_num++, "USEMI");
				break;
			case AEU_INPUTS_ATTN_BITS_UPB_PARITY_ERROR:
				_print_next_block(par_num++, "UPB");
				break;
			case AEU_INPUTS_ATTN_BITS_CSDM_PARITY_ERROR:
				_print_next_block(par_num++, "CSDM");
				break;
			}

			/* Clear the bit */
			sig &= ~cur_bit;
		}
	}

	return par_num;
}
3219
/* Print the names of the blocks in attention vector 2 whose parity-error
 * bits are set in 'sig'; returns the updated running count of printed
 * blocks (used by _print_next_block() for comma separation).
 */
static inline int bnx2x_print_blocks_with_parity2(u32 sig, int par_num)
{
	int i = 0;
	u32 cur_bit = 0;
	/* scan set bits from LSB up; loop ends once all are cleared */
	for (i = 0; sig; i++) {
		cur_bit = ((u32)0x1 << i);
		if (sig & cur_bit) {
			switch (cur_bit) {
			case AEU_INPUTS_ATTN_BITS_CSEMI_PARITY_ERROR:
				_print_next_block(par_num++, "CSEMI");
				break;
			case AEU_INPUTS_ATTN_BITS_PXP_PARITY_ERROR:
				_print_next_block(par_num++, "PXP");
				break;
			case AEU_IN_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR:
				_print_next_block(par_num++,
					"PXPPCICLOCKCLIENT");
				break;
			case AEU_INPUTS_ATTN_BITS_CFC_PARITY_ERROR:
				_print_next_block(par_num++, "CFC");
				break;
			case AEU_INPUTS_ATTN_BITS_CDU_PARITY_ERROR:
				_print_next_block(par_num++, "CDU");
				break;
			case AEU_INPUTS_ATTN_BITS_IGU_PARITY_ERROR:
				_print_next_block(par_num++, "IGU");
				break;
			case AEU_INPUTS_ATTN_BITS_MISC_PARITY_ERROR:
				_print_next_block(par_num++, "MISC");
				break;
			}

			/* Clear the bit */
			sig &= ~cur_bit;
		}
	}

	return par_num;
}
3259
3260static inline int bnx2x_print_blocks_with_parity3(u32 sig, int par_num)
3261{
3262 int i = 0;
3263 u32 cur_bit = 0;
3264 for (i = 0; sig; i++) {
3265 cur_bit = ((u32)0x1 << i);
3266 if (sig & cur_bit) {
3267 switch (cur_bit) {
3268 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY:
3269 _print_next_block(par_num++, "MCP ROM");
3270 break;
3271 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY:
3272 _print_next_block(par_num++, "MCP UMP RX");
3273 break;
3274 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY:
3275 _print_next_block(par_num++, "MCP UMP TX");
3276 break;
3277 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY:
3278 _print_next_block(par_num++, "MCP SCPAD");
3279 break;
3280 }
3281
3282 /* Clear the bit */
3283 sig &= ~cur_bit;
3284 }
3285 }
3286
3287 return par_num;
3288}
3289
/* Check the four after-invert attention vectors for parity errors and, if
 * any are present, print the affected block names on one continued line.
 * Returns true when a parity error was detected, false otherwise.
 */
static inline bool bnx2x_parity_attn(struct bnx2x *bp, u32 sig0, u32 sig1,
				     u32 sig2, u32 sig3)
{
	if ((sig0 & HW_PRTY_ASSERT_SET_0) || (sig1 & HW_PRTY_ASSERT_SET_1) ||
	    (sig2 & HW_PRTY_ASSERT_SET_2) || (sig3 & HW_PRTY_ASSERT_SET_3)) {
		int par_num = 0;
		DP(NETIF_MSG_HW, "Was parity error: HW block parity attention: "
			"[0]:0x%08x [1]:0x%08x "
			"[2]:0x%08x [3]:0x%08x\n",
			sig0 & HW_PRTY_ASSERT_SET_0,
			sig1 & HW_PRTY_ASSERT_SET_1,
			sig2 & HW_PRTY_ASSERT_SET_2,
			sig3 & HW_PRTY_ASSERT_SET_3);
		printk(KERN_ERR"%s: Parity errors detected in blocks: ",
		       bp->dev->name);
		/* par_num threads through the printers so the block names
		 * are comma-separated on the continued line */
		par_num = bnx2x_print_blocks_with_parity0(
			sig0 & HW_PRTY_ASSERT_SET_0, par_num);
		par_num = bnx2x_print_blocks_with_parity1(
			sig1 & HW_PRTY_ASSERT_SET_1, par_num);
		par_num = bnx2x_print_blocks_with_parity2(
			sig2 & HW_PRTY_ASSERT_SET_2, par_num);
		par_num = bnx2x_print_blocks_with_parity3(
			sig3 & HW_PRTY_ASSERT_SET_3, par_num);
		/* terminate the continued line */
		printk("\n");
		return true;
	} else
		return false;
}
3318
/* Read this port's four after-invert AEU attention vectors and report
 * whether any of them carries a parity error (see bnx2x_parity_attn()).
 */
bool bnx2x_chk_parity_attn(struct bnx2x *bp)
{
	struct attn_route attn;
	int port = BP_PORT(bp);

	attn.sig[0] = REG_RD(bp,
		MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 +
			     port*4);
	attn.sig[1] = REG_RD(bp,
		MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 +
			     port*4);
	attn.sig[2] = REG_RD(bp,
		MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 +
			     port*4);
	attn.sig[3] = REG_RD(bp,
		MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 +
			     port*4);

	return bnx2x_parity_attn(bp, attn.sig[0], attn.sig[1], attn.sig[2],
					attn.sig[3]);
}
3340
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003341
/* Handle the deasserted attention bits routed to group 4 (E2 only):
 * decode and log PGLUE and ATC interrupt status bits, and flag fatal
 * PGLUE/ATC parity attentions. Reading the *_INT_STS_CLR registers also
 * clears the latched status in HW.
 */
static inline void bnx2x_attn_int_deasserted4(struct bnx2x *bp, u32 attn)
{
	u32 val;
	if (attn & AEU_INPUTS_ATTN_BITS_PGLUE_HW_INTERRUPT) {

		/* read-to-clear the PGLUE interrupt status */
		val = REG_RD(bp, PGLUE_B_REG_PGLUE_B_INT_STS_CLR);
		BNX2X_ERR("PGLUE hw attention 0x%x\n", val);
		if (val & PGLUE_B_PGLUE_B_INT_STS_REG_ADDRESS_ERROR)
			BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
				  "ADDRESS_ERROR\n");
		if (val & PGLUE_B_PGLUE_B_INT_STS_REG_INCORRECT_RCV_BEHAVIOR)
			BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
				  "INCORRECT_RCV_BEHAVIOR\n");
		if (val & PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN)
			BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
				  "WAS_ERROR_ATTN\n");
		if (val & PGLUE_B_PGLUE_B_INT_STS_REG_VF_LENGTH_VIOLATION_ATTN)
			BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
				  "VF_LENGTH_VIOLATION_ATTN\n");
		if (val &
		    PGLUE_B_PGLUE_B_INT_STS_REG_VF_GRC_SPACE_VIOLATION_ATTN)
			BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
				  "VF_GRC_SPACE_VIOLATION_ATTN\n");
		if (val &
		    PGLUE_B_PGLUE_B_INT_STS_REG_VF_MSIX_BAR_VIOLATION_ATTN)
			BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
				  "VF_MSIX_BAR_VIOLATION_ATTN\n");
		if (val & PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_ERROR_ATTN)
			BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
				  "TCPL_ERROR_ATTN\n");
		if (val & PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_IN_TWO_RCBS_ATTN)
			BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
				  "TCPL_IN_TWO_RCBS_ATTN\n");
		if (val & PGLUE_B_PGLUE_B_INT_STS_REG_CSSNOOP_FIFO_OVERFLOW)
			BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
				  "CSSNOOP_FIFO_OVERFLOW\n");
	}
	if (attn & AEU_INPUTS_ATTN_BITS_ATC_HW_INTERRUPT) {
		/* read-to-clear the ATC interrupt status */
		val = REG_RD(bp, ATC_REG_ATC_INT_STS_CLR);
		BNX2X_ERR("ATC hw attention 0x%x\n", val);
		if (val & ATC_ATC_INT_STS_REG_ADDRESS_ERROR)
			BNX2X_ERR("ATC_ATC_INT_STS_REG_ADDRESS_ERROR\n");
		if (val & ATC_ATC_INT_STS_REG_ATC_TCPL_TO_NOT_PEND)
			BNX2X_ERR("ATC_ATC_INT_STS_REG"
				  "_ATC_TCPL_TO_NOT_PEND\n");
		if (val & ATC_ATC_INT_STS_REG_ATC_GPA_MULTIPLE_HITS)
			BNX2X_ERR("ATC_ATC_INT_STS_REG_"
				  "ATC_GPA_MULTIPLE_HITS\n");
		if (val & ATC_ATC_INT_STS_REG_ATC_RCPL_TO_EMPTY_CNT)
			BNX2X_ERR("ATC_ATC_INT_STS_REG_"
				  "ATC_RCPL_TO_EMPTY_CNT\n");
		if (val & ATC_ATC_INT_STS_REG_ATC_TCPL_ERROR)
			BNX2X_ERR("ATC_ATC_INT_STS_REG_ATC_TCPL_ERROR\n");
		if (val & ATC_ATC_INT_STS_REG_ATC_IREQ_LESS_THAN_STU)
			BNX2X_ERR("ATC_ATC_INT_STS_REG_"
				  "ATC_IREQ_LESS_THAN_STU\n");
	}

	if (attn & (AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR |
		    AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR)) {
		BNX2X_ERR("FATAL parity attention set4 0x%x\n",
		(u32)(attn & (AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR |
		    AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR)));
	}

}
3408
/* Process newly deasserted attention bits: detect parity errors (and kick
 * the recovery flow if found), dispatch the per-group attention handlers,
 * acknowledge the bits towards HC/IGU and re-enable them in the AEU mask.
 */
static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
{
	struct attn_route attn, *group_mask;
	int port = BP_PORT(bp);
	int index;
	u32 reg_addr;
	u32 val;
	u32 aeu_mask;

	/* need to take HW lock because MCP or other port might also
	   try to handle this event */
	bnx2x_acquire_alr(bp);

	if (bnx2x_chk_parity_attn(bp)) {
		/* start the recovery state machine from the reset task */
		bp->recovery_state = BNX2X_RECOVERY_INIT;
		bnx2x_set_reset_in_progress(bp);
		schedule_delayed_work(&bp->reset_task, 0);
		/* Disable HW interrupts */
		bnx2x_int_disable(bp);
		bnx2x_release_alr(bp);
		/* In case of parity errors don't handle attentions so that
		 * other function would "see" parity errors.
		 */
		return;
	}

	/* snapshot the after-invert attention vectors for this port */
	attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
	attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
	attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
	attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
	/* the 5th vector only exists on E2 */
	if (CHIP_IS_E2(bp))
		attn.sig[4] =
		      REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_5_FUNC_0 + port*4);
	else
		attn.sig[4] = 0;

	DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x %08x\n",
	   attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3], attn.sig[4]);

	/* dispatch each deasserted group, masking the snapshot with the
	 * group's configured signal mask */
	for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
		if (deasserted & (1 << index)) {
			group_mask = &bp->attn_group[index];

			DP(NETIF_MSG_HW, "group[%d]: %08x %08x "
					 "%08x %08x %08x\n",
			   index,
			   group_mask->sig[0], group_mask->sig[1],
			   group_mask->sig[2], group_mask->sig[3],
			   group_mask->sig[4]);

			bnx2x_attn_int_deasserted4(bp,
					attn.sig[4] & group_mask->sig[4]);
			bnx2x_attn_int_deasserted3(bp,
					attn.sig[3] & group_mask->sig[3]);
			bnx2x_attn_int_deasserted1(bp,
					attn.sig[1] & group_mask->sig[1]);
			bnx2x_attn_int_deasserted2(bp,
					attn.sig[2] & group_mask->sig[2]);
			bnx2x_attn_int_deasserted0(bp,
					attn.sig[0] & group_mask->sig[0]);
		}
	}

	bnx2x_release_alr(bp);

	/* ack the deasserted bits towards the interrupt controller */
	if (bp->common.int_block == INT_BLOCK_HC)
		reg_addr = (HC_REG_COMMAND_REG + port*32 +
			    COMMAND_REG_ATTN_BITS_CLR);
	else
		reg_addr = (BAR_IGU_INTMEM + IGU_CMD_ATTN_BIT_CLR_UPPER*8);

	val = ~deasserted;
	DP(NETIF_MSG_HW, "about to mask 0x%08x at %s addr 0x%x\n", val,
	   (bp->common.int_block == INT_BLOCK_HC) ? "HC" : "IGU", reg_addr);
	REG_WR(bp, reg_addr, val);

	/* deasserted bits should have been marked asserted earlier */
	if (~bp->attn_state & deasserted)
		BNX2X_ERR("IGU ERROR\n");

	reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
			  MISC_REG_AEU_MASK_ATTN_FUNC_0;

	/* re-enable the handled attention lines in the AEU mask under the
	 * per-port HW lock */
	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
	aeu_mask = REG_RD(bp, reg_addr);

	DP(NETIF_MSG_HW, "aeu_mask %x newly deasserted %x\n",
	   aeu_mask, deasserted);
	aeu_mask |= (deasserted & 0x3ff);
	DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);

	REG_WR(bp, reg_addr, aeu_mask);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);

	DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
	bp->attn_state &= ~deasserted;
	DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
}
3506
/* Compare the attention bits reported in the default status block against
 * the driver's cached attention state and dispatch the assert/deassert
 * handlers for the bits that changed.
 */
static void bnx2x_attn_int(struct bnx2x *bp)
{
	/* read local copy of bits */
	u32 attn_bits = le32_to_cpu(bp->def_status_blk->atten_status_block.
								attn_bits);
	u32 attn_ack = le32_to_cpu(bp->def_status_blk->atten_status_block.
								attn_bits_ack);
	u32 attn_state = bp->attn_state;

	/* look for changed bits */
	u32 asserted = attn_bits & ~attn_ack & ~attn_state;
	u32 deasserted = ~attn_bits & attn_ack & attn_state;

	DP(NETIF_MSG_HW,
	   "attn_bits %x attn_ack %x asserted %x deasserted %x\n",
	   attn_bits, attn_ack, asserted, deasserted);

	/* a bit may not be both in sync with the ack and out of sync with
	 * our cached state at the same time */
	if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
		BNX2X_ERR("BAD attention state\n");

	/* handle bits that were raised */
	if (asserted)
		bnx2x_attn_int_asserted(bp, asserted);

	if (deasserted)
		bnx2x_attn_int_deasserted(bp, deasserted);
}
3534
/* Publish the new event-queue producer index to the storm memory for this
 * function.
 */
static inline void bnx2x_update_eq_prod(struct bnx2x *bp, u16 prod)
{
	/* No memory barriers */
	storm_memset_eq_prod(bp, prod, BP_FUNC(bp));
	mmiowb(); /* keep prod updates ordered */
}
3541
3542#ifdef BCM_CNIC
/* Route a CFC-delete event to the CNIC offload driver when the CID falls
 * in the CNIC range. Returns 0 if the event was consumed here, 1 if the
 * caller (bnx2x_eq_int) should handle it as a regular L2 CID.
 */
static int bnx2x_cnic_handle_cfc_del(struct bnx2x *bp, u32 cid,
				     union event_ring_elem *elem)
{
	/* CIDs below starting_cid (or before CNIC init) belong to L2 */
	if (!bp->cnic_eth_dev.starting_cid ||
	    cid < bp->cnic_eth_dev.starting_cid)
		return 1;

	DP(BNX2X_MSG_SP, "got delete ramrod for CNIC CID %d\n", cid);

	if (unlikely(elem->message.data.cfc_del_event.error)) {
		BNX2X_ERR("got delete ramrod for CNIC CID %d with error!\n",
			  cid);
		bnx2x_panic_dump(bp);
	}
	/* signal ramrod completion to the CNIC layer */
	bnx2x_cnic_cfc_comp(bp, cid);
	return 0;
}
3560#endif
3561
/* Drain the slowpath event queue (EQ): walk the ring from the cached
 * software consumer up to the hardware consumer reported in the status
 * block, handle each completion, then return the consumed entries to the
 * SPQ credit pool and publish the new producer.
 */
static void bnx2x_eq_int(struct bnx2x *bp)
{
	u16 hw_cons, sw_cons, sw_prod;
	union event_ring_elem *elem;
	u32 cid;
	u8 opcode;
	int spqe_cnt = 0;

	hw_cons = le16_to_cpu(*bp->eq_cons_sb);

	/* The hw_cons range is 1-255, 257 - the sw_cons range is 0-254, 256.
	 * When we get the next-page we need to adjust so the loop
	 * condition below will be met. The next element is the size of a
	 * regular element and hence incrementing by 1
	 */
	if ((hw_cons & EQ_DESC_MAX_PAGE) == EQ_DESC_MAX_PAGE)
		hw_cons++;

	/* This function may never run in parallel with itself for a
	 * specific bp, thus there is no need in "paired" read memory
	 * barrier here.
	 */
	sw_cons = bp->eq_cons;
	sw_prod = bp->eq_prod;

	DP(BNX2X_MSG_SP, "EQ: hw_cons %u sw_cons %u bp->spq_left %u\n",
			hw_cons, sw_cons, atomic_read(&bp->spq_left));

	for (; sw_cons != hw_cons;
	      sw_prod = NEXT_EQ_IDX(sw_prod), sw_cons = NEXT_EQ_IDX(sw_cons)) {


		elem = &bp->eq_ring[EQ_DESC(sw_cons)];

		/* cid is only meaningful for CFC_DEL events but is read
		 * unconditionally; other opcodes ignore it */
		cid = SW_CID(elem->message.data.cfc_del_event.cid);
		opcode = elem->message.opcode;


		/* handle eq element */
		switch (opcode) {
		case EVENT_RING_OPCODE_STAT_QUERY:
			DP(NETIF_MSG_TIMER, "got statistics comp event\n");
			/* nothing to do with stats comp */
			continue;

		case EVENT_RING_OPCODE_CFC_DEL:
			/* handle according to cid range */
			/*
			 * we may want to verify here that the bp state is
			 * HALTING
			 */
			DP(NETIF_MSG_IFDOWN,
			   "got delete ramrod for MULTI[%d]\n", cid);
#ifdef BCM_CNIC
			if (!bnx2x_cnic_handle_cfc_del(bp, cid, elem))
				goto next_spqe;
#endif
			bnx2x_fp(bp, cid, state) =
						BNX2X_FP_STATE_CLOSED;

			goto next_spqe;
		}

		/* remaining opcodes are only valid in specific driver
		 * states - dispatch on the (opcode | state) combination */
		switch (opcode | bp->state) {
		case (EVENT_RING_OPCODE_FUNCTION_START |
		      BNX2X_STATE_OPENING_WAIT4_PORT):
			DP(NETIF_MSG_IFUP, "got setup ramrod\n");
			bp->state = BNX2X_STATE_FUNC_STARTED;
			break;

		case (EVENT_RING_OPCODE_FUNCTION_STOP |
		      BNX2X_STATE_CLOSING_WAIT4_HALT):
			DP(NETIF_MSG_IFDOWN, "got halt ramrod\n");
			bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD;
			break;

		case (EVENT_RING_OPCODE_SET_MAC | BNX2X_STATE_OPEN):
		case (EVENT_RING_OPCODE_SET_MAC | BNX2X_STATE_DIAG):
			DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
			bp->set_mac_pending = 0;
			break;

		case (EVENT_RING_OPCODE_SET_MAC |
		      BNX2X_STATE_CLOSING_WAIT4_HALT):
			DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n");
			bp->set_mac_pending = 0;
			break;
		default:
			/* unknown event log error and continue */
			BNX2X_ERR("Unknown EQ event %d\n",
				  elem->message.opcode);
		}
next_spqe:
		spqe_cnt++;
	} /* for */

	/* return the consumed EQ entries to the SPQ credit pool */
	smp_mb__before_atomic_inc();
	atomic_add(spqe_cnt, &bp->spq_left);

	bp->eq_cons = sw_cons;
	bp->eq_prod = sw_prod;
	/* Make sure that above mem writes were issued towards the memory */
	smp_wmb();

	/* update producer */
	bnx2x_update_eq_prod(bp, bp->eq_prod);
}
3669
/* Slowpath work handler: scheduled from the SP interrupt. Figures out
 * which default-status-block indices advanced and services HW attentions
 * and event-queue completions accordingly, then re-arms the SP SB.
 */
static void bnx2x_sp_task(struct work_struct *work)
{
	struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work);
	u16 status;

	/* Return here if interrupt is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
		return;
	}

	/* bitmask of SB indices that changed since last time */
	status = bnx2x_update_dsb_idx(bp);
/*	if (status == 0)				     */
/*		BNX2X_ERR("spurious slowpath interrupt!\n"); */

	DP(NETIF_MSG_INTR, "got a slowpath interrupt (status 0x%x)\n", status);

	/* HW attentions */
	if (status & BNX2X_DEF_SB_ATT_IDX) {
		bnx2x_attn_int(bp);
		status &= ~BNX2X_DEF_SB_ATT_IDX;
	}

	/* SP events: STAT_QUERY and others */
	if (status & BNX2X_DEF_SB_IDX) {

		/* Handle EQ completions */
		bnx2x_eq_int(bp);

		/* ack the non-attention index without re-enabling ints */
		bnx2x_ack_sb(bp, bp->igu_dsb_id, USTORM_ID,
			le16_to_cpu(bp->def_idx), IGU_INT_NOP, 1);

		status &= ~BNX2X_DEF_SB_IDX;
	}

	if (unlikely(status))
		DP(NETIF_MSG_INTR, "got an unknown interrupt! (status 0x%x)\n",
		   status);

	/* ack the attention index and re-enable interrupts */
	bnx2x_ack_sb(bp, bp->igu_dsb_id, ATTENTION_ID,
	     le16_to_cpu(bp->def_att_idx), IGU_INT_ENABLE, 1);
}
3712
/* MSI-X slowpath interrupt handler: masks further SP interrupts, forwards
 * the event to the CNIC driver when present, and defers the real work to
 * bnx2x_sp_task() on the driver workqueue.
 */
irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
{
	struct net_device *dev = dev_instance;
	struct bnx2x *bp = netdev_priv(dev);

	/* Return here if interrupt is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
		return IRQ_HANDLED;
	}

	/* disable further SP interrupts until the work handler re-arms */
	bnx2x_ack_sb(bp, bp->igu_dsb_id, USTORM_ID, 0,
		     IGU_INT_DISABLE, 0);

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif

#ifdef BCM_CNIC
	{
		struct cnic_ops *c_ops;

		/* cnic_ops may be swapped out concurrently - RCU protected */
		rcu_read_lock();
		c_ops = rcu_dereference(bp->cnic_ops);
		if (c_ops)
			c_ops->cnic_handler(bp->cnic_data, NULL);
		rcu_read_unlock();
	}
#endif
	queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);

	return IRQ_HANDLED;
}
3747
3748/* end of slow path */
3749
/* Periodic driver timer: in poll mode services queue 0 rings, maintains
 * the driver<->MCP heartbeat (pulse sequence numbers in shared memory),
 * kicks statistics updates, and re-arms itself.
 */
static void bnx2x_timer(unsigned long data)
{
	struct bnx2x *bp = (struct bnx2x *) data;

	if (!netif_running(bp->dev))
		return;

	/* interrupts disabled - just re-arm without doing any work */
	if (atomic_read(&bp->intr_sem) != 0)
		goto timer_restart;

	if (poll) {
		struct bnx2x_fastpath *fp = &bp->fp[0];
		int rc;

		bnx2x_tx_int(fp);
		rc = bnx2x_rx_int(fp, 1000);
	}

	if (!BP_NOMCP(bp)) {
		int mb_idx = BP_FW_MB_IDX(bp);
		u32 drv_pulse;
		u32 mcp_pulse;

		/* advance and publish our heartbeat sequence number */
		++bp->fw_drv_pulse_wr_seq;
		bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
		/* TBD - add SYSTEM_TIME */
		drv_pulse = bp->fw_drv_pulse_wr_seq;
		SHMEM_WR(bp, func_mb[mb_idx].drv_pulse_mb, drv_pulse);

		mcp_pulse = (SHMEM_RD(bp, func_mb[mb_idx].mcp_pulse_mb) &
			     MCP_PULSE_SEQ_MASK);
		/* The delta between driver pulse and mcp response
		 * should be 1 (before mcp response) or 0 (after mcp response)
		 */
		if ((drv_pulse != mcp_pulse) &&
		    (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
			/* someone lost a heartbeat... */
			BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
				  drv_pulse, mcp_pulse);
		}
	}

	if (bp->state == BNX2X_STATE_OPEN)
		bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);

timer_restart:
	mod_timer(&bp->timer, jiffies + bp->current_interval);
}
3798
3799/* end of Statistics */
3800
3801/* nic init */
3802
3803/*
3804 * nic init service functions
3805 */
3806
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003807static inline void bnx2x_fill(struct bnx2x *bp, u32 addr, int fill, u32 len)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003808{
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003809 u32 i;
3810 if (!(len%4) && !(addr%4))
3811 for (i = 0; i < len; i += 4)
3812 REG_WR(bp, addr + i, fill);
3813 else
3814 for (i = 0; i < len; i++)
3815 REG_WR8(bp, addr + i, fill);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07003816
Eilon Greenstein34f80b02008-06-23 20:33:01 -07003817}
3818
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003819/* helper: writes FP SP data to FW - data_size in dwords */
3820static inline void bnx2x_wr_fp_sb_data(struct bnx2x *bp,
3821 int fw_sb_id,
3822 u32 *sb_data_p,
3823 u32 data_size)
Eilon Greenstein34f80b02008-06-23 20:33:01 -07003824{
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003825 int index;
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003826 for (index = 0; index < data_size; index++)
3827 REG_WR(bp, BAR_CSTRORM_INTMEM +
3828 CSTORM_STATUS_BLOCK_DATA_OFFSET(fw_sb_id) +
3829 sizeof(u32)*index,
3830 *(sb_data_p + index));
Eilon Greenstein34f80b02008-06-23 20:33:01 -07003831}
3832
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003833static inline void bnx2x_zero_fp_sb(struct bnx2x *bp, int fw_sb_id)
3834{
3835 u32 *sb_data_p;
3836 u32 data_size = 0;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003837 struct hc_status_block_data_e2 sb_data_e2;
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003838 struct hc_status_block_data_e1x sb_data_e1x;
3839
3840 /* disable the function first */
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003841 if (CHIP_IS_E2(bp)) {
3842 memset(&sb_data_e2, 0, sizeof(struct hc_status_block_data_e2));
3843 sb_data_e2.common.p_func.pf_id = HC_FUNCTION_DISABLED;
3844 sb_data_e2.common.p_func.vf_id = HC_FUNCTION_DISABLED;
3845 sb_data_e2.common.p_func.vf_valid = false;
3846 sb_data_p = (u32 *)&sb_data_e2;
3847 data_size = sizeof(struct hc_status_block_data_e2)/sizeof(u32);
3848 } else {
3849 memset(&sb_data_e1x, 0,
3850 sizeof(struct hc_status_block_data_e1x));
3851 sb_data_e1x.common.p_func.pf_id = HC_FUNCTION_DISABLED;
3852 sb_data_e1x.common.p_func.vf_id = HC_FUNCTION_DISABLED;
3853 sb_data_e1x.common.p_func.vf_valid = false;
3854 sb_data_p = (u32 *)&sb_data_e1x;
3855 data_size = sizeof(struct hc_status_block_data_e1x)/sizeof(u32);
3856 }
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003857 bnx2x_wr_fp_sb_data(bp, fw_sb_id, sb_data_p, data_size);
3858
3859 bnx2x_fill(bp, BAR_CSTRORM_INTMEM +
3860 CSTORM_STATUS_BLOCK_OFFSET(fw_sb_id), 0,
3861 CSTORM_STATUS_BLOCK_SIZE);
3862 bnx2x_fill(bp, BAR_CSTRORM_INTMEM +
3863 CSTORM_SYNC_BLOCK_OFFSET(fw_sb_id), 0,
3864 CSTORM_SYNC_BLOCK_SIZE);
3865}
3866
3867/* helper: writes SP SB data to FW */
3868static inline void bnx2x_wr_sp_sb_data(struct bnx2x *bp,
3869 struct hc_sp_status_block_data *sp_sb_data)
Eilon Greenstein34f80b02008-06-23 20:33:01 -07003870{
3871 int func = BP_FUNC(bp);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003872 int i;
3873 for (i = 0; i < sizeof(struct hc_sp_status_block_data)/sizeof(u32); i++)
3874 REG_WR(bp, BAR_CSTRORM_INTMEM +
3875 CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(func) +
3876 i*sizeof(u32),
3877 *((u32 *)sp_sb_data + i));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003878}
3879
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003880static inline void bnx2x_zero_sp_sb(struct bnx2x *bp)
3881{
3882 int func = BP_FUNC(bp);
3883 struct hc_sp_status_block_data sp_sb_data;
3884 memset(&sp_sb_data, 0, sizeof(struct hc_sp_status_block_data));
3885
3886 sp_sb_data.p_func.pf_id = HC_FUNCTION_DISABLED;
3887 sp_sb_data.p_func.vf_id = HC_FUNCTION_DISABLED;
3888 sp_sb_data.p_func.vf_valid = false;
3889
3890 bnx2x_wr_sp_sb_data(bp, &sp_sb_data);
3891
3892 bnx2x_fill(bp, BAR_CSTRORM_INTMEM +
3893 CSTORM_SP_STATUS_BLOCK_OFFSET(func), 0,
3894 CSTORM_SP_STATUS_BLOCK_SIZE);
3895 bnx2x_fill(bp, BAR_CSTRORM_INTMEM +
3896 CSTORM_SP_SYNC_BLOCK_OFFSET(func), 0,
3897 CSTORM_SP_SYNC_BLOCK_SIZE);
3898
3899}
3900
3901
3902static inline
3903void bnx2x_setup_ndsb_state_machine(struct hc_status_block_sm *hc_sm,
3904 int igu_sb_id, int igu_seg_id)
3905{
3906 hc_sm->igu_sb_id = igu_sb_id;
3907 hc_sm->igu_seg_id = igu_seg_id;
3908 hc_sm->timer_value = 0xFF;
3909 hc_sm->time_to_expire = 0xFFFFFFFF;
3910}
3911
3912void bnx2x_init_sb(struct bnx2x *bp, dma_addr_t mapping, int vfid,
3913 u8 vf_valid, int fw_sb_id, int igu_sb_id)
3914{
3915 int igu_seg_id;
3916
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003917 struct hc_status_block_data_e2 sb_data_e2;
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003918 struct hc_status_block_data_e1x sb_data_e1x;
3919 struct hc_status_block_sm *hc_sm_p;
3920 struct hc_index_data *hc_index_p;
3921 int data_size;
3922 u32 *sb_data_p;
3923
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003924 if (CHIP_INT_MODE_IS_BC(bp))
3925 igu_seg_id = HC_SEG_ACCESS_NORM;
3926 else
3927 igu_seg_id = IGU_SEG_ACCESS_NORM;
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003928
3929 bnx2x_zero_fp_sb(bp, fw_sb_id);
3930
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003931 if (CHIP_IS_E2(bp)) {
3932 memset(&sb_data_e2, 0, sizeof(struct hc_status_block_data_e2));
3933 sb_data_e2.common.p_func.pf_id = BP_FUNC(bp);
3934 sb_data_e2.common.p_func.vf_id = vfid;
3935 sb_data_e2.common.p_func.vf_valid = vf_valid;
3936 sb_data_e2.common.p_func.vnic_id = BP_VN(bp);
3937 sb_data_e2.common.same_igu_sb_1b = true;
3938 sb_data_e2.common.host_sb_addr.hi = U64_HI(mapping);
3939 sb_data_e2.common.host_sb_addr.lo = U64_LO(mapping);
3940 hc_sm_p = sb_data_e2.common.state_machine;
3941 hc_index_p = sb_data_e2.index_data;
3942 sb_data_p = (u32 *)&sb_data_e2;
3943 data_size = sizeof(struct hc_status_block_data_e2)/sizeof(u32);
3944 } else {
3945 memset(&sb_data_e1x, 0,
3946 sizeof(struct hc_status_block_data_e1x));
3947 sb_data_e1x.common.p_func.pf_id = BP_FUNC(bp);
3948 sb_data_e1x.common.p_func.vf_id = 0xff;
3949 sb_data_e1x.common.p_func.vf_valid = false;
3950 sb_data_e1x.common.p_func.vnic_id = BP_VN(bp);
3951 sb_data_e1x.common.same_igu_sb_1b = true;
3952 sb_data_e1x.common.host_sb_addr.hi = U64_HI(mapping);
3953 sb_data_e1x.common.host_sb_addr.lo = U64_LO(mapping);
3954 hc_sm_p = sb_data_e1x.common.state_machine;
3955 hc_index_p = sb_data_e1x.index_data;
3956 sb_data_p = (u32 *)&sb_data_e1x;
3957 data_size = sizeof(struct hc_status_block_data_e1x)/sizeof(u32);
3958 }
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003959
3960 bnx2x_setup_ndsb_state_machine(&hc_sm_p[SM_RX_ID],
3961 igu_sb_id, igu_seg_id);
3962 bnx2x_setup_ndsb_state_machine(&hc_sm_p[SM_TX_ID],
3963 igu_sb_id, igu_seg_id);
3964
3965 DP(NETIF_MSG_HW, "Init FW SB %d\n", fw_sb_id);
3966
3967 /* write indecies to HW */
3968 bnx2x_wr_fp_sb_data(bp, fw_sb_id, sb_data_p, data_size);
3969}
3970
3971static void bnx2x_update_coalesce_sb_index(struct bnx2x *bp, u16 fw_sb_id,
3972 u8 sb_index, u8 disable, u16 usec)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003973{
Eilon Greenstein34f80b02008-06-23 20:33:01 -07003974 int port = BP_PORT(bp);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003975 u8 ticks = usec / BNX2X_BTR;
3976
3977 storm_memset_hc_timeout(bp, port, fw_sb_id, sb_index, ticks);
3978
3979 disable = disable ? 1 : (usec ? 0 : 1);
3980 storm_memset_hc_disable(bp, port, fw_sb_id, sb_index, disable);
3981}
3982
3983static void bnx2x_update_coalesce_sb(struct bnx2x *bp, u16 fw_sb_id,
3984 u16 tx_usec, u16 rx_usec)
3985{
3986 bnx2x_update_coalesce_sb_index(bp, fw_sb_id, U_SB_ETH_RX_CQ_INDEX,
3987 false, rx_usec);
3988 bnx2x_update_coalesce_sb_index(bp, fw_sb_id, C_SB_ETH_TX_CQ_INDEX,
3989 false, tx_usec);
3990}
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003991
/* Initialize the default (slow-path) status block: program the
 * attention SB address into HC/IGU, cache the attention-group enable
 * masks, and write the SP SB data to the FW. Must run before slow-path
 * interrupts are enabled.
 */
static void bnx2x_init_def_sb(struct bnx2x *bp)
{
	struct host_sp_status_block *def_sb = bp->def_status_blk;
	dma_addr_t mapping = bp->def_status_blk_mapping;
	int igu_sp_sb_index;
	int igu_seg_id;
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	int reg_offset;
	u64 section;
	int index;
	struct hc_sp_status_block_data sp_sb_data;
	memset(&sp_sb_data, 0, sizeof(struct hc_sp_status_block_data));

	/* backward-compatible mode uses the fixed default-SB id through
	 * the HC segment; otherwise use the per-function IGU DSB id */
	if (CHIP_INT_MODE_IS_BC(bp)) {
		igu_sp_sb_index = DEF_SB_IGU_ID;
		igu_seg_id = HC_SEG_ACCESS_DEF;
	} else {
		igu_sp_sb_index = bp->igu_dsb_id;
		igu_seg_id = IGU_SEG_ACCESS_DEF;
	}

	/* ATTN */
	section = ((u64)mapping) + offsetof(struct host_sp_status_block,
					    atten_status_block);
	def_sb->atten_status_block.status_block_id = igu_sp_sb_index;

	bp->attn_state = 0;

	/* cache the AEU enable masks per dynamic attention group */
	reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
			     MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
	for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
		int sindex;
		/* take care of sig[0]..sig[4] */
		for (sindex = 0; sindex < 4; sindex++)
			bp->attn_group[index].sig[sindex] =
			   REG_RD(bp, reg_offset + sindex*0x4 + 0x10*index);

		if (CHIP_IS_E2(bp))
			/*
			 * enable5 is separate from the rest of the registers,
			 * and therefore the address skip is 4
			 * and not 16 between the different groups
			 */
			bp->attn_group[index].sig[4] = REG_RD(bp,
					reg_offset + 0x10 + 0x4*index);
		else
			bp->attn_group[index].sig[4] = 0;
	}

	/* tell the interrupt block where the attention SB lives in host
	 * memory (HC on E1/E1H, IGU on E2) */
	if (bp->common.int_block == INT_BLOCK_HC) {
		reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
				     HC_REG_ATTN_MSG0_ADDR_L);

		REG_WR(bp, reg_offset, U64_LO(section));
		REG_WR(bp, reg_offset + 4, U64_HI(section));
	} else if (CHIP_IS_E2(bp)) {
		REG_WR(bp, IGU_REG_ATTN_MSG_ADDR_L, U64_LO(section));
		REG_WR(bp, IGU_REG_ATTN_MSG_ADDR_H, U64_HI(section));
	}

	section = ((u64)mapping) + offsetof(struct host_sp_status_block,
					    sp_sb);

	/* disable the SP SB in FW before re-programming it */
	bnx2x_zero_sp_sb(bp);

	sp_sb_data.host_sb_addr.lo = U64_LO(section);
	sp_sb_data.host_sb_addr.hi = U64_HI(section);
	sp_sb_data.igu_sb_id = igu_sp_sb_index;
	sp_sb_data.igu_seg_id = igu_seg_id;
	sp_sb_data.p_func.pf_id = func;
	sp_sb_data.p_func.vnic_id = BP_VN(bp);
	sp_sb_data.p_func.vf_id = 0xff;	/* the default SB belongs to the PF */

	bnx2x_wr_sp_sb_data(bp, &sp_sb_data);

	bp->stats_pending = 0;
	bp->set_mac_pending = 0;

	/* ack and enable the default SB interrupt line */
	bnx2x_ack_sb(bp, bp->igu_dsb_id, USTORM_ID, 0, IGU_INT_ENABLE, 0);
}
4073
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00004074void bnx2x_update_coalesce(struct bnx2x *bp)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004075{
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004076 int i;
4077
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004078 for_each_queue(bp, i)
4079 bnx2x_update_coalesce_sb(bp, bp->fp[i].fw_sb_id,
4080 bp->rx_ticks, bp->tx_ticks);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004081}
4082
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004083static void bnx2x_init_sp_ring(struct bnx2x *bp)
4084{
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004085 spin_lock_init(&bp->spq_lock);
Dmitry Kravkov8fe23fb2010-10-06 03:27:41 +00004086 atomic_set(&bp->spq_left, MAX_SPQ_PENDING);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004087
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004088 bp->spq_prod_idx = 0;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004089 bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
4090 bp->spq_prod_bd = bp->spq;
4091 bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004092}
4093
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004094static void bnx2x_init_eq_ring(struct bnx2x *bp)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004095{
4096 int i;
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004097 for (i = 1; i <= NUM_EQ_PAGES; i++) {
4098 union event_ring_elem *elem =
4099 &bp->eq_ring[EQ_DESC_CNT_PAGE * i - 1];
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004100
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004101 elem->next_page.addr.hi =
4102 cpu_to_le32(U64_HI(bp->eq_mapping +
4103 BCM_PAGE_SIZE * (i % NUM_EQ_PAGES)));
4104 elem->next_page.addr.lo =
4105 cpu_to_le32(U64_LO(bp->eq_mapping +
4106 BCM_PAGE_SIZE*(i % NUM_EQ_PAGES)));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004107 }
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004108 bp->eq_cons = 0;
4109 bp->eq_prod = NUM_EQ_DESC;
4110 bp->eq_cons_sb = BNX2X_EQ_INDEX;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004111}
4112
/* Program the TSTORM RSS indirection table: entry i maps to client id
 * (bp->fp->cl_id + i % num_queues), spreading flows round-robin over
 * the enabled queues. No-op when RSS is disabled.
 */
static void bnx2x_init_ind_table(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);
	int i;

	if (bp->multi_mode == ETH_RSS_MODE_DISABLED)
		return;

	DP(NETIF_MSG_IFUP,
	   "Initializing indirection table multi_mode %d\n", bp->multi_mode);
	for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
		REG_WR8(bp, BAR_TSTRORM_INTMEM +
			TSTORM_INDIRECTION_TABLE_OFFSET(func) + i,
			bp->fp->cl_id + (i % bp->num_queues));
}
4128
/* Push the current bp->rx_mode to the chip: configure the storm MAC
 * filters for the leading client and the NIG LLH drive mask that
 * decides which packet classes the NIG passes to the BRB/host.
 */
void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
{
	int mode = bp->rx_mode;
	u16 cl_id;

	/* All but management unicast packets should pass to the host as well */
	u32 llh_mask =
		NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_BRCST |
		NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_MLCST |
		NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_VLAN |
		NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_NO_VLAN;

	switch (mode) {
	case BNX2X_RX_MODE_NONE: /* no Rx */
		cl_id = BP_L_ID(bp);
		bnx2x_rxq_set_mac_filters(bp, cl_id, BNX2X_ACCEPT_NONE);
		break;

	case BNX2X_RX_MODE_NORMAL:
		cl_id = BP_L_ID(bp);
		bnx2x_rxq_set_mac_filters(bp, cl_id,
			BNX2X_ACCEPT_UNICAST |
			BNX2X_ACCEPT_BROADCAST |
			BNX2X_ACCEPT_MULTICAST);
		break;

	case BNX2X_RX_MODE_ALLMULTI:
		cl_id = BP_L_ID(bp);
		bnx2x_rxq_set_mac_filters(bp, cl_id,
			BNX2X_ACCEPT_UNICAST |
			BNX2X_ACCEPT_BROADCAST |
			BNX2X_ACCEPT_ALL_MULTICAST);
		break;

	case BNX2X_RX_MODE_PROMISC:
		cl_id = BP_L_ID(bp);
		bnx2x_rxq_set_mac_filters(bp, cl_id, BNX2X_PROMISCUOUS_MODE);

		/* pass management unicast packets as well */
		llh_mask |= NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_UNCST;
		break;

	default:
		BNX2X_ERR("BAD rx mode (%d)\n", mode);
		break;
	}

	/* program the per-port LLH drive mask */
	REG_WR(bp,
	       BP_PORT(bp) ? NIG_REG_LLH1_BRB1_DRV_MASK :
			     NIG_REG_LLH0_BRB1_DRV_MASK,
	       llh_mask);

	DP(NETIF_MSG_IFUP, "rx mode %d\n"
		"drop_ucast 0x%x\ndrop_mcast 0x%x\ndrop_bcast 0x%x\n"
		"accp_ucast 0x%x\naccp_mcast 0x%x\naccp_bcast 0x%x\n", mode,
		bp->mac_filters.ucast_drop_all,
		bp->mac_filters.mcast_drop_all,
		bp->mac_filters.bcast_drop_all,
		bp->mac_filters.ucast_accept_all,
		bp->mac_filters.mcast_accept_all,
		bp->mac_filters.bcast_accept_all
		);

	/* write the filter configuration to the FW */
	storm_memset_mac_filters(bp, &bp->mac_filters, BP_FUNC(bp));
}
4194
/* Chip-wide internal (storm RAM) initialization; runs only on the
 * first function loaded on the chip.
 */
static void bnx2x_init_internal_common(struct bnx2x *bp)
{
	int i;

	if (!CHIP_IS_E1(bp)) {

		/* xstorm needs to know whether to add ovlan to packets or not,
		 * in switch-independent we'll write 0 to here... */
		REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNCTION_MODE_OFFSET,
			bp->mf_mode);
		REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNCTION_MODE_OFFSET,
			bp->mf_mode);
		REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNCTION_MODE_OFFSET,
			bp->mf_mode);
		REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNCTION_MODE_OFFSET,
			bp->mf_mode);
	}

	/* Zero this manually as its initialization is
	   currently missing in the initTool */
	for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_AGG_DATA_OFFSET + i * 4, 0);
	/* tell the FW whether the IGU runs in backward-compatible mode */
	if (CHIP_IS_E2(bp)) {
		REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_IGU_MODE_OFFSET,
			CHIP_INT_MODE_IS_BC(bp) ?
			HC_IGU_BC_MODE : HC_IGU_NBC_MODE);
	}
}
4224
/* Per-port internal memory initialization - intentionally empty at the
 * moment; kept as a hook for the PORT stage of bnx2x_init_internal().
 */
static void bnx2x_init_internal_port(struct bnx2x *bp)
{
	/* port */
}
4229
Eilon Greenstein471de712008-08-13 15:49:35 -07004230static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
4231{
4232 switch (load_code) {
4233 case FW_MSG_CODE_DRV_LOAD_COMMON:
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00004234 case FW_MSG_CODE_DRV_LOAD_COMMON_CHIP:
Eilon Greenstein471de712008-08-13 15:49:35 -07004235 bnx2x_init_internal_common(bp);
4236 /* no break */
4237
4238 case FW_MSG_CODE_DRV_LOAD_PORT:
4239 bnx2x_init_internal_port(bp);
4240 /* no break */
4241
4242 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004243 /* internal memory per function is
4244 initialized inside bnx2x_pf_init */
Eilon Greenstein471de712008-08-13 15:49:35 -07004245 break;
4246
4247 default:
4248 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
4249 break;
4250 }
4251}
4252
/* Initialize one fastpath queue's identifiers and its status block:
 * derive the client/FW-SB/IGU-SB ids from the queue index, compute the
 * USTORM rx-producers shortcut offset, and program the SB in the FW.
 */
static void bnx2x_init_fp_sb(struct bnx2x *bp, int fp_idx)
{
	struct bnx2x_fastpath *fp = &bp->fp[fp_idx];

	fp->state = BNX2X_FP_STATE_CLOSED;

	fp->index = fp->cid = fp_idx;
	fp->cl_id = BP_L_ID(bp) + fp_idx;
	fp->fw_sb_id = bp->base_fw_ndsb + fp->cl_id + CNIC_CONTEXT_USE;
	fp->igu_sb_id = bp->igu_base_sb + fp_idx + CNIC_CONTEXT_USE;
	/* qZone id equals to FW (per path) client id */
	fp->cl_qzone_id = fp->cl_id +
			   BP_PORT(bp)*(CHIP_IS_E2(bp) ? ETH_MAX_RX_CLIENTS_E2 :
				ETH_MAX_RX_CLIENTS_E1H);
	/* init shortcut */
	fp->ustorm_rx_prods_offset = CHIP_IS_E2(bp) ?
			    USTORM_RX_PRODS_E2_OFFSET(fp->cl_qzone_id) :
			    USTORM_RX_PRODS_E1X_OFFSET(BP_PORT(bp), fp->cl_id);
	/* Setup SB indices */
	fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
	fp->tx_cons_sb = BNX2X_TX_SB_INDEX;

	DP(NETIF_MSG_IFUP, "queue[%d]: bnx2x_init_sb(%p,%p) "
		    "cl_id %d fw_sb %d igu_sb %d\n",
	   fp_idx, bp, fp->status_blk.e1x_sb, fp->cl_id, fp->fw_sb_id,
	   fp->igu_sb_id);
	bnx2x_init_sb(bp, fp->status_blk_mapping, BNX2X_VF_ID_INVALID, false,
		      fp->fw_sb_id, fp->igu_sb_id);

	bnx2x_update_fpsb_idx(fp);
}
4284
/* Top-level NIC initialization after a successful MCP load request:
 * program all status blocks, rings and internal memories, then enable
 * interrupts. The order of the calls below is significant.
 */
void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
{
	int i;

	for_each_queue(bp, i)
		bnx2x_init_fp_sb(bp, i);
#ifdef BCM_CNIC

	/* dedicated SB for the CNIC (iSCSI/FCoE offload) client */
	bnx2x_init_sb(bp, bp->cnic_sb_mapping,
		      BNX2X_VF_ID_INVALID, false,
		      CNIC_SB_ID(bp), CNIC_IGU_SB_ID(bp));

#endif

	/* ensure status block indices were read */
	rmb();

	bnx2x_init_def_sb(bp);
	bnx2x_update_dsb_idx(bp);
	bnx2x_init_rx_rings(bp);
	bnx2x_init_tx_rings(bp);
	bnx2x_init_sp_ring(bp);
	bnx2x_init_eq_ring(bp);
	bnx2x_init_internal(bp, load_code);
	bnx2x_pf_init(bp);
	bnx2x_init_ind_table(bp);
	bnx2x_stats_init(bp);

	/* At this point, we are ready for interrupts */
	atomic_set(&bp->intr_sem, 0);

	/* flush all before enabling interrupts */
	mb();
	mmiowb();

	bnx2x_int_enable(bp);

	/* Check for SPIO5 */
	bnx2x_attn_int_deasserted0(bp,
		REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + BP_PORT(bp)*4) &
				   AEU_INPUTS_ATTN_BITS_SPIO5);
}
4327
4328/* end of nic init */
4329
4330/*
4331 * gzip service functions
4332 */
4333
4334static int bnx2x_gunzip_init(struct bnx2x *bp)
4335{
FUJITA Tomonori1a983142010-04-04 01:51:03 +00004336 bp->gunzip_buf = dma_alloc_coherent(&bp->pdev->dev, FW_BUF_SIZE,
4337 &bp->gunzip_mapping, GFP_KERNEL);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004338 if (bp->gunzip_buf == NULL)
4339 goto gunzip_nomem1;
4340
4341 bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
4342 if (bp->strm == NULL)
4343 goto gunzip_nomem2;
4344
4345 bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(),
4346 GFP_KERNEL);
4347 if (bp->strm->workspace == NULL)
4348 goto gunzip_nomem3;
4349
4350 return 0;
4351
4352gunzip_nomem3:
4353 kfree(bp->strm);
4354 bp->strm = NULL;
4355
4356gunzip_nomem2:
FUJITA Tomonori1a983142010-04-04 01:51:03 +00004357 dma_free_coherent(&bp->pdev->dev, FW_BUF_SIZE, bp->gunzip_buf,
4358 bp->gunzip_mapping);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004359 bp->gunzip_buf = NULL;
4360
4361gunzip_nomem1:
Vladislav Zolotarovcdaa7cb2010-04-19 01:13:57 +00004362 netdev_err(bp->dev, "Cannot allocate firmware buffer for"
4363 " un-compression\n");
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004364 return -ENOMEM;
4365}
4366
4367static void bnx2x_gunzip_end(struct bnx2x *bp)
4368{
4369 kfree(bp->strm->workspace);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004370 kfree(bp->strm);
4371 bp->strm = NULL;
4372
4373 if (bp->gunzip_buf) {
FUJITA Tomonori1a983142010-04-04 01:51:03 +00004374 dma_free_coherent(&bp->pdev->dev, FW_BUF_SIZE, bp->gunzip_buf,
4375 bp->gunzip_mapping);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004376 bp->gunzip_buf = NULL;
4377 }
4378}
4379
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07004380static int bnx2x_gunzip(struct bnx2x *bp, const u8 *zbuf, int len)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004381{
4382 int n, rc;
4383
4384 /* check gzip header */
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07004385 if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED)) {
4386 BNX2X_ERR("Bad gzip header\n");
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004387 return -EINVAL;
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07004388 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004389
4390 n = 10;
4391
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004392#define FNAME 0x8
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004393
4394 if (zbuf[3] & FNAME)
4395 while ((zbuf[n++] != 0) && (n < len));
4396
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07004397 bp->strm->next_in = (typeof(bp->strm->next_in))zbuf + n;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004398 bp->strm->avail_in = len - n;
4399 bp->strm->next_out = bp->gunzip_buf;
4400 bp->strm->avail_out = FW_BUF_SIZE;
4401
4402 rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
4403 if (rc != Z_OK)
4404 return rc;
4405
4406 rc = zlib_inflate(bp->strm, Z_FINISH);
4407 if ((rc != Z_OK) && (rc != Z_STREAM_END))
Joe Perches7995c642010-02-17 15:01:52 +00004408 netdev_err(bp->dev, "Firmware decompression error: %s\n",
4409 bp->strm->msg);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004410
4411 bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
4412 if (bp->gunzip_outlen & 0x3)
Vladislav Zolotarovcdaa7cb2010-04-19 01:13:57 +00004413 netdev_err(bp->dev, "Firmware decompression error:"
4414 " gunzip_outlen (%d) not aligned\n",
4415 bp->gunzip_outlen);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004416 bp->gunzip_outlen >>= 2;
4417
4418 zlib_inflateEnd(bp->strm);
4419
4420 if (rc == Z_STREAM_END)
4421 return 0;
4422
4423 return rc;
4424}
4425
4426/* nic load/unload */
4427
4428/*
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004429 * General service functions
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004430 */
4431
/* send a NIG loopback debug packet: a minimal frame injected through
 * the NIG debug-packet interface in two 3-dword DMAE writes (first
 * half carries SOP, second half carries EOP).
 */
static void bnx2x_lb_pckt(struct bnx2x *bp)
{
	u32 wb_write[3];

	/* Ethernet source and destination addresses */
	wb_write[0] = 0x55555555;
	wb_write[1] = 0x55555555;
	wb_write[2] = 0x20;		/* SOP */
	REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);

	/* NON-IP protocol */
	wb_write[0] = 0x09000000;
	wb_write[1] = 0x55555555;
	wb_write[2] = 0x10;		/* EOP, eop_bvalid = 0 */
	REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
}
4449
/* some of the internal memories
 * are not directly readable from the driver
 * to test them we send debug packets
 */
/* Self-test of internal memories via NIG loopback packets. Sends debug
 * frames with parser CFC-search credits set to 0 so they stall in the
 * pipeline, verifies the NIG/PRS packet counters, then releases them
 * and re-initializes BRB/PRS. Returns 0 on success or a negative step
 * number (-1..-4) identifying the failing check. The statement order
 * below is a fixed hardware sequence - do not reorder.
 */
static int bnx2x_int_mem_test(struct bnx2x *bp)
{
	int factor;
	int count, i;
	u32 val = 0;

	/* emulation/FPGA platforms run much slower - scale the timeouts */
	if (CHIP_REV_IS_FPGA(bp))
		factor = 120;
	else if (CHIP_REV_IS_EMUL(bp))
		factor = 200;
	else
		factor = 1;

	/* Disable inputs of parser neighbor blocks */
	REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
	REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
	REG_WR(bp, CFC_REG_DEBUG0, 0x1);
	REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);

	/* Write 0 to parser credits for CFC search request */
	REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);

	/* send Ethernet packet */
	bnx2x_lb_pckt(bp);

	/* TODO do i reset NIG statistic? */
	/* Wait until NIG register shows 1 packet of size 0x10 */
	count = 1000 * factor;
	while (count) {

		bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
		val = *bnx2x_sp(bp, wb_data[0]);
		if (val == 0x10)
			break;

		msleep(10);
		count--;
	}
	if (val != 0x10) {
		BNX2X_ERR("NIG timeout val = 0x%x\n", val);
		return -1;
	}

	/* Wait until PRS register shows 1 packet */
	count = 1000 * factor;
	while (count) {
		val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
		if (val == 1)
			break;

		msleep(10);
		count--;
	}
	if (val != 0x1) {
		BNX2X_ERR("PRS timeout val = 0x%x\n", val);
		return -2;
	}

	/* Reset and init BRB, PRS */
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
	msleep(50);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
	msleep(50);
	bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);

	DP(NETIF_MSG_HW, "part2\n");

	/* Disable inputs of parser neighbor blocks */
	REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
	REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
	REG_WR(bp, CFC_REG_DEBUG0, 0x1);
	REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);

	/* Write 0 to parser credits for CFC search request */
	REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);

	/* send 10 Ethernet packets */
	for (i = 0; i < 10; i++)
		bnx2x_lb_pckt(bp);

	/* Wait until NIG register shows 10 + 1
	   packets of size 11*0x10 = 0xb0 */
	count = 1000 * factor;
	while (count) {

		bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
		val = *bnx2x_sp(bp, wb_data[0]);
		if (val == 0xb0)
			break;

		msleep(10);
		count--;
	}
	if (val != 0xb0) {
		BNX2X_ERR("NIG timeout val = 0x%x\n", val);
		return -3;
	}

	/* Wait until PRS register shows 2 packets */
	val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
	if (val != 2)
		BNX2X_ERR("PRS timeout val = 0x%x\n", val);

	/* Write 1 to parser credits for CFC search request */
	REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);

	/* Wait until PRS register shows 3 packets */
	msleep(10 * factor);
	/* Wait until NIG register shows 1 packet of size 0x10 */
	val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
	if (val != 3)
		BNX2X_ERR("PRS timeout val = 0x%x\n", val);

	/* clear NIG EOP FIFO */
	for (i = 0; i < 11; i++)
		REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO);
	val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY);
	if (val != 1) {
		BNX2X_ERR("clear of NIG failed\n");
		return -4;
	}

	/* Reset and init BRB, PRS, NIG */
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
	msleep(50);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
	msleep(50);
	bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
#ifndef BCM_CNIC
	/* set NIC mode */
	REG_WR(bp, PRS_REG_NIC_MODE, 1);
#endif

	/* Enable inputs of parser neighbor blocks */
	REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
	REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
	REG_WR(bp, CFC_REG_DEBUG0, 0x0);
	REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x1);

	DP(NETIF_MSG_HW, "done\n");

	return 0; /* OK */
}
4599
/*
 * enable_blocks_attention - program the per-block attention (interrupt) masks.
 *
 * A mask value of 0 appears to unmask every attention bit for that block,
 * while non-zero values keep specific known-benign bits masked (see the
 * inline comments next to the non-zero writes).  The commented-out SEM/MISC
 * writes are intentionally disabled and kept for reference.
 */
static void enable_blocks_attention(struct bnx2x *bp)
{
	REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
	/* E2 keeps bit 6 (0x40) of PXP INT_MASK_1 masked; older chips
	 * unmask everything */
	if (CHIP_IS_E2(bp))
		REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0x40);
	else
		REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
	REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
	REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
	/*
	 * mask read length error interrupts in brb for parser
	 * (parsing unit and 'checksum and crc' unit)
	 * these errors are legal (PU reads fixed length and CAC can cause
	 * read length error on truncated packets)
	 */
	REG_WR(bp, BRB1_REG_BRB1_INT_MASK, 0xFC00);
	REG_WR(bp, QM_REG_QM_INT_MASK, 0);
	REG_WR(bp, TM_REG_TM_INT_MASK, 0);
	REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
	REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0);
	REG_WR(bp, XCM_REG_XCM_INT_MASK, 0);
/*	REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */
/*	REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */
	REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0);
	REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0);
	REG_WR(bp, UCM_REG_UCM_INT_MASK, 0);
/*	REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */
/*	REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */
	REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
	REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0);
	REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0);
	REG_WR(bp, CCM_REG_CCM_INT_MASK, 0);
/*	REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */
/*	REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */

	/* PXP2 mask depends on the platform: FPGA and E2 keep extra PGL
	 * bits masked (values per HW spec — NOTE(review): taken as-is) */
	if (CHIP_REV_IS_FPGA(bp))
		REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x580000);
	else if (CHIP_IS_E2(bp))
		REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0,
			(PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_OF
				| PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_AFT
				| PXP2_PXP2_INT_MASK_0_REG_PGL_PCIE_ATTN
				| PXP2_PXP2_INT_MASK_0_REG_PGL_READ_BLOCKED
				| PXP2_PXP2_INT_MASK_0_REG_PGL_WRITE_BLOCKED));
	else
		REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x480000);
	REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
	REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0);
	REG_WR(bp, TCM_REG_TCM_INT_MASK, 0);
/*	REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */
/*	REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0); */
	REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
	REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
/*	REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
	REG_WR(bp, PBF_REG_PBF_INT_MASK, 0X18);		/* bit 3,4 masked */
}
4656
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00004657static const struct {
4658 u32 addr;
4659 u32 mask;
4660} bnx2x_parity_mask[] = {
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00004661 {PXP_REG_PXP_PRTY_MASK, 0x3ffffff},
4662 {PXP2_REG_PXP2_PRTY_MASK_0, 0xffffffff},
4663 {PXP2_REG_PXP2_PRTY_MASK_1, 0x7f},
4664 {HC_REG_HC_PRTY_MASK, 0x7},
4665 {MISC_REG_MISC_PRTY_MASK, 0x1},
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00004666 {QM_REG_QM_PRTY_MASK, 0x0},
4667 {DORQ_REG_DORQ_PRTY_MASK, 0x0},
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00004668 {GRCBASE_UPB + PB_REG_PB_PRTY_MASK, 0x0},
4669 {GRCBASE_XPB + PB_REG_PB_PRTY_MASK, 0x0},
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00004670 {SRC_REG_SRC_PRTY_MASK, 0x4}, /* bit 2 */
4671 {CDU_REG_CDU_PRTY_MASK, 0x0},
4672 {CFC_REG_CFC_PRTY_MASK, 0x0},
4673 {DBG_REG_DBG_PRTY_MASK, 0x0},
4674 {DMAE_REG_DMAE_PRTY_MASK, 0x0},
4675 {BRB1_REG_BRB1_PRTY_MASK, 0x0},
4676 {PRS_REG_PRS_PRTY_MASK, (1<<6)},/* bit 6 */
4677 {TSDM_REG_TSDM_PRTY_MASK, 0x18}, /* bit 3,4 */
4678 {CSDM_REG_CSDM_PRTY_MASK, 0x8}, /* bit 3 */
4679 {USDM_REG_USDM_PRTY_MASK, 0x38}, /* bit 3,4,5 */
4680 {XSDM_REG_XSDM_PRTY_MASK, 0x8}, /* bit 3 */
4681 {TSEM_REG_TSEM_PRTY_MASK_0, 0x0},
4682 {TSEM_REG_TSEM_PRTY_MASK_1, 0x0},
4683 {USEM_REG_USEM_PRTY_MASK_0, 0x0},
4684 {USEM_REG_USEM_PRTY_MASK_1, 0x0},
4685 {CSEM_REG_CSEM_PRTY_MASK_0, 0x0},
4686 {CSEM_REG_CSEM_PRTY_MASK_1, 0x0},
4687 {XSEM_REG_XSEM_PRTY_MASK_0, 0x0},
4688 {XSEM_REG_XSEM_PRTY_MASK_1, 0x0}
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00004689};
4690
4691static void enable_blocks_parity(struct bnx2x *bp)
4692{
Nikitas Angelinascbd9da72010-09-08 11:20:37 +00004693 int i;
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00004694
Nikitas Angelinascbd9da72010-09-08 11:20:37 +00004695 for (i = 0; i < ARRAY_SIZE(bnx2x_parity_mask); i++)
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00004696 REG_WR(bp, bnx2x_parity_mask[i].addr,
4697 bnx2x_parity_mask[i].mask);
4698}
4699
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004700
Eilon Greenstein81f75bb2009-01-22 03:37:31 +00004701static void bnx2x_reset_common(struct bnx2x *bp)
4702{
4703 /* reset_common */
4704 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
4705 0xd3ffff7f);
4706 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 0x1403);
4707}
4708
Eilon Greenstein573f2032009-08-12 08:24:14 +00004709static void bnx2x_init_pxp(struct bnx2x *bp)
4710{
4711 u16 devctl;
4712 int r_order, w_order;
4713
4714 pci_read_config_word(bp->pdev,
4715 bp->pcie_cap + PCI_EXP_DEVCTL, &devctl);
4716 DP(NETIF_MSG_HW, "read 0x%x from devctl\n", devctl);
4717 w_order = ((devctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5);
4718 if (bp->mrrs == -1)
4719 r_order = ((devctl & PCI_EXP_DEVCTL_READRQ) >> 12);
4720 else {
4721 DP(NETIF_MSG_HW, "force read order to %d\n", bp->mrrs);
4722 r_order = bp->mrrs;
4723 }
4724
4725 bnx2x_init_pxp_arb(bp, r_order, w_order);
4726}
Eilon Greensteinfd4ef402009-07-21 05:47:27 +00004727
4728static void bnx2x_setup_fan_failure_detection(struct bnx2x *bp)
4729{
Vladislav Zolotarov2145a922010-04-19 01:13:49 +00004730 int is_required;
Eilon Greensteinfd4ef402009-07-21 05:47:27 +00004731 u32 val;
Vladislav Zolotarov2145a922010-04-19 01:13:49 +00004732 int port;
Eilon Greensteinfd4ef402009-07-21 05:47:27 +00004733
Vladislav Zolotarov2145a922010-04-19 01:13:49 +00004734 if (BP_NOMCP(bp))
4735 return;
4736
4737 is_required = 0;
Eilon Greensteinfd4ef402009-07-21 05:47:27 +00004738 val = SHMEM_RD(bp, dev_info.shared_hw_config.config2) &
4739 SHARED_HW_CFG_FAN_FAILURE_MASK;
4740
4741 if (val == SHARED_HW_CFG_FAN_FAILURE_ENABLED)
4742 is_required = 1;
4743
4744 /*
4745 * The fan failure mechanism is usually related to the PHY type since
4746 * the power consumption of the board is affected by the PHY. Currently,
4747 * fan is required for most designs with SFX7101, BCM8727 and BCM8481.
4748 */
4749 else if (val == SHARED_HW_CFG_FAN_FAILURE_PHY_TYPE)
4750 for (port = PORT_0; port < PORT_MAX; port++) {
Eilon Greensteinfd4ef402009-07-21 05:47:27 +00004751 is_required |=
Yaniv Rosnerd90d96b2010-09-07 11:41:04 +00004752 bnx2x_fan_failure_det_req(
4753 bp,
4754 bp->common.shmem_base,
Yaniv Rosnera22f0782010-09-07 11:41:20 +00004755 bp->common.shmem2_base,
Yaniv Rosnerd90d96b2010-09-07 11:41:04 +00004756 port);
Eilon Greensteinfd4ef402009-07-21 05:47:27 +00004757 }
4758
4759 DP(NETIF_MSG_HW, "fan detection setting: %d\n", is_required);
4760
4761 if (is_required == 0)
4762 return;
4763
4764 /* Fan failure is indicated by SPIO 5 */
4765 bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_5,
4766 MISC_REGISTERS_SPIO_INPUT_HI_Z);
4767
4768 /* set to active low mode */
4769 val = REG_RD(bp, MISC_REG_SPIO_INT);
4770 val |= ((1 << MISC_REGISTERS_SPIO_5) <<
Vladislav Zolotarovcdaa7cb2010-04-19 01:13:57 +00004771 MISC_REGISTERS_SPIO_INT_OLD_SET_POS);
Eilon Greensteinfd4ef402009-07-21 05:47:27 +00004772 REG_WR(bp, MISC_REG_SPIO_INT, val);
4773
4774 /* enable interrupt to signal the IGU */
4775 val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
4776 val |= (1 << MISC_REGISTERS_SPIO_5);
4777 REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
4778}
4779
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00004780static void bnx2x_pretend_func(struct bnx2x *bp, u8 pretend_func_num)
4781{
4782 u32 offset = 0;
4783
4784 if (CHIP_IS_E1(bp))
4785 return;
4786 if (CHIP_IS_E1H(bp) && (pretend_func_num >= E1H_FUNC_MAX))
4787 return;
4788
4789 switch (BP_ABS_FUNC(bp)) {
4790 case 0:
4791 offset = PXP2_REG_PGL_PRETEND_FUNC_F0;
4792 break;
4793 case 1:
4794 offset = PXP2_REG_PGL_PRETEND_FUNC_F1;
4795 break;
4796 case 2:
4797 offset = PXP2_REG_PGL_PRETEND_FUNC_F2;
4798 break;
4799 case 3:
4800 offset = PXP2_REG_PGL_PRETEND_FUNC_F3;
4801 break;
4802 case 4:
4803 offset = PXP2_REG_PGL_PRETEND_FUNC_F4;
4804 break;
4805 case 5:
4806 offset = PXP2_REG_PGL_PRETEND_FUNC_F5;
4807 break;
4808 case 6:
4809 offset = PXP2_REG_PGL_PRETEND_FUNC_F6;
4810 break;
4811 case 7:
4812 offset = PXP2_REG_PGL_PRETEND_FUNC_F7;
4813 break;
4814 default:
4815 return;
4816 }
4817
4818 REG_WR(bp, offset, pretend_func_num);
4819 REG_RD(bp, offset);
4820 DP(NETIF_MSG_HW, "Pretending to func %d\n", pretend_func_num);
4821}
4822
4823static void bnx2x_pf_disable(struct bnx2x *bp)
4824{
4825 u32 val = REG_RD(bp, IGU_REG_PF_CONFIGURATION);
4826 val &= ~IGU_PF_CONF_FUNC_EN;
4827
4828 REG_WR(bp, IGU_REG_PF_CONFIGURATION, val);
4829 REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 0);
4830 REG_WR(bp, CFC_REG_WEAK_ENABLE_PF, 0);
4831}
4832
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004833static int bnx2x_init_hw_common(struct bnx2x *bp, u32 load_code)
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004834{
4835 u32 val, i;
4836
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00004837 DP(BNX2X_MSG_MCP, "starting common init func %d\n", BP_ABS_FUNC(bp));
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004838
Eilon Greenstein81f75bb2009-01-22 03:37:31 +00004839 bnx2x_reset_common(bp);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004840 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
4841 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 0xfffc);
4842
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07004843 bnx2x_init_block(bp, MISC_BLOCK, COMMON_STAGE);
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00004844 if (!CHIP_IS_E1(bp))
Dmitry Kravkovfb3bff12010-10-06 03:26:40 +00004845 REG_WR(bp, MISC_REG_E1HMF_MODE, IS_MF(bp));
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004846
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00004847 if (CHIP_IS_E2(bp)) {
4848 u8 fid;
4849
4850 /**
4851 * 4-port mode or 2-port mode we need to turn of master-enable
4852 * for everyone, after that, turn it back on for self.
4853 * so, we disregard multi-function or not, and always disable
4854 * for all functions on the given path, this means 0,2,4,6 for
4855 * path 0 and 1,3,5,7 for path 1
4856 */
4857 for (fid = BP_PATH(bp); fid < E2_FUNC_MAX*2; fid += 2) {
4858 if (fid == BP_ABS_FUNC(bp)) {
4859 REG_WR(bp,
4860 PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER,
4861 1);
4862 continue;
4863 }
4864
4865 bnx2x_pretend_func(bp, fid);
4866 /* clear pf enable */
4867 bnx2x_pf_disable(bp);
4868 bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
4869 }
4870 }
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004871
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07004872 bnx2x_init_block(bp, PXP_BLOCK, COMMON_STAGE);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004873 if (CHIP_IS_E1(bp)) {
4874 /* enable HW interrupt from PXP on USDM overflow
4875 bit 16 on INT_MASK_0 */
4876 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004877 }
4878
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07004879 bnx2x_init_block(bp, PXP2_BLOCK, COMMON_STAGE);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004880 bnx2x_init_pxp(bp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004881
4882#ifdef __BIG_ENDIAN
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004883 REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1);
4884 REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1);
4885 REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
4886 REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
4887 REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
Eilon Greenstein8badd272009-02-12 08:36:15 +00004888 /* make sure this value is 0 */
4889 REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 0);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004890
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004891/* REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */
4892 REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1);
4893 REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1);
4894 REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1);
4895 REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004896#endif
4897
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004898 bnx2x_ilt_init_page_size(bp, INITOP_SET);
4899
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004900 if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp))
4901 REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004902
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004903 /* let the HW do it's magic ... */
4904 msleep(100);
4905 /* finish PXP init */
4906 val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE);
4907 if (val != 1) {
4908 BNX2X_ERR("PXP2 CFG failed\n");
4909 return -EBUSY;
4910 }
4911 val = REG_RD(bp, PXP2_REG_RD_INIT_DONE);
4912 if (val != 1) {
4913 BNX2X_ERR("PXP2 RD_INIT failed\n");
4914 return -EBUSY;
4915 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004916
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00004917 /* Timers bug workaround E2 only. We need to set the entire ILT to
4918 * have entries with value "0" and valid bit on.
4919 * This needs to be done by the first PF that is loaded in a path
4920 * (i.e. common phase)
4921 */
4922 if (CHIP_IS_E2(bp)) {
4923 struct ilt_client_info ilt_cli;
4924 struct bnx2x_ilt ilt;
4925 memset(&ilt_cli, 0, sizeof(struct ilt_client_info));
4926 memset(&ilt, 0, sizeof(struct bnx2x_ilt));
4927
4928 /* initalize dummy TM client */
4929 ilt_cli.start = 0;
4930 ilt_cli.end = ILT_NUM_PAGE_ENTRIES - 1;
4931 ilt_cli.client_num = ILT_CLIENT_TM;
4932
4933 /* Step 1: set zeroes to all ilt page entries with valid bit on
4934 * Step 2: set the timers first/last ilt entry to point
4935 * to the entire range to prevent ILT range error for 3rd/4th
4936 * vnic (this code assumes existance of the vnic)
4937 *
4938 * both steps performed by call to bnx2x_ilt_client_init_op()
4939 * with dummy TM client
4940 *
4941 * we must use pretend since PXP2_REG_RQ_##blk##_FIRST_ILT
4942 * and his brother are split registers
4943 */
4944 bnx2x_pretend_func(bp, (BP_PATH(bp) + 6));
4945 bnx2x_ilt_client_init_op_ilt(bp, &ilt, &ilt_cli, INITOP_CLEAR);
4946 bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
4947
4948 REG_WR(bp, PXP2_REG_RQ_DRAM_ALIGN, BNX2X_PXP_DRAM_ALIGN);
4949 REG_WR(bp, PXP2_REG_RQ_DRAM_ALIGN_RD, BNX2X_PXP_DRAM_ALIGN);
4950 REG_WR(bp, PXP2_REG_RQ_DRAM_ALIGN_SEL, 1);
4951 }
4952
4953
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004954 REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
4955 REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004956
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00004957 if (CHIP_IS_E2(bp)) {
4958 int factor = CHIP_REV_IS_EMUL(bp) ? 1000 :
4959 (CHIP_REV_IS_FPGA(bp) ? 400 : 0);
4960 bnx2x_init_block(bp, PGLUE_B_BLOCK, COMMON_STAGE);
4961
4962 bnx2x_init_block(bp, ATC_BLOCK, COMMON_STAGE);
4963
4964 /* let the HW do it's magic ... */
4965 do {
4966 msleep(200);
4967 val = REG_RD(bp, ATC_REG_ATC_INIT_DONE);
4968 } while (factor-- && (val != 1));
4969
4970 if (val != 1) {
4971 BNX2X_ERR("ATC_INIT failed\n");
4972 return -EBUSY;
4973 }
4974 }
4975
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07004976 bnx2x_init_block(bp, DMAE_BLOCK, COMMON_STAGE);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004977
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004978 /* clean the DMAE memory */
4979 bp->dmae_ready = 1;
4980 bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004981
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07004982 bnx2x_init_block(bp, TCM_BLOCK, COMMON_STAGE);
4983 bnx2x_init_block(bp, UCM_BLOCK, COMMON_STAGE);
4984 bnx2x_init_block(bp, CCM_BLOCK, COMMON_STAGE);
4985 bnx2x_init_block(bp, XCM_BLOCK, COMMON_STAGE);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004986
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004987 bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3);
4988 bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3);
4989 bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3);
4990 bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);
4991
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07004992 bnx2x_init_block(bp, QM_BLOCK, COMMON_STAGE);
Michael Chan37b091b2009-10-10 13:46:55 +00004993
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00004994 if (CHIP_MODE_IS_4_PORT(bp))
4995 bnx2x_init_block(bp, QM_4PORT_BLOCK, COMMON_STAGE);
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00004996
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004997 /* QM queues pointers table */
4998 bnx2x_qm_init_ptr_table(bp, bp->qm_cid_count, INITOP_SET);
Michael Chan37b091b2009-10-10 13:46:55 +00004999
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005000 /* soft reset pulse */
5001 REG_WR(bp, QM_REG_SOFT_RESET, 1);
5002 REG_WR(bp, QM_REG_SOFT_RESET, 0);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005003
Michael Chan37b091b2009-10-10 13:46:55 +00005004#ifdef BCM_CNIC
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07005005 bnx2x_init_block(bp, TIMERS_BLOCK, COMMON_STAGE);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005006#endif
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005007
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07005008 bnx2x_init_block(bp, DQ_BLOCK, COMMON_STAGE);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00005009 REG_WR(bp, DORQ_REG_DPM_CID_OFST, BNX2X_DB_SHIFT);
5010
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005011 if (!CHIP_REV_IS_SLOW(bp)) {
5012 /* enable hw interrupt from doorbell Q */
5013 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
5014 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005015
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07005016 bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00005017 if (CHIP_MODE_IS_4_PORT(bp)) {
5018 REG_WR(bp, BRB1_REG_FULL_LB_XOFF_THRESHOLD, 248);
5019 REG_WR(bp, BRB1_REG_FULL_LB_XON_THRESHOLD, 328);
5020 }
5021
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07005022 bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
Eilon Greenstein26c8fa42009-01-14 21:29:55 -08005023 REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
Michael Chan37b091b2009-10-10 13:46:55 +00005024#ifndef BCM_CNIC
Eilon Greenstein3196a882008-08-13 15:58:49 -07005025 /* set NIC mode */
5026 REG_WR(bp, PRS_REG_NIC_MODE, 1);
Michael Chan37b091b2009-10-10 13:46:55 +00005027#endif
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00005028 if (!CHIP_IS_E1(bp))
Dmitry Kravkovfb3bff12010-10-06 03:26:40 +00005029 REG_WR(bp, PRS_REG_E1HOV_MODE, IS_MF(bp));
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00005030
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00005031 if (CHIP_IS_E2(bp)) {
5032 /* Bit-map indicating which L2 hdrs may appear after the
5033 basic Ethernet header */
5034 int has_ovlan = IS_MF(bp);
5035 REG_WR(bp, PRS_REG_HDRS_AFTER_BASIC, (has_ovlan ? 7 : 6));
5036 REG_WR(bp, PRS_REG_MUST_HAVE_HDRS, (has_ovlan ? 1 : 0));
5037 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005038
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07005039 bnx2x_init_block(bp, TSDM_BLOCK, COMMON_STAGE);
5040 bnx2x_init_block(bp, CSDM_BLOCK, COMMON_STAGE);
5041 bnx2x_init_block(bp, USDM_BLOCK, COMMON_STAGE);
5042 bnx2x_init_block(bp, XSDM_BLOCK, COMMON_STAGE);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005043
Eilon Greensteinca003922009-08-12 22:53:28 -07005044 bnx2x_init_fill(bp, TSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
5045 bnx2x_init_fill(bp, USEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
5046 bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
5047 bnx2x_init_fill(bp, XSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005048
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07005049 bnx2x_init_block(bp, TSEM_BLOCK, COMMON_STAGE);
5050 bnx2x_init_block(bp, USEM_BLOCK, COMMON_STAGE);
5051 bnx2x_init_block(bp, CSEM_BLOCK, COMMON_STAGE);
5052 bnx2x_init_block(bp, XSEM_BLOCK, COMMON_STAGE);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005053
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00005054 if (CHIP_MODE_IS_4_PORT(bp))
5055 bnx2x_init_block(bp, XSEM_4PORT_BLOCK, COMMON_STAGE);
5056
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005057 /* sync semi rtc */
5058 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
5059 0x80000000);
5060 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
5061 0x80000000);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005062
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07005063 bnx2x_init_block(bp, UPB_BLOCK, COMMON_STAGE);
5064 bnx2x_init_block(bp, XPB_BLOCK, COMMON_STAGE);
5065 bnx2x_init_block(bp, PBF_BLOCK, COMMON_STAGE);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005066
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00005067 if (CHIP_IS_E2(bp)) {
5068 int has_ovlan = IS_MF(bp);
5069 REG_WR(bp, PBF_REG_HDRS_AFTER_BASIC, (has_ovlan ? 7 : 6));
5070 REG_WR(bp, PBF_REG_MUST_HAVE_HDRS, (has_ovlan ? 1 : 0));
5071 }
5072
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005073 REG_WR(bp, SRC_REG_SOFT_RST, 1);
Tom Herbertc68ed252010-04-23 00:10:52 -07005074 for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4)
5075 REG_WR(bp, i, random32());
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00005076
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07005077 bnx2x_init_block(bp, SRCH_BLOCK, COMMON_STAGE);
Michael Chan37b091b2009-10-10 13:46:55 +00005078#ifdef BCM_CNIC
5079 REG_WR(bp, SRC_REG_KEYSEARCH_0, 0x63285672);
5080 REG_WR(bp, SRC_REG_KEYSEARCH_1, 0x24b8f2cc);
5081 REG_WR(bp, SRC_REG_KEYSEARCH_2, 0x223aef9b);
5082 REG_WR(bp, SRC_REG_KEYSEARCH_3, 0x26001e3a);
5083 REG_WR(bp, SRC_REG_KEYSEARCH_4, 0x7ae91116);
5084 REG_WR(bp, SRC_REG_KEYSEARCH_5, 0x5ce5230b);
5085 REG_WR(bp, SRC_REG_KEYSEARCH_6, 0x298d8adf);
5086 REG_WR(bp, SRC_REG_KEYSEARCH_7, 0x6eb0ff09);
5087 REG_WR(bp, SRC_REG_KEYSEARCH_8, 0x1830f82f);
5088 REG_WR(bp, SRC_REG_KEYSEARCH_9, 0x01e46be7);
5089#endif
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005090 REG_WR(bp, SRC_REG_SOFT_RST, 0);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005091
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005092 if (sizeof(union cdu_context) != 1024)
5093 /* we currently assume that a context is 1024 bytes */
Vladislav Zolotarovcdaa7cb2010-04-19 01:13:57 +00005094 dev_alert(&bp->pdev->dev, "please adjust the size "
5095 "of cdu_context(%ld)\n",
Joe Perches7995c642010-02-17 15:01:52 +00005096 (long)sizeof(union cdu_context));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005097
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07005098 bnx2x_init_block(bp, CDU_BLOCK, COMMON_STAGE);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005099 val = (4 << 24) + (0 << 12) + 1024;
5100 REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005101
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07005102 bnx2x_init_block(bp, CFC_BLOCK, COMMON_STAGE);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005103 REG_WR(bp, CFC_REG_INIT_REG, 0x7FF);
Eilon Greenstein8d9c5f32009-02-15 23:24:08 -08005104 /* enable context validation interrupt from CFC */
5105 REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
5106
5107 /* set the thresholds to prevent CFC/CDU race */
5108 REG_WR(bp, CFC_REG_DEBUG0, 0x20020000);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005109
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07005110 bnx2x_init_block(bp, HC_BLOCK, COMMON_STAGE);
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00005111
5112 if (CHIP_IS_E2(bp) && BP_NOMCP(bp))
5113 REG_WR(bp, IGU_REG_RESET_MEMORIES, 0x36);
5114
5115 bnx2x_init_block(bp, IGU_BLOCK, COMMON_STAGE);
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07005116 bnx2x_init_block(bp, MISC_AEU_BLOCK, COMMON_STAGE);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005117
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07005118 bnx2x_init_block(bp, PXPCS_BLOCK, COMMON_STAGE);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005119 /* Reset PCIE errors for debug */
5120 REG_WR(bp, 0x2814, 0xffffffff);
5121 REG_WR(bp, 0x3820, 0xffffffff);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005122
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00005123 if (CHIP_IS_E2(bp)) {
5124 REG_WR(bp, PCICFG_OFFSET + PXPCS_TL_CONTROL_5,
5125 (PXPCS_TL_CONTROL_5_ERR_UNSPPORT1 |
5126 PXPCS_TL_CONTROL_5_ERR_UNSPPORT));
5127 REG_WR(bp, PCICFG_OFFSET + PXPCS_TL_FUNC345_STAT,
5128 (PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT4 |
5129 PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT3 |
5130 PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT2));
5131 REG_WR(bp, PCICFG_OFFSET + PXPCS_TL_FUNC678_STAT,
5132 (PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT7 |
5133 PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT6 |
5134 PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT5));
5135 }
5136
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07005137 bnx2x_init_block(bp, EMAC0_BLOCK, COMMON_STAGE);
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07005138 bnx2x_init_block(bp, EMAC1_BLOCK, COMMON_STAGE);
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07005139 bnx2x_init_block(bp, DBU_BLOCK, COMMON_STAGE);
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07005140 bnx2x_init_block(bp, DBG_BLOCK, COMMON_STAGE);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005141
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07005142 bnx2x_init_block(bp, NIG_BLOCK, COMMON_STAGE);
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00005143 if (!CHIP_IS_E1(bp)) {
Dmitry Kravkovfb3bff12010-10-06 03:26:40 +00005144 REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_MF(bp));
5145 REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_MF(bp));
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005146 }
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00005147 if (CHIP_IS_E2(bp)) {
5148 /* Bit-map indicating which L2 hdrs may appear after the
5149 basic Ethernet header */
5150 REG_WR(bp, NIG_REG_P0_HDRS_AFTER_BASIC, (IS_MF(bp) ? 7 : 6));
5151 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005152
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005153 if (CHIP_REV_IS_SLOW(bp))
5154 msleep(200);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005155
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005156 /* finish CFC init */
5157 val = reg_poll(bp, CFC_REG_LL_INIT_DONE, 1, 100, 10);
5158 if (val != 1) {
5159 BNX2X_ERR("CFC LL_INIT failed\n");
5160 return -EBUSY;
5161 }
5162 val = reg_poll(bp, CFC_REG_AC_INIT_DONE, 1, 100, 10);
5163 if (val != 1) {
5164 BNX2X_ERR("CFC AC_INIT failed\n");
5165 return -EBUSY;
5166 }
5167 val = reg_poll(bp, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
5168 if (val != 1) {
5169 BNX2X_ERR("CFC CAM_INIT failed\n");
5170 return -EBUSY;
5171 }
5172 REG_WR(bp, CFC_REG_DEBUG0, 0);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005173
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00005174 if (CHIP_IS_E1(bp)) {
5175 /* read NIG statistic
5176 to see if this is our first up since powerup */
5177 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5178 val = *bnx2x_sp(bp, wb_data[0]);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005179
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00005180 /* do internal memory self test */
5181 if ((val == 0) && bnx2x_int_mem_test(bp)) {
5182 BNX2X_ERR("internal mem self test failed\n");
5183 return -EBUSY;
5184 }
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005185 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005186
Yaniv Rosnerd90d96b2010-09-07 11:41:04 +00005187 bp->port.need_hw_lock = bnx2x_hw_lock_required(bp,
Yaniv Rosnera22f0782010-09-07 11:41:20 +00005188 bp->common.shmem_base,
5189 bp->common.shmem2_base);
Eliezer Tamirf1410642008-02-28 11:51:50 -08005190
Eilon Greensteinfd4ef402009-07-21 05:47:27 +00005191 bnx2x_setup_fan_failure_detection(bp);
5192
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005193 /* clear PXP2 attentions */
5194 REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005195
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005196 enable_blocks_attention(bp);
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00005197 if (CHIP_PARITY_SUPPORTED(bp))
5198 enable_blocks_parity(bp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005199
Yaniv Rosner6bbca912008-08-13 15:57:28 -07005200 if (!BP_NOMCP(bp)) {
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00005201 /* In E2 2-PORT mode, same ext phy is used for the two paths */
5202 if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) ||
5203 CHIP_IS_E1x(bp)) {
5204 u32 shmem_base[2], shmem2_base[2];
5205 shmem_base[0] = bp->common.shmem_base;
5206 shmem2_base[0] = bp->common.shmem2_base;
5207 if (CHIP_IS_E2(bp)) {
5208 shmem_base[1] =
5209 SHMEM2_RD(bp, other_shmem_base_addr);
5210 shmem2_base[1] =
5211 SHMEM2_RD(bp, other_shmem2_base_addr);
5212 }
5213 bnx2x_acquire_phy_lock(bp);
5214 bnx2x_common_init_phy(bp, shmem_base, shmem2_base,
5215 bp->common.chip_id);
5216 bnx2x_release_phy_lock(bp);
5217 }
Yaniv Rosner6bbca912008-08-13 15:57:28 -07005218 } else
5219 BNX2X_ERR("Bootcode is missing - can not initialize link\n");
5220
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005221 return 0;
5222}
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005223
/**
 * bnx2x_init_hw_port - run the per-port stage of hardware initialization.
 * @bp:	driver handle
 *
 * Initializes every HW block at PORT0_STAGE/PORT1_STAGE for this port:
 * PXP/CMs, QM connection count, BRB pause thresholds, the SDM/SEM blocks,
 * PBF, CDU/CFC, HC/IGU, the AEU attention masks, NIG classification and
 * finally MCP/DMAE.  The order of block inits and register writes below is
 * deliberate; do not reorder.
 *
 * Returns 0 (no failure paths in this stage).
 */
static int bnx2x_init_hw_port(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int init_stage = port ? PORT1_STAGE : PORT0_STAGE;
	u32 low, high;
	u32 val;

	DP(BNX2X_MSG_MCP, "starting port init port %d\n", port);

	/* Mask this port's NIG interrupts for the duration of the init */
	REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);

	bnx2x_init_block(bp, PXP_BLOCK, init_stage);
	bnx2x_init_block(bp, PXP2_BLOCK, init_stage);

	/* Timers bug workaround: disables the pf_master bit in pglue at
	 * common phase, we need to enable it here before any dmae access are
	 * attempted. Therefore we manually added the enable-master to the
	 * port phase (it also happens in the function phase)
	 */
	if (CHIP_IS_E2(bp))
		REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1);

	bnx2x_init_block(bp, TCM_BLOCK, init_stage);
	bnx2x_init_block(bp, UCM_BLOCK, init_stage);
	bnx2x_init_block(bp, CCM_BLOCK, init_stage);
	bnx2x_init_block(bp, XCM_BLOCK, init_stage);

	/* QM cid (connection) count */
	bnx2x_qm_init_cid_count(bp, bp->qm_cid_count, INITOP_SET);

#ifdef BCM_CNIC
	/* Timers block is only needed when the CNIC (iSCSI/FCoE) offload
	 * support is compiled in */
	bnx2x_init_block(bp, TIMERS_BLOCK, init_stage);
	REG_WR(bp, TM_REG_LIN0_SCAN_TIME + port*4, 20);
	REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + port*4, 31);
#endif

	bnx2x_init_block(bp, DQ_BLOCK, init_stage);

	if (CHIP_MODE_IS_4_PORT(bp))
		bnx2x_init_block(bp, QM_4PORT_BLOCK, init_stage);

	if (CHIP_IS_E1(bp) || CHIP_IS_E1H(bp)) {
		/* Program BRB1 pause (XOFF/XON) thresholds, in 256-byte
		 * units, scaled by port count, MF mode and MTU */
		bnx2x_init_block(bp, BRB1_BLOCK, init_stage);
		if (CHIP_REV_IS_SLOW(bp) && CHIP_IS_E1(bp)) {
			/* no pause for emulation and FPGA */
			low = 0;
			high = 513;
		} else {
			if (IS_MF(bp))
				low = ((bp->flags & ONE_PORT_FLAG) ? 160 : 246);
			else if (bp->dev->mtu > 4096) {
				if (bp->flags & ONE_PORT_FLAG)
					low = 160;
				else {
					val = bp->dev->mtu;
					/* (24*1024 + val*4)/256 */
					low = 96 + (val/64) +
							((val % 64) ? 1 : 0);
				}
			} else
				low = ((bp->flags & ONE_PORT_FLAG) ? 80 : 160);
			high = low + 56; /* 14*1024/256 */
		}
		REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0 + port*4, low);
		REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0 + port*4, high);
	}

	if (CHIP_MODE_IS_4_PORT(bp)) {
		/* 4-port mode uses fixed BRB thresholds and a per-port
		 * guaranteed MAC allocation */
		REG_WR(bp, BRB1_REG_PAUSE_0_XOFF_THRESHOLD_0 + port*8, 248);
		REG_WR(bp, BRB1_REG_PAUSE_0_XON_THRESHOLD_0 + port*8, 328);
		REG_WR(bp, (BP_PORT(bp) ? BRB1_REG_MAC_GUARANTIED_1 :
				   BRB1_REG_MAC_GUARANTIED_0), 40);
	}

	bnx2x_init_block(bp, PRS_BLOCK, init_stage);

	bnx2x_init_block(bp, TSDM_BLOCK, init_stage);
	bnx2x_init_block(bp, CSDM_BLOCK, init_stage);
	bnx2x_init_block(bp, USDM_BLOCK, init_stage);
	bnx2x_init_block(bp, XSDM_BLOCK, init_stage);

	bnx2x_init_block(bp, TSEM_BLOCK, init_stage);
	bnx2x_init_block(bp, USEM_BLOCK, init_stage);
	bnx2x_init_block(bp, CSEM_BLOCK, init_stage);
	bnx2x_init_block(bp, XSEM_BLOCK, init_stage);
	if (CHIP_MODE_IS_4_PORT(bp))
		bnx2x_init_block(bp, XSEM_4PORT_BLOCK, init_stage);

	bnx2x_init_block(bp, UPB_BLOCK, init_stage);
	bnx2x_init_block(bp, XPB_BLOCK, init_stage);

	bnx2x_init_block(bp, PBF_BLOCK, init_stage);

	if (!CHIP_IS_E2(bp)) {
		/* configure PBF to work without PAUSE mtu 9000 */
		REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);

		/* update threshold */
		REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
		/* update init credit */
		REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);

		/* probe changes */
		REG_WR(bp, PBF_REG_INIT_P0 + port*4, 1);
		udelay(50);
		REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0);
	}

#ifdef BCM_CNIC
	/* searcher block is only used by the CNIC offload path */
	bnx2x_init_block(bp, SRCH_BLOCK, init_stage);
#endif
	bnx2x_init_block(bp, CDU_BLOCK, init_stage);
	bnx2x_init_block(bp, CFC_BLOCK, init_stage);

	if (CHIP_IS_E1(bp)) {
		/* E1: clear HC leading/trailing edge latches before HC init */
		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
	}
	bnx2x_init_block(bp, HC_BLOCK, init_stage);

	bnx2x_init_block(bp, IGU_BLOCK, init_stage);

	bnx2x_init_block(bp, MISC_AEU_BLOCK, init_stage);
	/* init aeu_mask_attn_func_0/1:
	 * - SF mode: bits 3-7 are masked. only bits 0-2 are in use
	 * - MF mode: bit 3 is masked. bits 0-2 are in use as in SF
	 * bits 4-7 are used for "per vn group attention" */
	REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4,
	       (IS_MF(bp) ? 0xF7 : 0x7));

	bnx2x_init_block(bp, PXPCS_BLOCK, init_stage);
	bnx2x_init_block(bp, EMAC0_BLOCK, init_stage);
	bnx2x_init_block(bp, EMAC1_BLOCK, init_stage);
	bnx2x_init_block(bp, DBU_BLOCK, init_stage);
	bnx2x_init_block(bp, DBG_BLOCK, init_stage);

	bnx2x_init_block(bp, NIG_BLOCK, init_stage);

	REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);

	if (!CHIP_IS_E1(bp)) {
		/* 0x2 disable mf_ov, 0x1 enable */
		REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
		       (IS_MF(bp) ? 0x1 : 0x2));

		if (CHIP_IS_E2(bp)) {
			/* NIG classification type: 0 - none,
			 * 1 - switch-dependent MF, 2 - switch-independent MF */
			val = 0;
			switch (bp->mf_mode) {
			case MULTI_FUNCTION_SD:
				val = 1;
				break;
			case MULTI_FUNCTION_SI:
				val = 2;
				break;
			}

			REG_WR(bp, (BP_PORT(bp) ? NIG_REG_LLH1_CLS_TYPE :
						  NIG_REG_LLH0_CLS_TYPE), val);
		}
		{
			/* disable link-level flow control generation and
			 * output, enable plain PAUSE */
			REG_WR(bp, NIG_REG_LLFC_ENABLE_0 + port*4, 0);
			REG_WR(bp, NIG_REG_LLFC_OUT_EN_0 + port*4, 0);
			REG_WR(bp, NIG_REG_PAUSE_ENABLE_0 + port*4, 1);
		}
	}

	bnx2x_init_block(bp, MCP_BLOCK, init_stage);
	bnx2x_init_block(bp, DMAE_BLOCK, init_stage);
	bp->port.need_hw_lock = bnx2x_hw_lock_required(bp,
						       bp->common.shmem_base,
						       bp->common.shmem2_base);
	if (bnx2x_fan_failure_det_req(bp, bp->common.shmem_base,
				      bp->common.shmem2_base, port)) {
		/* Board requires fan-failure detection: route SPIO5 into the
		 * AEU attention inputs for this port's function */
		u32 reg_addr = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
				       MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
		val = REG_RD(bp, reg_addr);
		val |= AEU_INPUTS_ATTN_BITS_SPIO5;
		REG_WR(bp, reg_addr, val);
	}
	bnx2x__link_reset(bp);

	return 0;
}
5407
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005408static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr)
5409{
5410 int reg;
5411
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00005412 if (CHIP_IS_E1(bp))
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005413 reg = PXP2_REG_RQ_ONCHIP_AT + index*8;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00005414 else
5415 reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005416
5417 bnx2x_wb_wr(bp, reg, ONCHIP_ADDR1(addr), ONCHIP_ADDR2(addr));
5418}
5419
/* Clear a single IGU status block on behalf of the PF (thin wrapper around
 * the generic helper with the is_pf flag fixed to true). */
static inline void bnx2x_igu_clear_sb(struct bnx2x *bp, u8 idu_sb_id)
{
	bnx2x_igu_clear_sb_gen(bp, idu_sb_id, true /*PF*/);
}
5424
5425static inline void bnx2x_clear_func_ilt(struct bnx2x *bp, u32 func)
5426{
5427 u32 i, base = FUNC_ILT_BASE(func);
5428 for (i = base; i < base + ILT_PER_FUNC; i++)
5429 bnx2x_ilt_wr(bp, i, 0);
5430}
5431
/**
 * bnx2x_init_hw_func - run the per-function (PF) stage of hardware init.
 * @bp:	driver handle
 *
 * Programs the CDU ILT lines for this function's context memory, runs the
 * FUNC0_STAGE init for every HW block, enables the function in the IGU/QM/
 * CFC on E2, sets up HC or IGU interrupt state (including the IGU
 * producer/consumer memory layout on E2), and finally probes the PHY.
 * Contains an ordering-sensitive timers-bug workaround (20ms sleep after
 * ILT init on E2); do not reorder.
 *
 * Returns 0 (no failure paths in this stage).
 */
static int bnx2x_init_hw_func(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	struct bnx2x_ilt *ilt = BP_ILT(bp);
	u16 cdu_ilt_start;
	u32 addr, val;
	int i;

	DP(BNX2X_MSG_MCP, "starting func init func %d\n", func);

	/* set MSI reconfigure capability */
	if (bp->common.int_block == INT_BLOCK_HC) {
		addr = (port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0);
		val = REG_RD(bp, addr);
		val |= HC_CONFIG_0_REG_MSI_ATTN_EN_0;
		REG_WR(bp, addr, val);
	}

	/* Point this function's CDU ILT lines at the pre-allocated context
	 * memory (one ILT page per ILT_PAGE_CIDS connections) */
	ilt = BP_ILT(bp);
	cdu_ilt_start = ilt->clients[ILT_CLIENT_CDU].start;

	for (i = 0; i < L2_ILT_LINES(bp); i++) {
		ilt->lines[cdu_ilt_start + i].page =
			bp->context.vcxt + (ILT_PAGE_CIDS * i);
		ilt->lines[cdu_ilt_start + i].page_mapping =
			bp->context.cxt_mapping + (CDU_ILT_PAGE_SZ * i);
		/* cdu ilt pages are allocated manually so there's no need to
		set the size */
	}
	bnx2x_ilt_init_op(bp, INITOP_SET);

#ifdef BCM_CNIC
	/* program the searcher T2 table used by the CNIC offload path */
	bnx2x_src_init_t2(bp, bp->t2, bp->t2_mapping, SRC_CONN_NUM);

	/* T1 hash bits value determines the T1 number of entries */
	REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + port*4, SRC_HASH_BITS);
#endif

#ifndef BCM_CNIC
	/* set NIC mode */
	REG_WR(bp, PRS_REG_NIC_MODE, 1);
#endif  /* BCM_CNIC */

	if (CHIP_IS_E2(bp)) {
		u32 pf_conf = IGU_PF_CONF_FUNC_EN;

		/* Turn on a single ISR mode in IGU if driver is going to use
		 * INT#x or MSI
		 */
		if (!(bp->flags & USING_MSIX_FLAG))
			pf_conf |= IGU_PF_CONF_SINGLE_ISR_EN;
		/*
		 * Timers workaround bug: function init part.
		 * Need to wait 20msec after initializing ILT,
		 * needed to make sure there are no requests in
		 * one of the PXP internal queues with "old" ILT addresses
		 */
		msleep(20);
		/*
		 * Master enable - Due to WB DMAE writes performed before this
		 * register is re-initialized as part of the regular function
		 * init
		 */
		REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1);
		/* Enable the function in IGU */
		REG_WR(bp, IGU_REG_PF_CONFIGURATION, pf_conf);
	}

	/* DMAE may now be used for the remaining block inits */
	bp->dmae_ready = 1;

	bnx2x_init_block(bp, PGLUE_B_BLOCK, FUNC0_STAGE + func);

	if (CHIP_IS_E2(bp))
		REG_WR(bp, PGLUE_B_REG_WAS_ERROR_PF_7_0_CLR, func);

	bnx2x_init_block(bp, MISC_BLOCK, FUNC0_STAGE + func);
	bnx2x_init_block(bp, TCM_BLOCK, FUNC0_STAGE + func);
	bnx2x_init_block(bp, UCM_BLOCK, FUNC0_STAGE + func);
	bnx2x_init_block(bp, CCM_BLOCK, FUNC0_STAGE + func);
	bnx2x_init_block(bp, XCM_BLOCK, FUNC0_STAGE + func);
	bnx2x_init_block(bp, TSEM_BLOCK, FUNC0_STAGE + func);
	bnx2x_init_block(bp, USEM_BLOCK, FUNC0_STAGE + func);
	bnx2x_init_block(bp, CSEM_BLOCK, FUNC0_STAGE + func);
	bnx2x_init_block(bp, XSEM_BLOCK, FUNC0_STAGE + func);

	if (CHIP_IS_E2(bp)) {
		/* tell the storms which path (engine) this PF belongs to */
		REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_PATH_ID_OFFSET,
							BP_PATH(bp));
		REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_PATH_ID_OFFSET,
							BP_PATH(bp));
	}

	if (CHIP_MODE_IS_4_PORT(bp))
		bnx2x_init_block(bp, XSEM_4PORT_BLOCK, FUNC0_STAGE + func);

	if (CHIP_IS_E2(bp))
		REG_WR(bp, QM_REG_PF_EN, 1);

	bnx2x_init_block(bp, QM_BLOCK, FUNC0_STAGE + func);

	if (CHIP_MODE_IS_4_PORT(bp))
		bnx2x_init_block(bp, QM_4PORT_BLOCK, FUNC0_STAGE + func);

	bnx2x_init_block(bp, TIMERS_BLOCK, FUNC0_STAGE + func);
	bnx2x_init_block(bp, DQ_BLOCK, FUNC0_STAGE + func);
	bnx2x_init_block(bp, BRB1_BLOCK, FUNC0_STAGE + func);
	bnx2x_init_block(bp, PRS_BLOCK, FUNC0_STAGE + func);
	bnx2x_init_block(bp, TSDM_BLOCK, FUNC0_STAGE + func);
	bnx2x_init_block(bp, CSDM_BLOCK, FUNC0_STAGE + func);
	bnx2x_init_block(bp, USDM_BLOCK, FUNC0_STAGE + func);
	bnx2x_init_block(bp, XSDM_BLOCK, FUNC0_STAGE + func);
	bnx2x_init_block(bp, UPB_BLOCK, FUNC0_STAGE + func);
	bnx2x_init_block(bp, XPB_BLOCK, FUNC0_STAGE + func);
	bnx2x_init_block(bp, PBF_BLOCK, FUNC0_STAGE + func);
	if (CHIP_IS_E2(bp))
		REG_WR(bp, PBF_REG_DISABLE_PF, 0);

	bnx2x_init_block(bp, CDU_BLOCK, FUNC0_STAGE + func);

	bnx2x_init_block(bp, CFC_BLOCK, FUNC0_STAGE + func);

	if (CHIP_IS_E2(bp))
		REG_WR(bp, CFC_REG_WEAK_ENABLE_PF, 1);

	if (IS_MF(bp)) {
		/* multi-function mode: enable this function in the NIG LLH
		 * and program its outer-VLAN id */
		REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
		REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, bp->mf_ov);
	}

	bnx2x_init_block(bp, MISC_AEU_BLOCK, FUNC0_STAGE + func);

	/* HC init per function */
	if (bp->common.int_block == INT_BLOCK_HC) {
		if (CHIP_IS_E1H(bp)) {
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);

			REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
			REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
		}
		bnx2x_init_block(bp, HC_BLOCK, FUNC0_STAGE + func);

	} else {
		/* IGU-based interrupt handling (E2 or E1.5 backward-
		 * compatible mode) */
		int num_segs, sb_idx, prod_offset;

		REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);

		if (CHIP_IS_E2(bp)) {
			REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, 0);
			REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, 0);
		}

		bnx2x_init_block(bp, IGU_BLOCK, FUNC0_STAGE + func);

		if (CHIP_IS_E2(bp)) {
			int dsb_idx = 0;
			/**
			 * Producer memory:
			 * E2 mode: address 0-135 match to the mapping memory;
			 * 136 - PF0 default prod; 137 - PF1 default prod;
			 * 138 - PF2 default prod; 139 - PF3 default prod;
			 * 140 - PF0 attn prod; 141 - PF1 attn prod;
			 * 142 - PF2 attn prod; 143 - PF3 attn prod;
			 * 144-147 reserved.
			 *
			 * E1.5 mode - In backward compatible mode;
			 * for non default SB; each even line in the memory
			 * holds the U producer and each odd line hold
			 * the C producer. The first 128 producers are for
			 * NDSB (PF0 - 0-31; PF1 - 32-63 and so on). The last 20
			 * producers are for the DSB for each PF.
			 * Each PF has five segments: (the order inside each
			 * segment is PF0; PF1; PF2; PF3) - 128-131 U prods;
			 * 132-135 C prods; 136-139 X prods; 140-143 T prods;
			 * 144-147 attn prods;
			 */
			/* non-default-status-blocks */
			num_segs = CHIP_INT_MODE_IS_BC(bp) ?
				IGU_BC_NDSB_NUM_SEGS : IGU_NORM_NDSB_NUM_SEGS;
			for (sb_idx = 0; sb_idx < bp->igu_sb_cnt; sb_idx++) {
				prod_offset = (bp->igu_base_sb + sb_idx) *
					num_segs;

				/* zero all producer segments of this SB */
				for (i = 0; i < num_segs; i++) {
					addr = IGU_REG_PROD_CONS_MEMORY +
							(prod_offset + i) * 4;
					REG_WR(bp, addr, 0);
				}
				/* send consumer update with value 0 */
				bnx2x_ack_sb(bp, bp->igu_base_sb + sb_idx,
					     USTORM_ID, 0, IGU_INT_NOP, 1);
				bnx2x_igu_clear_sb(bp,
						   bp->igu_base_sb + sb_idx);
			}

			/* default-status-blocks */
			num_segs = CHIP_INT_MODE_IS_BC(bp) ?
				IGU_BC_DSB_NUM_SEGS : IGU_NORM_DSB_NUM_SEGS;

			if (CHIP_MODE_IS_4_PORT(bp))
				dsb_idx = BP_FUNC(bp);
			else
				dsb_idx = BP_E1HVN(bp);

			prod_offset = (CHIP_INT_MODE_IS_BC(bp) ?
				       IGU_BC_BASE_DSB_PROD + dsb_idx :
				       IGU_NORM_BASE_DSB_PROD + dsb_idx);

			for (i = 0; i < (num_segs * E1HVN_MAX);
			     i += E1HVN_MAX) {
				addr = IGU_REG_PROD_CONS_MEMORY +
							(prod_offset + i)*4;
				REG_WR(bp, addr, 0);
			}
			/* send consumer update with 0 */
			if (CHIP_INT_MODE_IS_BC(bp)) {
				bnx2x_ack_sb(bp, bp->igu_dsb_id,
					     USTORM_ID, 0, IGU_INT_NOP, 1);
				bnx2x_ack_sb(bp, bp->igu_dsb_id,
					     CSTORM_ID, 0, IGU_INT_NOP, 1);
				bnx2x_ack_sb(bp, bp->igu_dsb_id,
					     XSTORM_ID, 0, IGU_INT_NOP, 1);
				bnx2x_ack_sb(bp, bp->igu_dsb_id,
					     TSTORM_ID, 0, IGU_INT_NOP, 1);
				bnx2x_ack_sb(bp, bp->igu_dsb_id,
					     ATTENTION_ID, 0, IGU_INT_NOP, 1);
			} else {
				bnx2x_ack_sb(bp, bp->igu_dsb_id,
					     USTORM_ID, 0, IGU_INT_NOP, 1);
				bnx2x_ack_sb(bp, bp->igu_dsb_id,
					     ATTENTION_ID, 0, IGU_INT_NOP, 1);
			}
			bnx2x_igu_clear_sb(bp, bp->igu_dsb_id);

			/* !!! these should become driver const once
			   rf-tool supports split-68 const */
			REG_WR(bp, IGU_REG_SB_INT_BEFORE_MASK_LSB, 0);
			REG_WR(bp, IGU_REG_SB_INT_BEFORE_MASK_MSB, 0);
			REG_WR(bp, IGU_REG_SB_MASK_LSB, 0);
			REG_WR(bp, IGU_REG_SB_MASK_MSB, 0);
			REG_WR(bp, IGU_REG_PBA_STATUS_LSB, 0);
			REG_WR(bp, IGU_REG_PBA_STATUS_MSB, 0);
		}
	}

	/* Reset PCIE errors for debug */
	REG_WR(bp, 0x2114, 0xffffffff);
	REG_WR(bp, 0x2120, 0xffffffff);

	bnx2x_init_block(bp, EMAC0_BLOCK, FUNC0_STAGE + func);
	bnx2x_init_block(bp, EMAC1_BLOCK, FUNC0_STAGE + func);
	bnx2x_init_block(bp, DBU_BLOCK, FUNC0_STAGE + func);
	bnx2x_init_block(bp, DBG_BLOCK, FUNC0_STAGE + func);
	bnx2x_init_block(bp, MCP_BLOCK, FUNC0_STAGE + func);
	bnx2x_init_block(bp, DMAE_BLOCK, FUNC0_STAGE + func);

	bnx2x_phy_probe(&bp->link_params);

	return 0;
}
5692
/**
 * bnx2x_init_hw - top-level hardware init dispatcher.
 * @bp:		driver handle
 * @load_code:	load response from the MCP (management firmware)
 *
 * The MCP tells us how much of the chip this driver instance must bring up.
 * The switch below falls through intentionally: a COMMON load runs the
 * common, port and function stages; a PORT load runs port and function; a
 * FUNCTION load runs only the function stage.
 *
 * Returns 0 on success or the error from the first failing stage.
 */
int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
{
	int rc = 0;

	DP(BNX2X_MSG_MCP, "function %d load_code %x\n",
	   BP_ABS_FUNC(bp), load_code);

	/* DMAE must not be used until the function stage enables it */
	bp->dmae_ready = 0;
	mutex_init(&bp->dmae_mutex);
	rc = bnx2x_gunzip_init(bp);
	if (rc)
		return rc;

	switch (load_code) {
	case FW_MSG_CODE_DRV_LOAD_COMMON:
	case FW_MSG_CODE_DRV_LOAD_COMMON_CHIP:
		rc = bnx2x_init_hw_common(bp, load_code);
		if (rc)
			goto init_hw_err;
		/* no break */

	case FW_MSG_CODE_DRV_LOAD_PORT:
		rc = bnx2x_init_hw_port(bp);
		if (rc)
			goto init_hw_err;
		/* no break */

	case FW_MSG_CODE_DRV_LOAD_FUNCTION:
		rc = bnx2x_init_hw_func(bp);
		if (rc)
			goto init_hw_err;
		break;

	default:
		BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
		break;
	}

	if (!BP_NOMCP(bp)) {
		/* seed the driver-pulse sequence from the MCP mailbox so the
		 * keep-alive heartbeat continues from the firmware's value */
		int mb_idx = BP_FW_MB_IDX(bp);

		bp->fw_drv_pulse_wr_seq =
				(SHMEM_RD(bp, func_mb[mb_idx].drv_pulse_mb) &
				 DRV_PULSE_SEQ_MASK);
		DP(BNX2X_MSG_MCP, "drv_pulse 0x%x\n", bp->fw_drv_pulse_wr_seq);
	}

init_hw_err:
	bnx2x_gunzip_end(bp);

	return rc;
}
5745
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00005746void bnx2x_free_mem(struct bnx2x *bp)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005747{
5748
5749#define BNX2X_PCI_FREE(x, y, size) \
5750 do { \
5751 if (x) { \
Dmitry Kravkov523224a2010-10-06 03:23:26 +00005752 dma_free_coherent(&bp->pdev->dev, size, (void *)x, y); \
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005753 x = NULL; \
5754 y = 0; \
5755 } \
5756 } while (0)
5757
5758#define BNX2X_FREE(x) \
5759 do { \
5760 if (x) { \
Dmitry Kravkov523224a2010-10-06 03:23:26 +00005761 kfree((void *)x); \
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005762 x = NULL; \
5763 } \
5764 } while (0)
5765
5766 int i;
5767
5768 /* fastpath */
Eilon Greenstein555f6c72009-02-12 08:36:11 +00005769 /* Common */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005770 for_each_queue(bp, i) {
Eilon Greenstein555f6c72009-02-12 08:36:11 +00005771 /* status blocks */
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00005772 if (CHIP_IS_E2(bp))
5773 BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk.e2_sb),
5774 bnx2x_fp(bp, i, status_blk_mapping),
5775 sizeof(struct host_hc_status_block_e2));
5776 else
5777 BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk.e1x_sb),
5778 bnx2x_fp(bp, i, status_blk_mapping),
5779 sizeof(struct host_hc_status_block_e1x));
Eilon Greenstein555f6c72009-02-12 08:36:11 +00005780 }
5781 /* Rx */
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +00005782 for_each_queue(bp, i) {
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005783
Eilon Greenstein555f6c72009-02-12 08:36:11 +00005784 /* fastpath rx rings: rx_buf rx_desc rx_comp */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005785 BNX2X_FREE(bnx2x_fp(bp, i, rx_buf_ring));
5786 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_desc_ring),
5787 bnx2x_fp(bp, i, rx_desc_mapping),
5788 sizeof(struct eth_rx_bd) * NUM_RX_BD);
5789
5790 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_comp_ring),
5791 bnx2x_fp(bp, i, rx_comp_mapping),
5792 sizeof(struct eth_fast_path_rx_cqe) *
5793 NUM_RCQ_BD);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005794
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07005795 /* SGE ring */
Eilon Greenstein32626232008-08-13 15:51:07 -07005796 BNX2X_FREE(bnx2x_fp(bp, i, rx_page_ring));
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07005797 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_sge_ring),
5798 bnx2x_fp(bp, i, rx_sge_mapping),
5799 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
5800 }
Eilon Greenstein555f6c72009-02-12 08:36:11 +00005801 /* Tx */
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +00005802 for_each_queue(bp, i) {
Eilon Greenstein555f6c72009-02-12 08:36:11 +00005803
5804 /* fastpath tx rings: tx_buf tx_desc */
5805 BNX2X_FREE(bnx2x_fp(bp, i, tx_buf_ring));
5806 BNX2X_PCI_FREE(bnx2x_fp(bp, i, tx_desc_ring),
5807 bnx2x_fp(bp, i, tx_desc_mapping),
Eilon Greensteinca003922009-08-12 22:53:28 -07005808 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
Eilon Greenstein555f6c72009-02-12 08:36:11 +00005809 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005810 /* end of fastpath */
5811
5812 BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
Dmitry Kravkov523224a2010-10-06 03:23:26 +00005813 sizeof(struct host_sp_status_block));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005814
5815 BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005816 sizeof(struct bnx2x_slowpath));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005817
Dmitry Kravkov523224a2010-10-06 03:23:26 +00005818 BNX2X_PCI_FREE(bp->context.vcxt, bp->context.cxt_mapping,
5819 bp->context.size);
5820
5821 bnx2x_ilt_mem_op(bp, ILT_MEMOP_FREE);
5822
5823 BNX2X_FREE(bp->ilt->lines);
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00005824
Michael Chan37b091b2009-10-10 13:46:55 +00005825#ifdef BCM_CNIC
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00005826 if (CHIP_IS_E2(bp))
5827 BNX2X_PCI_FREE(bp->cnic_sb.e2_sb, bp->cnic_sb_mapping,
5828 sizeof(struct host_hc_status_block_e2));
5829 else
5830 BNX2X_PCI_FREE(bp->cnic_sb.e1x_sb, bp->cnic_sb_mapping,
5831 sizeof(struct host_hc_status_block_e1x));
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00005832
Dmitry Kravkov523224a2010-10-06 03:23:26 +00005833 BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, SRC_T2_SZ);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005834#endif
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00005835
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07005836 BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005837
Dmitry Kravkov523224a2010-10-06 03:23:26 +00005838 BNX2X_PCI_FREE(bp->eq_ring, bp->eq_mapping,
5839 BCM_PAGE_SIZE * NUM_EQ_PAGES);
5840
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005841#undef BNX2X_PCI_FREE
5842#undef BNX2X_KFREE
5843}
5844
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00005845static inline void set_sb_shortcuts(struct bnx2x *bp, int index)
5846{
5847 union host_hc_status_block status_blk = bnx2x_fp(bp, index, status_blk);
5848 if (CHIP_IS_E2(bp)) {
5849 bnx2x_fp(bp, index, sb_index_values) =
5850 (__le16 *)status_blk.e2_sb->sb.index_values;
5851 bnx2x_fp(bp, index, sb_running_index) =
5852 (__le16 *)status_blk.e2_sb->sb.running_index;
5853 } else {
5854 bnx2x_fp(bp, index, sb_index_values) =
5855 (__le16 *)status_blk.e1x_sb->sb.index_values;
5856 bnx2x_fp(bp, index, sb_running_index) =
5857 (__le16 *)status_blk.e1x_sb->sb.running_index;
5858 }
5859}
5860
/**
 * bnx2x_alloc_mem - allocate all driver memory needed to bring the NIC up.
 * @bp: driver handle
 *
 * Allocates per-queue status blocks and Rx/Tx rings, the CNIC status block
 * and searcher T2 table (when compiled in), the default status block,
 * slowpath memory, connection context, ILT lines, the slowpath (SPQ) ring
 * and the event queue.  On any allocation failure everything already
 * allocated is released via bnx2x_free_mem() (which tolerates partially
 * allocated state) and -ENOMEM is returned; returns 0 on success.
 */
int bnx2x_alloc_mem(struct bnx2x *bp)
{
/* DMA-coherent allocation; jumps to the cleanup label on failure */
#define BNX2X_PCI_ALLOC(x, y, size) \
	do { \
		x = dma_alloc_coherent(&bp->pdev->dev, size, y, GFP_KERNEL); \
		if (x == NULL) \
			goto alloc_mem_err; \
		memset(x, 0, size); \
	} while (0)

/* zeroed kernel-heap allocation; jumps to the cleanup label on failure */
#define BNX2X_ALLOC(x, size) \
	do { \
		x = kzalloc(size, GFP_KERNEL); \
		if (x == NULL) \
			goto alloc_mem_err; \
	} while (0)

	int i;

	/* fastpath */
	/* Common */
	for_each_queue(bp, i) {
		union host_hc_status_block *sb = &bnx2x_fp(bp, i, status_blk);
		bnx2x_fp(bp, i, bp) = bp;
		/* status blocks */
		if (CHIP_IS_E2(bp))
			BNX2X_PCI_ALLOC(sb->e2_sb,
				&bnx2x_fp(bp, i, status_blk_mapping),
				sizeof(struct host_hc_status_block_e2));
		else
			BNX2X_PCI_ALLOC(sb->e1x_sb,
				&bnx2x_fp(bp, i, status_blk_mapping),
				sizeof(struct host_hc_status_block_e1x));

		/* cache index/running-index pointers into the new SB */
		set_sb_shortcuts(bp, i);
	}
	/* Rx */
	for_each_queue(bp, i) {

		/* fastpath rx rings: rx_buf rx_desc rx_comp */
		BNX2X_ALLOC(bnx2x_fp(bp, i, rx_buf_ring),
			sizeof(struct sw_rx_bd) * NUM_RX_BD);
		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_desc_ring),
				&bnx2x_fp(bp, i, rx_desc_mapping),
				sizeof(struct eth_rx_bd) * NUM_RX_BD);

		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_comp_ring),
				&bnx2x_fp(bp, i, rx_comp_mapping),
				sizeof(struct eth_fast_path_rx_cqe) *
				NUM_RCQ_BD);

		/* SGE ring */
		BNX2X_ALLOC(bnx2x_fp(bp, i, rx_page_ring),
			sizeof(struct sw_rx_page) * NUM_RX_SGE);
		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_sge_ring),
				&bnx2x_fp(bp, i, rx_sge_mapping),
				BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
	}
	/* Tx */
	for_each_queue(bp, i) {

		/* fastpath tx rings: tx_buf tx_desc */
		BNX2X_ALLOC(bnx2x_fp(bp, i, tx_buf_ring),
			sizeof(struct sw_tx_bd) * NUM_TX_BD);
		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, tx_desc_ring),
				&bnx2x_fp(bp, i, tx_desc_mapping),
				sizeof(union eth_tx_bd_types) * NUM_TX_BD);
	}
	/* end of fastpath */

#ifdef BCM_CNIC
	if (CHIP_IS_E2(bp))
		BNX2X_PCI_ALLOC(bp->cnic_sb.e2_sb, &bp->cnic_sb_mapping,
				sizeof(struct host_hc_status_block_e2));
	else
		BNX2X_PCI_ALLOC(bp->cnic_sb.e1x_sb, &bp->cnic_sb_mapping,
				sizeof(struct host_hc_status_block_e1x));

	/* allocate searcher T2 table */
	BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, SRC_T2_SZ);
#endif


	BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping,
			sizeof(struct host_sp_status_block));

	BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping,
			sizeof(struct bnx2x_slowpath));

	/* connection context: one cdu_context per L2 connection id */
	bp->context.size = sizeof(union cdu_context) * bp->l2_cid_count;

	BNX2X_PCI_ALLOC(bp->context.vcxt, &bp->context.cxt_mapping,
			bp->context.size);

	BNX2X_ALLOC(bp->ilt->lines, sizeof(struct ilt_line) * ILT_MAX_LINES);

	if (bnx2x_ilt_mem_op(bp, ILT_MEMOP_ALLOC))
		goto alloc_mem_err;

	/* Slow path ring */
	BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE);

	/* EQ */
	BNX2X_PCI_ALLOC(bp->eq_ring, &bp->eq_mapping,
			BCM_PAGE_SIZE * NUM_EQ_PAGES);
	return 0;

alloc_mem_err:
	/* releases whatever was allocated before the failure */
	bnx2x_free_mem(bp);
	return -ENOMEM;

#undef BNX2X_PCI_ALLOC
#undef BNX2X_ALLOC
}
5975
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005976/*
5977 * Init service functions
5978 */
Dmitry Kravkov523224a2010-10-06 03:23:26 +00005979int bnx2x_func_start(struct bnx2x *bp)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005980{
Dmitry Kravkov523224a2010-10-06 03:23:26 +00005981 bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_FUNCTION_START, 0, 0, 0, 1);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005982
Dmitry Kravkov523224a2010-10-06 03:23:26 +00005983 /* Wait for completion */
5984 return bnx2x_wait_ramrod(bp, BNX2X_STATE_FUNC_STARTED, 0, &(bp->state),
5985 WAIT_RAMROD_COMMON);
5986}
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005987
Dmitry Kravkov523224a2010-10-06 03:23:26 +00005988int bnx2x_func_stop(struct bnx2x *bp)
5989{
5990 bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_FUNCTION_STOP, 0, 0, 0, 1);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005991
Dmitry Kravkov523224a2010-10-06 03:23:26 +00005992 /* Wait for completion */
5993 return bnx2x_wait_ramrod(bp, BNX2X_STATE_CLOSING_WAIT4_UNLOAD,
5994 0, &(bp->state), WAIT_RAMROD_COMMON);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005995}
5996
/**
 * Sets (or clears) a single MAC entry in the CAM for a set of L2 clients.
 *
 * Fills the slow-path mac_config buffer with one entry, posts a
 * COMMON_SET_MAC ramrod and blocks until the completion handler clears
 * bp->set_mac_pending.
 *
 * @param bp driver descriptor
 * @param set set or clear an entry (1 or 0)
 * @param mac pointer to a buffer containing a MAC
 * @param cl_bit_vec bit vector of clients to register a MAC for
 * @param cam_offset offset in a CAM to use
 * @param is_bcast is the set MAC a broadcast address (for E1 only)
 */
static void bnx2x_set_mac_addr_gen(struct bnx2x *bp, int set, u8 *mac,
				   u32 cl_bit_vec, u8 cam_offset,
				   u8 is_bcast)
{
	struct mac_configuration_cmd *config =
		(struct mac_configuration_cmd *)bnx2x_sp(bp, mac_config);
	int ramrod_flags = WAIT_RAMROD_COMMON;

	/* Raise the pending flag before posting; the write barrier pairs
	 * with the completion path that clears set_mac_pending, which
	 * bnx2x_wait_ramrod() polls below.
	 */
	bp->set_mac_pending = 1;
	smp_wmb();

	/* Exactly one entry in this command */
	config->hdr.length = 1;
	config->hdr.offset = cam_offset;
	config->hdr.client_id = 0xff;
	config->hdr.reserved1 = 0;

	/* primary MAC — the address is written as three byte-swapped
	 * 16-bit words, as the firmware table expects
	 */
	config->config_table[0].msb_mac_addr =
					swab16(*(u16 *)&mac[0]);
	config->config_table[0].middle_mac_addr =
					swab16(*(u16 *)&mac[2]);
	config->config_table[0].lsb_mac_addr =
					swab16(*(u16 *)&mac[4]);
	config->config_table[0].clients_bit_vector =
					cpu_to_le32(cl_bit_vec);
	config->config_table[0].vlan_id = 0;
	config->config_table[0].pf_id = BP_FUNC(bp);
	/* SET installs the entry, INVALIDATE removes it */
	if (set)
		SET_FLAG(config->config_table[0].flags,
			MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
			T_ETH_MAC_COMMAND_SET);
	else
		SET_FLAG(config->config_table[0].flags,
			MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
			T_ETH_MAC_COMMAND_INVALIDATE);

	if (is_bcast)
		SET_FLAG(config->config_table[0].flags,
			MAC_CONFIGURATION_ENTRY_BROADCAST, 1);

	/* NOTE(review): cl_bit_vec is u32 but printed with %d — harmless
	 * for small masks, %x would be clearer; verify before changing. */
	DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x) PF_ID %d CLID mask %d\n",
	   (set ? "setting" : "clearing"),
	   config->config_table[0].msb_mac_addr,
	   config->config_table[0].middle_mac_addr,
	   config->config_table[0].lsb_mac_addr, BP_FUNC(bp), cl_bit_vec);

	bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_SET_MAC, 0,
		      U64_HI(bnx2x_sp_mapping(bp, mac_config)),
		      U64_LO(bnx2x_sp_mapping(bp, mac_config)), 1);

	/* Wait for a completion */
	bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, ramrod_flags);
}
6060
/**
 * Wait until *state_p becomes @state, or time out.
 *
 * Sleeps 1ms per iteration for up to 5000 iterations (~5 seconds).  With
 * WAIT_RAMROD_POLL the completion is actively polled: the event queue
 * (WAIT_RAMROD_COMMON) or the Rx completion ring of the relevant queue
 * is processed on every iteration instead of relying on interrupts.
 *
 * @param bp       driver descriptor
 * @param state    value *state_p must reach
 * @param idx      fastpath index whose ring may carry the reply (poll mode)
 * @param state_p  location updated by the completion path
 * @param flags    WAIT_RAMROD_POLL and/or WAIT_RAMROD_COMMON
 *
 * @return 0 on success, -EIO if the chip paniced, -EBUSY on timeout
 */
int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
		      int *state_p, int flags)
{
	/* can take a while if any port is running */
	int cnt = 5000;
	u8 poll = flags & WAIT_RAMROD_POLL;
	u8 common = flags & WAIT_RAMROD_COMMON;

	DP(NETIF_MSG_IFUP, "%s for state to become %x on IDX [%d]\n",
	   poll ? "polling" : "waiting", state, idx);

	might_sleep();
	while (cnt--) {
		if (poll) {
			if (common)
				bnx2x_eq_int(bp);
			else {
				bnx2x_rx_int(bp->fp, 10);
				/* if index is different from 0
				 * the reply for some commands will
				 * be on the non default queue
				 */
				if (idx)
					bnx2x_rx_int(&bp->fp[idx], 10);
			}
		}

		mb(); /* state is changed by bnx2x_sp_event() */
		if (*state_p == state) {
#ifdef BNX2X_STOP_ON_ERROR
			DP(NETIF_MSG_IFUP, "exit (cnt %d)\n", 5000 - cnt);
#endif
			return 0;
		}

		msleep(1);

		/* Abort if the driver paniced while we were sleeping */
		if (bp->panic)
			return -EIO;
	}

	/* timeout! */
	BNX2X_ERR("timeout %s for state %x on IDX [%d]\n",
		  poll ? "polling" : "waiting", state, idx);
#ifdef BNX2X_STOP_ON_ERROR
	bnx2x_panic();
#endif

	return -EBUSY;
}
6111
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006112u8 bnx2x_e1h_cam_offset(struct bnx2x *bp, u8 rel_offset)
Michael Chane665bfda52009-10-10 13:46:54 +00006113{
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00006114 if (CHIP_IS_E1H(bp))
6115 return E1H_FUNC_MAX * rel_offset + BP_FUNC(bp);
6116 else if (CHIP_MODE_IS_4_PORT(bp))
6117 return BP_FUNC(bp) * 32 + rel_offset;
6118 else
6119 return BP_VN(bp) * 32 + rel_offset;
Michael Chane665bfda52009-10-10 13:46:54 +00006120}
6121
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006122void bnx2x_set_eth_mac(struct bnx2x *bp, int set)
Michael Chane665bfda52009-10-10 13:46:54 +00006123{
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006124 u8 cam_offset = (CHIP_IS_E1(bp) ? (BP_PORT(bp) ? 32 : 0) :
6125 bnx2x_e1h_cam_offset(bp, CAM_ETH_LINE));
6126
6127 /* networking MAC */
6128 bnx2x_set_mac_addr_gen(bp, set, bp->dev->dev_addr,
6129 (1 << bp->fp->cl_id), cam_offset , 0);
6130
6131 if (CHIP_IS_E1(bp)) {
6132 /* broadcast MAC */
6133 u8 bcast[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
6134 bnx2x_set_mac_addr_gen(bp, set, bcast, 0, cam_offset + 1, 1);
6135 }
6136}
/*
 * Program the E1 multicast MAC table at CAM offset @offset.
 *
 * Copies the netdev's current multicast list into the slow-path
 * mcast_config table, invalidates any leftover entries from the previous
 * (longer) list, and posts a COMMON_SET_MAC ramrod.  Unlike the unicast
 * path this does not wait for the completion here; set_mac_pending is
 * cleared asynchronously by the completion handler.
 *
 * NOTE(review): no bound against the config table capacity is checked in
 * this function — presumably the caller limits the mc count before
 * selecting this path; verify against the rx_mode caller.
 */
static void bnx2x_set_e1_mc_list(struct bnx2x *bp, u8 offset)
{
	int i = 0, old;
	struct net_device *dev = bp->dev;
	struct netdev_hw_addr *ha;
	struct mac_configuration_cmd *config_cmd = bnx2x_sp(bp, mcast_config);
	dma_addr_t config_cmd_map = bnx2x_sp_mapping(bp, mcast_config);

	netdev_for_each_mc_addr(ha, dev) {
		/* copy mac — three byte-swapped 16-bit words, as the
		 * firmware table expects */
		config_cmd->config_table[i].msb_mac_addr =
			swab16(*(u16 *)&bnx2x_mc_addr(ha)[0]);
		config_cmd->config_table[i].middle_mac_addr =
			swab16(*(u16 *)&bnx2x_mc_addr(ha)[2]);
		config_cmd->config_table[i].lsb_mac_addr =
			swab16(*(u16 *)&bnx2x_mc_addr(ha)[4]);

		config_cmd->config_table[i].vlan_id = 0;
		config_cmd->config_table[i].pf_id = BP_FUNC(bp);
		config_cmd->config_table[i].clients_bit_vector =
			cpu_to_le32(1 << BP_L_ID(bp));

		SET_FLAG(config_cmd->config_table[i].flags,
			MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
			T_ETH_MAC_COMMAND_SET);

		DP(NETIF_MSG_IFUP,
		   "setting MCAST[%d] (%04x:%04x:%04x)\n", i,
		   config_cmd->config_table[i].msb_mac_addr,
		   config_cmd->config_table[i].middle_mac_addr,
		   config_cmd->config_table[i].lsb_mac_addr);
		i++;
	}
	/* If the previous list was longer, invalidate its tail so stale
	 * entries don't keep matching.  Stop at the first entry that is
	 * already invalid — everything beyond it was never written. */
	old = config_cmd->hdr.length;
	if (old > i) {
		for (; i < old; i++) {
			if (CAM_IS_INVALID(config_cmd->
					   config_table[i])) {
				/* already invalidated */
				break;
			}
			/* invalidate */
			SET_FLAG(config_cmd->config_table[i].flags,
				MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
				T_ETH_MAC_COMMAND_INVALIDATE);
		}
	}

	config_cmd->hdr.length = i;
	config_cmd->hdr.offset = offset;
	config_cmd->hdr.client_id = 0xff;
	config_cmd->hdr.reserved1 = 0;

	/* Pending flag must be visible before the ramrod is posted;
	 * the completion handler clears it. */
	bp->set_mac_pending = 1;
	smp_wmb();

	bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_SET_MAC, 0,
		   U64_HI(config_cmd_map), U64_LO(config_cmd_map), 1);
}
6196static void bnx2x_invlidate_e1_mc_list(struct bnx2x *bp)
6197{
6198 int i;
6199 struct mac_configuration_cmd *config_cmd = bnx2x_sp(bp, mcast_config);
6200 dma_addr_t config_cmd_map = bnx2x_sp_mapping(bp, mcast_config);
6201 int ramrod_flags = WAIT_RAMROD_COMMON;
6202
6203 bp->set_mac_pending = 1;
6204 smp_wmb();
6205
6206 for (i = 0; i < config_cmd->hdr.length; i++)
6207 SET_FLAG(config_cmd->config_table[i].flags,
6208 MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
6209 T_ETH_MAC_COMMAND_INVALIDATE);
6210
6211 bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_SET_MAC, 0,
6212 U64_HI(config_cmd_map), U64_LO(config_cmd_map), 1);
Michael Chane665bfda52009-10-10 13:46:54 +00006213
6214 /* Wait for a completion */
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006215 bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending,
6216 ramrod_flags);
6217
Michael Chane665bfda52009-10-10 13:46:54 +00006218}
6219
#ifdef BCM_CNIC
/**
 * Set iSCSI MAC(s) at the next entries in the CAM after the ETH
 * MAC(s). This function will wait until the ramrod completion
 * returns.
 *
 * @param bp driver handle
 * @param set set or clear the CAM entry
 *
 * @return 0 if success, -ENODEV if ramrod doesn't return.
 *
 * NOTE(review): as written the function always returns 0 — the wait
 * happens inside bnx2x_set_mac_addr_gen() and its result is not
 * propagated; the -ENODEV case in the comment above looks stale.
 */
int bnx2x_set_iscsi_eth_mac_addr(struct bnx2x *bp, int set)
{
	/* E1 keeps the iSCSI entry two slots past the per-port base;
	 * later chips use the generic per-function offset helper */
	u8 cam_offset = (CHIP_IS_E1(bp) ? ((BP_PORT(bp) ? 32 : 0) + 2) :
			 bnx2x_e1h_cam_offset(bp, CAM_ISCSI_ETH_LINE));
	u32 iscsi_l2_cl_id = BNX2X_ISCSI_ETH_CL_ID;
	u32 cl_bit_vec = (1 << iscsi_l2_cl_id);

	/* Send a SET_MAC ramrod */
	bnx2x_set_mac_addr_gen(bp, set, bp->iscsi_mac, cl_bit_vec,
			       cam_offset, 0);
	return 0;
}
#endif
6244
/**
 * Fill the CLIENT_SETUP ramrod data from the prepared queue parameters.
 *
 * Pure data-marshalling: copies the general, Rx, Tx and flow-control
 * parameters from @params into the firmware's client_init_ramrod_data
 * layout.  Multi-byte firmware fields are converted to little endian.
 *
 * @param bp       driver descriptor
 * @param params   queue/pause parameters prepared by the caller
 * @param activate whether the client should come up active
 * @param data     ramrod data buffer to fill (fully overwritten)
 */
static void bnx2x_fill_cl_init_data(struct bnx2x *bp,
				    struct bnx2x_client_init_params *params,
				    u8 activate,
				    struct client_init_ramrod_data *data)
{
	/* Clear the buffer */
	memset(data, 0, sizeof(*data));

	/* general */
	data->general.client_id = params->rxq_params.cl_id;
	data->general.statistics_counter_id = params->rxq_params.stat_id;
	data->general.statistics_en_flg =
		(params->rxq_params.flags & QUEUE_FLG_STATS) ? 1 : 0;
	data->general.activate_flg = activate;
	data->general.sp_client_id = params->rxq_params.spcl_id;

	/* Rx data */
	data->rx.tpa_en_flg =
		(params->rxq_params.flags & QUEUE_FLG_TPA) ? 1 : 0;
	data->rx.vmqueue_mode_en_flg = 0;
	data->rx.cache_line_alignment_log_size =
		params->rxq_params.cache_line_log;
	data->rx.enable_dynamic_hc =
		(params->rxq_params.flags & QUEUE_FLG_DHC) ? 1 : 0;
	data->rx.max_sges_for_packet = params->rxq_params.max_sges_pkt;
	data->rx.client_qzone_id = params->rxq_params.cl_qzone_id;
	data->rx.max_agg_size = params->rxq_params.tpa_agg_sz;

	/* We don't set drop flags */
	data->rx.drop_ip_cs_err_flg = 0;
	data->rx.drop_tcp_cs_err_flg = 0;
	data->rx.drop_ttl0_flg = 0;
	data->rx.drop_udp_cs_err_flg = 0;

	data->rx.inner_vlan_removal_enable_flg =
		(params->rxq_params.flags & QUEUE_FLG_VLAN) ? 1 : 0;
	data->rx.outer_vlan_removal_enable_flg =
		(params->rxq_params.flags & QUEUE_FLG_OV) ? 1 : 0;
	data->rx.status_block_id = params->rxq_params.fw_sb_id;
	data->rx.rx_sb_index_number = params->rxq_params.sb_cq_index;
	data->rx.bd_buff_size = cpu_to_le16(params->rxq_params.buf_sz);
	data->rx.sge_buff_size = cpu_to_le16(params->rxq_params.sge_buf_sz);
	data->rx.mtu = cpu_to_le16(params->rxq_params.mtu);
	/* DMA addresses are split into 32-bit halves for the FW */
	data->rx.bd_page_base.lo =
		cpu_to_le32(U64_LO(params->rxq_params.dscr_map));
	data->rx.bd_page_base.hi =
		cpu_to_le32(U64_HI(params->rxq_params.dscr_map));
	data->rx.sge_page_base.lo =
		cpu_to_le32(U64_LO(params->rxq_params.sge_map));
	data->rx.sge_page_base.hi =
		cpu_to_le32(U64_HI(params->rxq_params.sge_map));
	data->rx.cqe_page_base.lo =
		cpu_to_le32(U64_LO(params->rxq_params.rcq_map));
	data->rx.cqe_page_base.hi =
		cpu_to_le32(U64_HI(params->rxq_params.rcq_map));
	/* the leading RSS client also receives approximate-multicast */
	data->rx.is_leading_rss =
		(params->ramrod_params.flags & CLIENT_IS_LEADING_RSS) ? 1 : 0;
	data->rx.is_approx_mcast = data->rx.is_leading_rss;

	/* Tx data */
	data->tx.enforce_security_flg = 0; /* VF specific */
	data->tx.tx_status_block_id = params->txq_params.fw_sb_id;
	data->tx.tx_sb_index_number = params->txq_params.sb_cq_index;
	data->tx.mtu = 0; /* VF specific */
	data->tx.tx_bd_page_base.lo =
		cpu_to_le32(U64_LO(params->txq_params.dscr_map));
	data->tx.tx_bd_page_base.hi =
		cpu_to_le32(U64_HI(params->txq_params.dscr_map));

	/* flow control data */
	data->fc.cqe_pause_thr_low = cpu_to_le16(params->pause.rcq_th_lo);
	data->fc.cqe_pause_thr_high = cpu_to_le16(params->pause.rcq_th_hi);
	data->fc.bd_pause_thr_low = cpu_to_le16(params->pause.bd_th_lo);
	data->fc.bd_pause_thr_high = cpu_to_le16(params->pause.bd_th_hi);
	data->fc.sge_pause_thr_low = cpu_to_le16(params->pause.sge_th_lo);
	data->fc.sge_pause_thr_high = cpu_to_le16(params->pause.sge_th_hi);
	data->fc.rx_cos_mask = cpu_to_le16(params->pause.pri_map);

	data->fc.safc_group_num = params->txq_params.cos;
	data->fc.safc_group_en_flg =
		(params->txq_params.flags & QUEUE_FLG_COS) ? 1 : 0;
	data->fc.traffic_type = LLFC_TRAFFIC_TYPE_NW;
}
6328
6329static inline void bnx2x_set_ctx_validation(struct eth_context *cxt, u32 cid)
6330{
6331 /* ustorm cxt validation */
6332 cxt->ustorm_ag_context.cdu_usage =
6333 CDU_RSRVD_VALUE_TYPE_A(cid, CDU_REGION_NUMBER_UCM_AG,
6334 ETH_CONNECTION_TYPE);
6335 /* xcontext validation */
6336 cxt->xstorm_ag_context.cdu_reserved =
6337 CDU_RSRVD_VALUE_TYPE_A(cid, CDU_REGION_NUMBER_XCM_AG,
6338 ETH_CONNECTION_TYPE);
6339}
6340
/**
 * Bring up a firmware L2 client: program coalescing, validate the
 * context, zero its statistics, fill and post the CLIENT_SETUP ramrod,
 * then wait for the requested state.
 *
 * @param bp           driver descriptor
 * @param params       prepared Rx/Tx queue and ramrod parameters
 * @param activate     activate flag passed into the ramrod data
 * @param data         ramrod data buffer (slow-path memory)
 * @param data_mapping DMA address of @data
 *
 * @return 0 on success, error code from bnx2x_wait_ramrod() otherwise
 */
int bnx2x_setup_fw_client(struct bnx2x *bp,
			  struct bnx2x_client_init_params *params,
			  u8 activate,
			  struct client_init_ramrod_data *data,
			  dma_addr_t data_mapping)
{
	u16 hc_usec;
	int ramrod = RAMROD_CMD_ID_ETH_CLIENT_SETUP;
	int ramrod_flags = 0, rc;

	/* HC and context validation values — convert the configured
	 * interrupt rate (Hz) to microseconds; 0 disables coalescing */
	hc_usec = params->txq_params.hc_rate ?
		1000000 / params->txq_params.hc_rate : 0;
	bnx2x_update_coalesce_sb_index(bp,
			params->txq_params.fw_sb_id,
			params->txq_params.sb_cq_index,
			!(params->txq_params.flags & QUEUE_FLG_HC),
			hc_usec);

	/* The wait below observes this state pointer */
	*(params->ramrod_params.pstate) = BNX2X_FP_STATE_OPENING;

	hc_usec = params->rxq_params.hc_rate ?
		1000000 / params->rxq_params.hc_rate : 0;
	bnx2x_update_coalesce_sb_index(bp,
			params->rxq_params.fw_sb_id,
			params->rxq_params.sb_cq_index,
			!(params->rxq_params.flags & QUEUE_FLG_HC),
			hc_usec);

	bnx2x_set_ctx_validation(params->rxq_params.cxt,
				 params->rxq_params.cid);

	/* zero stats */
	if (params->txq_params.flags & QUEUE_FLG_STATS)
		storm_memset_xstats_zero(bp, BP_PORT(bp),
					 params->txq_params.stat_id);

	if (params->rxq_params.flags & QUEUE_FLG_STATS) {
		storm_memset_ustats_zero(bp, BP_PORT(bp),
					 params->rxq_params.stat_id);
		storm_memset_tstats_zero(bp, BP_PORT(bp),
					 params->rxq_params.stat_id);
	}

	/* Fill the ramrod data */
	bnx2x_fill_cl_init_data(bp, params, activate, data);

	/* SETUP ramrod.
	 *
	 * bnx2x_sp_post() takes a spin_lock thus no other explict memory
	 * barrier except from mmiowb() is needed to impose a
	 * proper ordering of memory operations.
	 */
	mmiowb();


	bnx2x_sp_post(bp, ramrod, params->ramrod_params.cid,
		      U64_HI(data_mapping), U64_LO(data_mapping), 0);

	/* Wait for completion */
	rc = bnx2x_wait_ramrod(bp, params->ramrod_params.state,
				 params->ramrod_params.index,
				 params->ramrod_params.pstate,
				 ramrod_flags);
	return rc;
}
6407
/**
 * Configure interrupt mode according to current configuration.
 * In case of MSI-X it will also try to enable MSI-X; on failure it
 * falls back to MSI (unless disabled) or legacy INTx with one queue.
 *
 * @param bp driver descriptor
 *
 * @return 0 on success; in the MSI-X path, a non-zero value from
 *         bnx2x_enable_msix() may be returned even after falling back
 */
static int __devinit bnx2x_set_int_mode(struct bnx2x *bp)
{
	int rc = 0;

	switch (bp->int_mode) {
	case INT_MODE_MSI:
		bnx2x_enable_msi(bp);
		/* falling through... */
	case INT_MODE_INTx:
		/* Single-vector modes use exactly one fastpath queue */
		bp->num_queues = 1;
		DP(NETIF_MSG_IFUP, "set number of queues to 1\n");
		break;
	default:
		/* Set number of queues according to bp->multi_mode value */
		bnx2x_set_num_queues(bp);

		DP(NETIF_MSG_IFUP, "set number of queues to %d\n",
		   bp->num_queues);

		/* if we can't use MSI-X we only need one fp,
		 * so try to enable MSI-X with the requested number of fp's
		 * and fallback to MSI or legacy INTx with one fp
		 */
		rc = bnx2x_enable_msix(bp);
		if (rc) {
			/* failed to enable MSI-X */
			/* NOTE(review): the first %d here prints the
			 * requested queue count, not the error code rc —
			 * presumably intentional, but worth confirming */
			if (bp->multi_mode)
				DP(NETIF_MSG_IFUP,
					  "Multi requested but failed to "
					  "enable MSI-X (%d), "
					  "set number of queues to %d\n",
				   bp->num_queues,
				   1);
			bp->num_queues = 1;

			if (!(bp->flags & DISABLE_MSI_FLAG))
				bnx2x_enable_msi(bp);
		}

		break;
	}

	return rc;
}
6460
/* Number of ILT lines needed for this function's L2 CIDs.
 * Must be called prior to any HW initializations. */
static inline u16 bnx2x_cid_ilt_lines(struct bnx2x *bp)
{
	return L2_ILT_LINES(bp);
}
6466
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006467void bnx2x_ilt_set_info(struct bnx2x *bp)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006468{
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006469 struct ilt_client_info *ilt_client;
6470 struct bnx2x_ilt *ilt = BP_ILT(bp);
6471 u16 line = 0;
6472
6473 ilt->start_line = FUNC_ILT_BASE(BP_FUNC(bp));
6474 DP(BNX2X_MSG_SP, "ilt starts at line %d\n", ilt->start_line);
6475
6476 /* CDU */
6477 ilt_client = &ilt->clients[ILT_CLIENT_CDU];
6478 ilt_client->client_num = ILT_CLIENT_CDU;
6479 ilt_client->page_size = CDU_ILT_PAGE_SZ;
6480 ilt_client->flags = ILT_CLIENT_SKIP_MEM;
6481 ilt_client->start = line;
6482 line += L2_ILT_LINES(bp);
6483#ifdef BCM_CNIC
6484 line += CNIC_ILT_LINES;
6485#endif
6486 ilt_client->end = line - 1;
6487
6488 DP(BNX2X_MSG_SP, "ilt client[CDU]: start %d, end %d, psz 0x%x, "
6489 "flags 0x%x, hw psz %d\n",
6490 ilt_client->start,
6491 ilt_client->end,
6492 ilt_client->page_size,
6493 ilt_client->flags,
6494 ilog2(ilt_client->page_size >> 12));
6495
6496 /* QM */
6497 if (QM_INIT(bp->qm_cid_count)) {
6498 ilt_client = &ilt->clients[ILT_CLIENT_QM];
6499 ilt_client->client_num = ILT_CLIENT_QM;
6500 ilt_client->page_size = QM_ILT_PAGE_SZ;
6501 ilt_client->flags = 0;
6502 ilt_client->start = line;
6503
6504 /* 4 bytes for each cid */
6505 line += DIV_ROUND_UP(bp->qm_cid_count * QM_QUEUES_PER_FUNC * 4,
6506 QM_ILT_PAGE_SZ);
6507
6508 ilt_client->end = line - 1;
6509
6510 DP(BNX2X_MSG_SP, "ilt client[QM]: start %d, end %d, psz 0x%x, "
6511 "flags 0x%x, hw psz %d\n",
6512 ilt_client->start,
6513 ilt_client->end,
6514 ilt_client->page_size,
6515 ilt_client->flags,
6516 ilog2(ilt_client->page_size >> 12));
6517
6518 }
6519 /* SRC */
6520 ilt_client = &ilt->clients[ILT_CLIENT_SRC];
6521#ifdef BCM_CNIC
6522 ilt_client->client_num = ILT_CLIENT_SRC;
6523 ilt_client->page_size = SRC_ILT_PAGE_SZ;
6524 ilt_client->flags = 0;
6525 ilt_client->start = line;
6526 line += SRC_ILT_LINES;
6527 ilt_client->end = line - 1;
6528
6529 DP(BNX2X_MSG_SP, "ilt client[SRC]: start %d, end %d, psz 0x%x, "
6530 "flags 0x%x, hw psz %d\n",
6531 ilt_client->start,
6532 ilt_client->end,
6533 ilt_client->page_size,
6534 ilt_client->flags,
6535 ilog2(ilt_client->page_size >> 12));
6536
6537#else
6538 ilt_client->flags = (ILT_CLIENT_SKIP_INIT | ILT_CLIENT_SKIP_MEM);
6539#endif
6540
6541 /* TM */
6542 ilt_client = &ilt->clients[ILT_CLIENT_TM];
6543#ifdef BCM_CNIC
6544 ilt_client->client_num = ILT_CLIENT_TM;
6545 ilt_client->page_size = TM_ILT_PAGE_SZ;
6546 ilt_client->flags = 0;
6547 ilt_client->start = line;
6548 line += TM_ILT_LINES;
6549 ilt_client->end = line - 1;
6550
6551 DP(BNX2X_MSG_SP, "ilt client[TM]: start %d, end %d, psz 0x%x, "
6552 "flags 0x%x, hw psz %d\n",
6553 ilt_client->start,
6554 ilt_client->end,
6555 ilt_client->page_size,
6556 ilt_client->flags,
6557 ilog2(ilt_client->page_size >> 12));
6558
6559#else
6560 ilt_client->flags = (ILT_CLIENT_SKIP_INIT | ILT_CLIENT_SKIP_MEM);
6561#endif
6562}
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00006563
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006564int bnx2x_setup_client(struct bnx2x *bp, struct bnx2x_fastpath *fp,
6565 int is_leading)
6566{
6567 struct bnx2x_client_init_params params = { {0} };
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006568 int rc;
6569
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006570 bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, 0,
6571 IGU_INT_ENABLE, 0);
6572
6573 params.ramrod_params.pstate = &fp->state;
6574 params.ramrod_params.state = BNX2X_FP_STATE_OPEN;
6575 params.ramrod_params.index = fp->index;
6576 params.ramrod_params.cid = fp->cid;
6577
6578 if (is_leading)
6579 params.ramrod_params.flags |= CLIENT_IS_LEADING_RSS;
6580
6581 bnx2x_pf_rx_cl_prep(bp, fp, &params.pause, &params.rxq_params);
6582
6583 bnx2x_pf_tx_cl_prep(bp, fp, &params.txq_params);
6584
6585 rc = bnx2x_setup_fw_client(bp, &params, 1,
6586 bnx2x_sp(bp, client_init_data),
6587 bnx2x_sp_mapping(bp, client_init_data));
6588 return rc;
6589}
6590
6591int bnx2x_stop_fw_client(struct bnx2x *bp, struct bnx2x_client_ramrod_params *p)
6592{
6593 int rc;
6594
6595 int poll_flag = p->poll ? WAIT_RAMROD_POLL : 0;
6596
Eliezer Tamirc14423f2008-02-28 11:49:42 -08006597 /* halt the connection */
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006598 *p->pstate = BNX2X_FP_STATE_HALTING;
6599 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, p->cid, 0,
6600 p->cl_id, 0);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006601
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006602 /* Wait for completion */
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006603 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, p->index,
6604 p->pstate, poll_flag);
Eliezer Tamirc14423f2008-02-28 11:49:42 -08006605 if (rc) /* timeout */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006606 return rc;
6607
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006608 *p->pstate = BNX2X_FP_STATE_TERMINATING;
6609 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_TERMINATE, p->cid, 0,
6610 p->cl_id, 0);
6611 /* Wait for completion */
6612 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_TERMINATED, p->index,
6613 p->pstate, poll_flag);
6614 if (rc) /* timeout */
6615 return rc;
6616
6617
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006618 /* delete cfc entry */
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006619 bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_CFC_DEL, p->cid, 0, 0, 1);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006620
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006621 /* Wait for completion */
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006622 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_CLOSED, p->index,
6623 p->pstate, WAIT_RAMROD_COMMON);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006624 return rc;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006625}
6626
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006627static int bnx2x_stop_client(struct bnx2x *bp, int index)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006628{
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006629 struct bnx2x_client_ramrod_params client_stop = {0};
6630 struct bnx2x_fastpath *fp = &bp->fp[index];
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006631
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006632 client_stop.index = index;
6633 client_stop.cid = fp->cid;
6634 client_stop.cl_id = fp->cl_id;
6635 client_stop.pstate = &(fp->state);
6636 client_stop.poll = 0;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006637
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006638 return bnx2x_stop_fw_client(bp, &client_stop);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006639}
6640
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006641
/*
 * Reset this PCI function's firmware/HW state.
 *
 * Disables the function in every storm, marks its status blocks as
 * belonging to a disabled function, clears its SPQ data and IGU/HC
 * edges, stops the CNIC timer scan (if built in), clears the function's
 * ILT range and — on E2 — applies the vnic-3 timers workaround and
 * disables the PF.  Assumes bnx2x_reset_port() already ran.
 */
static void bnx2x_reset_func(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	int i;
	/* Offset of the pci_entity within a fastpath SB's data differs
	 * between the E2 and E1x status block layouts */
	int pfunc_offset_fp = offsetof(struct hc_sb_data, p_func) +
			(CHIP_IS_E2(bp) ?
			 offsetof(struct hc_status_block_data_e2, common) :
			 offsetof(struct hc_status_block_data_e1x, common));
	int pfunc_offset_sp = offsetof(struct hc_sp_status_block_data, p_func);
	int pfid_offset = offsetof(struct pci_entity, pf_id);

	/* Disable the function in the FW */
	REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(func), 0);
	REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNC_EN_OFFSET(func), 0);
	REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNC_EN_OFFSET(func), 0);
	REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNC_EN_OFFSET(func), 0);

	/* FP SBs: mark each fastpath status block's pf_id as disabled */
	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];
		REG_WR8(bp,
			BAR_CSTRORM_INTMEM +
			CSTORM_STATUS_BLOCK_DATA_OFFSET(fp->fw_sb_id)
			+ pfunc_offset_fp + pfid_offset,
			HC_FUNCTION_DISABLED);
	}

	/* SP SB */
	REG_WR8(bp,
		BAR_CSTRORM_INTMEM +
		CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(func) +
		pfunc_offset_sp + pfid_offset,
		HC_FUNCTION_DISABLED);


	/* NOTE(review): loop index 'i' is not used in the write below, so
	 * every iteration rewrites the same word of SPQ data; presumably
	 * the offset should advance per iteration — verify against the
	 * XSTORM_SPQ_DATA layout before changing. */
	for (i = 0; i < XSTORM_SPQ_DATA_SIZE / 4; i++)
		REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_DATA_OFFSET(func),
		       0);

	/* Configure IGU: clear leading/trailing edge latches in HC or IGU
	 * depending on which interrupt block this chip uses */
	if (bp->common.int_block == INT_BLOCK_HC) {
		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
	} else {
		REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, 0);
		REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, 0);
	}

#ifdef BCM_CNIC
	/* Disable Timer scan */
	REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
	/*
	 * Wait for at least 10ms and up to 2 second for the timers scan to
	 * complete
	 */
	for (i = 0; i < 200; i++) {
		msleep(10);
		if (!REG_RD(bp, TM_REG_LIN0_SCAN_ON + port*4))
			break;
	}
#endif
	/* Clear ILT */
	bnx2x_clear_func_ilt(bp, func);

	/* Timers workaround bug for E2: if this is vnic-3,
	 * we need to set the entire ilt range for this timers.
	 */
	if (CHIP_IS_E2(bp) && BP_VN(bp) == 3) {
		struct ilt_client_info ilt_cli;
		/* use dummy TM client */
		memset(&ilt_cli, 0, sizeof(struct ilt_client_info));
		ilt_cli.start = 0;
		ilt_cli.end = ILT_NUM_PAGE_ENTRIES - 1;
		ilt_cli.client_num = ILT_CLIENT_TM;

		bnx2x_ilt_boundry_init_op(bp, &ilt_cli, 0, INITOP_CLEAR);
	}

	/* this assumes that reset_port() called before reset_func()*/
	if (CHIP_IS_E2(bp))
		bnx2x_pf_disable(bp);

	/* DMAE can no longer be used once the function is down */
	bp->dmae_ready = 0;
}
6727
/*
 * Quiesce the NIG/BRB side of one port on the way down.
 *
 * Masks the port's NIG interrupt line, tells the NIG to stop forwarding
 * received frames into the BRB (both the driver mask and the not-for-MCP
 * mask are cleared), clears the port's AEU attention mask, then waits
 * 100ms and reports (debug-only) if BRB blocks are still occupied.
 */
static void bnx2x_reset_port(struct bnx2x *bp)
{
        int port = BP_PORT(bp); /* per-port register copies are 4 bytes apart */
        u32 val;

        /* Mask the NIG interrupt for this port */
        REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);

        /* Do not rcv packets to BRB */
        REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
        /* Do not direct rcv packets that are not for MCP to the BRB */
        REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
                           NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);

        /* Configure AEU: clear the attention mask for this port */
        REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);

        /* Let in-flight traffic drain before sampling BRB occupancy */
        msleep(100);
        /* Check for BRB port occupancy; non-zero means packets are stuck */
        val = REG_RD(bp, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4);
        if (val)
                DP(NETIF_MSG_IFDOWN,
                   "BRB1 is not empty %d blocks are occupied\n", val);

        /* TODO: Close Doorbell port? */
}
6753
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006754static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code)
6755{
6756 DP(BNX2X_MSG_MCP, "function %d reset_code %x\n",
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00006757 BP_ABS_FUNC(bp), reset_code);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006758
6759 switch (reset_code) {
6760 case FW_MSG_CODE_DRV_UNLOAD_COMMON:
6761 bnx2x_reset_port(bp);
6762 bnx2x_reset_func(bp);
6763 bnx2x_reset_common(bp);
6764 break;
6765
6766 case FW_MSG_CODE_DRV_UNLOAD_PORT:
6767 bnx2x_reset_port(bp);
6768 bnx2x_reset_func(bp);
6769 break;
6770
6771 case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
6772 bnx2x_reset_func(bp);
6773 break;
6774
6775 default:
6776 BNX2X_ERR("Unknown reset_code (0x%x) from MCP\n", reset_code);
6777 break;
6778 }
6779}
6780
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00006781void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006782{
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07006783 int port = BP_PORT(bp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006784 u32 reset_code = 0;
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07006785 int i, cnt, rc;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006786
Eilon Greenstein555f6c72009-02-12 08:36:11 +00006787 /* Wait until tx fastpath tasks complete */
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +00006788 for_each_queue(bp, i) {
Eliezer Tamir228241e2008-02-28 11:56:57 -08006789 struct bnx2x_fastpath *fp = &bp->fp[i];
6790
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006791 cnt = 1000;
Vladislav Zolotarove8b5fc52009-01-26 12:36:42 -08006792 while (bnx2x_has_tx_work_unload(fp)) {
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07006793
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006794 if (!cnt) {
6795 BNX2X_ERR("timeout waiting for queue[%d]\n",
6796 i);
6797#ifdef BNX2X_STOP_ON_ERROR
6798 bnx2x_panic();
6799 return -EBUSY;
6800#else
6801 break;
6802#endif
6803 }
6804 cnt--;
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07006805 msleep(1);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006806 }
Eliezer Tamir228241e2008-02-28 11:56:57 -08006807 }
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07006808 /* Give HW time to discard old tx messages */
6809 msleep(1);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006810
Yitchak Gertner65abd742008-08-25 15:26:24 -07006811 if (CHIP_IS_E1(bp)) {
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006812 /* invalidate mc list,
6813 * wait and poll (interrupts are off)
6814 */
6815 bnx2x_invlidate_e1_mc_list(bp);
6816 bnx2x_set_eth_mac(bp, 0);
Yitchak Gertner65abd742008-08-25 15:26:24 -07006817
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006818 } else {
Yitchak Gertner65abd742008-08-25 15:26:24 -07006819 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
6820
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006821 bnx2x_set_eth_mac(bp, 0);
Yitchak Gertner65abd742008-08-25 15:26:24 -07006822
6823 for (i = 0; i < MC_HASH_SIZE; i++)
6824 REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);
6825 }
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006826
Michael Chan993ac7b2009-10-10 13:46:56 +00006827#ifdef BCM_CNIC
6828 /* Clear iSCSI L2 MAC */
6829 mutex_lock(&bp->cnic_mutex);
6830 if (bp->cnic_flags & BNX2X_CNIC_FLAG_MAC_SET) {
6831 bnx2x_set_iscsi_eth_mac_addr(bp, 0);
6832 bp->cnic_flags &= ~BNX2X_CNIC_FLAG_MAC_SET;
6833 }
6834 mutex_unlock(&bp->cnic_mutex);
6835#endif
Yitchak Gertner65abd742008-08-25 15:26:24 -07006836
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07006837 if (unload_mode == UNLOAD_NORMAL)
6838 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
Eliezer Tamir228241e2008-02-28 11:56:57 -08006839
Eilon Greenstein7d0446c2009-07-29 00:20:10 +00006840 else if (bp->flags & NO_WOL_FLAG)
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07006841 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07006842
Eilon Greenstein7d0446c2009-07-29 00:20:10 +00006843 else if (bp->wol) {
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07006844 u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006845 u8 *mac_addr = bp->dev->dev_addr;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006846 u32 val;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006847 /* The mac address is written to entries 1-4 to
6848 preserve entry 0 which is used by the PMF */
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07006849 u8 entry = (BP_E1HVN(bp) + 1)*8;
6850
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006851 val = (mac_addr[0] << 8) | mac_addr[1];
Eilon Greenstein3196a882008-08-13 15:58:49 -07006852 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006853
6854 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
6855 (mac_addr[4] << 8) | mac_addr[5];
Eilon Greenstein3196a882008-08-13 15:58:49 -07006856 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006857
6858 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;
Eliezer Tamir228241e2008-02-28 11:56:57 -08006859
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006860 } else
6861 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
6862
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006863 /* Close multi and leading connections
6864 Completions for ramrods are collected in a synchronous way */
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006865 for_each_queue(bp, i)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006866
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006867 if (bnx2x_stop_client(bp, i))
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07006868#ifdef BNX2X_STOP_ON_ERROR
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006869 return;
6870#else
6871 goto unload_error;
6872#endif
6873
6874 rc = bnx2x_func_stop(bp);
6875 if (rc) {
6876 BNX2X_ERR("Function stop failed!\n");
6877#ifdef BNX2X_STOP_ON_ERROR
6878 return;
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07006879#else
6880 goto unload_error;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006881#endif
Eliezer Tamir228241e2008-02-28 11:56:57 -08006882 }
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006883#ifndef BNX2X_STOP_ON_ERROR
Eliezer Tamir228241e2008-02-28 11:56:57 -08006884unload_error:
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006885#endif
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006886 if (!BP_NOMCP(bp))
Yaniv Rosnera22f0782010-09-07 11:41:20 +00006887 reset_code = bnx2x_fw_command(bp, reset_code, 0);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006888 else {
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00006889 DP(NETIF_MSG_IFDOWN, "NO MCP - load counts[%d] "
6890 "%d, %d, %d\n", BP_PATH(bp),
6891 load_count[BP_PATH(bp)][0],
6892 load_count[BP_PATH(bp)][1],
6893 load_count[BP_PATH(bp)][2]);
6894 load_count[BP_PATH(bp)][0]--;
6895 load_count[BP_PATH(bp)][1 + port]--;
6896 DP(NETIF_MSG_IFDOWN, "NO MCP - new load counts[%d] "
6897 "%d, %d, %d\n", BP_PATH(bp),
6898 load_count[BP_PATH(bp)][0], load_count[BP_PATH(bp)][1],
6899 load_count[BP_PATH(bp)][2]);
6900 if (load_count[BP_PATH(bp)][0] == 0)
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006901 reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00006902 else if (load_count[BP_PATH(bp)][1 + port] == 0)
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006903 reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT;
6904 else
6905 reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION;
6906 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006907
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006908 if ((reset_code == FW_MSG_CODE_DRV_UNLOAD_COMMON) ||
6909 (reset_code == FW_MSG_CODE_DRV_UNLOAD_PORT))
6910 bnx2x__link_reset(bp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006911
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006912 /* Disable HW interrupts, NAPI */
6913 bnx2x_netif_stop(bp, 1);
6914
6915 /* Release IRQs */
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00006916 bnx2x_free_irq(bp);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006917
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006918 /* Reset the chip */
Eliezer Tamir228241e2008-02-28 11:56:57 -08006919 bnx2x_reset_chip(bp, reset_code);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006920
6921 /* Report UNLOAD_DONE to MCP */
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006922 if (!BP_NOMCP(bp))
Yaniv Rosnera22f0782010-09-07 11:41:20 +00006923 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
Eilon Greenstein356e2382009-02-12 08:38:32 +00006924
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00006925}
6926
/*
 * Disable the "close the gates" isolation used by the parity/recovery
 * flow.  E1 clears two bits in the per-port AEU attention mask; E1H
 * clears the PXP/NIG close masks in the general AEU mask register.
 */
void bnx2x_disable_close_the_gate(struct bnx2x *bp)
{
        u32 val;

        DP(NETIF_MSG_HW, "Disabling \"close the gates\"\n");

        if (CHIP_IS_E1(bp)) {
                int port = BP_PORT(bp);
                u32 addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
                        MISC_REG_AEU_MASK_ATTN_FUNC_0;

                val = REG_RD(bp, addr);
                val &= ~(0x300);        /* clear bits 8-9 of the AEU mask */
                REG_WR(bp, addr, val);
        } else if (CHIP_IS_E1H(bp)) {
                val = REG_RD(bp, MISC_REG_AEU_GENERAL_MASK);
                val &= ~(MISC_AEU_GENERAL_MASK_REG_AEU_PXP_CLOSE_MASK |
                         MISC_AEU_GENERAL_MASK_REG_AEU_NIG_CLOSE_MASK);
                REG_WR(bp, MISC_REG_AEU_GENERAL_MASK, val);
        }
        /* NOTE(review): no branch for E2 here - confirm E2 needs none */
}
6948
/* Close gates #2, #3 and #4:
 * Gate #2 = PXP discard of internal writes, gate #4 = PXP discard of
 * doorbells (both non-E1 only), gate #3 = bit 0 of the port's HC config
 * register.  Note gate #3 uses the opposite polarity: closing CLEARS
 * the bit while #2/#4 closing SETS it.  Used by the process-kill flow
 * to isolate host access around a chip reset.
 */
static void bnx2x_set_234_gates(struct bnx2x *bp, bool close)
{
        u32 val, addr;

        /* Gates #2 and #4a are closed/opened for "not E1" only */
        if (!CHIP_IS_E1(bp)) {
                /* #4 */
                val = REG_RD(bp, PXP_REG_HST_DISCARD_DOORBELLS);
                REG_WR(bp, PXP_REG_HST_DISCARD_DOORBELLS,
                       close ? (val | 0x1) : (val & (~(u32)1)));
                /* #2 */
                val = REG_RD(bp, PXP_REG_HST_DISCARD_INTERNAL_WRITES);
                REG_WR(bp, PXP_REG_HST_DISCARD_INTERNAL_WRITES,
                       close ? (val | 0x1) : (val & (~(u32)1)));
        }

        /* #3 (inverted sense: close clears bit 0) */
        addr = BP_PORT(bp) ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
        val = REG_RD(bp, addr);
        REG_WR(bp, addr, (!close) ? (val | 0x1) : (val & (~(u32)1)));

        DP(NETIF_MSG_HW, "%s gates #2, #3 and #4\n",
           close ? "closing" : "opening");
        mmiowb();       /* push the gate writes out before returning */
}
6975
6976#define SHARED_MF_CLP_MAGIC 0x80000000 /* `magic' bit */
6977
6978static void bnx2x_clp_reset_prep(struct bnx2x *bp, u32 *magic_val)
6979{
6980 /* Do some magic... */
6981 u32 val = MF_CFG_RD(bp, shared_mf_config.clp_mb);
6982 *magic_val = val & SHARED_MF_CLP_MAGIC;
6983 MF_CFG_WR(bp, shared_mf_config.clp_mb, val | SHARED_MF_CLP_MAGIC);
6984}
6985
6986/* Restore the value of the `magic' bit.
6987 *
6988 * @param pdev Device handle.
6989 * @param magic_val Old value of the `magic' bit.
6990 */
6991static void bnx2x_clp_reset_done(struct bnx2x *bp, u32 magic_val)
6992{
6993 /* Restore the `magic' bit value... */
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00006994 u32 val = MF_CFG_RD(bp, shared_mf_config.clp_mb);
6995 MF_CFG_WR(bp, shared_mf_config.clp_mb,
6996 (val & (~SHARED_MF_CLP_MAGIC)) | magic_val);
6997}
6998
/**
 * Prepares for MCP reset: takes care of CLP configurations.
 *
 * Saves the CLP `magic' bit (non-E1 only) and clears the shmem validity
 * map so the MCP is seen as "down" until it reinitializes shmem.
 *
 * @param bp
 * @param magic_val Old value of 'magic' bit (only written for non-E1;
 *                  callers must not rely on it on E1).
 */
static void bnx2x_reset_mcp_prep(struct bnx2x *bp, u32 *magic_val)
{
        u32 shmem;
        u32 validity_offset;

        DP(NETIF_MSG_HW, "Starting\n");

        /* Set `magic' bit in order to save MF config */
        if (!CHIP_IS_E1(bp))
                bnx2x_clp_reset_prep(bp, magic_val);

        /* Get shmem offset */
        shmem = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
        validity_offset = offsetof(struct shmem_region, validity_map[0]);

        /* Clear validity map flags (skip if no shmem is configured) */
        if (shmem > 0)
                REG_WR(bp, shmem + validity_offset, 0);
}
7024
#define MCP_TIMEOUT      5000   /* 5 seconds (in ms) */
#define MCP_ONE_TIMEOUT  100    /* 100 ms */

/* Sleep for one MCP polling interval.  Emulation and FPGA platforms
 * (CHIP_REV_IS_SLOW) are given ten times as long, since they run far
 * slower than real silicon.
 */
static inline void bnx2x_mcp_wait_one(struct bnx2x *bp)
{
        msleep(CHIP_REV_IS_SLOW(bp) ? MCP_ONE_TIMEOUT * 10
                                    : MCP_ONE_TIMEOUT);
}
7042
/*
 * Complete an MCP reset: poll the shmem validity map until the MCP has
 * come back up (DEV_INFO and MB validity bits set), then restore the
 * CLP `magic' bit saved by bnx2x_reset_mcp_prep().
 *
 * Returns 0 on success, -ENOTTY if shmem is absent or the MCP never
 * signalled validity within MCP_TIMEOUT.
 */
static int bnx2x_reset_mcp_comp(struct bnx2x *bp, u32 magic_val)
{
        u32 shmem, cnt, validity_offset, val;
        int rc = 0;

        /* give the MCP a head start before the first poll */
        msleep(100);

        /* Get shmem offset */
        shmem = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
        if (shmem == 0) {
                BNX2X_ERR("Shmem 0 return failure\n");
                rc = -ENOTTY;
                goto exit_lbl;
        }

        validity_offset = offsetof(struct shmem_region, validity_map[0]);

        /* Wait for MCP to come up */
        for (cnt = 0; cnt < (MCP_TIMEOUT / MCP_ONE_TIMEOUT); cnt++) {
                /* TBD: its best to check validity map of last port.
                 * currently checks on port 0.
                 */
                val = REG_RD(bp, shmem + validity_offset);
                DP(NETIF_MSG_HW, "shmem 0x%x validity map(0x%x)=0x%x\n", shmem,
                   shmem + validity_offset, val);

                /* check that shared memory is valid. */
                if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
                    == (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
                        break;

                bnx2x_mcp_wait_one(bp);
        }

        DP(NETIF_MSG_HW, "Cnt=%d Shmem validity map 0x%x\n", cnt, val);

        /* Check that shared memory is valid. This indicates that MCP is up. */
        if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) !=
            (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) {
                BNX2X_ERR("Shmem signature not present. MCP is not up !!\n");
                rc = -ENOTTY;
                goto exit_lbl;
        }

exit_lbl:
        /* Restore the `magic' bit value (saved only for non-E1 chips) */
        if (!CHIP_IS_E1(bp))
                bnx2x_clp_reset_done(bp, magic_val);

        return rc;
}
7094
/*
 * Prepare the PXP2 block around a chip reset (non-E1 only): clear the
 * read-start-init flag and the RBC/CFG "done" flags.  Called both
 * before and after the reset in bnx2x_process_kill().
 */
static void bnx2x_pxp_prep(struct bnx2x *bp)
{
        if (!CHIP_IS_E1(bp)) {
                REG_WR(bp, PXP2_REG_RD_START_INIT, 0);
                REG_WR(bp, PXP2_REG_RQ_RBC_DONE, 0);
                REG_WR(bp, PXP2_REG_RQ_CFG_DONE, 0);
                mmiowb();       /* make sure the writes reach the chip */
        }
}
7104
/*
 * Reset the whole chip except for:
 *      - PCIE core
 *      - PCI Glue, PSWHST, PXP/PXP2 RF (all controlled by
 *        one reset bit)
 *      - IGU
 *      - MISC (including AEU)
 *      - GRC
 *      - RBCN, RBCP
 *
 * Implemented by first CLEARing (asserting reset on) everything except
 * the not_reset masks, then SETting (releasing) all bits again, with a
 * barrier + mmiowb between the two phases to keep the ordering.
 */
static void bnx2x_process_kill_chip_reset(struct bnx2x *bp)
{
        u32 not_reset_mask1, reset_mask1, not_reset_mask2, reset_mask2;

        /* blocks in reset-register 1 that must stay out of reset */
        not_reset_mask1 =
                MISC_REGISTERS_RESET_REG_1_RST_HC |
                MISC_REGISTERS_RESET_REG_1_RST_PXPV |
                MISC_REGISTERS_RESET_REG_1_RST_PXP;

        /* blocks in reset-register 2 that must stay out of reset */
        not_reset_mask2 =
                MISC_REGISTERS_RESET_REG_2_RST_MDIO |
                MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE |
                MISC_REGISTERS_RESET_REG_2_RST_EMAC1_HARD_CORE |
                MISC_REGISTERS_RESET_REG_2_RST_MISC_CORE |
                MISC_REGISTERS_RESET_REG_2_RST_RBCN |
                MISC_REGISTERS_RESET_REG_2_RST_GRC |
                MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_REG_HARD_CORE |
                MISC_REGISTERS_RESET_REG_2_RST_MCP_N_HARD_CORE_RST_B;

        reset_mask1 = 0xffffffff;

        /* register 2 is narrower on E1 (16 bits vs 17) */
        if (CHIP_IS_E1(bp))
                reset_mask2 = 0xffff;
        else
                reset_mask2 = 0x1ffff;

        /* assert reset on everything except the keep-alive blocks */
        REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
               reset_mask1 & (~not_reset_mask1));
        REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
               reset_mask2 & (~not_reset_mask2));

        barrier();
        mmiowb();

        /* release reset on all blocks */
        REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, reset_mask1);
        REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, reset_mask2);
        mmiowb();
}
7153
/*
 * "Process kill": full recovery reset of the chip after a parity error.
 *
 * Waits (up to ~1s) for PXP2 read traffic to become idle, closes gates
 * #2/#3/#4 to isolate the host, preps the MCP and PXP blocks, resets
 * the chip (see bnx2x_process_kill_chip_reset), then waits for the MCP
 * to come back and reopens the gates.
 *
 * Returns 0 on success, -EAGAIN if the Tetris buffer never drained or
 * the MCP failed to come back up.
 */
static int bnx2x_process_kill(struct bnx2x *bp)
{
        int cnt = 1000;
        u32 val = 0;
        u32 sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1, pgl_exp_rom2;


        /* Empty the Tetris buffer, wait for 1s */
        do {
                sr_cnt = REG_RD(bp, PXP2_REG_RD_SR_CNT);
                blk_cnt = REG_RD(bp, PXP2_REG_RD_BLK_CNT);
                port_is_idle_0 = REG_RD(bp, PXP2_REG_RD_PORT_IS_IDLE_0);
                port_is_idle_1 = REG_RD(bp, PXP2_REG_RD_PORT_IS_IDLE_1);
                pgl_exp_rom2 = REG_RD(bp, PXP2_REG_PGL_EXP_ROM2);
                /* 0x7e/0xa0 are the expected "all free" counter values */
                if ((sr_cnt == 0x7e) && (blk_cnt == 0xa0) &&
                    ((port_is_idle_0 & 0x1) == 0x1) &&
                    ((port_is_idle_1 & 0x1) == 0x1) &&
                    (pgl_exp_rom2 == 0xffffffff))
                        break;
                msleep(1);
        } while (cnt-- > 0);

        /* NOTE(review): if the break fires on the very last iteration,
         * cnt is 0 here and a successful drain is reported as a timeout -
         * off-by-one worth confirming/fixing upstream.
         */
        if (cnt <= 0) {
                DP(NETIF_MSG_HW, "Tetris buffer didn't get empty or there"
                          " are still"
                          " outstanding read requests after 1s!\n");
                DP(NETIF_MSG_HW, "sr_cnt=0x%08x, blk_cnt=0x%08x,"
                          " port_is_idle_0=0x%08x,"
                          " port_is_idle_1=0x%08x, pgl_exp_rom2=0x%08x\n",
                          sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1,
                          pgl_exp_rom2);
                return -EAGAIN;
        }

        barrier();

        /* Close gates #2, #3 and #4 */
        bnx2x_set_234_gates(bp, true);

        /* TBD: Indicate that "process kill" is in progress to MCP */

        /* Clear "unprepared" bit */
        REG_WR(bp, MISC_REG_UNPREPARED, 0);
        barrier();

        /* Make sure all is written to the chip before the reset */
        mmiowb();

        /* Wait for 1ms to empty GLUE and PCI-E core queues,
         * PSWHST, GRC and PSWRD Tetris buffer.
         */
        msleep(1);

        /* Prepare to chip reset: */
        /* MCP */
        bnx2x_reset_mcp_prep(bp, &val);

        /* PXP */
        bnx2x_pxp_prep(bp);
        barrier();

        /* reset the chip */
        bnx2x_process_kill_chip_reset(bp);
        barrier();

        /* Recover after reset: */
        /* MCP */
        if (bnx2x_reset_mcp_comp(bp, val))
                return -EAGAIN;

        /* PXP */
        bnx2x_pxp_prep(bp);

        /* Open the gates #2, #3 and #4 */
        bnx2x_set_234_gates(bp, false);

        /* TBD: IGU/AEU preparation bring back the AEU/IGU to a
         * reset state, re-enable attentions. */

        return 0;
}
7235
/*
 * Recovery leader's reset path: run the "process kill" chip reset and,
 * on success, mark recovery done.  In all cases the leadership lock is
 * released and bp->is_leader cleared on exit.
 *
 * Returns 0 on success, -EAGAIN if bnx2x_process_kill() failed.
 */
static int bnx2x_leader_reset(struct bnx2x *bp)
{
        int rc = 0;
        /* Try to recover after the failure */
        if (bnx2x_process_kill(bp)) {
                printk(KERN_ERR "%s: Something bad had happen! Aii!\n",
                       bp->dev->name);
                rc = -EAGAIN;
                goto exit_leader_reset;
        }

        /* Clear "reset is in progress" bit and update the driver state */
        bnx2x_set_reset_done(bp);
        bp->recovery_state = BNX2X_RECOVERY_DONE;

exit_leader_reset:
        /* give up leadership whether the reset worked or not */
        bp->is_leader = 0;
        bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESERVED_08);
        smp_wmb();      /* publish is_leader/recovery_state to other CPUs */
        return rc;
}
7257
/* Assumption: runs under rtnl lock. This together with the fact
 * that it's called only from bnx2x_reset_task() ensure that it
 * will never be called when netif_running(bp->dev) is false.
 *
 * Parity-error recovery state machine.  One function per ASIC wins the
 * RESERVED_08 HW lock and becomes the "leader" that performs the chip
 * reset; the others wait for it.  States:
 *   INIT -> unload the nic, try to become leader, go to WAIT
 *   WAIT -> leader: wait for all functions down, then leader_reset+load;
 *           non-leader: wait for reset completion (or inherit leadership),
 *           then reload.
 * Waiting is implemented by re-scheduling bp->reset_task and returning.
 */
static void bnx2x_parity_recover(struct bnx2x *bp)
{
        DP(NETIF_MSG_HW, "Handling parity\n");
        while (1) {
                switch (bp->recovery_state) {
                case BNX2X_RECOVERY_INIT:
                        DP(NETIF_MSG_HW, "State is BNX2X_RECOVERY_INIT\n");
                        /* Try to get a LEADER_LOCK HW lock */
                        if (bnx2x_trylock_hw_lock(bp,
                                HW_LOCK_RESOURCE_RESERVED_08))
                                bp->is_leader = 1;

                        /* Stop the driver */
                        /* If interface has been removed - break */
                        if (bnx2x_nic_unload(bp, UNLOAD_RECOVERY))
                                return;

                        bp->recovery_state = BNX2X_RECOVERY_WAIT;
                        /* Ensure "is_leader" and "recovery_state"
                         * update values are seen on other CPUs
                         */
                        smp_wmb();
                        break;          /* re-enter the switch in WAIT state */

                case BNX2X_RECOVERY_WAIT:
                        DP(NETIF_MSG_HW, "State is BNX2X_RECOVERY_WAIT\n");
                        if (bp->is_leader) {
                                u32 load_counter = bnx2x_get_load_cnt(bp);
                                if (load_counter) {
                                        /* Wait until all other functions get
                                         * down.
                                         */
                                        schedule_delayed_work(&bp->reset_task,
                                                                HZ/10);
                                        return;
                                } else {
                                        /* If all other functions got down -
                                         * try to bring the chip back to
                                         * normal. In any case it's an exit
                                         * point for a leader.
                                         */
                                        if (bnx2x_leader_reset(bp) ||
                                            bnx2x_nic_load(bp, LOAD_NORMAL)) {
                                                printk(KERN_ERR"%s: Recovery "
                                                "has failed. Power cycle is "
                                                "needed.\n", bp->dev->name);
                                                /* Disconnect this device */
                                                netif_device_detach(bp->dev);
                                                /* Block ifup for all function
                                                 * of this ASIC until
                                                 * "process kill" or power
                                                 * cycle.
                                                 */
                                                bnx2x_set_reset_in_progress(bp);
                                                /* Shut down the power */
                                                bnx2x_set_power_state(bp,
                                                                PCI_D3hot);
                                                return;
                                        }

                                        return;
                                }
                        } else { /* non-leader */
                                if (!bnx2x_reset_is_done(bp)) {
                                        /* Try to get a LEADER_LOCK HW lock as
                                         * long as a former leader may have
                                         * been unloaded by the user or
                                         * released a leadership by another
                                         * reason.
                                         */
                                        if (bnx2x_trylock_hw_lock(bp,
                                            HW_LOCK_RESOURCE_RESERVED_08)) {
                                                /* I'm a leader now! Restart a
                                                 * switch case.
                                                 */
                                                bp->is_leader = 1;
                                                break;
                                        }

                                        schedule_delayed_work(&bp->reset_task,
                                                                HZ/10);
                                        return;

                                } else { /* A leader has completed
                                          * the "process kill". It's an exit
                                          * point for a non-leader.
                                          */
                                        bnx2x_nic_load(bp, LOAD_NORMAL);
                                        bp->recovery_state =
                                                BNX2X_RECOVERY_DONE;
                                        smp_wmb();
                                        return;
                                }
                        }
                        /* every path above returns or breaks, so no
                         * fallthrough into default can actually occur */
                default:
                        return;
                }
        }
}
7361
7362/* bnx2x_nic_unload() flushes the bnx2x_wq, thus reset task is
7363 * scheduled on a general queue in order to prevent a dead lock.
7364 */
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007365static void bnx2x_reset_task(struct work_struct *work)
7366{
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00007367 struct bnx2x *bp = container_of(work, struct bnx2x, reset_task.work);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007368
7369#ifdef BNX2X_STOP_ON_ERROR
7370 BNX2X_ERR("reset task called but STOP_ON_ERROR defined"
7371 " so reset not done to allow debug dump,\n"
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00007372 KERN_ERR " you will need to reboot when done\n");
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007373 return;
7374#endif
7375
7376 rtnl_lock();
7377
7378 if (!netif_running(bp->dev))
7379 goto reset_task_exit;
7380
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00007381 if (unlikely(bp->recovery_state != BNX2X_RECOVERY_DONE))
7382 bnx2x_parity_recover(bp);
7383 else {
7384 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
7385 bnx2x_nic_load(bp, LOAD_NORMAL);
7386 }
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007387
7388reset_task_exit:
7389 rtnl_unlock();
7390}
7391
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007392/* end of nic load/unload */
7393
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007394/*
7395 * Init service functions
7396 */
7397
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00007398u32 bnx2x_get_pretend_reg(struct bnx2x *bp)
Eilon Greensteinf1ef27e2009-02-12 08:36:23 +00007399{
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00007400 u32 base = PXP2_REG_PGL_PRETEND_FUNC_F0;
7401 u32 stride = PXP2_REG_PGL_PRETEND_FUNC_F1 - base;
7402 return base + (BP_ABS_FUNC(bp)) * stride;
Eilon Greensteinf1ef27e2009-02-12 08:36:23 +00007403}
7404
/*
 * Disable interrupts on a non-E1 chip by temporarily "pretending" to be
 * function 0 via the PXP2 pretend register, running the E1-style
 * bnx2x_int_disable(), and then restoring the real function id.  The
 * write/read pairs flush the GRC transaction on the chip side.
 */
static void bnx2x_undi_int_disable_e1h(struct bnx2x *bp)
{
        u32 reg = bnx2x_get_pretend_reg(bp);

        /* Flush all outstanding writes */
        mmiowb();

        /* Pretend to be function 0 */
        REG_WR(bp, reg, 0);
        REG_RD(bp, reg);        /* Flush the GRC transaction (in the chip) */

        /* From now we are in the "like-E1" mode */
        bnx2x_int_disable(bp);

        /* Flush all outstanding writes */
        mmiowb();

        /* Restore the original function */
        REG_WR(bp, reg, BP_ABS_FUNC(bp));
        REG_RD(bp, reg);        /* flush the restore as well */
}
7426
/* Disable chip interrupts during the UNDI takeover: E1 can be disabled
 * directly, while newer chips must go through the function-pretend
 * sequence in bnx2x_undi_int_disable_e1h().
 */
static inline void bnx2x_undi_int_disable(struct bnx2x *bp)
{
        if (!CHIP_IS_E1(bp))
                bnx2x_undi_int_disable_e1h(bp);
        else
                bnx2x_int_disable(bp);
}
7434
/*
 * Take the device over from a pre-boot UNDI driver, if one is active.
 *
 * Detection: MISC_REG_UNPREPARED == 1 and the doorbell normal-CID
 * offset == 0x7 (the value UNDI programs).  If detected: request FW
 * unload for port 0 (and port 1 if needed), disable interrupts, block
 * RX into the BRB, reset the device while preserving the NIG port-swap
 * strap, report UNLOAD_DONE, and finally restore this function's
 * pf_num/fw_seq.  The UNDI HW lock serializes this against the other
 * functions.
 */
static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
{
        u32 val;

        /* Check if there is any driver already loaded */
        val = REG_RD(bp, MISC_REG_UNPREPARED);
        if (val == 0x1) {
                /* Check if it is the UNDI driver
                 * UNDI driver initializes CID offset for normal bell to 0x7
                 */
                bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
                val = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
                if (val == 0x7) {
                        u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
                        /* save our pf_num */
                        int orig_pf_num = bp->pf_num;
                        u32 swap_en;
                        u32 swap_val;

                        /* clear the UNDI indication */
                        REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0);

                        BNX2X_DEV_INFO("UNDI is active! reset device\n");

                        /* try unload UNDI on port 0 */
                        bp->pf_num = 0;
                        bp->fw_seq =
                              (SHMEM_RD(bp, func_mb[bp->pf_num].drv_mb_header) &
                                DRV_MSG_SEQ_NUMBER_MASK);
                        reset_code = bnx2x_fw_command(bp, reset_code, 0);

                        /* if UNDI is loaded on the other port */
                        if (reset_code != FW_MSG_CODE_DRV_UNLOAD_COMMON) {

                                /* send "DONE" for previous unload */
                                bnx2x_fw_command(bp,
                                                 DRV_MSG_CODE_UNLOAD_DONE, 0);

                                /* unload UNDI on port 1 */
                                bp->pf_num = 1;
                                bp->fw_seq =
                              (SHMEM_RD(bp, func_mb[bp->pf_num].drv_mb_header) &
                                        DRV_MSG_SEQ_NUMBER_MASK);
                                reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;

                                bnx2x_fw_command(bp, reset_code, 0);
                        }

                        /* now it's safe to release the lock */
                        bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);

                        bnx2x_undi_int_disable(bp);

                        /* close input traffic and wait for it */
                        /* Do not rcv packets to BRB */
                        REG_WR(bp,
                              (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_DRV_MASK :
                                             NIG_REG_LLH0_BRB1_DRV_MASK), 0x0);
                        /* Do not direct rcv packets that are not for MCP to
                         * the BRB */
                        REG_WR(bp,
                              (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_NOT_MCP :
                                             NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
                        /* clear AEU */
                        REG_WR(bp,
                              (BP_PORT(bp) ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
                                             MISC_REG_AEU_MASK_ATTN_FUNC_0), 0);
                        msleep(10);

                        /* save NIG port swap info */
                        swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
                        swap_en = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
                        /* reset device */
                        REG_WR(bp,
                               GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
                               0xd3ffffff);
                        REG_WR(bp,
                               GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
                               0x1403);
                        /* take the NIG out of reset and restore swap values */
                        REG_WR(bp,
                               GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
                               MISC_REGISTERS_RESET_REG_1_RST_NIG);
                        REG_WR(bp, NIG_REG_PORT_SWAP, swap_val);
                        REG_WR(bp, NIG_REG_STRAP_OVERRIDE, swap_en);

                        /* send unload done to the MCP */
                        bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);

                        /* restore our func and fw_seq */
                        bp->pf_num = orig_pf_num;
                        bp->fw_seq =
                              (SHMEM_RD(bp, func_mb[bp->pf_num].drv_mb_header) &
                                DRV_MSG_SEQ_NUMBER_MASK);
                } else
                        bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
        }
}
7533
/*
 * bnx2x_get_common_hwinfo - read chip-wide (port-independent) HW info.
 *
 * Fills bp->common and related fields from chip registers and MCP shared
 * memory (shmem): chip id/revision, 2- vs 4-port mode, PF id, doorbell
 * size, base fast-path status block id, flash size, shmem base addresses,
 * bootcode version, feature flags and WoL capability.  If no shmem base
 * is programmed, the MCP is treated as inactive (NO_MCP_FLAG) and the
 * shmem-derived fields are left untouched.
 */
static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
{
	u32 val, val2, val3, val4, id;
	u16 pmc;

	/* Get the chip revision id and number. */
	/* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
	val = REG_RD(bp, MISC_REG_CHIP_NUM);
	id = ((val & 0xffff) << 16);
	val = REG_RD(bp, MISC_REG_CHIP_REV);
	id |= ((val & 0xf) << 12);
	val = REG_RD(bp, MISC_REG_CHIP_METAL);
	id |= ((val & 0xff) << 4);
	val = REG_RD(bp, MISC_REG_BOND_ID);
	id |= (val & 0xf);
	bp->common.chip_id = id;

	/* Set doorbell size */
	bp->db_size = (1 << BNX2X_DB_SHIFT);

	if (CHIP_IS_E2(bp)) {
		/* Port-mode override register: bit 0 set means the override
		 * is active and bit 1 carries the mode; otherwise read the
		 * strap register directly.
		 */
		val = REG_RD(bp, MISC_REG_PORT4MODE_EN_OVWR);
		if ((val & 1) == 0)
			val = REG_RD(bp, MISC_REG_PORT4MODE_EN);
		else
			val = (val >> 1) & 1;
		BNX2X_DEV_INFO("chip is in %s\n", val ? "4_PORT_MODE" :
						       "2_PORT_MODE");
		bp->common.chip_port_mode = val ? CHIP_4_PORT_MODE :
						 CHIP_2_PORT_MODE;

		/* Derive the PF id from the absolute PF number; the mapping
		 * differs between 4-port and 2-port mode. */
		if (CHIP_MODE_IS_4_PORT(bp))
			bp->pfid = (bp->pf_num >> 1);	/* 0..3 */
		else
			bp->pfid = (bp->pf_num & 0x6);	/* 0, 2, 4, 6 */
	} else {
		bp->common.chip_port_mode = CHIP_PORT_MODE_NONE; /* N/A */
		bp->pfid = bp->pf_num;			/* 0..7 */
	}

	/*
	 * set base FW non-default (fast path) status block id, this value is
	 * used to initialize the fw_sb_id saved on the fp/queue structure to
	 * determine the id used by the FW.
	 */
	if (CHIP_IS_E1x(bp))
		bp->base_fw_ndsb = BP_PORT(bp) * FP_SB_MAX_E1x;
	else /* E2 */
		bp->base_fw_ndsb = BP_PORT(bp) * FP_SB_MAX_E2;

	bp->link_params.chip_id = bp->common.chip_id;
	BNX2X_DEV_INFO("chip ID is 0x%x\n", id);

	/* NOTE(review): 0x2874 is an undocumented (here) strap register;
	 * together with the chip-id LSB it identifies single-port devices.
	 */
	val = (REG_RD(bp, 0x2874) & 0x55);
	if ((bp->common.chip_id & 0x1) ||
	    (CHIP_IS_E1(bp) && val) || (CHIP_IS_E1H(bp) && (val == 0x55))) {
		bp->flags |= ONE_PORT_FLAG;
		BNX2X_DEV_INFO("single port device\n");
	}

	/* Flash size is encoded as a power-of-two multiple of 1MB */
	val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4);
	bp->common.flash_size = (NVRAM_1MB_SIZE <<
				 (val & MCPR_NVM_CFG4_FLASH_SIZE));
	BNX2X_DEV_INFO("flash_size 0x%x (%d)\n",
		       bp->common.flash_size, bp->common.flash_size);

	bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
	/* shmem2 base lives in a per-path generic register */
	bp->common.shmem2_base = REG_RD(bp, (BP_PATH(bp) ?
					MISC_REG_GENERIC_CR_1 :
					MISC_REG_GENERIC_CR_0));
	bp->link_params.shmem_base = bp->common.shmem_base;
	bp->link_params.shmem2_base = bp->common.shmem2_base;
	BNX2X_DEV_INFO("shmem offset 0x%x  shmem2 offset 0x%x\n",
		       bp->common.shmem_base, bp->common.shmem2_base);

	if (!bp->common.shmem_base) {
		BNX2X_DEV_INFO("MCP not active\n");
		bp->flags |= NO_MCP_FLAG;
		return;
	}

	/* Sanity-check the MCP validity signature; warn but continue */
	val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
	if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		!= (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		BNX2X_ERR("BAD MCP validity signature\n");

	bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config);
	BNX2X_DEV_INFO("hw_config 0x%08x\n", bp->common.hw_config);

	bp->link_params.hw_led_mode = ((bp->common.hw_config &
					SHARED_HW_CFG_LED_MODE_MASK) >>
				       SHARED_HW_CFG_LED_MODE_SHIFT);

	bp->link_params.feature_config_flags = 0;
	val = SHMEM_RD(bp, dev_info.shared_feature_config.config);
	if (val & SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_ENABLED)
		bp->link_params.feature_config_flags |=
				FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
	else
		bp->link_params.feature_config_flags &=
				~FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;

	/* Bootcode version: only warn if it is older than required */
	val = SHMEM_RD(bp, dev_info.bc_rev) >> 8;
	bp->common.bc_ver = val;
	BNX2X_DEV_INFO("bc_ver %X\n", val);
	if (val < BNX2X_BC_VER) {
		/* for now only warn
		 * later we might need to enforce this */
		BNX2X_ERR("This driver needs bc_ver %X but found %X, "
			  "please upgrade BC\n", BNX2X_BC_VER, val);
	}
	/* Record which optic-module-verification features this BC supports */
	bp->link_params.feature_config_flags |=
		(val >= REQ_BC_VER_4_VRFY_FIRST_PHY_OPT_MDL) ?
		FEATURE_CONFIG_BC_SUPPORTS_OPT_MDL_VRFY : 0;

	bp->link_params.feature_config_flags |=
		(val >= REQ_BC_VER_4_VRFY_SPECIFIC_PHY_OPT_MDL) ?
		FEATURE_CONFIG_BC_SUPPORTS_DUAL_PHY_OPT_MDL_VRFY : 0;

	if (BP_E1HVN(bp) == 0) {
		pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_PMC, &pmc);
		bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG;
	} else {
		/* no WOL capability for E1HVN != 0 */
		bp->flags |= NO_WOL_FLAG;
	}
	BNX2X_DEV_INFO("%sWoL capable\n",
		       (bp->flags & NO_WOL_FLAG) ? "not " : "");

	/* Board part number: four consecutive 32-bit words from shmem */
	val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num);
	val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]);
	val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]);
	val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]);

	dev_info(&bp->pdev->dev, "part number %X-%X-%X-%X\n",
		 val, val2, val3, val4);
}
7671
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00007672#define IGU_FID(val) GET_FIELD((val), IGU_REG_MAPPING_MEMORY_FID)
7673#define IGU_VEC(val) GET_FIELD((val), IGU_REG_MAPPING_MEMORY_VECTOR)
7674
/*
 * bnx2x_get_igu_cam_info - discover this PF's IGU status-block layout.
 *
 * In backward-compatible interrupt mode the base/count/default status
 * block ids are computed arithmetically from the PF id / VN.  In normal
 * IGU mode the IGU mapping CAM is scanned for entries owned by this PF:
 * vector 0 is the default status block, the remaining vectors become the
 * fast-path status blocks.  Results go to bp->igu_base_sb,
 * bp->igu_sb_cnt and bp->igu_dsb_id.
 */
static void __devinit bnx2x_get_igu_cam_info(struct bnx2x *bp)
{
	int pfid = BP_FUNC(bp);
	int vn = BP_E1HVN(bp);
	int igu_sb_id;
	u32 val;
	u8 fid;

	/* 0xff == "not found yet" sentinel for the base status block */
	bp->igu_base_sb = 0xff;
	bp->igu_sb_cnt = 0;
	if (CHIP_INT_MODE_IS_BC(bp)) {
		/* Backward-compatible mode: fixed E1x-style layout,
		 * capped by the number of L2 clients. */
		bp->igu_sb_cnt = min_t(u8, FP_SB_MAX_E1x,
				       bp->l2_cid_count);

		bp->igu_base_sb = (CHIP_MODE_IS_4_PORT(bp) ? pfid : vn) *
			FP_SB_MAX_E1x;

		/* default SBs follow all fast-path SBs of all VNs */
		bp->igu_dsb_id = E1HVN_MAX * FP_SB_MAX_E1x +
			(CHIP_MODE_IS_4_PORT(bp) ? pfid : vn);

		return;
	}

	/* IGU in normal mode - read CAM */
	for (igu_sb_id = 0; igu_sb_id < IGU_REG_MAPPING_MEMORY_SIZE;
	     igu_sb_id++) {
		val = REG_RD(bp, IGU_REG_MAPPING_MEMORY + igu_sb_id * 4);
		if (!(val & IGU_REG_MAPPING_MEMORY_VALID))
			continue;
		fid = IGU_FID(val);
		/* only PF entries belonging to this PF are interesting */
		if ((fid & IGU_FID_ENCODE_IS_PF)) {
			if ((fid & IGU_FID_PF_NUM_MASK) != pfid)
				continue;
			if (IGU_VEC(val) == 0)
				/* default status block */
				bp->igu_dsb_id = igu_sb_id;
			else {
				/* first non-default entry found becomes
				 * the base fast-path status block */
				if (bp->igu_base_sb == 0xff)
					bp->igu_base_sb = igu_sb_id;
				bp->igu_sb_cnt++;
			}
		}
	}
	/* never use more status blocks than L2 clients */
	bp->igu_sb_cnt = min_t(u8, bp->igu_sb_cnt, bp->l2_cid_count);
	if (bp->igu_sb_cnt == 0)
		BNX2X_ERR("CAM configuration error\n");
}
7722
/*
 * bnx2x_link_settings_supported - build the supported-speed masks.
 *
 * Aggregates the SUPPORTED_* capability bits of the probed PHYs into
 * bp->port.supported[0..1] (one entry per link configuration; two
 * entries only in the dual-external-PHY case), reads the PHY MDIO
 * address from the NIG according to @switch_cfg, and then clears every
 * speed bit not allowed by the NVRAM speed_cap_mask of each
 * configuration.  On a bad NVRAM PHY config or bad @switch_cfg it logs
 * an error and returns with whatever was set so far.
 */
static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp,
						    u32 switch_cfg)
{
	int cfg_size = 0, idx, port = BP_PORT(bp);

	/* Aggregation of supported attributes of all external phys */
	bp->port.supported[0] = 0;
	bp->port.supported[1] = 0;
	switch (bp->link_params.num_phys) {
	case 1:
		/* internal PHY only */
		bp->port.supported[0] = bp->link_params.phy[INT_PHY].supported;
		cfg_size = 1;
		break;
	case 2:
		/* single external PHY */
		bp->port.supported[0] = bp->link_params.phy[EXT_PHY1].supported;
		cfg_size = 1;
		break;
	case 3:
		/* two external PHYs; NVRAM may declare them swapped, in
		 * which case the supported masks are exchanged */
		if (bp->link_params.multi_phy_config &
		    PORT_HW_CFG_PHY_SWAPPED_ENABLED) {
			bp->port.supported[1] =
				bp->link_params.phy[EXT_PHY1].supported;
			bp->port.supported[0] =
				bp->link_params.phy[EXT_PHY2].supported;
		} else {
			bp->port.supported[0] =
				bp->link_params.phy[EXT_PHY1].supported;
			bp->port.supported[1] =
				bp->link_params.phy[EXT_PHY2].supported;
		}
		cfg_size = 2;
		break;
	}

	if (!(bp->port.supported[0] || bp->port.supported[1])) {
		BNX2X_ERR("NVRAM config error. BAD phy config."
			  "PHY1 config 0x%x, PHY2 config 0x%x\n",
			   SHMEM_RD(bp,
			   dev_info.port_hw_config[port].external_phy_config),
			   SHMEM_RD(bp,
			   dev_info.port_hw_config[port].external_phy_config2));
		return;
	}

	/* read the PHY MDIO address from the NIG; the register stride
	 * differs between the 1G (SerDes) and 10G (XGXS) blocks */
	switch (switch_cfg) {
	case SWITCH_CFG_1G:
		bp->port.phy_addr = REG_RD(bp, NIG_REG_SERDES0_CTRL_PHY_ADDR +
					   port*0x10);
		BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
		break;

	case SWITCH_CFG_10G:
		bp->port.phy_addr = REG_RD(bp, NIG_REG_XGXS0_CTRL_PHY_ADDR +
					   port*0x18);
		BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
		break;

	default:
		BNX2X_ERR("BAD switch_cfg link_config 0x%x\n",
			  bp->port.link_config[0]);
		return;
	}
	/* mask what we support according to speed_cap_mask per configuration */
	for (idx = 0; idx < cfg_size; idx++) {
		if (!(bp->link_params.speed_cap_mask[idx] &
				PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF))
			bp->port.supported[idx] &= ~SUPPORTED_10baseT_Half;

		if (!(bp->link_params.speed_cap_mask[idx] &
				PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL))
			bp->port.supported[idx] &= ~SUPPORTED_10baseT_Full;

		if (!(bp->link_params.speed_cap_mask[idx] &
				PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))
			bp->port.supported[idx] &= ~SUPPORTED_100baseT_Half;

		if (!(bp->link_params.speed_cap_mask[idx] &
				PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL))
			bp->port.supported[idx] &= ~SUPPORTED_100baseT_Full;

		if (!(bp->link_params.speed_cap_mask[idx] &
					PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))
			bp->port.supported[idx] &= ~(SUPPORTED_1000baseT_Half |
						     SUPPORTED_1000baseT_Full);

		if (!(bp->link_params.speed_cap_mask[idx] &
					PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
			bp->port.supported[idx] &= ~SUPPORTED_2500baseX_Full;

		if (!(bp->link_params.speed_cap_mask[idx] &
					PORT_HW_CFG_SPEED_CAPABILITY_D0_10G))
			bp->port.supported[idx] &= ~SUPPORTED_10000baseT_Full;

	}

	BNX2X_DEV_INFO("supported 0x%x 0x%x\n", bp->port.supported[0],
		       bp->port.supported[1]);
}
7821
/*
 * bnx2x_link_settings_requested - translate NVRAM link config to
 * requested link parameters.
 *
 * For each link configuration (one, or two when dual external PHYs are
 * present), decodes the NVRAM-requested speed/duplex into
 * bp->link_params.req_line_speed/req_duplex and the matching ethtool
 * ADVERTISED_* bits in bp->port.advertising.  A requested speed that the
 * hardware does not support is reported as an NVRAM config error and
 * aborts processing (return).  Finally the requested flow control is
 * taken from NVRAM, with AUTO downgraded to NONE when autoneg is not
 * supported.
 */
static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
{
	u32 link_config, idx, cfg_size = 0;
	bp->port.advertising[0] = 0;
	bp->port.advertising[1] = 0;
	/* one configuration per link; two only with dual external PHYs */
	switch (bp->link_params.num_phys) {
	case 1:
	case 2:
		cfg_size = 1;
		break;
	case 3:
		cfg_size = 2;
		break;
	}
	for (idx = 0; idx < cfg_size; idx++) {
		/* full duplex unless a half-duplex speed is selected below */
		bp->link_params.req_duplex[idx] = DUPLEX_FULL;
		link_config = bp->port.link_config[idx];
		switch (link_config & PORT_FEATURE_LINK_SPEED_MASK) {
		case PORT_FEATURE_LINK_SPEED_AUTO:
			if (bp->port.supported[idx] & SUPPORTED_Autoneg) {
				bp->link_params.req_line_speed[idx] =
					SPEED_AUTO_NEG;
				bp->port.advertising[idx] |=
					bp->port.supported[idx];
			} else {
				/* force 10G, no AN */
				bp->link_params.req_line_speed[idx] =
					SPEED_10000;
				bp->port.advertising[idx] |=
					(ADVERTISED_10000baseT_Full |
					 ADVERTISED_FIBRE);
				/* skip the common flow-control handling
				 * below for this configuration */
				continue;
			}
			break;

		case PORT_FEATURE_LINK_SPEED_10M_FULL:
			if (bp->port.supported[idx] & SUPPORTED_10baseT_Full) {
				bp->link_params.req_line_speed[idx] =
					SPEED_10;
				bp->port.advertising[idx] |=
					(ADVERTISED_10baseT_Full |
					 ADVERTISED_TP);
			} else {
				BNX2X_ERROR("NVRAM config error. "
					    "Invalid link_config 0x%x"
					    " speed_cap_mask 0x%x\n",
					    link_config,
					    bp->link_params.speed_cap_mask[idx]);
				return;
			}
			break;

		case PORT_FEATURE_LINK_SPEED_10M_HALF:
			if (bp->port.supported[idx] & SUPPORTED_10baseT_Half) {
				bp->link_params.req_line_speed[idx] =
					SPEED_10;
				bp->link_params.req_duplex[idx] =
					DUPLEX_HALF;
				bp->port.advertising[idx] |=
					(ADVERTISED_10baseT_Half |
					 ADVERTISED_TP);
			} else {
				BNX2X_ERROR("NVRAM config error. "
					    "Invalid link_config 0x%x"
					    " speed_cap_mask 0x%x\n",
					    link_config,
					    bp->link_params.speed_cap_mask[idx]);
				return;
			}
			break;

		case PORT_FEATURE_LINK_SPEED_100M_FULL:
			if (bp->port.supported[idx] &
			    SUPPORTED_100baseT_Full) {
				bp->link_params.req_line_speed[idx] =
					SPEED_100;
				bp->port.advertising[idx] |=
					(ADVERTISED_100baseT_Full |
					 ADVERTISED_TP);
			} else {
				BNX2X_ERROR("NVRAM config error. "
					    "Invalid link_config 0x%x"
					    " speed_cap_mask 0x%x\n",
					    link_config,
					    bp->link_params.speed_cap_mask[idx]);
				return;
			}
			break;

		case PORT_FEATURE_LINK_SPEED_100M_HALF:
			if (bp->port.supported[idx] &
			    SUPPORTED_100baseT_Half) {
				bp->link_params.req_line_speed[idx] =
					SPEED_100;
				bp->link_params.req_duplex[idx] =
					DUPLEX_HALF;
				bp->port.advertising[idx] |=
					(ADVERTISED_100baseT_Half |
					 ADVERTISED_TP);
			} else {
				BNX2X_ERROR("NVRAM config error. "
					    "Invalid link_config 0x%x"
					    " speed_cap_mask 0x%x\n",
					    link_config,
					    bp->link_params.speed_cap_mask[idx]);
				return;
			}
			break;

		case PORT_FEATURE_LINK_SPEED_1G:
			if (bp->port.supported[idx] &
			    SUPPORTED_1000baseT_Full) {
				bp->link_params.req_line_speed[idx] =
					SPEED_1000;
				bp->port.advertising[idx] |=
					(ADVERTISED_1000baseT_Full |
					 ADVERTISED_TP);
			} else {
				BNX2X_ERROR("NVRAM config error. "
					    "Invalid link_config 0x%x"
					    " speed_cap_mask 0x%x\n",
					    link_config,
					    bp->link_params.speed_cap_mask[idx]);
				return;
			}
			break;

		case PORT_FEATURE_LINK_SPEED_2_5G:
			if (bp->port.supported[idx] &
			    SUPPORTED_2500baseX_Full) {
				bp->link_params.req_line_speed[idx] =
					SPEED_2500;
				bp->port.advertising[idx] |=
					(ADVERTISED_2500baseX_Full |
					 ADVERTISED_TP);
			} else {
				BNX2X_ERROR("NVRAM config error. "
					    "Invalid link_config 0x%x"
					    " speed_cap_mask 0x%x\n",
					    link_config,
					    bp->link_params.speed_cap_mask[idx]);
				return;
			}
			break;

		case PORT_FEATURE_LINK_SPEED_10G_CX4:
		case PORT_FEATURE_LINK_SPEED_10G_KX4:
		case PORT_FEATURE_LINK_SPEED_10G_KR:
			if (bp->port.supported[idx] &
			    SUPPORTED_10000baseT_Full) {
				bp->link_params.req_line_speed[idx] =
					SPEED_10000;
				bp->port.advertising[idx] |=
					(ADVERTISED_10000baseT_Full |
					 ADVERTISED_FIBRE);
			} else {
				BNX2X_ERROR("NVRAM config error. "
					    "Invalid link_config 0x%x"
					    " speed_cap_mask 0x%x\n",
					    link_config,
					    bp->link_params.speed_cap_mask[idx]);
				return;
			}
			break;

		default:
			/* unknown NVRAM speed: warn and fall back to autoneg
			 * advertising everything the HW supports */
			BNX2X_ERROR("NVRAM config error. "
				    "BAD link speed link_config 0x%x\n",
				    link_config);
			bp->link_params.req_line_speed[idx] =
				SPEED_AUTO_NEG;
			bp->port.advertising[idx] =
				bp->port.supported[idx];
			break;
		}

		bp->link_params.req_flow_ctrl[idx] = (link_config &
					 PORT_FEATURE_FLOW_CONTROL_MASK);
		/* AUTO flow control requires autoneg support */
		if ((bp->link_params.req_flow_ctrl[idx] ==
		     BNX2X_FLOW_CTRL_AUTO) &&
		    !(bp->port.supported[idx] & SUPPORTED_Autoneg)) {
			bp->link_params.req_flow_ctrl[idx] =
				BNX2X_FLOW_CTRL_NONE;
		}

		BNX2X_DEV_INFO("req_line_speed %d  req_duplex %d req_flow_ctrl"
			       " 0x%x advertising 0x%x\n",
			       bp->link_params.req_line_speed[idx],
			       bp->link_params.req_duplex[idx],
			       bp->link_params.req_flow_ctrl[idx],
			       bp->port.advertising[idx]);
	}
}
8015
Michael Chane665bfda52009-10-10 13:46:54 +00008016static void __devinit bnx2x_set_mac_buf(u8 *mac_buf, u32 mac_lo, u16 mac_hi)
8017{
8018 mac_hi = cpu_to_be16(mac_hi);
8019 mac_lo = cpu_to_be32(mac_lo);
8020 memcpy(mac_buf, &mac_hi, sizeof(mac_hi));
8021 memcpy(mac_buf + sizeof(mac_hi), &mac_lo, sizeof(mac_lo));
8022}
8023
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008024static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008025{
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008026 int port = BP_PORT(bp);
8027 u32 val, val2;
Eilon Greenstein589abe32009-02-12 08:36:55 +00008028 u32 config;
Yaniv Rosnerb7737c92010-09-07 11:40:54 +00008029 u32 ext_phy_type, ext_phy_config;;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008030
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07008031 bp->link_params.bp = bp;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008032 bp->link_params.port = port;
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07008033
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07008034 bp->link_params.lane_config =
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008035 SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config);
Eilon Greenstein4d295db2009-07-21 05:47:47 +00008036
Yaniv Rosnera22f0782010-09-07 11:41:20 +00008037 bp->link_params.speed_cap_mask[0] =
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008038 SHMEM_RD(bp,
8039 dev_info.port_hw_config[port].speed_capability_mask);
Yaniv Rosnera22f0782010-09-07 11:41:20 +00008040 bp->link_params.speed_cap_mask[1] =
8041 SHMEM_RD(bp,
8042 dev_info.port_hw_config[port].speed_capability_mask2);
8043 bp->port.link_config[0] =
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008044 SHMEM_RD(bp, dev_info.port_feature_config[port].link_config);
8045
Yaniv Rosnera22f0782010-09-07 11:41:20 +00008046 bp->port.link_config[1] =
8047 SHMEM_RD(bp, dev_info.port_feature_config[port].link_config2);
Eilon Greensteinc2c8b032009-02-12 08:37:14 +00008048
Yaniv Rosnera22f0782010-09-07 11:41:20 +00008049 bp->link_params.multi_phy_config =
8050 SHMEM_RD(bp, dev_info.port_hw_config[port].multi_phy_config);
Eilon Greenstein3ce2c3f2009-02-12 08:37:52 +00008051 /* If the device is capable of WoL, set the default state according
8052 * to the HW
8053 */
Eilon Greenstein4d295db2009-07-21 05:47:47 +00008054 config = SHMEM_RD(bp, dev_info.port_feature_config[port].config);
Eilon Greenstein3ce2c3f2009-02-12 08:37:52 +00008055 bp->wol = (!(bp->flags & NO_WOL_FLAG) &&
8056 (config & PORT_FEATURE_WOL_ENABLED));
8057
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00008058 BNX2X_DEV_INFO("lane_config 0x%08x "
Yaniv Rosnera22f0782010-09-07 11:41:20 +00008059 "speed_cap_mask0 0x%08x link_config0 0x%08x\n",
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07008060 bp->link_params.lane_config,
Yaniv Rosnera22f0782010-09-07 11:41:20 +00008061 bp->link_params.speed_cap_mask[0],
8062 bp->port.link_config[0]);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008063
Yaniv Rosnera22f0782010-09-07 11:41:20 +00008064 bp->link_params.switch_cfg = (bp->port.link_config[0] &
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00008065 PORT_FEATURE_CONNECTED_SWITCH_MASK);
Yaniv Rosnerb7737c92010-09-07 11:40:54 +00008066 bnx2x_phy_probe(&bp->link_params);
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07008067 bnx2x_link_settings_supported(bp, bp->link_params.switch_cfg);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008068
8069 bnx2x_link_settings_requested(bp);
8070
Eilon Greenstein01cd4522009-08-12 08:23:08 +00008071 /*
8072 * If connected directly, work with the internal PHY, otherwise, work
8073 * with the external PHY
8074 */
Yaniv Rosnerb7737c92010-09-07 11:40:54 +00008075 ext_phy_config =
8076 SHMEM_RD(bp,
8077 dev_info.port_hw_config[port].external_phy_config);
8078 ext_phy_type = XGXS_EXT_PHY_TYPE(ext_phy_config);
Eilon Greenstein01cd4522009-08-12 08:23:08 +00008079 if (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT)
Yaniv Rosnerb7737c92010-09-07 11:40:54 +00008080 bp->mdio.prtad = bp->port.phy_addr;
Eilon Greenstein01cd4522009-08-12 08:23:08 +00008081
8082 else if ((ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE) &&
8083 (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN))
8084 bp->mdio.prtad =
Yaniv Rosnerb7737c92010-09-07 11:40:54 +00008085 XGXS_EXT_PHY_ADDR(ext_phy_config);
Eilon Greenstein01cd4522009-08-12 08:23:08 +00008086
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008087 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
8088 val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
Michael Chane665bfda52009-10-10 13:46:54 +00008089 bnx2x_set_mac_buf(bp->dev->dev_addr, val, val2);
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07008090 memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
8091 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
Michael Chan37b091b2009-10-10 13:46:55 +00008092
8093#ifdef BCM_CNIC
8094 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].iscsi_mac_upper);
8095 val = SHMEM_RD(bp, dev_info.port_hw_config[port].iscsi_mac_lower);
8096 bnx2x_set_mac_buf(bp->iscsi_mac, val, val2);
8097#endif
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008098}
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008099
/*
 * bnx2x_get_hwinfo - top-level HW-info gathering at probe time.
 *
 * Reads common chip info, selects the interrupt block (HC on E1x, IGU
 * on E2 with normal/backward-compatible mode detection), initializes
 * multi-function (MF) configuration from the MF config shmem region,
 * adjusts the status-block count for MF/E2 limits, reads per-port info
 * and the firmware mailbox sequence, and finally sets the device MAC
 * address (MF override from the MF config, or a random MAC when no MCP
 * is present, e.g. on emulation/FPGA).
 *
 * Returns 0 on success or -EPERM on an invalid MF configuration.
 */
static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
{
	int func = BP_ABS_FUNC(bp);
	int vn;
	u32 val, val2;
	int rc = 0;

	bnx2x_get_common_hwinfo(bp);

	if (CHIP_IS_E1x(bp)) {
		/* E1x uses the HC interrupt block with a fixed layout */
		bp->common.int_block = INT_BLOCK_HC;

		bp->igu_dsb_id = DEF_SB_IGU_ID;
		bp->igu_base_sb = 0;
		bp->igu_sb_cnt = min_t(u8, FP_SB_MAX_E1x, bp->l2_cid_count);
	} else {
		/* E2 uses the IGU; detect backward-compatible mode and
		 * read the actual layout from the IGU CAM */
		bp->common.int_block = INT_BLOCK_IGU;
		val = REG_RD(bp, IGU_REG_BLOCK_CONFIGURATION);
		if (val & IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN) {
			DP(NETIF_MSG_PROBE, "IGU Backward Compatible Mode\n");
			bp->common.int_block |= INT_BLOCK_MODE_BW_COMP;
		} else
			DP(NETIF_MSG_PROBE, "IGU Normal Mode\n");

		bnx2x_get_igu_cam_info(bp);

	}
	DP(NETIF_MSG_PROBE, "igu_dsb_id %d  igu_base_sb %d  igu_sb_cnt %d\n",
			     bp->igu_dsb_id, bp->igu_base_sb, bp->igu_sb_cnt);

	/*
	 * Initialize MF configuration
	 */

	bp->mf_ov = 0;
	bp->mf_mode = 0;
	vn = BP_E1HVN(bp);
	if (!CHIP_IS_E1(bp) && !BP_NOMCP(bp)) {
		/* locate the MF config region: newer shmem2 publishes its
		 * address, otherwise it follows the function mailboxes */
		if (SHMEM2_HAS(bp, mf_cfg_addr))
			bp->common.mf_cfg_base = SHMEM2_RD(bp, mf_cfg_addr);
		else
			bp->common.mf_cfg_base = bp->common.shmem_base +
				offsetof(struct shmem_region, func_mb) +
				E1H_FUNC_MAX * sizeof(struct drv_func_mb);
		bp->mf_config[vn] =
			MF_CFG_RD(bp, func_mf_config[func].config);

		/* a non-default outer-VLAN tag on func 0 indicates the
		 * board is in multi-function mode */
		val = (MF_CFG_RD(bp, func_mf_config[FUNC_0].e1hov_tag) &
		       FUNC_MF_CFG_E1HOV_TAG_MASK);
		if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT)
			bp->mf_mode = 1;
		BNX2X_DEV_INFO("%s function mode\n",
			       IS_MF(bp) ? "multi" : "single");

		if (IS_MF(bp)) {
			/* in MF mode, this function must have a valid
			 * outer-VLAN (OV) tag of its own */
			val = (MF_CFG_RD(bp, func_mf_config[func].
					 e1hov_tag) &
			       FUNC_MF_CFG_E1HOV_TAG_MASK);
			if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
				bp->mf_ov = val;
				BNX2X_DEV_INFO("MF OV for func %d is %d "
					       "(0x%04x)\n",
					       func, bp->mf_ov, bp->mf_ov);
			} else {
				BNX2X_ERROR("No valid MF OV for func %d,"
					    " aborting\n", func);
				rc = -EPERM;
			}
		} else {
			/* single-function mode only allows VN 0 */
			if (BP_VN(bp)) {
				BNX2X_ERROR("VN %d in single function mode,"
					    " aborting\n", BP_E1HVN(bp));
				rc = -EPERM;
			}
		}
	}

	/* adjust igu_sb_cnt to MF for E1x */
	if (CHIP_IS_E1x(bp) && IS_MF(bp))
		bp->igu_sb_cnt /= E1HVN_MAX;

	/*
	 * adjust E2 sb count: to be removed when FW will support
	 * more then 16 L2 clients
	 */
#define MAX_L2_CLIENTS				16
	if (CHIP_IS_E2(bp))
		bp->igu_sb_cnt = min_t(u8, bp->igu_sb_cnt,
				       MAX_L2_CLIENTS / (IS_MF(bp) ? 4 : 1));

	if (!BP_NOMCP(bp)) {
		bnx2x_get_port_hwinfo(bp);

		/* current driver<->MCP mailbox sequence number */
		bp->fw_seq =
			(SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) &
			 DRV_MSG_SEQ_NUMBER_MASK);
		BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
	}

	if (IS_MF(bp)) {
		/* in MF mode the per-function MAC from the MF config
		 * overrides the port MAC read in bnx2x_get_port_hwinfo() */
		val2 = MF_CFG_RD(bp, func_mf_config[func].mac_upper);
		val = MF_CFG_RD(bp, func_mf_config[func].mac_lower);
		if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) &&
		    (val != FUNC_MF_CFG_LOWERMAC_DEFAULT)) {
			bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
			bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
			bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
			bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
			bp->dev->dev_addr[4] = (u8)(val >> 8 & 0xff);
			bp->dev->dev_addr[5] = (u8)(val & 0xff);
			memcpy(bp->link_params.mac_addr, bp->dev->dev_addr,
			       ETH_ALEN);
			memcpy(bp->dev->perm_addr, bp->dev->dev_addr,
			       ETH_ALEN);
		}

		return rc;
	}

	if (BP_NOMCP(bp)) {
		/* only supposed to happen on emulation/FPGA */
		BNX2X_ERROR("warning: random MAC workaround active\n");
		random_ether_addr(bp->dev->dev_addr);
		memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
	}

	return rc;
}
8228
/* Read the board's PCI Vital Product Data (VPD) and, for Dell-branded
 * adapters, copy the OEM "VENDOR0" info-field into bp->fw_ver.
 * bp->fw_ver is zeroed first, so on any parse failure it is simply left
 * empty; the function never reports an error to the caller.
 */
static void __devinit bnx2x_read_fwinfo(struct bnx2x *bp)
{
    int cnt, i, block_end, rodi;
    char vpd_data[BNX2X_VPD_LEN+1];
    char str_id_reg[VENDOR_ID_LEN+1];
    char str_id_cap[VENDOR_ID_LEN+1];
    u8 len;

    /* a short read means the VPD is unusable - bail out with fw_ver zeroed */
    cnt = pci_read_vpd(bp->pdev, 0, BNX2X_VPD_LEN, vpd_data);
    memset(bp->fw_ver, 0, sizeof(bp->fw_ver));

    if (cnt < BNX2X_VPD_LEN)
        goto out_not_found;

    /* locate the read-only large-resource data tag */
    i = pci_vpd_find_tag(vpd_data, 0, BNX2X_VPD_LEN,
                         PCI_VPD_LRDT_RO_DATA);
    if (i < 0)
        goto out_not_found;


    block_end = i + PCI_VPD_LRDT_TAG_SIZE +
                pci_vpd_lrdt_size(&vpd_data[i]);

    i += PCI_VPD_LRDT_TAG_SIZE;

    /* RO section must lie entirely within the data we read */
    if (block_end > BNX2X_VPD_LEN)
        goto out_not_found;

    rodi = pci_vpd_find_info_keyword(vpd_data, i, block_end,
                                     PCI_VPD_RO_KEYWORD_MFR_ID);
    if (rodi < 0)
        goto out_not_found;

    len = pci_vpd_info_field_size(&vpd_data[rodi]);

    if (len != VENDOR_ID_LEN)
        goto out_not_found;

    rodi += PCI_VPD_INFO_FLD_HDR_SIZE;

    /* vendor specific info */
    /* match the manufacturer ID against the Dell vendor ID, in either
     * lower- or upper-case hex encoding */
    snprintf(str_id_reg, VENDOR_ID_LEN + 1, "%04x", PCI_VENDOR_ID_DELL);
    snprintf(str_id_cap, VENDOR_ID_LEN + 1, "%04X", PCI_VENDOR_ID_DELL);
    if (!strncmp(str_id_reg, &vpd_data[rodi], VENDOR_ID_LEN) ||
        !strncmp(str_id_cap, &vpd_data[rodi], VENDOR_ID_LEN)) {

        rodi = pci_vpd_find_info_keyword(vpd_data, i, block_end,
                                         PCI_VPD_RO_KEYWORD_VENDOR0);
        if (rodi >= 0) {
            len = pci_vpd_info_field_size(&vpd_data[rodi]);

            rodi += PCI_VPD_INFO_FLD_HDR_SIZE;

            if (len < 32 && (len + rodi) <= BNX2X_VPD_LEN) {
                memcpy(bp->fw_ver, &vpd_data[rodi], len);
                /* trailing space acts as a separator; the memset above
                 * keeps the string NUL-terminated (assumes
                 * sizeof(bp->fw_ver) > 33 - TODO confirm) */
                bp->fw_ver[len] = ' ';
            }
        }
        return;
    }
out_not_found:
    return;
}
8292
/* Probe-time initialization of the driver's private state: locks,
 * deferred work items, HW/per-function info, module-parameter derived
 * settings (multi_mode, int_mode, TPA, dropless FC, coalescing ticks)
 * and the periodic timer.  Returns 0 or the negative error collected
 * from bnx2x_get_hwinfo()/bnx2x_alloc_mem_bp().
 */
static int __devinit bnx2x_init_bp(struct bnx2x *bp)
{
    int func;
    int timer_interval;
    int rc;

    /* Disable interrupt handling until HW is initialized */
    atomic_set(&bp->intr_sem, 1);
    smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */

    mutex_init(&bp->port.phy_mutex);
    mutex_init(&bp->fw_mb_mutex);
    spin_lock_init(&bp->stats_lock);
#ifdef BCM_CNIC
    mutex_init(&bp->cnic_mutex);
#endif

    INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
    INIT_DELAYED_WORK(&bp->reset_task, bnx2x_reset_task);

    rc = bnx2x_get_hwinfo(bp);

    if (!rc)
        rc = bnx2x_alloc_mem_bp(bp);

    bnx2x_read_fwinfo(bp);

    func = BP_FUNC(bp);

    /* need to reset chip if undi was active */
    /* NOTE: rc from hwinfo/alloc above is carried to the final return;
     * the remaining setup still runs even if those calls failed */
    if (!BP_NOMCP(bp))
        bnx2x_undi_unload(bp);

    if (CHIP_REV_IS_FPGA(bp))
        dev_err(&bp->pdev->dev, "FPGA detected\n");

    if (BP_NOMCP(bp) && (func == 0))
        dev_err(&bp->pdev->dev, "MCP disabled, "
                                "must load devices in order!\n");

    /* Set multi queue mode */
    /* RSS requires MSI-X; fall back to single-queue otherwise */
    if ((multi_mode != ETH_RSS_MODE_DISABLED) &&
        ((int_mode == INT_MODE_INTx) || (int_mode == INT_MODE_MSI))) {
        dev_err(&bp->pdev->dev, "Multi disabled since int_mode "
                                "requested is not MSI-X\n");
        multi_mode = ETH_RSS_MODE_DISABLED;
    }
    bp->multi_mode = multi_mode;
    bp->int_mode = int_mode;

    bp->dev->features |= NETIF_F_GRO;

    /* Set TPA flags */
    if (disable_tpa) {
        bp->flags &= ~TPA_ENABLE_FLAG;
        bp->dev->features &= ~NETIF_F_LRO;
    } else {
        bp->flags |= TPA_ENABLE_FLAG;
        bp->dev->features |= NETIF_F_LRO;
    }
    bp->disable_tpa = disable_tpa;

    /* dropless flow control is not supported on E1 */
    if (CHIP_IS_E1(bp))
        bp->dropless_fc = 0;
    else
        bp->dropless_fc = dropless_fc;

    bp->mrrs = mrrs;

    bp->tx_ring_size = MAX_TX_AVAIL;

    bp->rx_csum = 1;

    /* make sure that the numbers are in the right granularity */
    bp->tx_ticks = (50 / BNX2X_BTR) * BNX2X_BTR;
    bp->rx_ticks = (25 / BNX2X_BTR) * BNX2X_BTR;

    /* slow (emulation/FPGA) chips get a 5x longer timer period;
     * the 'poll' module parameter overrides either value */
    timer_interval = (CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ);
    bp->current_interval = (poll ? poll : timer_interval);

    init_timer(&bp->timer);
    bp->timer.expires = jiffies + bp->current_interval;
    bp->timer.data = (unsigned long) bp;
    bp->timer.function = bnx2x_timer;

    return rc;
}
8380
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008381
Dmitry Kravkovde0c62d2010-07-27 12:35:24 +00008382/****************************************************************************
8383* General service functions
8384****************************************************************************/
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008385
/* called with rtnl_lock */
/* ndo_open handler: powers the device up and, if a previous parity/HW
 * recovery has not completed, tries to finish it before loading the
 * NIC.  Returns 0 on success, -EAGAIN while recovery is still pending,
 * or the error from bnx2x_nic_load().
 */
static int bnx2x_open(struct net_device *dev)
{
    struct bnx2x *bp = netdev_priv(dev);

    netif_carrier_off(dev);

    bnx2x_set_power_state(bp, PCI_D0);

    /* reset_is_done clear means a recovery flow is still in progress */
    if (!bnx2x_reset_is_done(bp)) {
        /* do { } while (0) is used only so 'break' can skip to the
         * common fall-through path below */
        do {
            /* Reset MCP mail box sequence if there is on going
             * recovery
             */
            bp->fw_seq = 0;

            /* If no function is loaded yet and reset-done is still not
             * set, the previous "process kill" may not have completed.
             * The attention state is not checked here because it may
             * already have been cleared by a "common" reset; proceed
             * with the recovery attempt anyway.  Only the function that
             * grabs the reserved HW lock becomes the recovery leader.
             */
            if ((bnx2x_get_load_cnt(bp) == 0) &&
                bnx2x_trylock_hw_lock(bp,
                HW_LOCK_RESOURCE_RESERVED_08) &&
                (!bnx2x_leader_reset(bp))) {
                DP(NETIF_MSG_HW, "Recovered in open\n");
                break;
            }

            /* recovery still owned by someone else - power back down
             * and ask the user to retry */
            bnx2x_set_power_state(bp, PCI_D3hot);

            printk(KERN_ERR"%s: Recovery flow hasn't been properly"
            " completed yet. Try again later. If u still see this"
            " message after a few retries then power cycle is"
            " required.\n", bp->dev->name);

            return -EAGAIN;
        } while (0);
    }

    bp->recovery_state = BNX2X_RECOVERY_DONE;

    return bnx2x_nic_load(bp, LOAD_OPEN);
}
8431
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07008432/* called with rtnl_lock */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008433static int bnx2x_close(struct net_device *dev)
8434{
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008435 struct bnx2x *bp = netdev_priv(dev);
8436
8437 /* Unload the driver, release IRQs */
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07008438 bnx2x_nic_unload(bp, UNLOAD_CLOSE);
Vladislav Zolotarovd3dbfee2010-04-19 01:14:49 +00008439 bnx2x_set_power_state(bp, PCI_D3hot);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008440
8441 return 0;
8442}
8443
/* called with netif_tx_lock from dev_mcast.c */
/* Translate dev->flags and the device multicast list into the chip's
 * RX filtering configuration.  Promiscuous/all-multi map directly to an
 * rx_mode; otherwise the multicast list is programmed either through
 * the E1 MAC table (bnx2x_set_e1_mc_list) or, on E1H, through the
 * 256-bit CRC32c hash filter registers.  Must not sleep.
 */
void bnx2x_set_rx_mode(struct net_device *dev)
{
    struct bnx2x *bp = netdev_priv(dev);
    u32 rx_mode = BNX2X_RX_MODE_NORMAL;
    int port = BP_PORT(bp);

    /* filtering can only be programmed on an open device */
    if (bp->state != BNX2X_STATE_OPEN) {
        DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
        return;
    }

    DP(NETIF_MSG_IFUP, "dev->flags = %x\n", dev->flags);

    if (dev->flags & IFF_PROMISC)
        rx_mode = BNX2X_RX_MODE_PROMISC;
    else if ((dev->flags & IFF_ALLMULTI) ||
             ((netdev_mc_count(dev) > BNX2X_MAX_MULTICAST) &&
              CHIP_IS_E1(bp)))
        /* E1's exact-match table is limited, so an oversized mc list
         * degrades to all-multi */
        rx_mode = BNX2X_RX_MODE_ALLMULTI;
    else { /* some multicasts */
        if (CHIP_IS_E1(bp)) {
            /*
             * set mc list, do not wait as wait implies sleep
             * and set_rx_mode can be invoked from non-sleepable
             * context
             */
            u8 offset = (CHIP_REV_IS_SLOW(bp) ?
                         BNX2X_MAX_EMUL_MULTI*(1 + port) :
                         BNX2X_MAX_MULTICAST*(1 + port));

            bnx2x_set_e1_mc_list(bp, offset);
        } else { /* E1H */
            /* Accept one or more multicasts */
            struct netdev_hw_addr *ha;
            u32 mc_filter[MC_HASH_SIZE];
            u32 crc, bit, regidx;
            int i;

            memset(mc_filter, 0, 4 * MC_HASH_SIZE);

            /* hash each mc address: top byte of CRC32c selects one of
             * 256 filter bits, spread over MC_HASH_SIZE registers */
            netdev_for_each_mc_addr(ha, dev) {
                DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n",
                   bnx2x_mc_addr(ha));

                crc = crc32c_le(0, bnx2x_mc_addr(ha),
                                ETH_ALEN);
                bit = (crc >> 24) & 0xff;
                regidx = bit >> 5;
                bit &= 0x1f;
                mc_filter[regidx] |= (1 << bit);
            }

            for (i = 0; i < MC_HASH_SIZE; i++)
                REG_WR(bp, MC_HASH_OFFSET(bp, i),
                       mc_filter[i]);
        }
    }

    bp->rx_mode = rx_mode;
    bnx2x_set_storm_rx_mode(bp);
}
8506
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07008507/* called with rtnl_lock */
Eilon Greenstein01cd4522009-08-12 08:23:08 +00008508static int bnx2x_mdio_read(struct net_device *netdev, int prtad,
8509 int devad, u16 addr)
8510{
8511 struct bnx2x *bp = netdev_priv(netdev);
8512 u16 value;
8513 int rc;
Eilon Greenstein01cd4522009-08-12 08:23:08 +00008514
8515 DP(NETIF_MSG_LINK, "mdio_read: prtad 0x%x, devad 0x%x, addr 0x%x\n",
8516 prtad, devad, addr);
8517
Eilon Greenstein01cd4522009-08-12 08:23:08 +00008518 /* The HW expects different devad if CL22 is used */
8519 devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;
8520
8521 bnx2x_acquire_phy_lock(bp);
Yaniv Rosnere10bc842010-09-07 11:40:50 +00008522 rc = bnx2x_phy_read(&bp->link_params, prtad, devad, addr, &value);
Eilon Greenstein01cd4522009-08-12 08:23:08 +00008523 bnx2x_release_phy_lock(bp);
8524 DP(NETIF_MSG_LINK, "mdio_read_val 0x%x rc = 0x%x\n", value, rc);
8525
8526 if (!rc)
8527 rc = value;
8528 return rc;
8529}
8530
8531/* called with rtnl_lock */
8532static int bnx2x_mdio_write(struct net_device *netdev, int prtad, int devad,
8533 u16 addr, u16 value)
8534{
8535 struct bnx2x *bp = netdev_priv(netdev);
Eilon Greenstein01cd4522009-08-12 08:23:08 +00008536 int rc;
8537
8538 DP(NETIF_MSG_LINK, "mdio_write: prtad 0x%x, devad 0x%x, addr 0x%x,"
8539 " value 0x%x\n", prtad, devad, addr, value);
8540
Eilon Greenstein01cd4522009-08-12 08:23:08 +00008541 /* The HW expects different devad if CL22 is used */
8542 devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;
8543
8544 bnx2x_acquire_phy_lock(bp);
Yaniv Rosnere10bc842010-09-07 11:40:50 +00008545 rc = bnx2x_phy_write(&bp->link_params, prtad, devad, addr, value);
Eilon Greenstein01cd4522009-08-12 08:23:08 +00008546 bnx2x_release_phy_lock(bp);
8547 return rc;
8548}
8549
8550/* called with rtnl_lock */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008551static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
8552{
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008553 struct bnx2x *bp = netdev_priv(dev);
Eilon Greenstein01cd4522009-08-12 08:23:08 +00008554 struct mii_ioctl_data *mdio = if_mii(ifr);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008555
Eilon Greenstein01cd4522009-08-12 08:23:08 +00008556 DP(NETIF_MSG_LINK, "ioctl: phy id 0x%x, reg 0x%x, val_in 0x%x\n",
8557 mdio->phy_id, mdio->reg_num, mdio->val_in);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008558
Eilon Greenstein01cd4522009-08-12 08:23:08 +00008559 if (!netif_running(dev))
8560 return -EAGAIN;
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07008561
Eilon Greenstein01cd4522009-08-12 08:23:08 +00008562 return mdio_mii_ioctl(&bp->mdio, mdio, cmd);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008563}
8564
#ifdef CONFIG_NET_POLL_CONTROLLER
/* netpoll hook: run the interrupt handler with the IRQ line masked so
 * the console can make progress without interrupt delivery.
 */
static void poll_bnx2x(struct net_device *dev)
{
    struct bnx2x *bp = netdev_priv(dev);
    int irq = bp->pdev->irq;

    disable_irq(irq);
    bnx2x_interrupt(irq, dev);
    enable_irq(irq);
}
#endif
8575
/* net_device callbacks wired into the networking core; most handlers
 * above are invoked under rtnl_lock (see their individual comments).
 */
static const struct net_device_ops bnx2x_netdev_ops = {
    .ndo_open = bnx2x_open,
    .ndo_stop = bnx2x_close,
    .ndo_start_xmit = bnx2x_start_xmit,
    .ndo_set_multicast_list = bnx2x_set_rx_mode,
    .ndo_set_mac_address = bnx2x_change_mac_addr,
    .ndo_validate_addr = eth_validate_addr,
    .ndo_do_ioctl = bnx2x_ioctl,
    .ndo_change_mtu = bnx2x_change_mtu,
    .ndo_tx_timeout = bnx2x_tx_timeout,
#ifdef BCM_VLAN
    .ndo_vlan_rx_register = bnx2x_vlan_rx_register,
#endif
#ifdef CONFIG_NET_POLL_CONTROLLER
    .ndo_poll_controller = poll_bnx2x,
#endif
};
8593
/* PCI-level device bring-up for one bnx2x function: enables the device,
 * claims its two memory BARs (BAR0 registers, BAR2 doorbells), selects
 * the DMA mask, maps the BARs and fills in the net_device ops/feature
 * flags and the mdio_if_info.  On failure every acquired resource is
 * released via the goto-cleanup chain.  Returns 0 or a negative errno.
 */
static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
                                    struct net_device *dev)
{
    struct bnx2x *bp;
    int rc;

    SET_NETDEV_DEV(dev, &pdev->dev);
    bp = netdev_priv(dev);

    bp->dev = dev;
    bp->pdev = pdev;
    bp->flags = 0;
    bp->pf_num = PCI_FUNC(pdev->devfn);

    rc = pci_enable_device(pdev);
    if (rc) {
        dev_err(&bp->pdev->dev,
                "Cannot enable PCI device, aborting\n");
        goto err_out;
    }

    /* BAR0 (registers) and BAR2 (doorbells) must both be memory BARs */
    if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
        dev_err(&bp->pdev->dev,
                "Cannot find PCI device base address, aborting\n");
        rc = -ENODEV;
        goto err_out_disable;
    }

    if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
        dev_err(&bp->pdev->dev, "Cannot find second PCI device"
               " base address, aborting\n");
        rc = -ENODEV;
        goto err_out_disable;
    }

    /* request regions only once per physical device - multiple
     * functions share the same pci_dev here (see enable_cnt) */
    if (atomic_read(&pdev->enable_cnt) == 1) {
        rc = pci_request_regions(pdev, DRV_MODULE_NAME);
        if (rc) {
            dev_err(&bp->pdev->dev,
                    "Cannot obtain PCI resources, aborting\n");
            goto err_out_disable;
        }

        pci_set_master(pdev);
        pci_save_state(pdev);
    }

    bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
    if (bp->pm_cap == 0) {
        dev_err(&bp->pdev->dev,
                "Cannot find power management capability, aborting\n");
        rc = -EIO;
        goto err_out_release;
    }

    bp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
    if (bp->pcie_cap == 0) {
        dev_err(&bp->pdev->dev,
                "Cannot find PCI Express capability, aborting\n");
        rc = -EIO;
        goto err_out_release;
    }

    /* prefer 64-bit DMA (sets USING_DAC_FLAG), fall back to 32-bit */
    if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) == 0) {
        bp->flags |= USING_DAC_FLAG;
        if (dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64)) != 0) {
            dev_err(&bp->pdev->dev, "dma_set_coherent_mask"
                   " failed, aborting\n");
            rc = -EIO;
            goto err_out_release;
        }

    } else if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)) != 0) {
        dev_err(&bp->pdev->dev,
                "System does not support DMA, aborting\n");
        rc = -EIO;
        goto err_out_release;
    }

    dev->mem_start = pci_resource_start(pdev, 0);
    dev->base_addr = dev->mem_start;
    dev->mem_end = pci_resource_end(pdev, 0);

    dev->irq = pdev->irq;

    bp->regview = pci_ioremap_bar(pdev, 0);
    if (!bp->regview) {
        dev_err(&bp->pdev->dev,
                "Cannot map register space, aborting\n");
        rc = -ENOMEM;
        goto err_out_release;
    }

    /* map no more of BAR2 than the chip's doorbell window needs */
    bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
                                    min_t(u64, BNX2X_DB_SIZE(bp),
                                          pci_resource_len(pdev, 2)));
    if (!bp->doorbells) {
        dev_err(&bp->pdev->dev,
                "Cannot map doorbell space, aborting\n");
        rc = -ENOMEM;
        goto err_out_unmap;
    }

    bnx2x_set_power_state(bp, PCI_D0);

    /* clean indirect addresses */
    pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
                           PCICFG_VENDOR_ID_OFFSET);
    REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0 + BP_PORT(bp)*16, 0);
    REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0 + BP_PORT(bp)*16, 0);
    REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0 + BP_PORT(bp)*16, 0);
    REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0 + BP_PORT(bp)*16, 0);

    /* Reset the load counter */
    bnx2x_clear_load_cnt(bp);

    dev->watchdog_timeo = TX_TIMEOUT;

    dev->netdev_ops = &bnx2x_netdev_ops;
    bnx2x_set_ethtool_ops(dev);
    dev->features |= NETIF_F_SG;
    dev->features |= NETIF_F_HW_CSUM;
    if (bp->flags & USING_DAC_FLAG)
        dev->features |= NETIF_F_HIGHDMA;
    dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
    dev->features |= NETIF_F_TSO6;
#ifdef BCM_VLAN
    dev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX);
    bp->flags |= (HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);

    dev->vlan_features |= NETIF_F_SG;
    dev->vlan_features |= NETIF_F_HW_CSUM;
    if (bp->flags & USING_DAC_FLAG)
        dev->vlan_features |= NETIF_F_HIGHDMA;
    dev->vlan_features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
    dev->vlan_features |= NETIF_F_TSO6;
#endif

    /* get_port_hwinfo() will set prtad and mmds properly */
    bp->mdio.prtad = MDIO_PRTAD_NONE;
    bp->mdio.mmds = 0;
    bp->mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
    bp->mdio.dev = dev;
    bp->mdio.mdio_read = bnx2x_mdio_read;
    bp->mdio.mdio_write = bnx2x_mdio_write;

    return 0;

err_out_unmap:
    if (bp->regview) {
        iounmap(bp->regview);
        bp->regview = NULL;
    }
    if (bp->doorbells) {
        iounmap(bp->doorbells);
        bp->doorbells = NULL;
    }

err_out_release:
    if (atomic_read(&pdev->enable_cnt) == 1)
        pci_release_regions(pdev);

err_out_disable:
    pci_disable_device(pdev);
    pci_set_drvdata(pdev, NULL);

err_out:
    return rc;
}
8763
Eilon Greenstein37f9ce62009-08-12 08:23:34 +00008764static void __devinit bnx2x_get_pcie_width_speed(struct bnx2x *bp,
8765 int *width, int *speed)
Eliezer Tamir25047952008-02-28 11:50:16 -08008766{
8767 u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);
8768
Eilon Greenstein37f9ce62009-08-12 08:23:34 +00008769 *width = (val & PCICFG_LINK_WIDTH) >> PCICFG_LINK_WIDTH_SHIFT;
8770
8771 /* return value of 1=2.5GHz 2=5GHz */
8772 *speed = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT;
Eliezer Tamir25047952008-02-28 11:50:16 -08008773}
8774
/* Validate a firmware blob loaded by request_firmware(): every section
 * must lie within the file, every init-ops offset must stay inside the
 * ops array, and the embedded FW version must match the version this
 * driver was built against.  Returns 0 if the image is usable, -EINVAL
 * otherwise.
 */
static int bnx2x_check_firmware(struct bnx2x *bp)
{
    const struct firmware *firmware = bp->firmware;
    struct bnx2x_fw_file_hdr *fw_hdr;
    struct bnx2x_fw_file_section *sections;
    u32 offset, len, num_ops;
    u16 *ops_offsets;
    int i;
    const u8 *fw_ver;

    if (firmware->size < sizeof(struct bnx2x_fw_file_hdr))
        return -EINVAL;

    /* the header is itself an array of section descriptors */
    fw_hdr = (struct bnx2x_fw_file_hdr *)firmware->data;
    sections = (struct bnx2x_fw_file_section *)fw_hdr;

    /* Make sure none of the offsets and sizes make us read beyond
     * the end of the firmware data */
    for (i = 0; i < sizeof(*fw_hdr) / sizeof(*sections); i++) {
        offset = be32_to_cpu(sections[i].offset);
        len = be32_to_cpu(sections[i].len);
        if (offset + len > firmware->size) {
            dev_err(&bp->pdev->dev,
                    "Section %d length is out of bounds\n", i);
            return -EINVAL;
        }
    }

    /* Likewise for the init_ops offsets */
    offset = be32_to_cpu(fw_hdr->init_ops_offsets.offset);
    ops_offsets = (u16 *)(firmware->data + offset);
    num_ops = be32_to_cpu(fw_hdr->init_ops.len) / sizeof(struct raw_op);

    for (i = 0; i < be32_to_cpu(fw_hdr->init_ops_offsets.len) / 2; i++) {
        /* NOTE(review): an offset equal to num_ops would index one
         * past the end of the ops array, so '>=' looks like the safer
         * comparison here - confirm against the firmware file layout
         * before changing. */
        if (be16_to_cpu(ops_offsets[i]) > num_ops) {
            dev_err(&bp->pdev->dev,
                    "Section offset %d is out of bounds\n", i);
            return -EINVAL;
        }
    }

    /* Check FW version */
    offset = be32_to_cpu(fw_hdr->fw_version.offset);
    fw_ver = firmware->data + offset;
    if ((fw_ver[0] != BCM_5710_FW_MAJOR_VERSION) ||
        (fw_ver[1] != BCM_5710_FW_MINOR_VERSION) ||
        (fw_ver[2] != BCM_5710_FW_REVISION_VERSION) ||
        (fw_ver[3] != BCM_5710_FW_ENGINEERING_VERSION)) {
        dev_err(&bp->pdev->dev,
            "Bad FW version:%d.%d.%d.%d. Should be %d.%d.%d.%d\n",
            fw_ver[0], fw_ver[1], fw_ver[2],
            fw_ver[3], BCM_5710_FW_MAJOR_VERSION,
            BCM_5710_FW_MINOR_VERSION,
            BCM_5710_FW_REVISION_VERSION,
            BCM_5710_FW_ENGINEERING_VERSION);
        return -EINVAL;
    }

    return 0;
}
8835
Eilon Greensteinab6ad5a2009-08-12 08:24:29 +00008836static inline void be32_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07008837{
Eilon Greensteinab6ad5a2009-08-12 08:24:29 +00008838 const __be32 *source = (const __be32 *)_source;
8839 u32 *target = (u32 *)_target;
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07008840 u32 i;
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07008841
8842 for (i = 0; i < n/4; i++)
8843 target[i] = be32_to_cpu(source[i]);
8844}
8845
8846/*
8847 Ops array is stored in the following format:
8848 {op(8bit), offset(24bit, big endian), data(32bit, big endian)}
8849 */
Eilon Greensteinab6ad5a2009-08-12 08:24:29 +00008850static inline void bnx2x_prep_ops(const u8 *_source, u8 *_target, u32 n)
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07008851{
Eilon Greensteinab6ad5a2009-08-12 08:24:29 +00008852 const __be32 *source = (const __be32 *)_source;
8853 struct raw_op *target = (struct raw_op *)_target;
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07008854 u32 i, j, tmp;
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07008855
Eilon Greensteinab6ad5a2009-08-12 08:24:29 +00008856 for (i = 0, j = 0; i < n/8; i++, j += 2) {
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07008857 tmp = be32_to_cpu(source[j]);
8858 target[i].op = (tmp >> 24) & 0xff;
Vladislav Zolotarovcdaa7cb2010-04-19 01:13:57 +00008859 target[i].offset = tmp & 0xffffff;
8860 target[i].raw_data = be32_to_cpu(source[j + 1]);
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07008861 }
8862}
Eilon Greensteinab6ad5a2009-08-12 08:24:29 +00008863
Dmitry Kravkov523224a2010-10-06 03:23:26 +00008864/**
8865 * IRO array is stored in the following format:
8866 * {base(24bit), m1(16bit), m2(16bit), m3(16bit), size(16bit) }
8867 */
8868static inline void bnx2x_prep_iro(const u8 *_source, u8 *_target, u32 n)
8869{
8870 const __be32 *source = (const __be32 *)_source;
8871 struct iro *target = (struct iro *)_target;
8872 u32 i, j, tmp;
8873
8874 for (i = 0, j = 0; i < n/sizeof(struct iro); i++) {
8875 target[i].base = be32_to_cpu(source[j]);
8876 j++;
8877 tmp = be32_to_cpu(source[j]);
8878 target[i].m1 = (tmp >> 16) & 0xffff;
8879 target[i].m2 = tmp & 0xffff;
8880 j++;
8881 tmp = be32_to_cpu(source[j]);
8882 target[i].m3 = (tmp >> 16) & 0xffff;
8883 target[i].size = tmp & 0xffff;
8884 j++;
8885 }
8886}
8887
Eilon Greensteinab6ad5a2009-08-12 08:24:29 +00008888static inline void be16_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07008889{
Eilon Greensteinab6ad5a2009-08-12 08:24:29 +00008890 const __be16 *source = (const __be16 *)_source;
8891 u16 *target = (u16 *)_target;
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07008892 u32 i;
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07008893
8894 for (i = 0; i < n/2; i++)
8895 target[i] = be16_to_cpu(source[i]);
8896}
8897
/* Allocate bp->arr sized from the firmware file header and fill it by
 * running 'func' over the corresponding raw section of the firmware
 * blob; jumps to 'lbl' on allocation failure.  Relies on 'bp' and
 * 'fw_hdr' being in scope at the expansion site (bnx2x_init_firmware).
 */
#define BNX2X_ALLOC_AND_SET(arr, lbl, func) \
do { \
    u32 len = be32_to_cpu(fw_hdr->arr.len); \
    bp->arr = kmalloc(len, GFP_KERNEL); \
    if (!bp->arr) { \
        pr_err("Failed to allocate %d bytes for "#arr"\n", len); \
        goto lbl; \
    } \
    func(bp->firmware->data + be32_to_cpu(fw_hdr->arr.offset), \
         (u8 *)bp->arr, len); \
} while (0)
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07008909
Dmitry Kravkov6891dd22010-08-03 21:49:40 +00008910int bnx2x_init_firmware(struct bnx2x *bp)
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07008911{
Ben Hutchings45229b42009-11-07 11:53:39 +00008912 const char *fw_file_name;
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07008913 struct bnx2x_fw_file_hdr *fw_hdr;
Ben Hutchings45229b42009-11-07 11:53:39 +00008914 int rc;
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07008915
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07008916 if (CHIP_IS_E1(bp))
Ben Hutchings45229b42009-11-07 11:53:39 +00008917 fw_file_name = FW_FILE_NAME_E1;
Vladislav Zolotarovcdaa7cb2010-04-19 01:13:57 +00008918 else if (CHIP_IS_E1H(bp))
Ben Hutchings45229b42009-11-07 11:53:39 +00008919 fw_file_name = FW_FILE_NAME_E1H;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00008920 else if (CHIP_IS_E2(bp))
8921 fw_file_name = FW_FILE_NAME_E2;
Vladislav Zolotarovcdaa7cb2010-04-19 01:13:57 +00008922 else {
Dmitry Kravkov6891dd22010-08-03 21:49:40 +00008923 BNX2X_ERR("Unsupported chip revision\n");
Vladislav Zolotarovcdaa7cb2010-04-19 01:13:57 +00008924 return -EINVAL;
8925 }
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07008926
Dmitry Kravkov6891dd22010-08-03 21:49:40 +00008927 BNX2X_DEV_INFO("Loading %s\n", fw_file_name);
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07008928
Dmitry Kravkov6891dd22010-08-03 21:49:40 +00008929 rc = request_firmware(&bp->firmware, fw_file_name, &bp->pdev->dev);
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07008930 if (rc) {
Dmitry Kravkov6891dd22010-08-03 21:49:40 +00008931 BNX2X_ERR("Can't load firmware file %s\n", fw_file_name);
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07008932 goto request_firmware_exit;
8933 }
8934
8935 rc = bnx2x_check_firmware(bp);
8936 if (rc) {
Dmitry Kravkov6891dd22010-08-03 21:49:40 +00008937 BNX2X_ERR("Corrupt firmware file %s\n", fw_file_name);
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07008938 goto request_firmware_exit;
8939 }
8940
8941 fw_hdr = (struct bnx2x_fw_file_hdr *)bp->firmware->data;
8942
8943 /* Initialize the pointers to the init arrays */
8944 /* Blob */
8945 BNX2X_ALLOC_AND_SET(init_data, request_firmware_exit, be32_to_cpu_n);
8946
8947 /* Opcodes */
8948 BNX2X_ALLOC_AND_SET(init_ops, init_ops_alloc_err, bnx2x_prep_ops);
8949
8950 /* Offsets */
Eilon Greensteinab6ad5a2009-08-12 08:24:29 +00008951 BNX2X_ALLOC_AND_SET(init_ops_offsets, init_offsets_alloc_err,
8952 be16_to_cpu_n);
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07008953
8954 /* STORMs firmware */
Eilon Greenstein573f2032009-08-12 08:24:14 +00008955 INIT_TSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
8956 be32_to_cpu(fw_hdr->tsem_int_table_data.offset);
8957 INIT_TSEM_PRAM_DATA(bp) = bp->firmware->data +
8958 be32_to_cpu(fw_hdr->tsem_pram_data.offset);
8959 INIT_USEM_INT_TABLE_DATA(bp) = bp->firmware->data +
8960 be32_to_cpu(fw_hdr->usem_int_table_data.offset);
8961 INIT_USEM_PRAM_DATA(bp) = bp->firmware->data +
8962 be32_to_cpu(fw_hdr->usem_pram_data.offset);
8963 INIT_XSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
8964 be32_to_cpu(fw_hdr->xsem_int_table_data.offset);
8965 INIT_XSEM_PRAM_DATA(bp) = bp->firmware->data +
8966 be32_to_cpu(fw_hdr->xsem_pram_data.offset);
8967 INIT_CSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
8968 be32_to_cpu(fw_hdr->csem_int_table_data.offset);
8969 INIT_CSEM_PRAM_DATA(bp) = bp->firmware->data +
8970 be32_to_cpu(fw_hdr->csem_pram_data.offset);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00008971 /* IRO */
8972 BNX2X_ALLOC_AND_SET(iro_arr, iro_alloc_err, bnx2x_prep_iro);
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07008973
8974 return 0;
Eilon Greensteinab6ad5a2009-08-12 08:24:29 +00008975
Dmitry Kravkov523224a2010-10-06 03:23:26 +00008976iro_alloc_err:
8977 kfree(bp->init_ops_offsets);
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07008978init_offsets_alloc_err:
8979 kfree(bp->init_ops);
8980init_ops_alloc_err:
8981 kfree(bp->init_data);
8982request_firmware_exit:
8983 release_firmware(bp->firmware);
8984
8985 return rc;
8986}
8987
Dmitry Kravkov523224a2010-10-06 03:23:26 +00008988static inline int bnx2x_set_qm_cid_count(struct bnx2x *bp, int l2_cid_count)
8989{
8990 int cid_count = L2_FP_COUNT(l2_cid_count);
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07008991
Dmitry Kravkov523224a2010-10-06 03:23:26 +00008992#ifdef BCM_CNIC
8993 cid_count += CNIC_CID_MAX;
8994#endif
8995 return roundup(cid_count, QM_CID_ROUND);
8996}
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00008997
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008998static int __devinit bnx2x_init_one(struct pci_dev *pdev,
8999 const struct pci_device_id *ent)
9000{
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009001 struct net_device *dev = NULL;
9002 struct bnx2x *bp;
Eilon Greenstein37f9ce62009-08-12 08:23:34 +00009003 int pcie_width, pcie_speed;
Dmitry Kravkov523224a2010-10-06 03:23:26 +00009004 int rc, cid_count;
9005
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00009006 switch (ent->driver_data) {
9007 case BCM57710:
9008 case BCM57711:
9009 case BCM57711E:
9010 cid_count = FP_SB_MAX_E1x;
9011 break;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009012
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00009013 case BCM57712:
9014 case BCM57712E:
9015 cid_count = FP_SB_MAX_E2;
9016 break;
9017
9018 default:
9019 pr_err("Unknown board_type (%ld), aborting\n",
9020 ent->driver_data);
9021 return ENODEV;
9022 }
9023
9024 cid_count += CNIC_CONTEXT_USE;
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00009025
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009026 /* dev zeroed in init_etherdev */
Dmitry Kravkov523224a2010-10-06 03:23:26 +00009027 dev = alloc_etherdev_mq(sizeof(*bp), cid_count);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07009028 if (!dev) {
Vladislav Zolotarovcdaa7cb2010-04-19 01:13:57 +00009029 dev_err(&pdev->dev, "Cannot allocate net device\n");
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009030 return -ENOMEM;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07009031 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009032
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009033 bp = netdev_priv(dev);
Joe Perches7995c642010-02-17 15:01:52 +00009034 bp->msg_enable = debug;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009035
Eilon Greensteindf4770de2009-08-12 08:23:28 +00009036 pci_set_drvdata(pdev, dev);
9037
Dmitry Kravkov523224a2010-10-06 03:23:26 +00009038 bp->l2_cid_count = cid_count;
9039
Eilon Greenstein34f80b02008-06-23 20:33:01 -07009040 rc = bnx2x_init_dev(pdev, dev);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009041 if (rc < 0) {
9042 free_netdev(dev);
9043 return rc;
9044 }
9045
Eilon Greenstein34f80b02008-06-23 20:33:01 -07009046 rc = bnx2x_init_bp(bp);
Eilon Greenstein693fc0d2009-01-14 06:43:52 +00009047 if (rc)
9048 goto init_one_exit;
9049
Dmitry Kravkov523224a2010-10-06 03:23:26 +00009050 /* calc qm_cid_count */
9051 bp->qm_cid_count = bnx2x_set_qm_cid_count(bp, cid_count);
9052
Eilon Greenstein693fc0d2009-01-14 06:43:52 +00009053 rc = register_netdev(dev);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07009054 if (rc) {
Eilon Greenstein693fc0d2009-01-14 06:43:52 +00009055 dev_err(&pdev->dev, "Cannot register net device\n");
Eilon Greenstein34f80b02008-06-23 20:33:01 -07009056 goto init_one_exit;
9057 }
9058
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00009059 /* Configure interupt mode: try to enable MSI-X/MSI if
9060 * needed, set bp->num_queues appropriately.
9061 */
9062 bnx2x_set_int_mode(bp);
9063
9064 /* Add all NAPI objects */
9065 bnx2x_add_all_napi(bp);
9066
Eilon Greenstein37f9ce62009-08-12 08:23:34 +00009067 bnx2x_get_pcie_width_speed(bp, &pcie_width, &pcie_speed);
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00009068
Vladislav Zolotarovcdaa7cb2010-04-19 01:13:57 +00009069 netdev_info(dev, "%s (%c%d) PCI-E x%d %s found at mem %lx,"
9070 " IRQ %d, ", board_info[ent->driver_data].name,
9071 (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00009072 pcie_width,
9073 ((!CHIP_IS_E2(bp) && pcie_speed == 2) ||
9074 (CHIP_IS_E2(bp) && pcie_speed == 1)) ?
9075 "5GHz (Gen2)" : "2.5GHz",
Vladislav Zolotarovcdaa7cb2010-04-19 01:13:57 +00009076 dev->base_addr, bp->pdev->irq);
9077 pr_cont("node addr %pM\n", dev->dev_addr);
Eilon Greensteinc0162012009-03-02 08:01:05 +00009078
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009079 return 0;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07009080
9081init_one_exit:
9082 if (bp->regview)
9083 iounmap(bp->regview);
9084
9085 if (bp->doorbells)
9086 iounmap(bp->doorbells);
9087
9088 free_netdev(dev);
9089
9090 if (atomic_read(&pdev->enable_cnt) == 1)
9091 pci_release_regions(pdev);
9092
9093 pci_disable_device(pdev);
9094 pci_set_drvdata(pdev, NULL);
9095
9096 return rc;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009097}
9098
/* PCI remove callback: tear down everything bnx2x_init_one set up,
 * in reverse order (netdev first, PCI resources last).
 */
static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;

	if (!dev) {
		dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
		return;
	}
	bp = netdev_priv(dev);

	unregister_netdev(dev);

	/* Delete all NAPI objects */
	bnx2x_del_all_napi(bp);

	/* Disable MSI/MSI-X */
	bnx2x_disable_msi(bp);

	/* Make sure RESET task is not scheduled before continuing */
	cancel_delayed_work_sync(&bp->reset_task);

	if (bp->regview)
		iounmap(bp->regview);

	if (bp->doorbells)
		iounmap(bp->doorbells);

	bnx2x_free_mem_bp(bp);

	free_netdev(dev);

	/* only release the BARs if no other function still holds them */
	if (atomic_read(&pdev->enable_cnt) == 1)
		pci_release_regions(pdev);

	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
}
9137
Yitchak Gertnerf8ef6e42008-09-09 05:07:25 -07009138static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
9139{
9140 int i;
9141
9142 bp->state = BNX2X_STATE_ERROR;
9143
9144 bp->rx_mode = BNX2X_RX_MODE_NONE;
9145
9146 bnx2x_netif_stop(bp, 0);
Stanislaw Gruszkac89af1a2010-05-17 17:35:38 -07009147 netif_carrier_off(bp->dev);
Yitchak Gertnerf8ef6e42008-09-09 05:07:25 -07009148
9149 del_timer_sync(&bp->timer);
9150 bp->stats_state = STATS_STATE_DISABLED;
9151 DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");
9152
9153 /* Release IRQs */
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00009154 bnx2x_free_irq(bp);
Yitchak Gertnerf8ef6e42008-09-09 05:07:25 -07009155
Yitchak Gertnerf8ef6e42008-09-09 05:07:25 -07009156 /* Free SKBs, SGEs, TPA pool and driver internals */
9157 bnx2x_free_skbs(bp);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00009158
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +00009159 for_each_queue(bp, i)
Yitchak Gertnerf8ef6e42008-09-09 05:07:25 -07009160 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00009161
Yitchak Gertnerf8ef6e42008-09-09 05:07:25 -07009162 bnx2x_free_mem(bp);
9163
9164 bp->state = BNX2X_STATE_CLOSED;
9165
Yitchak Gertnerf8ef6e42008-09-09 05:07:25 -07009166 return 0;
9167}
9168
/* Re-discover MCP shared memory after a PCI error recovery.  If the
 * shared-memory base read from the chip is missing or outside the
 * expected window, mark the MCP as inactive (NO_MCP_FLAG); otherwise
 * check the validity signature and re-read the firmware sequence number.
 */
static void bnx2x_eeh_recover(struct bnx2x *bp)
{
	u32 val;

	mutex_init(&bp->port.phy_mutex);

	bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
	bp->link_params.shmem_base = bp->common.shmem_base;
	BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);

	/* shmem is expected in [0xA0000, 0xC0000); anything else means
	 * the MCP is not running
	 */
	if (!bp->common.shmem_base ||
	    (bp->common.shmem_base < 0xA0000) ||
	    (bp->common.shmem_base >= 0xC0000)) {
		BNX2X_DEV_INFO("MCP not active\n");
		bp->flags |= NO_MCP_FLAG;
		return;
	}

	val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
	if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		!= (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		BNX2X_ERR("BAD MCP validity signature\n");

	if (!BP_NOMCP(bp)) {
		bp->fw_seq =
		    (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) &
		     DRV_MSG_SEQ_NUMBER_MASK);
		BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
	}
}
9199
Wendy Xiong493adb12008-06-23 20:36:22 -07009200/**
9201 * bnx2x_io_error_detected - called when PCI error is detected
9202 * @pdev: Pointer to PCI device
9203 * @state: The current pci connection state
9204 *
9205 * This function is called after a PCI bus error affecting
9206 * this device has been detected.
9207 */
9208static pci_ers_result_t bnx2x_io_error_detected(struct pci_dev *pdev,
9209 pci_channel_state_t state)
9210{
9211 struct net_device *dev = pci_get_drvdata(pdev);
9212 struct bnx2x *bp = netdev_priv(dev);
9213
9214 rtnl_lock();
9215
9216 netif_device_detach(dev);
9217
Dean Nelson07ce50e2009-07-31 09:13:25 +00009218 if (state == pci_channel_io_perm_failure) {
9219 rtnl_unlock();
9220 return PCI_ERS_RESULT_DISCONNECT;
9221 }
9222
Wendy Xiong493adb12008-06-23 20:36:22 -07009223 if (netif_running(dev))
Yitchak Gertnerf8ef6e42008-09-09 05:07:25 -07009224 bnx2x_eeh_nic_unload(bp);
Wendy Xiong493adb12008-06-23 20:36:22 -07009225
9226 pci_disable_device(pdev);
9227
9228 rtnl_unlock();
9229
9230 /* Request a slot reset */
9231 return PCI_ERS_RESULT_NEED_RESET;
9232}
9233
9234/**
9235 * bnx2x_io_slot_reset - called after the PCI bus has been reset
9236 * @pdev: Pointer to PCI device
9237 *
9238 * Restart the card from scratch, as if from a cold-boot.
9239 */
9240static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
9241{
9242 struct net_device *dev = pci_get_drvdata(pdev);
9243 struct bnx2x *bp = netdev_priv(dev);
9244
9245 rtnl_lock();
9246
9247 if (pci_enable_device(pdev)) {
9248 dev_err(&pdev->dev,
9249 "Cannot re-enable PCI device after reset\n");
9250 rtnl_unlock();
9251 return PCI_ERS_RESULT_DISCONNECT;
9252 }
9253
9254 pci_set_master(pdev);
9255 pci_restore_state(pdev);
9256
9257 if (netif_running(dev))
9258 bnx2x_set_power_state(bp, PCI_D0);
9259
9260 rtnl_unlock();
9261
9262 return PCI_ERS_RESULT_RECOVERED;
9263}
9264
9265/**
9266 * bnx2x_io_resume - called when traffic can start flowing again
9267 * @pdev: Pointer to PCI device
9268 *
9269 * This callback is called when the error recovery driver tells us that
9270 * its OK to resume normal operation.
9271 */
9272static void bnx2x_io_resume(struct pci_dev *pdev)
9273{
9274 struct net_device *dev = pci_get_drvdata(pdev);
9275 struct bnx2x *bp = netdev_priv(dev);
9276
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00009277 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00009278 printk(KERN_ERR "Handling parity error recovery. "
9279 "Try again later\n");
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00009280 return;
9281 }
9282
Wendy Xiong493adb12008-06-23 20:36:22 -07009283 rtnl_lock();
9284
Yitchak Gertnerf8ef6e42008-09-09 05:07:25 -07009285 bnx2x_eeh_recover(bp);
9286
Wendy Xiong493adb12008-06-23 20:36:22 -07009287 if (netif_running(dev))
Yitchak Gertnerf8ef6e42008-09-09 05:07:25 -07009288 bnx2x_nic_load(bp, LOAD_NORMAL);
Wendy Xiong493adb12008-06-23 20:36:22 -07009289
9290 netif_device_attach(dev);
9291
9292 rtnl_unlock();
9293}
9294
/* PCI error recovery (EEH/AER) callbacks registered via bnx2x_pci_driver */
static struct pci_error_handlers bnx2x_err_handler = {
	.error_detected = bnx2x_io_error_detected,
	.slot_reset = bnx2x_io_slot_reset,
	.resume = bnx2x_io_resume,
};
9300
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009301static struct pci_driver bnx2x_pci_driver = {
Wendy Xiong493adb12008-06-23 20:36:22 -07009302 .name = DRV_MODULE_NAME,
9303 .id_table = bnx2x_pci_tbl,
9304 .probe = bnx2x_init_one,
9305 .remove = __devexit_p(bnx2x_remove_one),
9306 .suspend = bnx2x_suspend,
9307 .resume = bnx2x_resume,
9308 .err_handler = &bnx2x_err_handler,
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009309};
9310
9311static int __init bnx2x_init(void)
9312{
Stanislaw Gruszkadd21ca62009-05-05 23:22:01 +00009313 int ret;
9314
Joe Perches7995c642010-02-17 15:01:52 +00009315 pr_info("%s", version);
Eilon Greenstein938cf542009-08-12 08:23:37 +00009316
Eilon Greenstein1cf167f2009-01-14 21:22:18 -08009317 bnx2x_wq = create_singlethread_workqueue("bnx2x");
9318 if (bnx2x_wq == NULL) {
Joe Perches7995c642010-02-17 15:01:52 +00009319 pr_err("Cannot create workqueue\n");
Eilon Greenstein1cf167f2009-01-14 21:22:18 -08009320 return -ENOMEM;
9321 }
9322
Stanislaw Gruszkadd21ca62009-05-05 23:22:01 +00009323 ret = pci_register_driver(&bnx2x_pci_driver);
9324 if (ret) {
Joe Perches7995c642010-02-17 15:01:52 +00009325 pr_err("Cannot register driver\n");
Stanislaw Gruszkadd21ca62009-05-05 23:22:01 +00009326 destroy_workqueue(bnx2x_wq);
9327 }
9328 return ret;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009329}
9330
/* Module exit point: unregister the PCI driver, then tear down the
 * workqueue created in bnx2x_init().
 */
static void __exit bnx2x_cleanup(void)
{
	pci_unregister_driver(&bnx2x_pci_driver);

	destroy_workqueue(bnx2x_wq);
}

module_init(bnx2x_init);
module_exit(bnx2x_cleanup);
9340
Michael Chan993ac7b2009-10-10 13:46:56 +00009341#ifdef BCM_CNIC
9342
/* count denotes the number of new completions we have seen */
/* Post as many queued CNIC kernel-work-queue entries (kwq) as the
 * slow-path queue credits allow.  Called with 'count' freshly completed
 * SPEs to return to the cnic_spq_pending budget, then drains the kwq
 * ring into the SPQ under spq_lock until a credit limit is hit.
 */
static void bnx2x_cnic_sp_post(struct bnx2x *bp, int count)
{
	struct eth_spe *spe;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return;
#endif

	spin_lock_bh(&bp->spq_lock);
	BUG_ON(bp->cnic_spq_pending < count);
	bp->cnic_spq_pending -= count;


	for (; bp->cnic_kwq_pending; bp->cnic_kwq_pending--) {
		/* connection type is encoded in the SPE header */
		u16 type = (le16_to_cpu(bp->cnic_kwq_cons->hdr.type)
				& SPE_HDR_CONN_TYPE) >>
				SPE_HDR_CONN_TYPE_SHIFT;

		/* Set validation for iSCSI L2 client before sending SETUP
		 * ramrod
		 */
		if (type == ETH_CONNECTION_TYPE) {
			u8 cmd = (le32_to_cpu(bp->cnic_kwq_cons->
					     hdr.conn_and_cmd_data) >>
				SPE_HDR_CMD_ID_SHIFT) & 0xff;

			if (cmd == RAMROD_CMD_ID_ETH_CLIENT_SETUP)
				bnx2x_set_ctx_validation(&bp->context.
					vcxt[BNX2X_ISCSI_ETH_CID].eth,
					HW_CID(bp, BNX2X_ISCSI_ETH_CID));
		}

		/* There may be not more than 8 L2 and COMMON SPEs and not more
		 * than 8 L5 SPEs in the air.
		 */
		if ((type == NONE_CONNECTION_TYPE) ||
		    (type == ETH_CONNECTION_TYPE)) {
			if (!atomic_read(&bp->spq_left))
				break;
			else
				atomic_dec(&bp->spq_left);
		} else if (type == ISCSI_CONNECTION_TYPE) {
			if (bp->cnic_spq_pending >=
			    bp->cnic_eth_dev.max_kwqe_pending)
				break;
			else
				bp->cnic_spq_pending++;
		} else {
			BNX2X_ERR("Unknown SPE type: %d\n", type);
			bnx2x_panic();
			break;
		}

		/* copy the kwq entry into the next free SPQ slot */
		spe = bnx2x_sp_get_next(bp);
		*spe = *bp->cnic_kwq_cons;

		DP(NETIF_MSG_TIMER, "pending on SPQ %d, on KWQ %d count %d\n",
		   bp->cnic_spq_pending, bp->cnic_kwq_pending, count);

		/* advance the consumer with ring wrap-around */
		if (bp->cnic_kwq_cons == bp->cnic_kwq_last)
			bp->cnic_kwq_cons = bp->cnic_kwq;
		else
			bp->cnic_kwq_cons++;
	}
	bnx2x_sp_prod_update(bp);
	spin_unlock_bh(&bp->spq_lock);
}
9412
/* CNIC callback: enqueue up to 'count' 16-byte kernel work-queue entries
 * onto the driver's kwq ring (under spq_lock), then kick the slow-path
 * posting machinery if credits are available.  Returns the number of
 * entries actually queued (may be less than 'count' if the ring fills).
 */
static int bnx2x_cnic_sp_queue(struct net_device *dev,
			       struct kwqe_16 *kwqes[], u32 count)
{
	struct bnx2x *bp = netdev_priv(dev);
	int i;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return -EIO;
#endif

	spin_lock_bh(&bp->spq_lock);

	for (i = 0; i < count; i++) {
		struct eth_spe *spe = (struct eth_spe *)kwqes[i];

		/* ring full - stop early and report how many were taken */
		if (bp->cnic_kwq_pending == MAX_SP_DESC_CNT)
			break;

		*bp->cnic_kwq_prod = *spe;

		bp->cnic_kwq_pending++;

		DP(NETIF_MSG_TIMER, "L5 SPQE %x %x %x:%x pos %d\n",
		   spe->hdr.conn_and_cmd_data, spe->hdr.type,
		   spe->data.update_data_addr.hi,
		   spe->data.update_data_addr.lo,
		   bp->cnic_kwq_pending);

		/* advance the producer with ring wrap-around */
		if (bp->cnic_kwq_prod == bp->cnic_kwq_last)
			bp->cnic_kwq_prod = bp->cnic_kwq;
		else
			bp->cnic_kwq_prod++;
	}

	spin_unlock_bh(&bp->spq_lock);

	if (bp->cnic_spq_pending < bp->cnic_eth_dev.max_kwqe_pending)
		bnx2x_cnic_sp_post(bp, 0);

	return i;
}
9455
9456static int bnx2x_cnic_ctl_send(struct bnx2x *bp, struct cnic_ctl_info *ctl)
9457{
9458 struct cnic_ops *c_ops;
9459 int rc = 0;
9460
9461 mutex_lock(&bp->cnic_mutex);
9462 c_ops = bp->cnic_ops;
9463 if (c_ops)
9464 rc = c_ops->cnic_ctl(bp->cnic_data, ctl);
9465 mutex_unlock(&bp->cnic_mutex);
9466
9467 return rc;
9468}
9469
9470static int bnx2x_cnic_ctl_send_bh(struct bnx2x *bp, struct cnic_ctl_info *ctl)
9471{
9472 struct cnic_ops *c_ops;
9473 int rc = 0;
9474
9475 rcu_read_lock();
9476 c_ops = rcu_dereference(bp->cnic_ops);
9477 if (c_ops)
9478 rc = c_ops->cnic_ctl(bp->cnic_data, ctl);
9479 rcu_read_unlock();
9480
9481 return rc;
9482}
9483
9484/*
9485 * for commands that have no data
9486 */
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00009487int bnx2x_cnic_notify(struct bnx2x *bp, int cmd)
Michael Chan993ac7b2009-10-10 13:46:56 +00009488{
9489 struct cnic_ctl_info ctl = {0};
9490
9491 ctl.cmd = cmd;
9492
9493 return bnx2x_cnic_ctl_send(bp, &ctl);
9494}
9495
9496static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid)
9497{
9498 struct cnic_ctl_info ctl;
9499
9500 /* first we tell CNIC and only then we count this as a completion */
9501 ctl.cmd = CNIC_CTL_COMPLETION_CMD;
9502 ctl.data.comp.cid = cid;
9503
9504 bnx2x_cnic_ctl_send_bh(bp, &ctl);
Dmitry Kravkovc2bff632010-10-06 03:33:18 +00009505 bnx2x_cnic_sp_post(bp, 0);
Michael Chan993ac7b2009-10-10 13:46:56 +00009506}
9507
/* CNIC control-command dispatcher: ILT writes, SPQ credit returns and
 * starting/stopping the iSCSI L2 ring.  Returns 0 on success or
 * -EINVAL for an unknown command.
 */
static int bnx2x_drv_ctl(struct net_device *dev, struct drv_ctl_info *ctl)
{
	struct bnx2x *bp = netdev_priv(dev);
	int rc = 0;

	switch (ctl->cmd) {
	/* write a DMA address into the ILT at the given index */
	case DRV_CTL_CTXTBL_WR_CMD: {
		u32 index = ctl->data.io.offset;
		dma_addr_t addr = ctl->data.io.dma_addr;

		bnx2x_ilt_wr(bp, index, addr);
		break;
	}

	/* CNIC returns L5 SPQ credits; post any queued kwq entries */
	case DRV_CTL_RET_L5_SPQ_CREDIT_CMD: {
		int count = ctl->data.credit.credit_count;

		bnx2x_cnic_sp_post(bp, count);
		break;
	}

	/* rtnl_lock is held. */
	case DRV_CTL_START_L2_CMD: {
		u32 cli = ctl->data.ring.client_id;

		/* Set iSCSI MAC address */
		bnx2x_set_iscsi_eth_mac_addr(bp, 1);

		mmiowb();
		barrier();

		/* Start accepting on iSCSI L2 ring. Accept all multicasts
		 * because it's the only way for UIO Client to accept
		 * multicasts (in non-promiscuous mode only one Client per
		 * function will receive multicast packets (leading in our
		 * case).
		 */
		bnx2x_rxq_set_mac_filters(bp, cli,
			BNX2X_ACCEPT_UNICAST |
			BNX2X_ACCEPT_BROADCAST |
			BNX2X_ACCEPT_ALL_MULTICAST);
		storm_memset_mac_filters(bp, &bp->mac_filters, BP_FUNC(bp));

		break;
	}

	/* rtnl_lock is held. */
	case DRV_CTL_STOP_L2_CMD: {
		u32 cli = ctl->data.ring.client_id;

		/* Stop accepting on iSCSI L2 ring */
		bnx2x_rxq_set_mac_filters(bp, cli, BNX2X_ACCEPT_NONE);
		storm_memset_mac_filters(bp, &bp->mac_filters, BP_FUNC(bp));

		mmiowb();
		barrier();

		/* Unset iSCSI L2 MAC */
		bnx2x_set_iscsi_eth_mac_addr(bp, 0);
		break;
	}
	/* CNIC returns L2 SPQ credits; add them back to spq_left */
	case DRV_CTL_RET_L2_SPQ_CREDIT_CMD: {
		int count = ctl->data.credit.credit_count;

		smp_mb__before_atomic_inc();
		atomic_add(count, &bp->spq_left);
		smp_mb__after_atomic_inc();
		break;
	}

	default:
		BNX2X_ERR("unknown command %x\n", ctl->cmd);
		rc = -EINVAL;
	}

	return rc;
}
9585
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00009586void bnx2x_setup_cnic_irq_info(struct bnx2x *bp)
Michael Chan993ac7b2009-10-10 13:46:56 +00009587{
9588 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
9589
9590 if (bp->flags & USING_MSIX_FLAG) {
9591 cp->drv_state |= CNIC_DRV_STATE_USING_MSIX;
9592 cp->irq_arr[0].irq_flags |= CNIC_IRQ_FL_MSIX;
9593 cp->irq_arr[0].vector = bp->msix_table[1].vector;
9594 } else {
9595 cp->drv_state &= ~CNIC_DRV_STATE_USING_MSIX;
9596 cp->irq_arr[0].irq_flags &= ~CNIC_IRQ_FL_MSIX;
9597 }
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00009598 if (CHIP_IS_E2(bp))
9599 cp->irq_arr[0].status_blk = (void *)bp->cnic_sb.e2_sb;
9600 else
9601 cp->irq_arr[0].status_blk = (void *)bp->cnic_sb.e1x_sb;
9602
Michael Chan993ac7b2009-10-10 13:46:56 +00009603 cp->irq_arr[0].status_blk_num = CNIC_SB_ID(bp);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00009604 cp->irq_arr[0].status_blk_num2 = CNIC_IGU_SB_ID(bp);
Michael Chan993ac7b2009-10-10 13:46:56 +00009605 cp->irq_arr[1].status_blk = bp->def_status_blk;
9606 cp->irq_arr[1].status_blk_num = DEF_SB_ID;
Dmitry Kravkov523224a2010-10-06 03:23:26 +00009607 cp->irq_arr[1].status_blk_num2 = DEF_SB_IGU_ID;
Michael Chan993ac7b2009-10-10 13:46:56 +00009608
9609 cp->num_irq = 2;
9610}
9611
/* CNIC registration callback: allocate the kernel work-queue ring,
 * initialize the cnic_eth_dev state and IRQ info, and finally publish
 * the ops pointer via RCU (so it is only visible once everything else
 * is set up).  Returns 0 on success, negative errno otherwise.
 */
static int bnx2x_register_cnic(struct net_device *dev, struct cnic_ops *ops,
			       void *data)
{
	struct bnx2x *bp = netdev_priv(dev);
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	if (ops == NULL)
		return -EINVAL;

	if (atomic_read(&bp->intr_sem) != 0)
		return -EBUSY;

	bp->cnic_kwq = kzalloc(PAGE_SIZE, GFP_KERNEL);
	if (!bp->cnic_kwq)
		return -ENOMEM;

	/* empty ring: producer == consumer */
	bp->cnic_kwq_cons = bp->cnic_kwq;
	bp->cnic_kwq_prod = bp->cnic_kwq;
	bp->cnic_kwq_last = bp->cnic_kwq + MAX_SP_DESC_CNT;

	bp->cnic_spq_pending = 0;
	bp->cnic_kwq_pending = 0;

	bp->cnic_data = data;

	cp->num_irq = 0;
	cp->drv_state = CNIC_DRV_STATE_REGD;
	cp->iro_arr = bp->iro_arr;

	bnx2x_setup_cnic_irq_info(bp);

	rcu_assign_pointer(bp->cnic_ops, ops);

	return 0;
}
9647
/* CNIC unregistration callback: clear the iSCSI MAC if it was set,
 * retract the ops pointer under cnic_mutex, wait for in-flight RCU
 * readers (bnx2x_cnic_ctl_send_bh) to finish, then free the kwq ring.
 * Always returns 0.
 */
static int bnx2x_unregister_cnic(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	mutex_lock(&bp->cnic_mutex);
	if (bp->cnic_flags & BNX2X_CNIC_FLAG_MAC_SET) {
		bp->cnic_flags &= ~BNX2X_CNIC_FLAG_MAC_SET;
		bnx2x_set_iscsi_eth_mac_addr(bp, 0);
	}
	cp->drv_state = 0;
	rcu_assign_pointer(bp->cnic_ops, NULL);
	mutex_unlock(&bp->cnic_mutex);
	/* wait for RCU readers before freeing the ring they may touch */
	synchronize_rcu();
	kfree(bp->cnic_kwq);
	bp->cnic_kwq = NULL;

	return 0;
}
9667
/* Exported probe hook for the CNIC driver: fill and return the
 * cnic_eth_dev descriptor (chip id, BAR mappings, context-table
 * geometry, iSCSI L2 client/CID and the driver callbacks the CNIC
 * side will use).
 */
struct cnic_eth_dev *bnx2x_cnic_probe(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	cp->drv_owner = THIS_MODULE;
	cp->chip_id = CHIP_ID(bp);
	cp->pdev = bp->pdev;
	cp->io_base = bp->regview;
	cp->io_base2 = bp->doorbells;
	cp->max_kwqe_pending = 8;
	cp->ctx_blk_size = CDU_ILT_PAGE_SZ;
	/* CNIC context lines sit right after the L2 CID ILT lines */
	cp->ctx_tbl_offset = FUNC_ILT_BASE(BP_FUNC(bp)) +
			     bnx2x_cid_ilt_lines(bp);
	cp->ctx_tbl_len = CNIC_ILT_LINES;
	cp->starting_cid = bnx2x_cid_ilt_lines(bp) * ILT_PAGE_CIDS;
	cp->drv_submit_kwqes_16 = bnx2x_cnic_sp_queue;
	cp->drv_ctl = bnx2x_drv_ctl;
	cp->drv_register_cnic = bnx2x_register_cnic;
	cp->drv_unregister_cnic = bnx2x_unregister_cnic;
	cp->iscsi_l2_client_id = BNX2X_ISCSI_ETH_CL_ID;
	cp->iscsi_l2_cid = BNX2X_ISCSI_ETH_CID;

	DP(BNX2X_MSG_SP, "page_size %d, tbl_offset %d, tbl_lines %d, "
			 "starting cid %d\n",
	   cp->ctx_blk_size,
	   cp->ctx_tbl_offset,
	   cp->ctx_tbl_len,
	   cp->starting_cid);
	return cp;
}
EXPORT_SYMBOL(bnx2x_cnic_probe);
9700
9701#endif /* BCM_CNIC */
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07009702