/* bnx2x_main.c: Broadcom Everest network driver.
 *
 * Copyright (c) 2007-2010 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
 * Written by: Eliezer Tamir
 * Based on code from Michael Chan's bnx2 driver
 * UDP CSUM errata workaround by Arik Gendelman
 * Slowpath and fastpath rework by Vladislav Zolotarov
 * Statistics and Link management by Yitchak Gertner
 *
 */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/device.h>  /* for dev_info() */
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <linux/time.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/if_vlan.h>
#include <net/ip.h>
#include <net/tcp.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/workqueue.h>
#include <linux/crc32.h>
#include <linux/crc32c.h>
#include <linux/prefetch.h>
#include <linux/zlib.h>
#include <linux/io.h>
#include <linux/stringify.h>

#define BNX2X_MAIN
#include "bnx2x.h"
#include "bnx2x_init.h"
#include "bnx2x_init_ops.h"
#include "bnx2x_cmn.h"

#include <linux/firmware.h>
#include "bnx2x_fw_file_hdr.h"
/* FW files */
#define FW_FILE_VERSION					\
	__stringify(BCM_5710_FW_MAJOR_VERSION) "."	\
	__stringify(BCM_5710_FW_MINOR_VERSION) "."	\
	__stringify(BCM_5710_FW_REVISION_VERSION) "."	\
	__stringify(BCM_5710_FW_ENGINEERING_VERSION)
#define FW_FILE_NAME_E1		"bnx2x/bnx2x-e1-" FW_FILE_VERSION ".fw"
#define FW_FILE_NAME_E1H	"bnx2x/bnx2x-e1h-" FW_FILE_VERSION ".fw"
#define FW_FILE_NAME_E2		"bnx2x/bnx2x-e2-" FW_FILE_VERSION ".fw"

/* Time in jiffies before concluding the transmitter is hung */
#define TX_TIMEOUT		(5*HZ)

static char version[] __devinitdata =
	"Broadcom NetXtreme II 5771x 10Gigabit Ethernet Driver "
	DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Eliezer Tamir");
MODULE_DESCRIPTION("Broadcom NetXtreme II "
		   "BCM57710/57711/57711E/57712/57712E Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_FIRMWARE(FW_FILE_NAME_E1);
MODULE_FIRMWARE(FW_FILE_NAME_E1H);
MODULE_FIRMWARE(FW_FILE_NAME_E2);

static int multi_mode = 1;
module_param(multi_mode, int, 0);
MODULE_PARM_DESC(multi_mode, " Multi queue mode "
			     "(0 Disable; 1 Enable (default))");

int num_queues;
module_param(num_queues, int, 0);
MODULE_PARM_DESC(num_queues, " Number of queues for multi_mode=1"
			     " (default is the number of CPUs)");

static int disable_tpa;
module_param(disable_tpa, int, 0);
MODULE_PARM_DESC(disable_tpa, " Disable the TPA (LRO) feature");

static int int_mode;
module_param(int_mode, int, 0);
MODULE_PARM_DESC(int_mode, " Force interrupt mode other than MSI-X "
			   "(1 INT#x; 2 MSI)");

static int dropless_fc;
module_param(dropless_fc, int, 0);
MODULE_PARM_DESC(dropless_fc, " Pause on exhausted host ring");

static int poll;
module_param(poll, int, 0);
MODULE_PARM_DESC(poll, " Use polling (for debug)");

static int mrrs = -1;
module_param(mrrs, int, 0);
MODULE_PARM_DESC(mrrs, " Force Max Read Req Size (0..3) (for debug)");

static int debug;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, " Default debug msglevel");
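
/*
 * Typical usage from the command line (illustrative values only; any of
 * the parameters declared above can be combined the same way):
 *
 *   modprobe bnx2x multi_mode=1 num_queues=4 int_mode=0 debug=0
 */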

static struct workqueue_struct *bnx2x_wq;

enum bnx2x_board_type {
	BCM57710 = 0,
	BCM57711 = 1,
	BCM57711E = 2,
	BCM57712 = 3,
	BCM57712E = 4
};

/* indexed by board_type, above */
static struct {
	char *name;
} board_info[] __devinitdata = {
	{ "Broadcom NetXtreme II BCM57710 XGb" },
	{ "Broadcom NetXtreme II BCM57711 XGb" },
	{ "Broadcom NetXtreme II BCM57711E XGb" },
	{ "Broadcom NetXtreme II BCM57712 XGb" },
	{ "Broadcom NetXtreme II BCM57712E XGb" }
};

#ifndef PCI_DEVICE_ID_NX2_57712
#define PCI_DEVICE_ID_NX2_57712	0x1662
#endif
#ifndef PCI_DEVICE_ID_NX2_57712E
#define PCI_DEVICE_ID_NX2_57712E	0x1663
#endif

static DEFINE_PCI_DEVICE_TABLE(bnx2x_pci_tbl) = {
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57710), BCM57710 },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711), BCM57711 },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711E), BCM57711E },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57712), BCM57712 },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57712E), BCM57712E },
	{ 0 }
};

MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);

/****************************************************************************
* General service functions
****************************************************************************/

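/*
 * The storm_memset_*() helpers below write per-function configuration
 * (DMA addresses, statistics areas, flags) into the STORM processors'
 * internal memories through the BAR_*STRORM_INTMEM windows.
 */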
static inline void __storm_memset_dma_mapping(struct bnx2x *bp,
					      u32 addr, dma_addr_t mapping)
{
	REG_WR(bp, addr, U64_LO(mapping));
	REG_WR(bp, addr + 4, U64_HI(mapping));
}

static inline void __storm_memset_fill(struct bnx2x *bp,
				       u32 addr, size_t size, u32 val)
{
	int i;
	for (i = 0; i < size/4; i++)
		REG_WR(bp, addr + (i * 4), val);
}

static inline void storm_memset_ustats_zero(struct bnx2x *bp,
					    u8 port, u16 stat_id)
{
	size_t size = sizeof(struct ustorm_per_client_stats);

	u32 addr = BAR_USTRORM_INTMEM +
			USTORM_PER_COUNTER_ID_STATS_OFFSET(port, stat_id);

	__storm_memset_fill(bp, addr, size, 0);
}

static inline void storm_memset_tstats_zero(struct bnx2x *bp,
					    u8 port, u16 stat_id)
{
	size_t size = sizeof(struct tstorm_per_client_stats);

	u32 addr = BAR_TSTRORM_INTMEM +
			TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, stat_id);

	__storm_memset_fill(bp, addr, size, 0);
}

static inline void storm_memset_xstats_zero(struct bnx2x *bp,
					    u8 port, u16 stat_id)
{
	size_t size = sizeof(struct xstorm_per_client_stats);

	u32 addr = BAR_XSTRORM_INTMEM +
			XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, stat_id);

	__storm_memset_fill(bp, addr, size, 0);
}

static inline void storm_memset_spq_addr(struct bnx2x *bp,
					 dma_addr_t mapping, u16 abs_fid)
{
	u32 addr = XSEM_REG_FAST_MEMORY +
			XSTORM_SPQ_PAGE_BASE_OFFSET(abs_fid);

	__storm_memset_dma_mapping(bp, addr, mapping);
}

static inline void storm_memset_ov(struct bnx2x *bp, u16 ov, u16 abs_fid)
{
	REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_E1HOV_OFFSET(abs_fid), ov);
}

static inline void storm_memset_func_cfg(struct bnx2x *bp,
				struct tstorm_eth_function_common_config *tcfg,
				u16 abs_fid)
{
	size_t size = sizeof(struct tstorm_eth_function_common_config);

	u32 addr = BAR_TSTRORM_INTMEM +
			TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(abs_fid);

	__storm_memset_struct(bp, addr, size, (u32 *)tcfg);
}

static inline void storm_memset_xstats_flags(struct bnx2x *bp,
				struct stats_indication_flags *flags,
				u16 abs_fid)
{
	size_t size = sizeof(struct stats_indication_flags);

	u32 addr = BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(abs_fid);

	__storm_memset_struct(bp, addr, size, (u32 *)flags);
}

static inline void storm_memset_tstats_flags(struct bnx2x *bp,
				struct stats_indication_flags *flags,
				u16 abs_fid)
{
	size_t size = sizeof(struct stats_indication_flags);

	u32 addr = BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(abs_fid);

	__storm_memset_struct(bp, addr, size, (u32 *)flags);
}

static inline void storm_memset_ustats_flags(struct bnx2x *bp,
				struct stats_indication_flags *flags,
				u16 abs_fid)
{
	size_t size = sizeof(struct stats_indication_flags);

	u32 addr = BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(abs_fid);

	__storm_memset_struct(bp, addr, size, (u32 *)flags);
}

static inline void storm_memset_cstats_flags(struct bnx2x *bp,
				struct stats_indication_flags *flags,
				u16 abs_fid)
{
	size_t size = sizeof(struct stats_indication_flags);

	u32 addr = BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(abs_fid);

	__storm_memset_struct(bp, addr, size, (u32 *)flags);
}

static inline void storm_memset_xstats_addr(struct bnx2x *bp,
					    dma_addr_t mapping, u16 abs_fid)
{
	u32 addr = BAR_XSTRORM_INTMEM +
			XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(abs_fid);

	__storm_memset_dma_mapping(bp, addr, mapping);
}

static inline void storm_memset_tstats_addr(struct bnx2x *bp,
					    dma_addr_t mapping, u16 abs_fid)
{
	u32 addr = BAR_TSTRORM_INTMEM +
			TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(abs_fid);

	__storm_memset_dma_mapping(bp, addr, mapping);
}

static inline void storm_memset_ustats_addr(struct bnx2x *bp,
					    dma_addr_t mapping, u16 abs_fid)
{
	u32 addr = BAR_USTRORM_INTMEM +
			USTORM_ETH_STATS_QUERY_ADDR_OFFSET(abs_fid);

	__storm_memset_dma_mapping(bp, addr, mapping);
}

static inline void storm_memset_cstats_addr(struct bnx2x *bp,
					    dma_addr_t mapping, u16 abs_fid)
{
	u32 addr = BAR_CSTRORM_INTMEM +
			CSTORM_ETH_STATS_QUERY_ADDR_OFFSET(abs_fid);

	__storm_memset_dma_mapping(bp, addr, mapping);
}

static inline void storm_memset_vf_to_pf(struct bnx2x *bp, u16 abs_fid,
					 u16 pf_id)
{
	REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_VF_TO_PF_OFFSET(abs_fid),
		pf_id);
	REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_VF_TO_PF_OFFSET(abs_fid),
		pf_id);
	REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_VF_TO_PF_OFFSET(abs_fid),
		pf_id);
	REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_VF_TO_PF_OFFSET(abs_fid),
		pf_id);
}

static inline void storm_memset_func_en(struct bnx2x *bp, u16 abs_fid,
					u8 enable)
{
	REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(abs_fid),
		enable);
	REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNC_EN_OFFSET(abs_fid),
		enable);
	REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNC_EN_OFFSET(abs_fid),
		enable);
	REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNC_EN_OFFSET(abs_fid),
		enable);
}

static inline void storm_memset_eq_data(struct bnx2x *bp,
					struct event_ring_data *eq_data,
					u16 pfid)
{
	size_t size = sizeof(struct event_ring_data);

	u32 addr = BAR_CSTRORM_INTMEM + CSTORM_EVENT_RING_DATA_OFFSET(pfid);

	__storm_memset_struct(bp, addr, size, (u32 *)eq_data);
}

static inline void storm_memset_eq_prod(struct bnx2x *bp, u16 eq_prod,
					u16 pfid)
{
	u32 addr = BAR_CSTRORM_INTMEM + CSTORM_EVENT_RING_PROD_OFFSET(pfid);
	REG_WR16(bp, addr, eq_prod);
}

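/*
 * The two helpers below patch a single status-block index in place: its
 * coalescing timeout and its HC-enabled flag.  The index_data layout
 * differs between E1x and E2 chips, hence the offsetof() selection.
 */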
static inline void storm_memset_hc_timeout(struct bnx2x *bp, u8 port,
					   u16 fw_sb_id, u8 sb_index,
					   u8 ticks)
{

	int index_offset = CHIP_IS_E2(bp) ?
		offsetof(struct hc_status_block_data_e2, index_data) :
		offsetof(struct hc_status_block_data_e1x, index_data);
	u32 addr = BAR_CSTRORM_INTMEM +
			CSTORM_STATUS_BLOCK_DATA_OFFSET(fw_sb_id) +
			index_offset +
			sizeof(struct hc_index_data)*sb_index +
			offsetof(struct hc_index_data, timeout);
	REG_WR8(bp, addr, ticks);
	DP(NETIF_MSG_HW, "port %x fw_sb_id %d sb_index %d ticks %d\n",
			  port, fw_sb_id, sb_index, ticks);
}

static inline void storm_memset_hc_disable(struct bnx2x *bp, u8 port,
					   u16 fw_sb_id, u8 sb_index,
					   u8 disable)
{
	u32 enable_flag = disable ? 0 : (1 << HC_INDEX_DATA_HC_ENABLED_SHIFT);
	int index_offset = CHIP_IS_E2(bp) ?
		offsetof(struct hc_status_block_data_e2, index_data) :
		offsetof(struct hc_status_block_data_e1x, index_data);
	u32 addr = BAR_CSTRORM_INTMEM +
			CSTORM_STATUS_BLOCK_DATA_OFFSET(fw_sb_id) +
			index_offset +
			sizeof(struct hc_index_data)*sb_index +
			offsetof(struct hc_index_data, flags);
	u16 flags = REG_RD16(bp, addr);
	/* clear and set */
	flags &= ~HC_INDEX_DATA_HC_ENABLED;
	flags |= enable_flag;
	REG_WR16(bp, addr, flags);
	DP(NETIF_MSG_HW, "port %x fw_sb_id %d sb_index %d disable %d\n",
			  port, fw_sb_id, sb_index, disable);
}

/* used only at init
 * locking is done by mcp
 */
void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
{
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);
}

static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
{
	u32 val;

	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
	pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);

	return val;
}

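/*
 * Note: the two helpers above reach registers through the PCI config
 * space GRC address/data window rather than the memory-mapped BAR; the
 * final write moves the window back to a benign offset.
 */
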
#define DMAE_DP_SRC_GRC		"grc src_addr [%08x]"
#define DMAE_DP_SRC_PCI		"pci src_addr [%x:%08x]"
#define DMAE_DP_DST_GRC		"grc dst_addr [%08x]"
#define DMAE_DP_DST_PCI		"pci dst_addr [%x:%08x]"
#define DMAE_DP_DST_NONE	"dst_addr [none]"

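/* Log a DMAE command, decoding the source/destination types from the
 * opcode so PCI addresses print as hi:lo pairs and GRC addresses print
 * as dword offsets (hence the ">> 2" below). */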
void bnx2x_dp_dmae(struct bnx2x *bp, struct dmae_command *dmae, int msglvl)
{
	u32 src_type = dmae->opcode & DMAE_COMMAND_SRC;

	switch (dmae->opcode & DMAE_COMMAND_DST) {
	case DMAE_CMD_DST_PCI:
		if (src_type == DMAE_CMD_SRC_PCI)
			DP(msglvl, "DMAE: opcode 0x%08x\n"
			   "src [%x:%08x], len [%d*4], dst [%x:%08x]\n"
			   "comp_addr [%x:%08x], comp_val 0x%08x\n",
			   dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
			   dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo,
			   dmae->comp_addr_hi, dmae->comp_addr_lo,
			   dmae->comp_val);
		else
			DP(msglvl, "DMAE: opcode 0x%08x\n"
			   "src [%08x], len [%d*4], dst [%x:%08x]\n"
			   "comp_addr [%x:%08x], comp_val 0x%08x\n",
			   dmae->opcode, dmae->src_addr_lo >> 2,
			   dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo,
			   dmae->comp_addr_hi, dmae->comp_addr_lo,
			   dmae->comp_val);
		break;
	case DMAE_CMD_DST_GRC:
		if (src_type == DMAE_CMD_SRC_PCI)
			DP(msglvl, "DMAE: opcode 0x%08x\n"
			   "src [%x:%08x], len [%d*4], dst_addr [%08x]\n"
			   "comp_addr [%x:%08x], comp_val 0x%08x\n",
			   dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
			   dmae->len, dmae->dst_addr_lo >> 2,
			   dmae->comp_addr_hi, dmae->comp_addr_lo,
			   dmae->comp_val);
		else
			DP(msglvl, "DMAE: opcode 0x%08x\n"
			   "src [%08x], len [%d*4], dst [%08x]\n"
			   "comp_addr [%x:%08x], comp_val 0x%08x\n",
			   dmae->opcode, dmae->src_addr_lo >> 2,
			   dmae->len, dmae->dst_addr_lo >> 2,
			   dmae->comp_addr_hi, dmae->comp_addr_lo,
			   dmae->comp_val);
		break;
	default:
		if (src_type == DMAE_CMD_SRC_PCI)
			DP(msglvl, "DMAE: opcode 0x%08x\n"
			   DP_LEVEL "src_addr [%x:%08x]  len [%d * 4]  "
				    "dst_addr [none]\n"
			   DP_LEVEL "comp_addr [%x:%08x]  comp_val 0x%08x\n",
			   dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
			   dmae->len, dmae->comp_addr_hi, dmae->comp_addr_lo,
			   dmae->comp_val);
		else
			DP(msglvl, "DMAE: opcode 0x%08x\n"
			   DP_LEVEL "src_addr [%08x]  len [%d * 4]  "
				    "dst_addr [none]\n"
			   DP_LEVEL "comp_addr [%x:%08x]  comp_val 0x%08x\n",
			   dmae->opcode, dmae->src_addr_lo >> 2,
			   dmae->len, dmae->comp_addr_hi, dmae->comp_addr_lo,
			   dmae->comp_val);
		break;
	}

}

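/* DMAE "go" registers, one per hardware command slot; writing 1 to a
 * slot's register launches the command loaded into that slot. */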
const u32 dmae_reg_go_c[] = {
	DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
	DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
	DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11,
	DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
};

/* copy command into DMAE command memory and set DMAE command go */
void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae, int idx)
{
	u32 cmd_offset;
	int i;

	cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
	for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
		REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));

		DP(BNX2X_MSG_OFF, "DMAE cmd[%d].%d (0x%08x) : 0x%08x\n",
		   idx, i, cmd_offset + i*4, *(((u32 *)dmae) + i));
	}
	REG_WR(bp, dmae_reg_go_c[idx], 1);
}

u32 bnx2x_dmae_opcode_add_comp(u32 opcode, u8 comp_type)
{
	return opcode | ((comp_type << DMAE_COMMAND_C_DST_SHIFT) |
			 DMAE_CMD_C_ENABLE);
}

u32 bnx2x_dmae_opcode_clr_src_reset(u32 opcode)
{
	return opcode & ~DMAE_CMD_SRC_RESET;
}

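/* Compose a DMAE opcode from scratch: source/destination types, the
 * issuing function's port and VN, error policy and host endianness;
 * a completion type can be OR'd in via the helper above. */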
u32 bnx2x_dmae_opcode(struct bnx2x *bp, u8 src_type, u8 dst_type,
		      bool with_comp, u8 comp_type)
{
	u32 opcode = 0;

	opcode |= ((src_type << DMAE_COMMAND_SRC_SHIFT) |
		   (dst_type << DMAE_COMMAND_DST_SHIFT));

	opcode |= (DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET);

	opcode |= (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0);
	opcode |= ((BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT) |
		   (BP_E1HVN(bp) << DMAE_COMMAND_DST_VN_SHIFT));
	opcode |= (DMAE_COM_SET_ERR << DMAE_COMMAND_ERR_POLICY_SHIFT);

#ifdef __BIG_ENDIAN
	opcode |= DMAE_CMD_ENDIANITY_B_DW_SWAP;
#else
	opcode |= DMAE_CMD_ENDIANITY_DW_SWAP;
#endif
	if (with_comp)
		opcode = bnx2x_dmae_opcode_add_comp(opcode, comp_type);
	return opcode;
}

void bnx2x_prep_dmae_with_comp(struct bnx2x *bp, struct dmae_command *dmae,
			       u8 src_type, u8 dst_type)
{
	memset(dmae, 0, sizeof(struct dmae_command));

	/* set the opcode */
	dmae->opcode = bnx2x_dmae_opcode(bp, src_type, dst_type,
					 true, DMAE_COMP_PCI);

	/* fill in the completion parameters */
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_val = DMAE_COMP_VAL;
}

/* issue a DMAE command over the init channel and wait for completion */
int bnx2x_issue_dmae_with_comp(struct bnx2x *bp, struct dmae_command *dmae)
{
	u32 *wb_comp = bnx2x_sp(bp, wb_comp);
	int cnt = CHIP_REV_IS_SLOW(bp) ? (400000) : 40;
	int rc = 0;

	DP(BNX2X_MSG_OFF, "data before [0x%08x 0x%08x 0x%08x 0x%08x]\n",
	   bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
	   bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

	/* lock the dmae channel */
	mutex_lock(&bp->dmae_mutex);

	/* reset completion */
	*wb_comp = 0;

	/* post the command on the channel used for initializations */
	bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));

	/* wait for completion */
	udelay(5);
	while ((*wb_comp & ~DMAE_PCI_ERR_FLAG) != DMAE_COMP_VAL) {
		DP(BNX2X_MSG_OFF, "wb_comp 0x%08x\n", *wb_comp);

		if (!cnt) {
			BNX2X_ERR("DMAE timeout!\n");
			rc = DMAE_TIMEOUT;
			goto unlock;
		}
		cnt--;
		udelay(50);
	}
	if (*wb_comp & DMAE_PCI_ERR_FLAG) {
		BNX2X_ERR("DMAE PCI error!\n");
		rc = DMAE_PCI_ERROR;
	}

	DP(BNX2X_MSG_OFF, "data after [0x%08x 0x%08x 0x%08x 0x%08x]\n",
	   bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
	   bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

unlock:
	mutex_unlock(&bp->dmae_mutex);
	return rc;
}

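/* The write/read wrappers below move a buffer between host memory and
 * GRC space on the init DMAE channel; before DMAE is ready they fall
 * back to slow indirect register accesses. */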
void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
		      u32 len32)
{
	struct dmae_command dmae;

	if (!bp->dmae_ready) {
		u32 *data = bnx2x_sp(bp, wb_data[0]);

		DP(BNX2X_MSG_OFF, "DMAE is not ready (dst_addr %08x len32 %d)"
		   "  using indirect\n", dst_addr, len32);
		bnx2x_init_ind_wr(bp, dst_addr, data, len32);
		return;
	}

	/* set opcode and fixed command fields */
	bnx2x_prep_dmae_with_comp(bp, &dmae, DMAE_SRC_PCI, DMAE_DST_GRC);

	/* fill in addresses and len */
	dmae.src_addr_lo = U64_LO(dma_addr);
	dmae.src_addr_hi = U64_HI(dma_addr);
	dmae.dst_addr_lo = dst_addr >> 2;
	dmae.dst_addr_hi = 0;
	dmae.len = len32;

	bnx2x_dp_dmae(bp, &dmae, BNX2X_MSG_OFF);

	/* issue the command and wait for completion */
	bnx2x_issue_dmae_with_comp(bp, &dmae);
}

void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
{
	struct dmae_command dmae;

	if (!bp->dmae_ready) {
		u32 *data = bnx2x_sp(bp, wb_data[0]);
		int i;

		DP(BNX2X_MSG_OFF, "DMAE is not ready (src_addr %08x len32 %d)"
		   "  using indirect\n", src_addr, len32);
		for (i = 0; i < len32; i++)
			data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
		return;
	}

	/* set opcode and fixed command fields */
	bnx2x_prep_dmae_with_comp(bp, &dmae, DMAE_SRC_GRC, DMAE_DST_PCI);

	/* fill in addresses and len */
	dmae.src_addr_lo = src_addr >> 2;
	dmae.src_addr_hi = 0;
	dmae.dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
	dmae.dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
	dmae.len = len32;

	bnx2x_dp_dmae(bp, &dmae, BNX2X_MSG_OFF);

	/* issue the command and wait for completion */
	bnx2x_issue_dmae_with_comp(bp, &dmae);
}

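/* Write a long buffer in chunks of at most DMAE_LEN32_WR_MAX dwords,
 * since a single DMAE command is limited in length. */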
void bnx2x_write_dmae_phys_len(struct bnx2x *bp, dma_addr_t phys_addr,
			       u32 addr, u32 len)
{
	int dmae_wr_max = DMAE_LEN32_WR_MAX(bp);
	int offset = 0;

	while (len > dmae_wr_max) {
		bnx2x_write_dmae(bp, phys_addr + offset,
				 addr + offset, dmae_wr_max);
		offset += dmae_wr_max * 4;
		len -= dmae_wr_max;
	}

	bnx2x_write_dmae(bp, phys_addr + offset, addr + offset, len);
}

/* used only for slowpath so not inlined */
static void bnx2x_wb_wr(struct bnx2x *bp, int reg, u32 val_hi, u32 val_lo)
{
	u32 wb_write[2];

	wb_write[0] = val_hi;
	wb_write[1] = val_lo;
	REG_WR_DMAE(bp, reg, wb_write, 2);
}

#ifdef USE_WB_RD
static u64 bnx2x_wb_rd(struct bnx2x *bp, int reg)
{
	u32 wb_data[2];

	REG_RD_DMAE(bp, reg, wb_data, 2);

	return HILO_U64(wb_data[0], wb_data[1]);
}
#endif

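/* Scan the assert lists of all four STORM processors and print every
 * valid entry; returns the number of asserts found. */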
static int bnx2x_mc_assert(struct bnx2x *bp)
{
	char last_idx;
	int i, rc = 0;
	u32 row0, row1, row2, row3;

	/* XSTORM */
	last_idx = REG_RD8(bp, BAR_XSTRORM_INTMEM +
			   XSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* TSTORM */
	last_idx = REG_RD8(bp, BAR_TSTRORM_INTMEM +
			   TSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* CSTORM */
	last_idx = REG_RD8(bp, BAR_CSTRORM_INTMEM +
			   CSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* USTORM */
	last_idx = REG_RD8(bp, BAR_USTRORM_INTMEM +
			   USTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	return rc;
}

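/* Print the MCP firmware trace buffer (kept just below the shmem base)
 * to the kernel log. */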
static void bnx2x_fw_dump(struct bnx2x *bp)
{
	u32 addr;
	u32 mark, offset;
	__be32 data[9];
	int word;
	u32 trace_shmem_base;

	if (BP_NOMCP(bp)) {
		BNX2X_ERR("NO MCP - can not dump\n");
		return;
	}

	if (BP_PATH(bp) == 0)
		trace_shmem_base = bp->common.shmem_base;
	else
		trace_shmem_base = SHMEM2_RD(bp, other_shmem_base_addr);
	addr = trace_shmem_base - 0x0800 + 4;
	mark = REG_RD(bp, addr);
	mark = (CHIP_IS_E1x(bp) ? MCP_REG_MCPR_SCRATCH : MCP_A_REG_MCPR_SCRATCH)
			+ ((mark + 0x3) & ~0x3) - 0x08000000;
	pr_err("begin fw dump (mark 0x%x)\n", mark);

	pr_err("");
	for (offset = mark; offset <= trace_shmem_base; offset += 0x8*4) {
		for (word = 0; word < 8; word++)
			data[word] = htonl(REG_RD(bp, offset + 4*word));
		data[8] = 0x0;
		pr_cont("%s", (char *)data);
	}
	for (offset = addr + 4; offset <= mark; offset += 0x8*4) {
		for (word = 0; word < 8; word++)
			data[word] = htonl(REG_RD(bp, offset + 4*word));
		data[8] = 0x0;
		pr_cont("%s", (char *)data);
	}
	pr_err("end of fw dump\n");
}

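/* Dump driver state for post-mortem analysis: default and per-queue
 * status-block contents and ring indices, then (under
 * BNX2X_STOP_ON_ERROR) the rings themselves, followed by the MCP trace
 * and the STORM assert lists. */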
void bnx2x_panic_dump(struct bnx2x *bp)
{
	int i;
	u16 j;
	struct hc_sp_status_block_data sp_sb_data;
	int func = BP_FUNC(bp);
#ifdef BNX2X_STOP_ON_ERROR
	u16 start = 0, end = 0;
#endif

	bp->stats_state = STATS_STATE_DISABLED;
	DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");

	BNX2X_ERR("begin crash dump -----------------\n");

	/* Indices */
	/* Common */
	BNX2X_ERR("def_idx(0x%x)  def_att_idx(0x%x)  attn_state(0x%x)"
		  "  spq_prod_idx(0x%x)\n",
		  bp->def_idx, bp->def_att_idx,
		  bp->attn_state, bp->spq_prod_idx);
	BNX2X_ERR("DSB: attn bits(0x%x)  ack(0x%x)  id(0x%x)  idx(0x%x)\n",
		  bp->def_status_blk->atten_status_block.attn_bits,
		  bp->def_status_blk->atten_status_block.attn_bits_ack,
		  bp->def_status_blk->atten_status_block.status_block_id,
		  bp->def_status_blk->atten_status_block.attn_bits_index);
	BNX2X_ERR("     def (");
	for (i = 0; i < HC_SP_SB_MAX_INDICES; i++)
		pr_cont("0x%x%s",
			bp->def_status_blk->sp_sb.index_values[i],
			(i == HC_SP_SB_MAX_INDICES - 1) ? ")  " : " ");

	for (i = 0; i < sizeof(struct hc_sp_status_block_data)/sizeof(u32); i++)
		*((u32 *)&sp_sb_data + i) = REG_RD(bp, BAR_CSTRORM_INTMEM +
			CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(func) +
			i*sizeof(u32));

	pr_cont("igu_sb_id(0x%x)  igu_seg_id (0x%x) "
		"pf_id(0x%x)  vnic_id(0x%x)  "
		"vf_id(0x%x)  vf_valid (0x%x)\n",
		sp_sb_data.igu_sb_id,
		sp_sb_data.igu_seg_id,
		sp_sb_data.p_func.pf_id,
		sp_sb_data.p_func.vnic_id,
		sp_sb_data.p_func.vf_id,
		sp_sb_data.p_func.vf_valid);


	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];
		int loop;
		struct hc_status_block_data_e2 sb_data_e2;
		struct hc_status_block_data_e1x sb_data_e1x;
		struct hc_status_block_sm *hc_sm_p =
			CHIP_IS_E2(bp) ?
			sb_data_e2.common.state_machine :
			sb_data_e1x.common.state_machine;
		struct hc_index_data *hc_index_p =
			CHIP_IS_E2(bp) ?
			sb_data_e2.index_data :
			sb_data_e1x.index_data;
		int data_size;
		u32 *sb_data_p;

		/* Rx */
		BNX2X_ERR("fp%d: rx_bd_prod(0x%x)  rx_bd_cons(0x%x)"
			  "  rx_comp_prod(0x%x)"
			  "  rx_comp_cons(0x%x)  *rx_cons_sb(0x%x)\n",
			  i, fp->rx_bd_prod, fp->rx_bd_cons,
			  fp->rx_comp_prod,
			  fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
		BNX2X_ERR("     rx_sge_prod(0x%x)  last_max_sge(0x%x)"
			  "  fp_hc_idx(0x%x)\n",
			  fp->rx_sge_prod, fp->last_max_sge,
			  le16_to_cpu(fp->fp_hc_idx));

		/* Tx */
		BNX2X_ERR("fp%d: tx_pkt_prod(0x%x)  tx_pkt_cons(0x%x)"
			  "  tx_bd_prod(0x%x)  tx_bd_cons(0x%x)"
			  "  *tx_cons_sb(0x%x)\n",
			  i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
			  fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb));

		loop = CHIP_IS_E2(bp) ?
			HC_SB_MAX_INDICES_E2 : HC_SB_MAX_INDICES_E1X;

		/* host sb data */

		BNX2X_ERR("     run indexes (");
		for (j = 0; j < HC_SB_MAX_SM; j++)
			pr_cont("0x%x%s",
				fp->sb_running_index[j],
				(j == HC_SB_MAX_SM - 1) ? ")" : " ");

		BNX2X_ERR("     indexes (");
		for (j = 0; j < loop; j++)
			pr_cont("0x%x%s",
				fp->sb_index_values[j],
				(j == loop - 1) ? ")" : " ");
		/* fw sb data */
		data_size = CHIP_IS_E2(bp) ?
			sizeof(struct hc_status_block_data_e2) :
			sizeof(struct hc_status_block_data_e1x);
		data_size /= sizeof(u32);
		sb_data_p = CHIP_IS_E2(bp) ?
			(u32 *)&sb_data_e2 :
			(u32 *)&sb_data_e1x;
		/* copy sb data in here */
		for (j = 0; j < data_size; j++)
			*(sb_data_p + j) = REG_RD(bp, BAR_CSTRORM_INTMEM +
				CSTORM_STATUS_BLOCK_DATA_OFFSET(fp->fw_sb_id) +
				j * sizeof(u32));

		if (CHIP_IS_E2(bp)) {
			pr_cont("pf_id(0x%x)  vf_id (0x%x)  vf_valid(0x%x) "
				"vnic_id(0x%x)  same_igu_sb_1b(0x%x)\n",
				sb_data_e2.common.p_func.pf_id,
				sb_data_e2.common.p_func.vf_id,
				sb_data_e2.common.p_func.vf_valid,
				sb_data_e2.common.p_func.vnic_id,
				sb_data_e2.common.same_igu_sb_1b);
		} else {
			pr_cont("pf_id(0x%x)  vf_id (0x%x)  vf_valid(0x%x) "
				"vnic_id(0x%x)  same_igu_sb_1b(0x%x)\n",
				sb_data_e1x.common.p_func.pf_id,
				sb_data_e1x.common.p_func.vf_id,
				sb_data_e1x.common.p_func.vf_valid,
				sb_data_e1x.common.p_func.vnic_id,
				sb_data_e1x.common.same_igu_sb_1b);
		}

		/* SB_SMs data */
		for (j = 0; j < HC_SB_MAX_SM; j++) {
			pr_cont("SM[%d] __flags (0x%x) "
				"igu_sb_id (0x%x)  igu_seg_id(0x%x) "
				"time_to_expire (0x%x) "
				"timer_value(0x%x)\n", j,
				hc_sm_p[j].__flags,
				hc_sm_p[j].igu_sb_id,
				hc_sm_p[j].igu_seg_id,
				hc_sm_p[j].time_to_expire,
				hc_sm_p[j].timer_value);
		}

		/* Indices data */
		for (j = 0; j < loop; j++) {
			pr_cont("INDEX[%d] flags (0x%x) "
				"timeout (0x%x)\n", j,
				hc_index_p[j].flags,
				hc_index_p[j].timeout);
		}
	}

#ifdef BNX2X_STOP_ON_ERROR
	/* Rings */
	/* Rx */
	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
		end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
		for (j = start; j != end; j = RX_BD(j + 1)) {
			u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
			struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];

			BNX2X_ERR("fp%d: rx_bd[%x]=[%x:%x]  sw_bd=[%p]\n",
				  i, j, rx_bd[1], rx_bd[0], sw_bd->skb);
		}

		start = RX_SGE(fp->rx_sge_prod);
		end = RX_SGE(fp->last_max_sge);
		for (j = start; j != end; j = RX_SGE(j + 1)) {
			u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j];
			struct sw_rx_page *sw_page = &fp->rx_page_ring[j];

			BNX2X_ERR("fp%d: rx_sge[%x]=[%x:%x]  sw_page=[%p]\n",
				  i, j, rx_sge[1], rx_sge[0], sw_page->page);
		}

		start = RCQ_BD(fp->rx_comp_cons - 10);
		end = RCQ_BD(fp->rx_comp_cons + 503);
		for (j = start; j != end; j = RCQ_BD(j + 1)) {
			u32 *cqe = (u32 *)&fp->rx_comp_ring[j];

			BNX2X_ERR("fp%d: cqe[%x]=[%x:%x:%x:%x]\n",
				  i, j, cqe[0], cqe[1], cqe[2], cqe[3]);
		}
	}

	/* Tx */
	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10);
		end = TX_BD(le16_to_cpu(*fp->tx_cons_sb) + 245);
		for (j = start; j != end; j = TX_BD(j + 1)) {
			struct sw_tx_bd *sw_bd = &fp->tx_buf_ring[j];

			BNX2X_ERR("fp%d: packet[%x]=[%p,%x]\n",
				  i, j, sw_bd->skb, sw_bd->first_bd);
		}

		start = TX_BD(fp->tx_bd_cons - 10);
		end = TX_BD(fp->tx_bd_cons + 254);
		for (j = start; j != end; j = TX_BD(j + 1)) {
			u32 *tx_bd = (u32 *)&fp->tx_desc_ring[j];

			BNX2X_ERR("fp%d: tx_bd[%x]=[%x:%x:%x:%x]\n",
				  i, j, tx_bd[0], tx_bd[1], tx_bd[2], tx_bd[3]);
		}
	}
#endif
	bnx2x_fw_dump(bp);
	bnx2x_mc_assert(bp);
	BNX2X_ERR("end crash dump -----------------\n");
}

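/* Enable interrupts through the HC block (used on E1x chips), selecting
 * MSI-X, MSI or INTx mode according to the device flags. */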
static void bnx2x_hc_int_enable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	u32 val = REG_RD(bp, addr);
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
	int msi = (bp->flags & USING_MSI_FLAG) ? 1 : 0;

	if (msix) {
		val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			 HC_CONFIG_0_REG_INT_LINE_EN_0);
		val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);
	} else if (msi) {
		val &= ~HC_CONFIG_0_REG_INT_LINE_EN_0;
		val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);
	} else {
		val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_INT_LINE_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);

		if (!CHIP_IS_E1(bp)) {
			DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
			   val, port, addr);

			REG_WR(bp, addr, val);

			val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
		}
	}

	if (CHIP_IS_E1(bp))
		REG_WR(bp, HC_REG_INT_MASK + port*4, 0x1FFFF);

	DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)  mode %s\n",
	   val, port, addr, (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));

	REG_WR(bp, addr, val);
	/*
	 * Ensure that HC_CONFIG is written before leading/trailing edge config
	 */
	mmiowb();
	barrier();

	if (!CHIP_IS_E1(bp)) {
		/* init leading/trailing edge */
		if (IS_MF(bp)) {
			val = (0xee0f | (1 << (BP_E1HVN(bp) + 4)));
			if (bp->port.pmf)
				/* enable nig and gpio3 attention */
				val |= 0x1100;
		} else
			val = 0xffff;

		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
	}

	/* Make sure that interrupts are indeed enabled from here on */
	mmiowb();
}

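/* Same as above, but for the IGU interrupt block. */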
static void bnx2x_igu_int_enable(struct bnx2x *bp)
{
	u32 val;
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
	int msi = (bp->flags & USING_MSI_FLAG) ? 1 : 0;

	val = REG_RD(bp, IGU_REG_PF_CONFIGURATION);

	if (msix) {
		val &= ~(IGU_PF_CONF_INT_LINE_EN |
			 IGU_PF_CONF_SINGLE_ISR_EN);
		val |= (IGU_PF_CONF_FUNC_EN |
			IGU_PF_CONF_MSI_MSIX_EN |
			IGU_PF_CONF_ATTN_BIT_EN);
	} else if (msi) {
		val &= ~IGU_PF_CONF_INT_LINE_EN;
		val |= (IGU_PF_CONF_FUNC_EN |
			IGU_PF_CONF_MSI_MSIX_EN |
			IGU_PF_CONF_ATTN_BIT_EN |
			IGU_PF_CONF_SINGLE_ISR_EN);
	} else {
		val &= ~IGU_PF_CONF_MSI_MSIX_EN;
		val |= (IGU_PF_CONF_FUNC_EN |
			IGU_PF_CONF_INT_LINE_EN |
			IGU_PF_CONF_ATTN_BIT_EN |
			IGU_PF_CONF_SINGLE_ISR_EN);
	}

	DP(NETIF_MSG_INTR, "write 0x%x to IGU  mode %s\n",
	   val, (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));

	REG_WR(bp, IGU_REG_PF_CONFIGURATION, val);

	barrier();

	/* init leading/trailing edge */
	if (IS_MF(bp)) {
		val = (0xee0f | (1 << (BP_E1HVN(bp) + 4)));
		if (bp->port.pmf)
			/* enable nig and gpio3 attention */
			val |= 0x1100;
	} else
		val = 0xffff;

	REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, val);
	REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, val);

	/* Make sure that interrupts are indeed enabled from here on */
	mmiowb();
}

void bnx2x_int_enable(struct bnx2x *bp)
{
	if (bp->common.int_block == INT_BLOCK_HC)
		bnx2x_hc_int_enable(bp);
	else
		bnx2x_igu_int_enable(bp);
}

static void bnx2x_hc_int_disable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	u32 val = REG_RD(bp, addr);

	/*
	 * In E1 we must use only PCI configuration space to disable the
	 * MSI/MSI-X capability.
	 * It is forbidden to disable IGU_PF_CONF_MSI_MSIX_EN in the HC block.
	 */
	if (CHIP_IS_E1(bp)) {
		/* Since IGU_PF_CONF_MSI_MSIX_EN is still always on,
		 * use the mask register to prevent the HC from sending
		 * interrupts after we exit this function
		 */
		REG_WR(bp, HC_REG_INT_MASK + port*4, 0);

		val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			 HC_CONFIG_0_REG_INT_LINE_EN_0 |
			 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
	} else
		val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			 HC_CONFIG_0_REG_INT_LINE_EN_0 |
			 HC_CONFIG_0_REG_ATTN_BIT_EN_0);

	DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
	   val, port, addr);

	/* flush all outstanding writes */
	mmiowb();

	REG_WR(bp, addr, val);
	if (REG_RD(bp, addr) != val)
		BNX2X_ERR("BUG! proper val not read from IGU!\n");
}

static void bnx2x_igu_int_disable(struct bnx2x *bp)
{
	u32 val = REG_RD(bp, IGU_REG_PF_CONFIGURATION);

	val &= ~(IGU_PF_CONF_MSI_MSIX_EN |
		 IGU_PF_CONF_INT_LINE_EN |
		 IGU_PF_CONF_ATTN_BIT_EN);

	DP(NETIF_MSG_INTR, "write %x to IGU\n", val);

	/* flush all outstanding writes */
	mmiowb();

	REG_WR(bp, IGU_REG_PF_CONFIGURATION, val);
	if (REG_RD(bp, IGU_REG_PF_CONFIGURATION) != val)
		BNX2X_ERR("BUG! proper val not read from IGU!\n");
}

void bnx2x_int_disable(struct bnx2x *bp)
{
	if (bp->common.int_block == INT_BLOCK_HC)
		bnx2x_hc_int_disable(bp);
	else
		bnx2x_igu_int_disable(bp);
}

void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
{
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
	int i, offset;

	/* disable interrupt handling */
	atomic_inc(&bp->intr_sem);
	smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */

	if (disable_hw)
		/* prevent the HW from sending interrupts */
		bnx2x_int_disable(bp);

	/* make sure all ISRs are done */
	if (msix) {
		synchronize_irq(bp->msix_table[0].vector);
		offset = 1;
#ifdef BCM_CNIC
		offset++;
#endif
		for_each_queue(bp, i)
			synchronize_irq(bp->msix_table[i + offset].vector);
	} else
		synchronize_irq(bp->pdev->irq);

	/* make sure sp_task is not running */
	cancel_delayed_work(&bp->sp_task);
	flush_workqueue(bnx2x_wq);
}

/* fast path */

/*
 * General service functions
 */

/* Return true if the lock was acquired successfully */
static bool bnx2x_trylock_hw_lock(struct bnx2x *bp, u32 resource)
{
	u32 lock_status;
	u32 resource_bit = (1 << resource);
	int func = BP_FUNC(bp);
	u32 hw_lock_control_reg;

	DP(NETIF_MSG_HW, "Trying to take a lock on resource %d\n", resource);

	/* Validating that the resource is within range */
	if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
		DP(NETIF_MSG_HW,
		   "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
		   resource, HW_LOCK_MAX_RESOURCE_VALUE);
		return false;
	}

	if (func <= 5)
		hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
	else
		hw_lock_control_reg =
				(MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);

	/* Try to acquire the lock */
	REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
	lock_status = REG_RD(bp, hw_lock_control_reg);
	if (lock_status & resource_bit)
		return true;

	DP(NETIF_MSG_HW, "Failed to get a lock on resource %d\n", resource);
	return false;
}

#ifdef BCM_CNIC
static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid);
#endif

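/* Slow-path event: a ramrod completed on this fastpath's connection.
 * Advance the fastpath state machine and return the slow-path queue
 * credit taken when the ramrod was posted. */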
void bnx2x_sp_event(struct bnx2x_fastpath *fp,
		    union eth_rx_cqe *rr_cqe)
{
	struct bnx2x *bp = fp->bp;
	int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
	int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);

	DP(BNX2X_MSG_SP,
	   "fp %d cid %d got ramrod #%d  state is %x  type is %d\n",
	   fp->index, cid, command, bp->state,
	   rr_cqe->ramrod_cqe.ramrod_type);

	switch (command | fp->state) {
	case (RAMROD_CMD_ID_ETH_CLIENT_SETUP | BNX2X_FP_STATE_OPENING):
		DP(NETIF_MSG_IFUP, "got MULTI[%d] setup ramrod\n", cid);
		fp->state = BNX2X_FP_STATE_OPEN;
		break;

	case (RAMROD_CMD_ID_ETH_HALT | BNX2X_FP_STATE_HALTING):
		DP(NETIF_MSG_IFDOWN, "got MULTI[%d] halt ramrod\n", cid);
		fp->state = BNX2X_FP_STATE_HALTED;
		break;

Dmitry Kravkov523224a2010-10-06 03:23:26 +00001375 case (RAMROD_CMD_ID_ETH_TERMINATE | BNX2X_FP_STATE_TERMINATING):
1376 DP(NETIF_MSG_IFDOWN, "got MULTI[%d] teminate ramrod\n", cid);
1377 fp->state = BNX2X_FP_STATE_TERMINATED;
Eliezer Tamir49d66772008-02-28 11:53:13 -08001378 break;
1379
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001380 default:
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001381 BNX2X_ERR("unexpected MC reply (%d) "
1382 "fp[%d] state is %x\n",
1383 command, fp->index, fp->state);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07001384 break;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001385 }
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001386
Dmitry Kravkov8fe23fb2010-10-06 03:27:41 +00001387 smp_mb__before_atomic_inc();
1388 atomic_inc(&bp->spq_left);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001389 /* push the change in fp->state and towards the memory */
1390 smp_wmb();
1391
1392 return;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001393}

irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
{
	struct bnx2x *bp = netdev_priv(dev_instance);
	u16 status = bnx2x_ack_int(bp);
	u16 mask;
	int i;

	/* Return here if interrupt is shared and it's not for us */
	if (unlikely(status == 0)) {
		DP(NETIF_MSG_INTR, "not our interrupt!\n");
		return IRQ_NONE;
	}
	DP(NETIF_MSG_INTR, "got an interrupt  status 0x%x\n", status);

	/* Return here if interrupt is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
		return IRQ_HANDLED;
	}

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif

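	/*
	 * Status bit layout (as decoded below): bit 0 belongs to the
	 * slowpath status block; the higher bits map one-to-one to the
	 * fastpath (and, with BCM_CNIC, the CNIC) status blocks starting
	 * at bit 1.
	 */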
	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		mask = 0x2 << (fp->index + CNIC_CONTEXT_USE);
		if (status & mask) {
			/* Handle Rx and Tx according to SB id */
			prefetch(fp->rx_cons_sb);
			prefetch(fp->tx_cons_sb);
			prefetch(&fp->sb_running_index[SM_RX_ID]);
			napi_schedule(&bnx2x_fp(bp, fp->index, napi));
			status &= ~mask;
		}
	}

#ifdef BCM_CNIC
	mask = 0x2;
	if (status & (mask | 0x1)) {
		struct cnic_ops *c_ops = NULL;

		rcu_read_lock();
		c_ops = rcu_dereference(bp->cnic_ops);
		if (c_ops)
			c_ops->cnic_handler(bp->cnic_data, NULL);
		rcu_read_unlock();

		status &= ~mask;
	}
#endif

	if (unlikely(status & 0x1)) {
		queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);

		status &= ~0x1;
		if (!status)
			return IRQ_HANDLED;
	}

	if (unlikely(status))
		DP(NETIF_MSG_INTR, "got an unknown interrupt! (status 0x%x)\n",
		   status);

	return IRQ_HANDLED;
}

/* end of fast path */


/* Link */

/*
 * General service functions
 */

int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
{
	u32 lock_status;
	u32 resource_bit = (1 << resource);
	int func = BP_FUNC(bp);
	u32 hw_lock_control_reg;
	int cnt;

	/* Validating that the resource is within range */
	if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
		DP(NETIF_MSG_HW,
		   "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
		   resource, HW_LOCK_MAX_RESOURCE_VALUE);
		return -EINVAL;
	}

	if (func <= 5) {
		hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
	} else {
		hw_lock_control_reg =
				(MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
	}

	/* Validating that the resource is not already taken */
	lock_status = REG_RD(bp, hw_lock_control_reg);
	if (lock_status & resource_bit) {
		DP(NETIF_MSG_HW, "lock_status 0x%x  resource_bit 0x%x\n",
		   lock_status, resource_bit);
		return -EEXIST;
	}

	/* Try for 5 seconds, polling every 5ms */
	for (cnt = 0; cnt < 1000; cnt++) {
		/* Try to acquire the lock */
		REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
		lock_status = REG_RD(bp, hw_lock_control_reg);
		if (lock_status & resource_bit)
			return 0;

		msleep(5);
	}
	DP(NETIF_MSG_HW, "Timeout\n");
	return -EAGAIN;
}

int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
{
	u32 lock_status;
	u32 resource_bit = (1 << resource);
	int func = BP_FUNC(bp);
	u32 hw_lock_control_reg;

	DP(NETIF_MSG_HW, "Releasing a lock on resource %d\n", resource);

	/* Validating that the resource is within range */
	if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
		DP(NETIF_MSG_HW,
		   "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
		   resource, HW_LOCK_MAX_RESOURCE_VALUE);
		return -EINVAL;
	}

	if (func <= 5) {
		hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
	} else {
		hw_lock_control_reg =
				(MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
	}

	/* Validating that the resource is currently taken */
	lock_status = REG_RD(bp, hw_lock_control_reg);
	if (!(lock_status & resource_bit)) {
		DP(NETIF_MSG_HW, "lock_status 0x%x  resource_bit 0x%x\n",
		   lock_status, resource_bit);
		return -EFAULT;
	}

	REG_WR(bp, hw_lock_control_reg, resource_bit);
	return 0;
}


int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port)
{
	/* The GPIO should be swapped if swap register is set and active */
	int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
			 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
	int gpio_shift = gpio_num +
			(gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
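	/*
	 * Example (illustrative): with the port-swap strap active, port 0
	 * services the pins of the other port, so GPIO 2 is read at bit
	 * (2 + MISC_REGISTERS_GPIO_PORT_SHIFT) of MISC_REG_GPIO instead
	 * of bit 2.
	 */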
	u32 gpio_mask = (1 << gpio_shift);
	u32 gpio_reg;
	int value;

	if (gpio_num > MISC_REGISTERS_GPIO_3) {
		BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
		return -EINVAL;
	}

	/* read GPIO value */
	gpio_reg = REG_RD(bp, MISC_REG_GPIO);

	/* get the requested pin value */
	if ((gpio_reg & gpio_mask) == gpio_mask)
		value = 1;
	else
		value = 0;

	DP(NETIF_MSG_LINK, "pin %d  value 0x%x\n", gpio_num, value);

	return value;
}

int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
{
	/* The GPIO should be swapped if swap register is set and active */
	int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
			 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
	int gpio_shift = gpio_num +
			(gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
	u32 gpio_mask = (1 << gpio_shift);
	u32 gpio_reg;

	if (gpio_num > MISC_REGISTERS_GPIO_3) {
		BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
		return -EINVAL;
	}

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
	/* read GPIO and mask except the float bits */
	gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);

	switch (mode) {
	case MISC_REGISTERS_GPIO_OUTPUT_LOW:
		DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output low\n",
		   gpio_num, gpio_shift);
		/* clear FLOAT and set CLR */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
		gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
		break;

	case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
		DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output high\n",
		   gpio_num, gpio_shift);
		/* clear FLOAT and set SET */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
		gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
		break;

	case MISC_REGISTERS_GPIO_INPUT_HI_Z:
		DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n",
		   gpio_num, gpio_shift);
		/* set FLOAT */
		gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
		break;

	default:
		break;
	}

	REG_WR(bp, MISC_REG_GPIO, gpio_reg);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);

	return 0;
}

int bnx2x_set_gpio_int(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
{
	/* The GPIO should be swapped if swap register is set and active */
	int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
			 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
	int gpio_shift = gpio_num +
			(gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
	u32 gpio_mask = (1 << gpio_shift);
	u32 gpio_reg;

	if (gpio_num > MISC_REGISTERS_GPIO_3) {
		BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
		return -EINVAL;
	}

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
	/* read GPIO int */
	gpio_reg = REG_RD(bp, MISC_REG_GPIO_INT);

	switch (mode) {
	case MISC_REGISTERS_GPIO_INT_OUTPUT_CLR:
		DP(NETIF_MSG_LINK, "Clear GPIO INT %d (shift %d) -> "
				   "output low\n", gpio_num, gpio_shift);
		/* clear SET and set CLR */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
		gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
		break;

	case MISC_REGISTERS_GPIO_INT_OUTPUT_SET:
		DP(NETIF_MSG_LINK, "Set GPIO INT %d (shift %d) -> "
				   "output high\n", gpio_num, gpio_shift);
		/* clear CLR and set SET */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
		gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
		break;

	default:
		break;
	}

	REG_WR(bp, MISC_REG_GPIO_INT, gpio_reg);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);

	return 0;
}

static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
{
	u32 spio_mask = (1 << spio_num);
	u32 spio_reg;

	if ((spio_num < MISC_REGISTERS_SPIO_4) ||
	    (spio_num > MISC_REGISTERS_SPIO_7)) {
		BNX2X_ERR("Invalid SPIO %d\n", spio_num);
		return -EINVAL;
	}

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
	/* read SPIO and mask except the float bits */
	spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT);

	switch (mode) {
	case MISC_REGISTERS_SPIO_OUTPUT_LOW:
		DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num);
		/* clear FLOAT and set CLR */
		spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
		spio_reg |=  (spio_mask << MISC_REGISTERS_SPIO_CLR_POS);
		break;

	case MISC_REGISTERS_SPIO_OUTPUT_HIGH:
		DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num);
		/* clear FLOAT and set SET */
		spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
		spio_reg |=  (spio_mask << MISC_REGISTERS_SPIO_SET_POS);
		break;

	case MISC_REGISTERS_SPIO_INPUT_HI_Z:
		DP(NETIF_MSG_LINK, "Set SPIO %d -> input\n", spio_num);
		/* set FLOAT */
		spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
		break;

	default:
		break;
	}

	REG_WR(bp, MISC_REG_SPIO, spio_reg);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);

	return 0;
}

int bnx2x_get_link_cfg_idx(struct bnx2x *bp)
{
	u32 sel_phy_idx = 0;
	if (bp->link_vars.link_up) {
		sel_phy_idx = EXT_PHY1;
		/* In case link is SERDES, check if the EXT_PHY2 is the one */
		if ((bp->link_vars.link_status & LINK_STATUS_SERDES_LINK) &&
		    (bp->link_params.phy[EXT_PHY2].supported & SUPPORTED_FIBRE))
			sel_phy_idx = EXT_PHY2;
	} else {

		switch (bnx2x_phy_selection(&bp->link_params)) {
		case PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT:
		case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY:
		case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY:
			sel_phy_idx = EXT_PHY1;
			break;
		case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY:
		case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY:
			sel_phy_idx = EXT_PHY2;
			break;
		}
	}
	/*
	 * The selected active PHY is always after swapping (in case PHY
	 * swapping is enabled). So when swapping is enabled, we need to
	 * reverse the configuration.
	 */

	if (bp->link_params.multi_phy_config &
	    PORT_HW_CFG_PHY_SWAPPED_ENABLED) {
		if (sel_phy_idx == EXT_PHY1)
			sel_phy_idx = EXT_PHY2;
		else if (sel_phy_idx == EXT_PHY2)
			sel_phy_idx = EXT_PHY1;
	}
	return LINK_CONFIG_IDX(sel_phy_idx);
}

void bnx2x_calc_fc_adv(struct bnx2x *bp)
{
	u8 cfg_idx = bnx2x_get_link_cfg_idx(bp);
	switch (bp->link_vars.ieee_fc &
		MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) {
	case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
		bp->port.advertising[cfg_idx] &= ~(ADVERTISED_Asym_Pause |
						   ADVERTISED_Pause);
		break;

	case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
		bp->port.advertising[cfg_idx] |= (ADVERTISED_Asym_Pause |
						  ADVERTISED_Pause);
		break;

	case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
		bp->port.advertising[cfg_idx] |= ADVERTISED_Asym_Pause;
		break;

	default:
		bp->port.advertising[cfg_idx] &= ~(ADVERTISED_Asym_Pause |
						   ADVERTISED_Pause);
		break;
	}
}

u8 bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode)
{
	if (!BP_NOMCP(bp)) {
		u8 rc;
		int cfx_idx = bnx2x_get_link_cfg_idx(bp);
		u16 req_line_speed = bp->link_params.req_line_speed[cfx_idx];
		/* Initialize link parameters structure variables */
		/* It is recommended to turn off RX FC for jumbo frames
		   for better performance */
		if ((CHIP_IS_E1x(bp)) && (bp->dev->mtu > 5000))
			bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_TX;
		else
			bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;

		bnx2x_acquire_phy_lock(bp);

		if (load_mode == LOAD_DIAG) {
			bp->link_params.loopback_mode = LOOPBACK_XGXS;
			bp->link_params.req_line_speed[cfx_idx] = SPEED_10000;
		}

		rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);

		bnx2x_release_phy_lock(bp);

		bnx2x_calc_fc_adv(bp);

		if (CHIP_REV_IS_SLOW(bp) && bp->link_vars.link_up) {
			bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
			bnx2x_link_report(bp);
		}
		bp->link_params.req_line_speed[cfx_idx] = req_line_speed;
		return rc;
	}
	BNX2X_ERR("Bootcode is missing - can not initialize link\n");
	return -EINVAL;
}

void bnx2x_link_set(struct bnx2x *bp)
{
	if (!BP_NOMCP(bp)) {
		bnx2x_acquire_phy_lock(bp);
		bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1);
		bnx2x_phy_init(&bp->link_params, &bp->link_vars);
		bnx2x_release_phy_lock(bp);

		bnx2x_calc_fc_adv(bp);
	} else
		BNX2X_ERR("Bootcode is missing - can not set link\n");
}

static void bnx2x__link_reset(struct bnx2x *bp)
{
	if (!BP_NOMCP(bp)) {
		bnx2x_acquire_phy_lock(bp);
		bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1);
		bnx2x_release_phy_lock(bp);
	} else
		BNX2X_ERR("Bootcode is missing - can not reset link\n");
}

u8 bnx2x_link_test(struct bnx2x *bp, u8 is_serdes)
{
	u8 rc = 0;

	if (!BP_NOMCP(bp)) {
		bnx2x_acquire_phy_lock(bp);
		rc = bnx2x_test_link(&bp->link_params, &bp->link_vars,
				     is_serdes);
		bnx2x_release_phy_lock(bp);
	} else
		BNX2X_ERR("Bootcode is missing - can not test link\n");

	return rc;
}

static void bnx2x_init_port_minmax(struct bnx2x *bp)
{
	u32 r_param = bp->link_vars.line_speed / 8;
	u32 fair_periodic_timeout_usec;
	u32 t_fair;

	memset(&(bp->cmng.rs_vars), 0,
	       sizeof(struct rate_shaping_vars_per_port));
	memset(&(bp->cmng.fair_vars), 0, sizeof(struct fairness_vars_per_port));

	/* 100 usec in SDM ticks = 25 since each tick is 4 usec */
	bp->cmng.rs_vars.rs_periodic_timeout = RS_PERIODIC_TIMEOUT_USEC / 4;

	/* this is the threshold below which no timer arming will occur.
	   The 1.25 coefficient makes the threshold a little bigger than
	   the real time, to compensate for timer inaccuracy */
	bp->cmng.rs_vars.rs_threshold =
				(RS_PERIODIC_TIMEOUT_USEC * r_param * 5) / 4;

	/* resolution of fairness timer */
	fair_periodic_timeout_usec = QM_ARB_BYTES / r_param;
	/* for 10G it is 1000usec. for 1G it is 10000usec. */
	t_fair = T_FAIR_COEF / bp->link_vars.line_speed;

	/* this is the threshold below which we won't arm the timer anymore */
	bp->cmng.fair_vars.fair_threshold = QM_ARB_BYTES;

	/* we multiply by 1e3/8 to get bytes/msec. We don't want the
	   credits to exceed t_fair*FAIR_MEM (the algorithm resolution) */
	bp->cmng.fair_vars.upper_bound = r_param * t_fair * FAIR_MEM;
	/* since each tick is 4 usec */
	bp->cmng.fair_vars.fairness_timeout = fair_periodic_timeout_usec / 4;
}

/* Calculates the sum of vn_min_rates.
   It's needed for further normalizing of the min_rates.
   Returns:
     sum of vn_min_rates.
       or
     0 - if all the min_rates are 0.
   In the latter case the fairness algorithm should be deactivated.
   If not all min_rates are zero then those that are zeroes will be set to 1.
 */
static void bnx2x_calc_vn_weight_sum(struct bnx2x *bp)
{
	int all_zero = 1;
	int vn;

	bp->vn_weight_sum = 0;
	for (vn = VN_0; vn < E1HVN_MAX; vn++) {
		u32 vn_cfg = bp->mf_config[vn];
		u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
				   FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
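		/*
		 * Illustrative numbers: a MIN_BW field of 25 (i.e. 25%)
		 * becomes 2500 after the "* 100" scaling above, which
		 * corresponds to a 2.5 Gbps share of a 10 Gbps link.
		 */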

		/* Skip hidden vns */
		if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE)
			continue;

		/* If min rate is zero - set it to 1 */
		if (!vn_min_rate)
			vn_min_rate = DEF_MIN_RATE;
		else
			all_zero = 0;

		bp->vn_weight_sum += vn_min_rate;
	}

	/* ... only if all min rates are zeros - disable fairness */
	if (all_zero) {
		bp->cmng.flags.cmng_enables &=
					~CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
		DP(NETIF_MSG_IFUP, "All MIN values are zeroes"
		   "  fairness will be disabled\n");
	} else
		bp->cmng.flags.cmng_enables |=
					CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
}

static void bnx2x_init_vn_minmax(struct bnx2x *bp, int vn)
{
	struct rate_shaping_vars_per_vn m_rs_vn;
	struct fairness_vars_per_vn m_fair_vn;
	u32 vn_cfg = bp->mf_config[vn];
	int func = 2*vn + BP_PORT(bp);
	u16 vn_min_rate, vn_max_rate;
	int i;

	/* If function is hidden - set min and max to zeroes */
	if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
		vn_min_rate = 0;
		vn_max_rate = 0;

	} else {
		vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
				FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
		/* If min rate is zero - set it to 1 */
		if (bp->vn_weight_sum && (vn_min_rate == 0))
			vn_min_rate = DEF_MIN_RATE;
		vn_max_rate = ((vn_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
				FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
	}

	DP(NETIF_MSG_IFUP,
	   "func %d: vn_min_rate %d  vn_max_rate %d  vn_weight_sum %d\n",
	   func, vn_min_rate, vn_max_rate, bp->vn_weight_sum);

	memset(&m_rs_vn, 0, sizeof(struct rate_shaping_vars_per_vn));
	memset(&m_fair_vn, 0, sizeof(struct fairness_vars_per_vn));

	/* global vn counter - maximal Mbps for this vn */
	m_rs_vn.vn_counter.rate = vn_max_rate;

	/* quota - number of bytes transmitted in this period */
	m_rs_vn.vn_counter.quota =
				(vn_max_rate * RS_PERIODIC_TIMEOUT_USEC) / 8;
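	/*
	 * vn_max_rate is in Mbps, i.e. bits per usec, so rate * period(usec)
	 * / 8 is the byte budget per period; e.g. 2500 Mbps over a 100 usec
	 * period allows ~31250 bytes (illustrative numbers).
	 */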

	if (bp->vn_weight_sum) {
		/* credit for each period of the fairness algorithm:
		   number of bytes in T_FAIR (the vn share the port rate).
		   vn_weight_sum should not be larger than 10000, thus
		   T_FAIR_COEF / (8 * vn_weight_sum) will always be greater
		   than zero */
		m_fair_vn.vn_credit_delta =
			max_t(u32, (vn_min_rate * (T_FAIR_COEF /
						   (8 * bp->vn_weight_sum))),
			      (bp->cmng.fair_vars.fair_threshold * 2));
		DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta %d\n",
		   m_fair_vn.vn_credit_delta);
	}

	/* Store it to internal memory */
	for (i = 0; i < sizeof(struct rate_shaping_vars_per_vn)/4; i++)
		REG_WR(bp, BAR_XSTRORM_INTMEM +
		       XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func) + i * 4,
		       ((u32 *)(&m_rs_vn))[i]);

	for (i = 0; i < sizeof(struct fairness_vars_per_vn)/4; i++)
		REG_WR(bp, BAR_XSTRORM_INTMEM +
		       XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func) + i * 4,
		       ((u32 *)(&m_fair_vn))[i]);
}

static int bnx2x_get_cmng_fns_mode(struct bnx2x *bp)
{
	if (CHIP_REV_IS_SLOW(bp))
		return CMNG_FNS_NONE;
	if (IS_MF(bp))
		return CMNG_FNS_MINMAX;

	return CMNG_FNS_NONE;
}

static void bnx2x_read_mf_cfg(struct bnx2x *bp)
{
	int vn;

	if (BP_NOMCP(bp))
		return; /* what should be the default value in this case? */

	for (vn = VN_0; vn < E1HVN_MAX; vn++) {
		int /*abs*/func = 2*vn + BP_PORT(bp);
		bp->mf_config[vn] =
			MF_CFG_RD(bp, func_mf_config[func].config);
	}
}

static void bnx2x_cmng_fns_init(struct bnx2x *bp, u8 read_cfg, u8 cmng_type)
{

	if (cmng_type == CMNG_FNS_MINMAX) {
		int vn;

		/* clear cmng_enables */
		bp->cmng.flags.cmng_enables = 0;

		/* read mf conf from shmem */
		if (read_cfg)
			bnx2x_read_mf_cfg(bp);

		/* Init rate shaping and fairness contexts */
		bnx2x_init_port_minmax(bp);

		/* vn_weight_sum and enable fairness if not 0 */
		bnx2x_calc_vn_weight_sum(bp);

		/* calculate and set min-max rate for each vn */
		for (vn = VN_0; vn < E1HVN_MAX; vn++)
			bnx2x_init_vn_minmax(bp, vn);

		/* always enable rate shaping and fairness */
		bp->cmng.flags.cmng_enables |=
					CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN;
		if (!bp->vn_weight_sum)
			DP(NETIF_MSG_IFUP, "All MIN values are zeroes"
			   "  fairness will be disabled\n");
		return;
	}

	/* rate shaping and fairness are disabled */
	DP(NETIF_MSG_IFUP,
	   "rate shaping and fairness are disabled\n");
}

static inline void bnx2x_link_sync_notify(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int func;
	int vn;

	/* Set the attention towards other drivers on the same port */
	for (vn = VN_0; vn < E1HVN_MAX; vn++) {
		if (vn == BP_E1HVN(bp))
			continue;

		func = ((vn << 1) | port);
		REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
		       (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
	}
}

/* This function is called upon link interrupt */
static void bnx2x_link_attn(struct bnx2x *bp)
{
	u32 prev_link_status = bp->link_vars.link_status;
	/* Make sure that we are synced with the current statistics */
	bnx2x_stats_handle(bp, STATS_EVENT_STOP);

	bnx2x_link_update(&bp->link_params, &bp->link_vars);

	if (bp->link_vars.link_up) {

		/* dropless flow control */
		if (!CHIP_IS_E1(bp) && bp->dropless_fc) {
			int port = BP_PORT(bp);
			u32 pause_enabled = 0;

			if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
				pause_enabled = 1;

			REG_WR(bp, BAR_USTRORM_INTMEM +
			       USTORM_ETH_PAUSE_ENABLED_OFFSET(port),
			       pause_enabled);
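
			/*
			 * The write above tells the FW, per port, whether Tx
			 * flow control is active, so that (with dropless_fc)
			 * it can pause instead of dropping when Rx buffers
			 * run low.
			 */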
		}

		if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
			struct host_port_stats *pstats;

			pstats = bnx2x_sp(bp, port_stats);
			/* reset old bmac stats */
			memset(&(pstats->mac_stx[0]), 0,
			       sizeof(struct mac_stx));
		}
		if (bp->state == BNX2X_STATE_OPEN)
			bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
	}

	/* indicate link status only if link status actually changed */
	if (prev_link_status != bp->link_vars.link_status)
		bnx2x_link_report(bp);

	if (IS_MF(bp))
		bnx2x_link_sync_notify(bp);

	if (bp->link_vars.link_up && bp->link_vars.line_speed) {
		int cmng_fns = bnx2x_get_cmng_fns_mode(bp);

		if (cmng_fns != CMNG_FNS_NONE) {
			bnx2x_cmng_fns_init(bp, false, cmng_fns);
			storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp));
		} else
			/* rate shaping and fairness are disabled */
			DP(NETIF_MSG_IFUP,
			   "single function mode without fairness\n");
	}
}

void bnx2x__link_status_update(struct bnx2x *bp)
{
	if ((bp->state != BNX2X_STATE_OPEN) || (bp->flags & MF_FUNC_DIS))
		return;

	bnx2x_link_status_update(&bp->link_params, &bp->link_vars);

	if (bp->link_vars.link_up)
		bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
	else
		bnx2x_stats_handle(bp, STATS_EVENT_STOP);

	/* the link status update could be the result of a DCC event,
	   hence re-read the shmem mf configuration */
	bnx2x_read_mf_cfg(bp);

	/* indicate link status */
	bnx2x_link_report(bp);
}

static void bnx2x_pmf_update(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 val;

	bp->port.pmf = 1;
	DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);

	/* enable nig attention */
	val = (0xff0f | (1 << (BP_E1HVN(bp) + 4)));
	if (bp->common.int_block == INT_BLOCK_HC) {
		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
	} else if (CHIP_IS_E2(bp)) {
		REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, val);
		REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, val);
	}

	bnx2x_stats_handle(bp, STATS_EVENT_PMF);
}

/* end of Link */

/* slow path */

/*
 * General service functions
 */

/* send the MCP a request, block until there is a reply */
u32 bnx2x_fw_command(struct bnx2x *bp, u32 command, u32 param)
{
	int mb_idx = BP_FW_MB_IDX(bp);
	u32 seq = ++bp->fw_seq;
	u32 rc = 0;
	u32 cnt = 1;
	u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;

	mutex_lock(&bp->fw_mb_mutex);
	SHMEM_WR(bp, func_mb[mb_idx].drv_mb_param, param);
	SHMEM_WR(bp, func_mb[mb_idx].drv_mb_header, (command | seq));

	DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq));

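	/*
	 * The MCP echoes the sequence number in fw_mb_header once it has
	 * processed the command; poll until the low bits match or the
	 * ~5 second budget (500 iterations) runs out.
	 */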
	do {
		/* let the FW do its magic ... */
		msleep(delay);

		rc = SHMEM_RD(bp, func_mb[mb_idx].fw_mb_header);

		/* Give the FW up to 5 seconds (500*10ms) */
	} while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 500));

	DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n",
	   cnt*delay, rc, seq);

	/* is this a reply to our command? */
	if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK))
		rc &= FW_MSG_CODE_MASK;
	else {
		/* FW BUG! */
		BNX2X_ERR("FW failed to respond!\n");
		bnx2x_fw_dump(bp);
		rc = 0;
	}
	mutex_unlock(&bp->fw_mb_mutex);

	return rc;
}

/* must be called under rtnl_lock */
void bnx2x_rxq_set_mac_filters(struct bnx2x *bp, u16 cl_id, u32 filters)
{
	u32 mask = (1 << cl_id);

	/* initial setting is BNX2X_ACCEPT_NONE */
	u8 drop_all_ucast = 1, drop_all_bcast = 1, drop_all_mcast = 1;
	u8 accp_all_ucast = 0, accp_all_bcast = 0, accp_all_mcast = 0;
	u8 unmatched_unicast = 0;

	if (filters & BNX2X_PROMISCUOUS_MODE) {
		/* promiscuous - accept all, drop none */
		drop_all_ucast = drop_all_bcast = drop_all_mcast = 0;
		accp_all_ucast = accp_all_bcast = accp_all_mcast = 1;
	}
	if (filters & BNX2X_ACCEPT_UNICAST) {
		/* accept matched ucast */
		drop_all_ucast = 0;
	}
	if (filters & BNX2X_ACCEPT_MULTICAST) {
		/* accept matched mcast */
		drop_all_mcast = 0;
	}
	if (filters & BNX2X_ACCEPT_ALL_UNICAST) {
		/* accept all ucast */
		drop_all_ucast = 0;
		accp_all_ucast = 1;
	}
	if (filters & BNX2X_ACCEPT_ALL_MULTICAST) {
		/* accept all mcast */
		drop_all_mcast = 0;
		accp_all_mcast = 1;
	}
	if (filters & BNX2X_ACCEPT_BROADCAST) {
		/* accept (all) bcast */
		drop_all_bcast = 0;
		accp_all_bcast = 1;
	}

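	/*
	 * Each filter word below keeps one bit per client; the ternaries
	 * set or clear only this client's bit (mask) according to the
	 * flags decoded above, leaving the other clients untouched.
	 */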
	bp->mac_filters.ucast_drop_all = drop_all_ucast ?
		bp->mac_filters.ucast_drop_all | mask :
		bp->mac_filters.ucast_drop_all & ~mask;

	bp->mac_filters.mcast_drop_all = drop_all_mcast ?
		bp->mac_filters.mcast_drop_all | mask :
		bp->mac_filters.mcast_drop_all & ~mask;

	bp->mac_filters.bcast_drop_all = drop_all_bcast ?
		bp->mac_filters.bcast_drop_all | mask :
		bp->mac_filters.bcast_drop_all & ~mask;

	bp->mac_filters.ucast_accept_all = accp_all_ucast ?
		bp->mac_filters.ucast_accept_all | mask :
		bp->mac_filters.ucast_accept_all & ~mask;

	bp->mac_filters.mcast_accept_all = accp_all_mcast ?
		bp->mac_filters.mcast_accept_all | mask :
		bp->mac_filters.mcast_accept_all & ~mask;

	bp->mac_filters.bcast_accept_all = accp_all_bcast ?
		bp->mac_filters.bcast_accept_all | mask :
		bp->mac_filters.bcast_accept_all & ~mask;

	bp->mac_filters.unmatched_unicast = unmatched_unicast ?
		bp->mac_filters.unmatched_unicast | mask :
		bp->mac_filters.unmatched_unicast & ~mask;
}

void bnx2x_func_init(struct bnx2x *bp, struct bnx2x_func_init_params *p)
{
	struct tstorm_eth_function_common_config tcfg = {0};
	u16 rss_flgs;

	/* tpa */
	if (p->func_flgs & FUNC_FLG_TPA)
		tcfg.config_flags |=
			TSTORM_ETH_FUNCTION_COMMON_CONFIG_ENABLE_TPA;

	/* set rss flags */
	rss_flgs = (p->rss->mode <<
		    TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_MODE_SHIFT);

	if (p->rss->cap & RSS_IPV4_CAP)
		rss_flgs |= RSS_IPV4_CAP_MASK;
	if (p->rss->cap & RSS_IPV4_TCP_CAP)
		rss_flgs |= RSS_IPV4_TCP_CAP_MASK;
	if (p->rss->cap & RSS_IPV6_CAP)
		rss_flgs |= RSS_IPV6_CAP_MASK;
	if (p->rss->cap & RSS_IPV6_TCP_CAP)
		rss_flgs |= RSS_IPV6_TCP_CAP_MASK;

	tcfg.config_flags |= rss_flgs;
	tcfg.rss_result_mask = p->rss->result_mask;

	storm_memset_func_cfg(bp, &tcfg, p->func_id);

	/* Enable the function in the FW */
	storm_memset_vf_to_pf(bp, p->func_id, p->pf_id);
	storm_memset_func_en(bp, p->func_id, 1);

	/* statistics */
	if (p->func_flgs & FUNC_FLG_STATS) {
		struct stats_indication_flags stats_flags = {0};
		stats_flags.collect_eth = 1;

		storm_memset_xstats_flags(bp, &stats_flags, p->func_id);
		storm_memset_xstats_addr(bp, p->fw_stat_map, p->func_id);

		storm_memset_tstats_flags(bp, &stats_flags, p->func_id);
		storm_memset_tstats_addr(bp, p->fw_stat_map, p->func_id);

		storm_memset_ustats_flags(bp, &stats_flags, p->func_id);
		storm_memset_ustats_addr(bp, p->fw_stat_map, p->func_id);

		storm_memset_cstats_flags(bp, &stats_flags, p->func_id);
		storm_memset_cstats_addr(bp, p->fw_stat_map, p->func_id);
	}

	/* spq */
	if (p->func_flgs & FUNC_FLG_SPQ) {
		storm_memset_spq_addr(bp, p->spq_map, p->func_id);
		REG_WR(bp, XSEM_REG_FAST_MEMORY +
		       XSTORM_SPQ_PROD_OFFSET(p->func_id), p->spq_prod);
	}
}

static inline u16 bnx2x_get_cl_flags(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp)
{
	u16 flags = 0;

	/* calculate queue flags */
	flags |= QUEUE_FLG_CACHE_ALIGN;
	flags |= QUEUE_FLG_HC;
	flags |= IS_MF(bp) ? QUEUE_FLG_OV : 0;

	flags |= QUEUE_FLG_VLAN;
	DP(NETIF_MSG_IFUP, "vlan removal enabled\n");

	if (!fp->disable_tpa)
		flags |= QUEUE_FLG_TPA;

	flags |= QUEUE_FLG_STATS;

	return flags;
}

static void bnx2x_pf_rx_cl_prep(struct bnx2x *bp,
	struct bnx2x_fastpath *fp, struct rxq_pause_params *pause,
	struct bnx2x_rxq_init_params *rxq_init)
{
	u16 max_sge = 0;
	u16 sge_sz = 0;
	u16 tpa_agg_size = 0;

	/* calculate queue flags */
	u16 flags = bnx2x_get_cl_flags(bp, fp);

	if (!fp->disable_tpa) {
		pause->sge_th_hi = 250;
		pause->sge_th_lo = 150;
		tpa_agg_size = min_t(u32,
			(min_t(u32, 8, MAX_SKB_FRAGS) *
			SGE_PAGE_SIZE * PAGES_PER_SGE), 0xffff);
		max_sge = SGE_PAGE_ALIGN(bp->dev->mtu) >>
			SGE_PAGE_SHIFT;
		max_sge = ((max_sge + PAGES_PER_SGE - 1) &
			  (~(PAGES_PER_SGE-1))) >> PAGES_PER_SGE_SHIFT;
		sge_sz = (u16)min_t(u32, SGE_PAGE_SIZE * PAGES_PER_SGE,
				    0xffff);
	}
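	/*
	 * Sizing above (illustrative): tpa_agg_size is capped at 0xffff
	 * bytes, and max_sge is the MTU rounded up to whole SGE pages,
	 * then converted to a count of SGE entries.
	 */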

	/* pause - not for e1 */
	if (!CHIP_IS_E1(bp)) {
		pause->bd_th_hi = 350;
		pause->bd_th_lo = 250;
		pause->rcq_th_hi = 350;
		pause->rcq_th_lo = 250;
		pause->sge_th_hi = 0;
		pause->sge_th_lo = 0;
		pause->pri_map = 1;
	}

	/* rxq setup */
	rxq_init->flags = flags;
	rxq_init->cxt = &bp->context.vcxt[fp->cid].eth;
	rxq_init->dscr_map = fp->rx_desc_mapping;
	rxq_init->sge_map = fp->rx_sge_mapping;
	rxq_init->rcq_map = fp->rx_comp_mapping;
	rxq_init->rcq_np_map = fp->rx_comp_mapping + BCM_PAGE_SIZE;
	rxq_init->mtu = bp->dev->mtu;
	rxq_init->buf_sz = bp->rx_buf_size;
	rxq_init->cl_qzone_id = fp->cl_qzone_id;
	rxq_init->cl_id = fp->cl_id;
	rxq_init->spcl_id = fp->cl_id;
	rxq_init->stat_id = fp->cl_id;
	rxq_init->tpa_agg_sz = tpa_agg_size;
	rxq_init->sge_buf_sz = sge_sz;
	rxq_init->max_sges_pkt = max_sge;
	rxq_init->cache_line_log = BNX2X_RX_ALIGN_SHIFT;
	rxq_init->fw_sb_id = fp->fw_sb_id;

	rxq_init->sb_cq_index = U_SB_ETH_RX_CQ_INDEX;

	rxq_init->cid = HW_CID(bp, fp->cid);

	rxq_init->hc_rate = bp->rx_ticks ? (1000000 / bp->rx_ticks) : 0;
}

static void bnx2x_pf_tx_cl_prep(struct bnx2x *bp,
	struct bnx2x_fastpath *fp, struct bnx2x_txq_init_params *txq_init)
{
	u16 flags = bnx2x_get_cl_flags(bp, fp);

	txq_init->flags = flags;
	txq_init->cxt = &bp->context.vcxt[fp->cid].eth;
	txq_init->dscr_map = fp->tx_desc_mapping;
	txq_init->stat_id = fp->cl_id;
	txq_init->cid = HW_CID(bp, fp->cid);
	txq_init->sb_cq_index = C_SB_ETH_TX_CQ_INDEX;
	txq_init->traffic_type = LLFC_TRAFFIC_TYPE_NW;
	txq_init->fw_sb_id = fp->fw_sb_id;
	txq_init->hc_rate = bp->tx_ticks ? (1000000 / bp->tx_ticks) : 0;
}

void bnx2x_pf_init(struct bnx2x *bp)
{
	struct bnx2x_func_init_params func_init = {0};
	struct bnx2x_rss_params rss = {0};
	struct event_ring_data eq_data = { {0} };
	u16 flags;

	/* pf specific setups */
	if (!CHIP_IS_E1(bp))
		storm_memset_ov(bp, bp->mf_ov, BP_FUNC(bp));

	if (CHIP_IS_E2(bp)) {
		/* reset IGU PF statistics: MSIX + ATTN */
		/* PF */
		REG_WR(bp, IGU_REG_STATISTIC_NUM_MESSAGE_SENT +
			   BNX2X_IGU_STAS_MSG_VF_CNT*4 +
			   (CHIP_MODE_IS_4_PORT(bp) ?
				BP_FUNC(bp) : BP_VN(bp))*4, 0);
		/* ATTN */
		REG_WR(bp, IGU_REG_STATISTIC_NUM_MESSAGE_SENT +
			   BNX2X_IGU_STAS_MSG_VF_CNT*4 +
			   BNX2X_IGU_STAS_MSG_PF_CNT*4 +
			   (CHIP_MODE_IS_4_PORT(bp) ?
				BP_FUNC(bp) : BP_VN(bp))*4, 0);
	}

	/* function setup flags */
	flags = (FUNC_FLG_STATS | FUNC_FLG_LEADING | FUNC_FLG_SPQ);

	if (CHIP_IS_E1x(bp))
		flags |= (bp->flags & TPA_ENABLE_FLAG) ? FUNC_FLG_TPA : 0;
	else
		flags |= FUNC_FLG_TPA;

	/* function setup */

	/*
	 * Although RSS is meaningless when there is a single HW queue we
	 * still need it enabled in order to have HW Rx hash generated.
	 */
	rss.cap = (RSS_IPV4_CAP | RSS_IPV4_TCP_CAP |
		   RSS_IPV6_CAP | RSS_IPV6_TCP_CAP);
	rss.mode = bp->multi_mode;
	rss.result_mask = MULTI_MASK;
	func_init.rss = &rss;

	func_init.func_flgs = flags;
	func_init.pf_id = BP_FUNC(bp);
	func_init.func_id = BP_FUNC(bp);
	func_init.fw_stat_map = bnx2x_sp_mapping(bp, fw_stats);
	func_init.spq_map = bp->spq_mapping;
	func_init.spq_prod = bp->spq_prod_idx;

	bnx2x_func_init(bp, &func_init);

	memset(&(bp->cmng), 0, sizeof(struct cmng_struct_per_port));

	/*
	   Congestion management values depend on the link rate.
	   There is no active link, so the initial link rate is set to 10 Gbps.
	   When the link comes up, the congestion management values are
	   re-calculated according to the actual link rate.
	 */
	bp->link_vars.line_speed = SPEED_10000;
	bnx2x_cmng_fns_init(bp, true, bnx2x_get_cmng_fns_mode(bp));

	/* Only the PMF sets the HW */
	if (bp->port.pmf)
		storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp));

	/* no rx until link is up */
	bp->rx_mode = BNX2X_RX_MODE_NONE;
	bnx2x_set_storm_rx_mode(bp);

	/* init Event Queue */
	eq_data.base_addr.hi = U64_HI(bp->eq_mapping);
	eq_data.base_addr.lo = U64_LO(bp->eq_mapping);
	eq_data.producer = bp->eq_prod;
	eq_data.index_id = HC_SP_INDEX_EQ_CONS;
	eq_data.sb_id = DEF_SB_ID;
	storm_memset_eq_data(bp, &eq_data, BP_FUNC(bp));
}


static void bnx2x_e1h_disable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);

	netif_tx_disable(bp->dev);

	REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);

	netif_carrier_off(bp->dev);
}

static void bnx2x_e1h_enable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);

	REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);

	/* Only the Tx queues should be re-enabled */
	netif_tx_wake_all_queues(bp->dev);

	/*
	 * Should not call netif_carrier_on since it will be called if the link
	 * is up when checking for link state
	 */
}

static void bnx2x_dcc_event(struct bnx2x *bp, u32 dcc_event)
{
	DP(BNX2X_MSG_MCP, "dcc_event 0x%x\n", dcc_event);

	if (dcc_event & DRV_STATUS_DCC_DISABLE_ENABLE_PF) {

		/*
		 * This is the only place besides the function initialization
		 * where the bp->flags can change so it is done without any
		 * locks
		 */
		if (bp->mf_config[BP_VN(bp)] & FUNC_MF_CFG_FUNC_DISABLED) {
			DP(NETIF_MSG_IFDOWN, "mf_cfg function disabled\n");
			bp->flags |= MF_FUNC_DIS;

			bnx2x_e1h_disable(bp);
		} else {
			DP(NETIF_MSG_IFUP, "mf_cfg function enabled\n");
			bp->flags &= ~MF_FUNC_DIS;

			bnx2x_e1h_enable(bp);
		}
		dcc_event &= ~DRV_STATUS_DCC_DISABLE_ENABLE_PF;
	}
	if (dcc_event & DRV_STATUS_DCC_BANDWIDTH_ALLOCATION) {

		bnx2x_cmng_fns_init(bp, true, CMNG_FNS_MINMAX);
		bnx2x_link_sync_notify(bp);
		storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp));
		dcc_event &= ~DRV_STATUS_DCC_BANDWIDTH_ALLOCATION;
	}

	/* Report results to MCP */
	if (dcc_event)
		bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_FAILURE, 0);
	else
		bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_OK, 0);
}

/* must be called under the spq lock */
static inline struct eth_spe *bnx2x_sp_get_next(struct bnx2x *bp)
{
	struct eth_spe *next_spe = bp->spq_prod_bd;

	if (bp->spq_prod_bd == bp->spq_last_bd) {
		bp->spq_prod_bd = bp->spq;
		bp->spq_prod_idx = 0;
		DP(NETIF_MSG_TIMER, "end of spq\n");
	} else {
		bp->spq_prod_bd++;
		bp->spq_prod_idx++;
	}
	return next_spe;
}

/* must be called under the spq lock */
static inline void bnx2x_sp_prod_update(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);

	/* Make sure that BD data is updated before writing the producer */
	wmb();

	REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
		 bp->spq_prod_idx);
	mmiowb();
}
2640
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002641/* the slow path queue is odd since completions arrive on the fastpath ring */
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002642int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00002643 u32 data_hi, u32 data_lo, int common)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002644{
Michael Chan28912902009-10-10 13:46:53 +00002645 struct eth_spe *spe;
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002646 u16 type;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002647
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002648#ifdef BNX2X_STOP_ON_ERROR
2649 if (unlikely(bp->panic))
2650 return -EIO;
2651#endif
2652
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002653 spin_lock_bh(&bp->spq_lock);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002654
Dmitry Kravkov8fe23fb2010-10-06 03:27:41 +00002655 if (!atomic_read(&bp->spq_left)) {
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002656 BNX2X_ERR("BUG! SPQ ring full!\n");
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002657 spin_unlock_bh(&bp->spq_lock);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002658 bnx2x_panic();
2659 return -EBUSY;
2660 }
Eliezer Tamirf1410642008-02-28 11:51:50 -08002661
Michael Chan28912902009-10-10 13:46:53 +00002662 spe = bnx2x_sp_get_next(bp);
2663
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002664 /* CID needs port number to be encoded int it */
Michael Chan28912902009-10-10 13:46:53 +00002665 spe->hdr.conn_and_cmd_data =
Vladislav Zolotarovcdaa7cb2010-04-19 01:13:57 +00002666 cpu_to_le32((command << SPE_HDR_CMD_ID_SHIFT) |
2667 HW_CID(bp, cid));
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002668
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002669 if (common)
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002670 /* Common ramrods:
2671 * FUNC_START, FUNC_STOP, CFC_DEL, STATS, SET_MAC
2672 * TRAFFIC_STOP, TRAFFIC_START
2673 */
2674 type = (NONE_CONNECTION_TYPE << SPE_HDR_CONN_TYPE_SHIFT)
2675 & SPE_HDR_CONN_TYPE;
2676 else
2677 /* ETH ramrods: SETUP, HALT */
2678 type = (ETH_CONNECTION_TYPE << SPE_HDR_CONN_TYPE_SHIFT)
2679 & SPE_HDR_CONN_TYPE;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002680
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002681 type |= ((BP_FUNC(bp) << SPE_HDR_FUNCTION_ID_SHIFT) &
2682 SPE_HDR_FUNCTION_ID);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002683
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002684 spe->hdr.type = cpu_to_le16(type);
2685
2686 spe->data.update_data_addr.hi = cpu_to_le32(data_hi);
2687 spe->data.update_data_addr.lo = cpu_to_le32(data_lo);
2688
2689	/* the stats ramrod has its own slot on the spq */
2690	if (command != RAMROD_CMD_ID_COMMON_STAT_QUERY)
2691		/* It's ok if the actual decrement is issued towards the memory
2692		 * somewhere between the spin_lock and spin_unlock. Thus no
2693		 * more explicit memory barrier is needed.
2694 */
Dmitry Kravkov8fe23fb2010-10-06 03:27:41 +00002695 atomic_dec(&bp->spq_left);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002696
Vladislav Zolotarovcdaa7cb2010-04-19 01:13:57 +00002697 DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002698 "SPQE[%x] (%x:%x) command %d hw_cid %x data (%x:%x) "
2699 "type(0x%x) left %x\n",
Vladislav Zolotarovcdaa7cb2010-04-19 01:13:57 +00002700 bp->spq_prod_idx, (u32)U64_HI(bp->spq_mapping),
2701 (u32)(U64_LO(bp->spq_mapping) +
2702 (void *)bp->spq_prod_bd - (void *)bp->spq), command,
Dmitry Kravkov8fe23fb2010-10-06 03:27:41 +00002703 HW_CID(bp, cid), data_hi, data_lo, type, atomic_read(&bp->spq_left));
Vladislav Zolotarovcdaa7cb2010-04-19 01:13:57 +00002704
Michael Chan28912902009-10-10 13:46:53 +00002705 bnx2x_sp_prod_update(bp);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002706 spin_unlock_bh(&bp->spq_lock);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002707 return 0;
2708}
2709
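/* Illustrative usage only -- the command/CID pairing depends on the
 * ramrod being posted; a caller halting an ETH client would look
 * roughly like:
 *
 *	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, fp->cid, 0, fp->cl_id, 0);
 *
 * and would then wait for the corresponding completion to arrive on
 * the fastpath ring.
 */
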
2710/* acquire split MCP access lock register */
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07002711static int bnx2x_acquire_alr(struct bnx2x *bp)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002712{
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00002713 u32 j, val;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002714 int rc = 0;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002715
2716 might_sleep();
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00002717 for (j = 0; j < 1000; j++) {
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002718 val = (1UL << 31);
2719 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2720 val = REG_RD(bp, GRCBASE_MCP + 0x9c);
2721 if (val & (1L << 31))
2722 break;
2723
2724 msleep(5);
2725 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002726 if (!(val & (1L << 31))) {
Eilon Greenstein19680c42008-08-13 15:47:33 -07002727 BNX2X_ERR("Cannot acquire MCP access lock register\n");
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002728 rc = -EBUSY;
2729 }
2730
2731 return rc;
2732}
2733
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07002734/* release split MCP access lock register */
2735static void bnx2x_release_alr(struct bnx2x *bp)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002736{
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00002737 REG_WR(bp, GRCBASE_MCP + 0x9c, 0);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002738}
2739
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002740#define BNX2X_DEF_SB_ATT_IDX 0x0001
2741#define BNX2X_DEF_SB_IDX 0x0002
2742
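/* Snapshot the default status block indices written by the chip and
 * report what changed: BNX2X_DEF_SB_ATT_IDX for a new attention bits
 * index, BNX2X_DEF_SB_IDX for a new slow path event index.
 */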
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002743static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
2744{
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002745 struct host_sp_status_block *def_sb = bp->def_status_blk;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002746 u16 rc = 0;
2747
2748 barrier(); /* status block is written to by the chip */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002749 if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
2750 bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002751 rc |= BNX2X_DEF_SB_ATT_IDX;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002752 }
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002753
2754 if (bp->def_idx != def_sb->sp_sb.running_index) {
2755 bp->def_idx = def_sb->sp_sb.running_index;
2756 rc |= BNX2X_DEF_SB_IDX;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002757 }
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002758
2759	/* Do not reorder: the index reads must complete before handling */
2760 barrier();
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002761 return rc;
2762}
2763
2764/*
2765 * slow path service functions
2766 */
2767
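/* Handle newly asserted attention bits: mask them in the AEU (under the
 * HW lock), service the hard-wired sources -- for the NIG attention this
 * means taking the PHY lock and running the link handler -- then write
 * the asserted bits to the HC/IGU "set" command register and restore the
 * NIG mask.
 */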
2768static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
2769{
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002770 int port = BP_PORT(bp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002771 u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
2772 MISC_REG_AEU_MASK_ATTN_FUNC_0;
Eliezer Tamir877e9aa2008-02-28 11:55:53 -08002773 u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
2774 NIG_REG_MASK_INTERRUPT_PORT0;
Eilon Greenstein3fcaf2e2008-08-13 15:50:45 -07002775 u32 aeu_mask;
Eilon Greenstein87942b42009-02-12 08:36:49 +00002776 u32 nig_mask = 0;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002777 u32 reg_addr;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002778
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002779 if (bp->attn_state & asserted)
2780 BNX2X_ERR("IGU ERROR\n");
2781
Eilon Greenstein3fcaf2e2008-08-13 15:50:45 -07002782 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2783 aeu_mask = REG_RD(bp, aeu_addr);
2784
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002785 DP(NETIF_MSG_HW, "aeu_mask %x newly asserted %x\n",
Eilon Greenstein3fcaf2e2008-08-13 15:50:45 -07002786 aeu_mask, asserted);
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00002787 aeu_mask &= ~(asserted & 0x3ff);
Eilon Greenstein3fcaf2e2008-08-13 15:50:45 -07002788 DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002789
Eilon Greenstein3fcaf2e2008-08-13 15:50:45 -07002790 REG_WR(bp, aeu_addr, aeu_mask);
2791 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002792
Eilon Greenstein3fcaf2e2008-08-13 15:50:45 -07002793 DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002794 bp->attn_state |= asserted;
Eilon Greenstein3fcaf2e2008-08-13 15:50:45 -07002795 DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002796
2797 if (asserted & ATTN_HARD_WIRED_MASK) {
2798 if (asserted & ATTN_NIG_FOR_FUNC) {
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002799
Eilon Greensteina5e9a7c2009-01-14 21:26:01 -08002800 bnx2x_acquire_phy_lock(bp);
2801
Eliezer Tamir877e9aa2008-02-28 11:55:53 -08002802 /* save nig interrupt mask */
Eilon Greenstein87942b42009-02-12 08:36:49 +00002803 nig_mask = REG_RD(bp, nig_int_mask_addr);
Eliezer Tamir877e9aa2008-02-28 11:55:53 -08002804 REG_WR(bp, nig_int_mask_addr, 0);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002805
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07002806 bnx2x_link_attn(bp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002807
2808 /* handle unicore attn? */
2809 }
2810 if (asserted & ATTN_SW_TIMER_4_FUNC)
2811 DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");
2812
2813 if (asserted & GPIO_2_FUNC)
2814 DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");
2815
2816 if (asserted & GPIO_3_FUNC)
2817 DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");
2818
2819 if (asserted & GPIO_4_FUNC)
2820 DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");
2821
2822 if (port == 0) {
2823 if (asserted & ATTN_GENERAL_ATTN_1) {
2824 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
2825 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
2826 }
2827 if (asserted & ATTN_GENERAL_ATTN_2) {
2828 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
2829 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
2830 }
2831 if (asserted & ATTN_GENERAL_ATTN_3) {
2832 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
2833 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
2834 }
2835 } else {
2836 if (asserted & ATTN_GENERAL_ATTN_4) {
2837 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
2838 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
2839 }
2840 if (asserted & ATTN_GENERAL_ATTN_5) {
2841 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
2842 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
2843 }
2844 if (asserted & ATTN_GENERAL_ATTN_6) {
2845 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
2846 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
2847 }
2848 }
2849
2850 } /* if hardwired */
2851
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002852 if (bp->common.int_block == INT_BLOCK_HC)
2853 reg_addr = (HC_REG_COMMAND_REG + port*32 +
2854 COMMAND_REG_ATTN_BITS_SET);
2855 else
2856 reg_addr = (BAR_IGU_INTMEM + IGU_CMD_ATTN_BIT_SET_UPPER*8);
2857
2858 DP(NETIF_MSG_HW, "about to mask 0x%08x at %s addr 0x%x\n", asserted,
2859 (bp->common.int_block == INT_BLOCK_HC) ? "HC" : "IGU", reg_addr);
2860 REG_WR(bp, reg_addr, asserted);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002861
2862 /* now set back the mask */
Eilon Greensteina5e9a7c2009-01-14 21:26:01 -08002863 if (asserted & ATTN_NIG_FOR_FUNC) {
Eilon Greenstein87942b42009-02-12 08:36:49 +00002864 REG_WR(bp, nig_int_mask_addr, nig_mask);
Eilon Greensteina5e9a7c2009-01-14 21:26:01 -08002865 bnx2x_release_phy_lock(bp);
2866 }
Eliezer Tamir877e9aa2008-02-28 11:55:53 -08002867}
2868
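/* Record a fan failure: latch the FAILURE external PHY type into shmem
 * so subsequent driver loads see the fault, and warn that the card is
 * being shut down to prevent permanent damage.
 */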
Eilon Greensteinfd4ef402009-07-21 05:47:27 +00002869static inline void bnx2x_fan_failure(struct bnx2x *bp)
2870{
2871 int port = BP_PORT(bp);
Yaniv Rosnerb7737c92010-09-07 11:40:54 +00002872 u32 ext_phy_config;
Eilon Greensteinfd4ef402009-07-21 05:47:27 +00002873 /* mark the failure */
Yaniv Rosnerb7737c92010-09-07 11:40:54 +00002874 ext_phy_config =
2875 SHMEM_RD(bp,
2876 dev_info.port_hw_config[port].external_phy_config);
2877
2878 ext_phy_config &= ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
2879 ext_phy_config |= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
Eilon Greensteinfd4ef402009-07-21 05:47:27 +00002880 SHMEM_WR(bp, dev_info.port_hw_config[port].external_phy_config,
Yaniv Rosnerb7737c92010-09-07 11:40:54 +00002881 ext_phy_config);
Eilon Greensteinfd4ef402009-07-21 05:47:27 +00002882
2883 /* log the failure */
Vladislav Zolotarovcdaa7cb2010-04-19 01:13:57 +00002884 netdev_err(bp->dev, "Fan Failure on Network Controller has caused"
2885 " the driver to shutdown the card to prevent permanent"
2886 " damage. Please contact OEM Support for assistance\n");
Eilon Greensteinfd4ef402009-07-21 05:47:27 +00002887}
Eilon Greensteinab6ad5a2009-08-12 08:24:29 +00002888
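/* Group 0 deasserted attentions: SPIO5 signals a fan failure, the GPIO3
 * bits signal a PHY module detect event, and any bit in
 * HW_INTERRUT_ASSERT_SET_0 is a fatal HW block error.
 */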
Eliezer Tamir877e9aa2008-02-28 11:55:53 -08002889static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
2890{
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002891 int port = BP_PORT(bp);
Eliezer Tamir877e9aa2008-02-28 11:55:53 -08002892 int reg_offset;
Yaniv Rosnerd90d96b2010-09-07 11:41:04 +00002893 u32 val;
Eliezer Tamir877e9aa2008-02-28 11:55:53 -08002894
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002895 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
2896 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
Eliezer Tamir877e9aa2008-02-28 11:55:53 -08002897
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002898 if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {
Eliezer Tamir877e9aa2008-02-28 11:55:53 -08002899
2900 val = REG_RD(bp, reg_offset);
2901 val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
2902 REG_WR(bp, reg_offset, val);
2903
2904 BNX2X_ERR("SPIO5 hw attention\n");
2905
Eilon Greensteinfd4ef402009-07-21 05:47:27 +00002906 /* Fan failure attention */
Yaniv Rosnerd90d96b2010-09-07 11:41:04 +00002907 bnx2x_hw_reset_phy(&bp->link_params);
Eilon Greensteinfd4ef402009-07-21 05:47:27 +00002908 bnx2x_fan_failure(bp);
Eliezer Tamir877e9aa2008-02-28 11:55:53 -08002909 }
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002910
Eilon Greenstein589abe32009-02-12 08:36:55 +00002911 if (attn & (AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 |
2912 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1)) {
2913 bnx2x_acquire_phy_lock(bp);
2914 bnx2x_handle_module_detect_int(&bp->link_params);
2915 bnx2x_release_phy_lock(bp);
2916 }
2917
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002918 if (attn & HW_INTERRUT_ASSERT_SET_0) {
2919
2920 val = REG_RD(bp, reg_offset);
2921 val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
2922 REG_WR(bp, reg_offset, val);
2923
2924 BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
Eilon Greenstein0fc5d002009-08-12 08:24:05 +00002925 (u32)(attn & HW_INTERRUT_ASSERT_SET_0));
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002926 bnx2x_panic();
2927 }
Eliezer Tamir877e9aa2008-02-28 11:55:53 -08002928}
2929
2930static inline void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
2931{
2932 u32 val;
2933
Eilon Greenstein0626b892009-02-12 08:38:14 +00002934 if (attn & AEU_INPUTS_ATTN_BITS_DOORBELLQ_HW_INTERRUPT) {
Eliezer Tamir877e9aa2008-02-28 11:55:53 -08002935
2936 val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
2937 BNX2X_ERR("DB hw attention 0x%x\n", val);
2938 /* DORQ discard attention */
2939 if (val & 0x2)
2940 BNX2X_ERR("FATAL error from DORQ\n");
2941 }
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002942
2943 if (attn & HW_INTERRUT_ASSERT_SET_1) {
2944
2945 int port = BP_PORT(bp);
2946 int reg_offset;
2947
2948 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
2949 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);
2950
2951 val = REG_RD(bp, reg_offset);
2952 val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
2953 REG_WR(bp, reg_offset, val);
2954
2955 BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
Eilon Greenstein0fc5d002009-08-12 08:24:05 +00002956 (u32)(attn & HW_INTERRUT_ASSERT_SET_1));
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002957 bnx2x_panic();
2958 }
Eliezer Tamir877e9aa2008-02-28 11:55:53 -08002959}
2960
2961static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
2962{
2963 u32 val;
2964
2965 if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {
2966
2967 val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
2968 BNX2X_ERR("CFC hw attention 0x%x\n", val);
2969 /* CFC error attention */
2970 if (val & 0x2)
2971 BNX2X_ERR("FATAL error from CFC\n");
2972 }
2973
2974 if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {
2975
2976 val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
2977 BNX2X_ERR("PXP hw attention 0x%x\n", val);
2978 /* RQ_USDMDP_FIFO_OVERFLOW */
2979 if (val & 0x18000)
2980 BNX2X_ERR("FATAL error from PXP\n");
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002981 if (CHIP_IS_E2(bp)) {
2982 val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_1);
2983 BNX2X_ERR("PXP hw attention-1 0x%x\n", val);
2984 }
Eliezer Tamir877e9aa2008-02-28 11:55:53 -08002985 }
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002986
2987 if (attn & HW_INTERRUT_ASSERT_SET_2) {
2988
2989 int port = BP_PORT(bp);
2990 int reg_offset;
2991
2992 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
2993 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);
2994
2995 val = REG_RD(bp, reg_offset);
2996 val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
2997 REG_WR(bp, reg_offset, val);
2998
2999 BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
Eilon Greenstein0fc5d002009-08-12 08:24:05 +00003000 (u32)(attn & HW_INTERRUT_ASSERT_SET_2));
Eilon Greenstein34f80b02008-06-23 20:33:01 -07003001 bnx2x_panic();
3002 }
Eliezer Tamir877e9aa2008-02-28 11:55:53 -08003003}
3004
3005static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
3006{
Eilon Greenstein34f80b02008-06-23 20:33:01 -07003007 u32 val;
3008
Eliezer Tamir877e9aa2008-02-28 11:55:53 -08003009 if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {
3010
Eilon Greenstein34f80b02008-06-23 20:33:01 -07003011 if (attn & BNX2X_PMF_LINK_ASSERT) {
3012 int func = BP_FUNC(bp);
3013
3014 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003015 bp->mf_config[BP_VN(bp)] = MF_CFG_RD(bp,
3016 func_mf_config[BP_ABS_FUNC(bp)].config);
3017 val = SHMEM_RD(bp,
3018 func_mb[BP_FW_MB_IDX(bp)].drv_status);
Eilon Greenstein2691d512009-08-12 08:22:08 +00003019 if (val & DRV_STATUS_DCC_EVENT_MASK)
3020 bnx2x_dcc_event(bp,
3021 (val & DRV_STATUS_DCC_EVENT_MASK));
Eilon Greenstein34f80b02008-06-23 20:33:01 -07003022 bnx2x__link_status_update(bp);
Eilon Greenstein2691d512009-08-12 08:22:08 +00003023 if ((bp->port.pmf == 0) && (val & DRV_STATUS_PMF))
Eilon Greenstein34f80b02008-06-23 20:33:01 -07003024 bnx2x_pmf_update(bp);
3025
3026 } else if (attn & BNX2X_MC_ASSERT_BITS) {
Eliezer Tamir877e9aa2008-02-28 11:55:53 -08003027
3028 BNX2X_ERR("MC assert!\n");
3029 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
3030 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
3031 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
3032 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);
3033 bnx2x_panic();
3034
3035 } else if (attn & BNX2X_MCP_ASSERT) {
3036
3037 BNX2X_ERR("MCP assert!\n");
3038 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07003039 bnx2x_fw_dump(bp);
Eliezer Tamir877e9aa2008-02-28 11:55:53 -08003040
3041 } else
3042 BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn);
3043 }
3044
3045 if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
Eilon Greenstein34f80b02008-06-23 20:33:01 -07003046 BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn);
3047 if (attn & BNX2X_GRC_TIMEOUT) {
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003048 val = CHIP_IS_E1(bp) ? 0 :
3049 REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07003050 BNX2X_ERR("GRC time-out 0x%08x\n", val);
3051 }
3052 if (attn & BNX2X_GRC_RSV) {
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003053 val = CHIP_IS_E1(bp) ? 0 :
3054 REG_RD(bp, MISC_REG_GRC_RSV_ATTN);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07003055 BNX2X_ERR("GRC reserved 0x%08x\n", val);
3056 }
Eliezer Tamir877e9aa2008-02-28 11:55:53 -08003057 REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
Eliezer Tamir877e9aa2008-02-28 11:55:53 -08003058 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003059}
3060
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00003061#define BNX2X_MISC_GEN_REG MISC_REG_GENERIC_POR_1
3062#define LOAD_COUNTER_BITS 16 /* Number of bits for load counter */
3063#define LOAD_COUNTER_MASK (((u32)0x1 << LOAD_COUNTER_BITS) - 1)
3064#define RESET_DONE_FLAG_MASK (~LOAD_COUNTER_MASK)
3065#define RESET_DONE_FLAG_SHIFT LOAD_COUNTER_BITS
3066#define CHIP_PARITY_SUPPORTED(bp) (CHIP_IS_E1(bp) || CHIP_IS_E1H(bp))
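
/* Recovery bookkeeping lives in a single generic register: bits [15:0]
 * count the functions that currently have the driver loaded and bit 16
 * (RESET_DONE_FLAG_SHIFT) is set while a global reset is in progress.
 */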
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00003067
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00003068/*
3069 * should be run under rtnl lock
3070 */
3071static inline void bnx2x_set_reset_done(struct bnx2x *bp)
3072{
3073 u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
3074 val &= ~(1 << RESET_DONE_FLAG_SHIFT);
3075 REG_WR(bp, BNX2X_MISC_GEN_REG, val);
3076 barrier();
3077 mmiowb();
3078}
3079
3080/*
3081 * should be run under rtnl lock
3082 */
3083static inline void bnx2x_set_reset_in_progress(struct bnx2x *bp)
3084{
3085 u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
3086 val |= (1 << 16);
3087 REG_WR(bp, BNX2X_MISC_GEN_REG, val);
3088 barrier();
3089 mmiowb();
3090}
3091
3092/*
3093 * should be run under rtnl lock
3094 */
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003095bool bnx2x_reset_is_done(struct bnx2x *bp)
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00003096{
3097 u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
3098 DP(NETIF_MSG_HW, "GEN_REG_VAL=0x%08x\n", val);
3099 return (val & RESET_DONE_FLAG_MASK) ? false : true;
3100}
3101
3102/*
3103 * should be run under rtnl lock
3104 */
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003105inline void bnx2x_inc_load_cnt(struct bnx2x *bp)
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00003106{
3107 u32 val1, val = REG_RD(bp, BNX2X_MISC_GEN_REG);
3108
3109 DP(NETIF_MSG_HW, "Old GEN_REG_VAL=0x%08x\n", val);
3110
3111 val1 = ((val & LOAD_COUNTER_MASK) + 1) & LOAD_COUNTER_MASK;
3112 REG_WR(bp, BNX2X_MISC_GEN_REG, (val & RESET_DONE_FLAG_MASK) | val1);
3113 barrier();
3114 mmiowb();
3115}
3116
3117/*
3118 * should be run under rtnl lock
3119 */
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003120u32 bnx2x_dec_load_cnt(struct bnx2x *bp)
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00003121{
3122 u32 val1, val = REG_RD(bp, BNX2X_MISC_GEN_REG);
3123
3124 DP(NETIF_MSG_HW, "Old GEN_REG_VAL=0x%08x\n", val);
3125
3126 val1 = ((val & LOAD_COUNTER_MASK) - 1) & LOAD_COUNTER_MASK;
3127 REG_WR(bp, BNX2X_MISC_GEN_REG, (val & RESET_DONE_FLAG_MASK) | val1);
3128 barrier();
3129 mmiowb();
3130
3131 return val1;
3132}
3133
3134/*
3135 * should be run under rtnl lock
3136 */
3137static inline u32 bnx2x_get_load_cnt(struct bnx2x *bp)
3138{
3139 return REG_RD(bp, BNX2X_MISC_GEN_REG) & LOAD_COUNTER_MASK;
3140}
3141
3142static inline void bnx2x_clear_load_cnt(struct bnx2x *bp)
3143{
3144 u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
3145 REG_WR(bp, BNX2X_MISC_GEN_REG, val & (~LOAD_COUNTER_MASK));
3146}
3147
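/* The helpers below each decode one AEU "after invert" parity signature
 * bit by bit and print the name of every HW block flagged; par_num
 * threads the comma placement across the four calls.
 */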
3148static inline void _print_next_block(int idx, const char *blk)
3149{
3150 if (idx)
3151 pr_cont(", ");
3152 pr_cont("%s", blk);
3153}
3154
3155static inline int bnx2x_print_blocks_with_parity0(u32 sig, int par_num)
3156{
3157 int i = 0;
3158 u32 cur_bit = 0;
3159 for (i = 0; sig; i++) {
3160 cur_bit = ((u32)0x1 << i);
3161 if (sig & cur_bit) {
3162 switch (cur_bit) {
3163 case AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR:
3164 _print_next_block(par_num++, "BRB");
3165 break;
3166 case AEU_INPUTS_ATTN_BITS_PARSER_PARITY_ERROR:
3167 _print_next_block(par_num++, "PARSER");
3168 break;
3169 case AEU_INPUTS_ATTN_BITS_TSDM_PARITY_ERROR:
3170 _print_next_block(par_num++, "TSDM");
3171 break;
3172 case AEU_INPUTS_ATTN_BITS_SEARCHER_PARITY_ERROR:
3173 _print_next_block(par_num++, "SEARCHER");
3174 break;
3175 case AEU_INPUTS_ATTN_BITS_TSEMI_PARITY_ERROR:
3176 _print_next_block(par_num++, "TSEMI");
3177 break;
3178 }
3179
3180 /* Clear the bit */
3181 sig &= ~cur_bit;
3182 }
3183 }
3184
3185 return par_num;
3186}
3187
3188static inline int bnx2x_print_blocks_with_parity1(u32 sig, int par_num)
3189{
3190 int i = 0;
3191 u32 cur_bit = 0;
3192 for (i = 0; sig; i++) {
3193 cur_bit = ((u32)0x1 << i);
3194 if (sig & cur_bit) {
3195 switch (cur_bit) {
3196 case AEU_INPUTS_ATTN_BITS_PBCLIENT_PARITY_ERROR:
3197 _print_next_block(par_num++, "PBCLIENT");
3198 break;
3199 case AEU_INPUTS_ATTN_BITS_QM_PARITY_ERROR:
3200 _print_next_block(par_num++, "QM");
3201 break;
3202 case AEU_INPUTS_ATTN_BITS_XSDM_PARITY_ERROR:
3203 _print_next_block(par_num++, "XSDM");
3204 break;
3205 case AEU_INPUTS_ATTN_BITS_XSEMI_PARITY_ERROR:
3206 _print_next_block(par_num++, "XSEMI");
3207 break;
3208 case AEU_INPUTS_ATTN_BITS_DOORBELLQ_PARITY_ERROR:
3209 _print_next_block(par_num++, "DOORBELLQ");
3210 break;
3211 case AEU_INPUTS_ATTN_BITS_VAUX_PCI_CORE_PARITY_ERROR:
3212 _print_next_block(par_num++, "VAUX PCI CORE");
3213 break;
3214 case AEU_INPUTS_ATTN_BITS_DEBUG_PARITY_ERROR:
3215 _print_next_block(par_num++, "DEBUG");
3216 break;
3217 case AEU_INPUTS_ATTN_BITS_USDM_PARITY_ERROR:
3218 _print_next_block(par_num++, "USDM");
3219 break;
3220 case AEU_INPUTS_ATTN_BITS_USEMI_PARITY_ERROR:
3221 _print_next_block(par_num++, "USEMI");
3222 break;
3223 case AEU_INPUTS_ATTN_BITS_UPB_PARITY_ERROR:
3224 _print_next_block(par_num++, "UPB");
3225 break;
3226 case AEU_INPUTS_ATTN_BITS_CSDM_PARITY_ERROR:
3227 _print_next_block(par_num++, "CSDM");
3228 break;
3229 }
3230
3231 /* Clear the bit */
3232 sig &= ~cur_bit;
3233 }
3234 }
3235
3236 return par_num;
3237}
3238
3239static inline int bnx2x_print_blocks_with_parity2(u32 sig, int par_num)
3240{
3241 int i = 0;
3242 u32 cur_bit = 0;
3243 for (i = 0; sig; i++) {
3244 cur_bit = ((u32)0x1 << i);
3245 if (sig & cur_bit) {
3246 switch (cur_bit) {
3247 case AEU_INPUTS_ATTN_BITS_CSEMI_PARITY_ERROR:
3248 _print_next_block(par_num++, "CSEMI");
3249 break;
3250 case AEU_INPUTS_ATTN_BITS_PXP_PARITY_ERROR:
3251 _print_next_block(par_num++, "PXP");
3252 break;
3253 case AEU_IN_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR:
3254 _print_next_block(par_num++,
3255 "PXPPCICLOCKCLIENT");
3256 break;
3257 case AEU_INPUTS_ATTN_BITS_CFC_PARITY_ERROR:
3258 _print_next_block(par_num++, "CFC");
3259 break;
3260 case AEU_INPUTS_ATTN_BITS_CDU_PARITY_ERROR:
3261 _print_next_block(par_num++, "CDU");
3262 break;
3263 case AEU_INPUTS_ATTN_BITS_IGU_PARITY_ERROR:
3264 _print_next_block(par_num++, "IGU");
3265 break;
3266 case AEU_INPUTS_ATTN_BITS_MISC_PARITY_ERROR:
3267 _print_next_block(par_num++, "MISC");
3268 break;
3269 }
3270
3271 /* Clear the bit */
3272 sig &= ~cur_bit;
3273 }
3274 }
3275
3276 return par_num;
3277}
3278
3279static inline int bnx2x_print_blocks_with_parity3(u32 sig, int par_num)
3280{
3281 int i = 0;
3282 u32 cur_bit = 0;
3283 for (i = 0; sig; i++) {
3284 cur_bit = ((u32)0x1 << i);
3285 if (sig & cur_bit) {
3286 switch (cur_bit) {
3287 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY:
3288 _print_next_block(par_num++, "MCP ROM");
3289 break;
3290 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY:
3291 _print_next_block(par_num++, "MCP UMP RX");
3292 break;
3293 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY:
3294 _print_next_block(par_num++, "MCP UMP TX");
3295 break;
3296 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY:
3297 _print_next_block(par_num++, "MCP SCPAD");
3298 break;
3299 }
3300
3301 /* Clear the bit */
3302 sig &= ~cur_bit;
3303 }
3304 }
3305
3306 return par_num;
3307}
3308
3309static inline bool bnx2x_parity_attn(struct bnx2x *bp, u32 sig0, u32 sig1,
3310 u32 sig2, u32 sig3)
3311{
3312 if ((sig0 & HW_PRTY_ASSERT_SET_0) || (sig1 & HW_PRTY_ASSERT_SET_1) ||
3313 (sig2 & HW_PRTY_ASSERT_SET_2) || (sig3 & HW_PRTY_ASSERT_SET_3)) {
3314 int par_num = 0;
3315 DP(NETIF_MSG_HW, "Was parity error: HW block parity attention: "
3316 "[0]:0x%08x [1]:0x%08x "
3317 "[2]:0x%08x [3]:0x%08x\n",
3318 sig0 & HW_PRTY_ASSERT_SET_0,
3319 sig1 & HW_PRTY_ASSERT_SET_1,
3320 sig2 & HW_PRTY_ASSERT_SET_2,
3321 sig3 & HW_PRTY_ASSERT_SET_3);
3322 printk(KERN_ERR"%s: Parity errors detected in blocks: ",
3323 bp->dev->name);
3324 par_num = bnx2x_print_blocks_with_parity0(
3325 sig0 & HW_PRTY_ASSERT_SET_0, par_num);
3326 par_num = bnx2x_print_blocks_with_parity1(
3327 sig1 & HW_PRTY_ASSERT_SET_1, par_num);
3328 par_num = bnx2x_print_blocks_with_parity2(
3329 sig2 & HW_PRTY_ASSERT_SET_2, par_num);
3330 par_num = bnx2x_print_blocks_with_parity3(
3331 sig3 & HW_PRTY_ASSERT_SET_3, par_num);
3332 printk("\n");
3333 return true;
3334 } else
3335 return false;
3336}
3337
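/* Read the four "after invert" AEU registers for this port and report
 * whether any parity bit is set.
 */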
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003338bool bnx2x_chk_parity_attn(struct bnx2x *bp)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003339{
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003340 struct attn_route attn;
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00003341 int port = BP_PORT(bp);
3342
3343 attn.sig[0] = REG_RD(bp,
3344 MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 +
3345 port*4);
3346 attn.sig[1] = REG_RD(bp,
3347 MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 +
3348 port*4);
3349 attn.sig[2] = REG_RD(bp,
3350 MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 +
3351 port*4);
3352 attn.sig[3] = REG_RD(bp,
3353 MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 +
3354 port*4);
3355
3356 return bnx2x_parity_attn(bp, attn.sig[0], attn.sig[1], attn.sig[2],
3357 attn.sig[3]);
3358}
3359
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003360
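/* Decode the E2-only attention sources: the PGLUE_B and ATC interrupt
 * status registers are read-to-clear and every error cause found is
 * logged; PGLUE/ATC parity bits are reported as fatal.
 */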
3361static inline void bnx2x_attn_int_deasserted4(struct bnx2x *bp, u32 attn)
3362{
3363 u32 val;
3364 if (attn & AEU_INPUTS_ATTN_BITS_PGLUE_HW_INTERRUPT) {
3365
3366 val = REG_RD(bp, PGLUE_B_REG_PGLUE_B_INT_STS_CLR);
3367 BNX2X_ERR("PGLUE hw attention 0x%x\n", val);
3368 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_ADDRESS_ERROR)
3369 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
3370 "ADDRESS_ERROR\n");
3371 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_INCORRECT_RCV_BEHAVIOR)
3372 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
3373 "INCORRECT_RCV_BEHAVIOR\n");
3374 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN)
3375 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
3376 "WAS_ERROR_ATTN\n");
3377 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_VF_LENGTH_VIOLATION_ATTN)
3378 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
3379 "VF_LENGTH_VIOLATION_ATTN\n");
3380 if (val &
3381 PGLUE_B_PGLUE_B_INT_STS_REG_VF_GRC_SPACE_VIOLATION_ATTN)
3382 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
3383 "VF_GRC_SPACE_VIOLATION_ATTN\n");
3384 if (val &
3385 PGLUE_B_PGLUE_B_INT_STS_REG_VF_MSIX_BAR_VIOLATION_ATTN)
3386 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
3387 "VF_MSIX_BAR_VIOLATION_ATTN\n");
3388 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_ERROR_ATTN)
3389 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
3390 "TCPL_ERROR_ATTN\n");
3391 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_IN_TWO_RCBS_ATTN)
3392 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
3393 "TCPL_IN_TWO_RCBS_ATTN\n");
3394 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_CSSNOOP_FIFO_OVERFLOW)
3395 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
3396 "CSSNOOP_FIFO_OVERFLOW\n");
3397 }
3398 if (attn & AEU_INPUTS_ATTN_BITS_ATC_HW_INTERRUPT) {
3399 val = REG_RD(bp, ATC_REG_ATC_INT_STS_CLR);
3400 BNX2X_ERR("ATC hw attention 0x%x\n", val);
3401 if (val & ATC_ATC_INT_STS_REG_ADDRESS_ERROR)
3402 BNX2X_ERR("ATC_ATC_INT_STS_REG_ADDRESS_ERROR\n");
3403 if (val & ATC_ATC_INT_STS_REG_ATC_TCPL_TO_NOT_PEND)
3404 BNX2X_ERR("ATC_ATC_INT_STS_REG"
3405 "_ATC_TCPL_TO_NOT_PEND\n");
3406 if (val & ATC_ATC_INT_STS_REG_ATC_GPA_MULTIPLE_HITS)
3407 BNX2X_ERR("ATC_ATC_INT_STS_REG_"
3408 "ATC_GPA_MULTIPLE_HITS\n");
3409 if (val & ATC_ATC_INT_STS_REG_ATC_RCPL_TO_EMPTY_CNT)
3410 BNX2X_ERR("ATC_ATC_INT_STS_REG_"
3411 "ATC_RCPL_TO_EMPTY_CNT\n");
3412 if (val & ATC_ATC_INT_STS_REG_ATC_TCPL_ERROR)
3413 BNX2X_ERR("ATC_ATC_INT_STS_REG_ATC_TCPL_ERROR\n");
3414 if (val & ATC_ATC_INT_STS_REG_ATC_IREQ_LESS_THAN_STU)
3415 BNX2X_ERR("ATC_ATC_INT_STS_REG_"
3416 "ATC_IREQ_LESS_THAN_STU\n");
3417 }
3418
3419 if (attn & (AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR |
3420 AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR)) {
3421 BNX2X_ERR("FATAL parity attention set4 0x%x\n",
3422 (u32)(attn & (AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR |
3423 AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR)));
3424 }
3425
3426}
3427
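/* Handle deasserted attentions: under the ALR, check for parity errors
 * (and if found kick off the recovery flow instead), read the "after
 * invert" registers, run the per-group handlers for every deasserted
 * group, ack the bits towards the HC/IGU and unmask them in the AEU.
 */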
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00003428static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
3429{
3430 struct attn_route attn, *group_mask;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07003431 int port = BP_PORT(bp);
Eliezer Tamir877e9aa2008-02-28 11:55:53 -08003432 int index;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003433 u32 reg_addr;
3434 u32 val;
Eilon Greenstein3fcaf2e2008-08-13 15:50:45 -07003435 u32 aeu_mask;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003436
3437 /* need to take HW lock because MCP or other port might also
3438 try to handle this event */
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07003439 bnx2x_acquire_alr(bp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003440
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00003441 if (bnx2x_chk_parity_attn(bp)) {
3442 bp->recovery_state = BNX2X_RECOVERY_INIT;
3443 bnx2x_set_reset_in_progress(bp);
3444 schedule_delayed_work(&bp->reset_task, 0);
3445 /* Disable HW interrupts */
3446 bnx2x_int_disable(bp);
3447 bnx2x_release_alr(bp);
3448		/* In case of parity errors don't handle attentions so that
3449		 * the other function will also "see" the parity errors.
3450		 */
3451 return;
3452 }
3453
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003454 attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
3455 attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
3456 attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
3457 attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003458 if (CHIP_IS_E2(bp))
3459 attn.sig[4] =
3460 REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_5_FUNC_0 + port*4);
3461 else
3462 attn.sig[4] = 0;
3463
3464 DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x %08x\n",
3465 attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3], attn.sig[4]);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003466
3467 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
3468 if (deasserted & (1 << index)) {
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00003469 group_mask = &bp->attn_group[index];
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003470
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003471 DP(NETIF_MSG_HW, "group[%d]: %08x %08x "
3472 "%08x %08x %08x\n",
3473 index,
3474 group_mask->sig[0], group_mask->sig[1],
3475 group_mask->sig[2], group_mask->sig[3],
3476 group_mask->sig[4]);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003477
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003478 bnx2x_attn_int_deasserted4(bp,
3479 attn.sig[4] & group_mask->sig[4]);
Eliezer Tamir877e9aa2008-02-28 11:55:53 -08003480 bnx2x_attn_int_deasserted3(bp,
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00003481 attn.sig[3] & group_mask->sig[3]);
Eliezer Tamir877e9aa2008-02-28 11:55:53 -08003482 bnx2x_attn_int_deasserted1(bp,
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00003483 attn.sig[1] & group_mask->sig[1]);
Eliezer Tamir877e9aa2008-02-28 11:55:53 -08003484 bnx2x_attn_int_deasserted2(bp,
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00003485 attn.sig[2] & group_mask->sig[2]);
Eliezer Tamir877e9aa2008-02-28 11:55:53 -08003486 bnx2x_attn_int_deasserted0(bp,
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00003487 attn.sig[0] & group_mask->sig[0]);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003488 }
3489 }
3490
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07003491 bnx2x_release_alr(bp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003492
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003493 if (bp->common.int_block == INT_BLOCK_HC)
3494 reg_addr = (HC_REG_COMMAND_REG + port*32 +
3495 COMMAND_REG_ATTN_BITS_CLR);
3496 else
3497 reg_addr = (BAR_IGU_INTMEM + IGU_CMD_ATTN_BIT_CLR_UPPER*8);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003498
3499 val = ~deasserted;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003500 DP(NETIF_MSG_HW, "about to mask 0x%08x at %s addr 0x%x\n", val,
3501 (bp->common.int_block == INT_BLOCK_HC) ? "HC" : "IGU", reg_addr);
Eilon Greenstein5c862842008-08-13 15:51:48 -07003502 REG_WR(bp, reg_addr, val);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003503
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003504 if (~bp->attn_state & deasserted)
Eilon Greenstein3fcaf2e2008-08-13 15:50:45 -07003505 BNX2X_ERR("IGU ERROR\n");
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003506
3507 reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
3508 MISC_REG_AEU_MASK_ATTN_FUNC_0;
3509
Eilon Greenstein3fcaf2e2008-08-13 15:50:45 -07003510 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
3511 aeu_mask = REG_RD(bp, reg_addr);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003512
Eilon Greenstein3fcaf2e2008-08-13 15:50:45 -07003513 DP(NETIF_MSG_HW, "aeu_mask %x newly deasserted %x\n",
3514 aeu_mask, deasserted);
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00003515 aeu_mask |= (deasserted & 0x3ff);
Eilon Greenstein3fcaf2e2008-08-13 15:50:45 -07003516 DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
3517
3518 REG_WR(bp, reg_addr, aeu_mask);
3519 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003520
3521 DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
3522 bp->attn_state &= ~deasserted;
3523 DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
3524}
3525
3526static void bnx2x_attn_int(struct bnx2x *bp)
3527{
3528 /* read local copy of bits */
Eilon Greenstein68d59482009-01-14 21:27:36 -08003529 u32 attn_bits = le32_to_cpu(bp->def_status_blk->atten_status_block.
3530 attn_bits);
3531 u32 attn_ack = le32_to_cpu(bp->def_status_blk->atten_status_block.
3532 attn_bits_ack);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003533 u32 attn_state = bp->attn_state;
3534
3535 /* look for changed bits */
3536 u32 asserted = attn_bits & ~attn_ack & ~attn_state;
3537 u32 deasserted = ~attn_bits & attn_ack & attn_state;
3538
3539 DP(NETIF_MSG_HW,
3540 "attn_bits %x attn_ack %x asserted %x deasserted %x\n",
3541 attn_bits, attn_ack, asserted, deasserted);
3542
3543 if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
Eilon Greenstein34f80b02008-06-23 20:33:01 -07003544 BNX2X_ERR("BAD attention state\n");
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003545
3546 /* handle bits that were raised */
3547 if (asserted)
3548 bnx2x_attn_int_asserted(bp, asserted);
3549
3550 if (deasserted)
3551 bnx2x_attn_int_deasserted(bp, deasserted);
3552}
3553
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003554static inline void bnx2x_update_eq_prod(struct bnx2x *bp, u16 prod)
3555{
3556 /* No memory barriers */
3557 storm_memset_eq_prod(bp, prod, BP_FUNC(bp));
3558 mmiowb(); /* keep prod updates ordered */
3559}
3560
3561#ifdef BCM_CNIC
3562static int bnx2x_cnic_handle_cfc_del(struct bnx2x *bp, u32 cid,
3563 union event_ring_elem *elem)
3564{
3565 if (!bp->cnic_eth_dev.starting_cid ||
3566 cid < bp->cnic_eth_dev.starting_cid)
3567 return 1;
3568
3569 DP(BNX2X_MSG_SP, "got delete ramrod for CNIC CID %d\n", cid);
3570
3571 if (unlikely(elem->message.data.cfc_del_event.error)) {
3572 BNX2X_ERR("got delete ramrod for CNIC CID %d with error!\n",
3573 cid);
3574 bnx2x_panic_dump(bp);
3575 }
3576 bnx2x_cnic_cfc_comp(bp, cid);
3577 return 0;
3578}
3579#endif
3580
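/* Process event queue completions: CFC delete, function start/stop,
 * set-MAC and statistics events; each consumed element returns one
 * credit to spq_left, and the EQ producer is updated at the end.
 */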
3581static void bnx2x_eq_int(struct bnx2x *bp)
3582{
3583 u16 hw_cons, sw_cons, sw_prod;
3584 union event_ring_elem *elem;
3585 u32 cid;
3586 u8 opcode;
3587 int spqe_cnt = 0;
3588
3589 hw_cons = le16_to_cpu(*bp->eq_cons_sb);
3590
3591	/* The hw_cons range is 1-255, 257 - the sw_cons range is 0-254, 256.
3592	 * When we get to the next-page we need to adjust so the loop
3593	 * condition below will be met. The next element is the size of a
3594	 * regular element and hence we increment by 1.
3595	 */
3596 if ((hw_cons & EQ_DESC_MAX_PAGE) == EQ_DESC_MAX_PAGE)
3597 hw_cons++;
3598
3599	/* This function may never run in parallel with itself for a
3600	 * specific bp, thus there is no need for a "paired" read memory
3601	 * barrier here.
3602	 */
3603 sw_cons = bp->eq_cons;
3604 sw_prod = bp->eq_prod;
3605
3606 DP(BNX2X_MSG_SP, "EQ: hw_cons %u sw_cons %u bp->spq_left %u\n",
Dmitry Kravkov8fe23fb2010-10-06 03:27:41 +00003607 hw_cons, sw_cons, atomic_read(&bp->spq_left));
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003608
3609 for (; sw_cons != hw_cons;
3610 sw_prod = NEXT_EQ_IDX(sw_prod), sw_cons = NEXT_EQ_IDX(sw_cons)) {
3611
3612
3613 elem = &bp->eq_ring[EQ_DESC(sw_cons)];
3614
3615 cid = SW_CID(elem->message.data.cfc_del_event.cid);
3616 opcode = elem->message.opcode;
3617
3618
3619 /* handle eq element */
3620 switch (opcode) {
3621 case EVENT_RING_OPCODE_STAT_QUERY:
3622 DP(NETIF_MSG_TIMER, "got statistics comp event\n");
3623 /* nothing to do with stats comp */
3624 continue;
3625
3626 case EVENT_RING_OPCODE_CFC_DEL:
3627 /* handle according to cid range */
3628 /*
3629 * we may want to verify here that the bp state is
3630 * HALTING
3631 */
3632 DP(NETIF_MSG_IFDOWN,
3633 "got delete ramrod for MULTI[%d]\n", cid);
3634#ifdef BCM_CNIC
3635 if (!bnx2x_cnic_handle_cfc_del(bp, cid, elem))
3636 goto next_spqe;
3637#endif
3638 bnx2x_fp(bp, cid, state) =
3639 BNX2X_FP_STATE_CLOSED;
3640
3641 goto next_spqe;
3642 }
3643
3644 switch (opcode | bp->state) {
3645 case (EVENT_RING_OPCODE_FUNCTION_START |
3646 BNX2X_STATE_OPENING_WAIT4_PORT):
3647 DP(NETIF_MSG_IFUP, "got setup ramrod\n");
3648 bp->state = BNX2X_STATE_FUNC_STARTED;
3649 break;
3650
3651 case (EVENT_RING_OPCODE_FUNCTION_STOP |
3652 BNX2X_STATE_CLOSING_WAIT4_HALT):
3653 DP(NETIF_MSG_IFDOWN, "got halt ramrod\n");
3654 bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD;
3655 break;
3656
3657 case (EVENT_RING_OPCODE_SET_MAC | BNX2X_STATE_OPEN):
3658 case (EVENT_RING_OPCODE_SET_MAC | BNX2X_STATE_DIAG):
3659 DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
3660 bp->set_mac_pending = 0;
3661 break;
3662
3663 case (EVENT_RING_OPCODE_SET_MAC |
3664 BNX2X_STATE_CLOSING_WAIT4_HALT):
3665 DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n");
3666 bp->set_mac_pending = 0;
3667 break;
3668 default:
3669			/* unknown event: log the error and continue */
3670 BNX2X_ERR("Unknown EQ event %d\n",
3671 elem->message.opcode);
3672 }
3673next_spqe:
3674 spqe_cnt++;
3675 } /* for */
3676
Dmitry Kravkov8fe23fb2010-10-06 03:27:41 +00003677 smp_mb__before_atomic_inc();
3678 atomic_add(spqe_cnt, &bp->spq_left);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003679
3680 bp->eq_cons = sw_cons;
3681 bp->eq_prod = sw_prod;
3682	/* Make sure the above memory writes are issued before the producer update */
3683 smp_wmb();
3684
3685 /* update producer */
3686 bnx2x_update_eq_prod(bp, bp->eq_prod);
3687}
3688
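/* Slow path worker: runs from bnx2x_wq, handles HW attentions and event
 * queue completions flagged in the default status block, then re-enables
 * the default SB interrupt through the IGU ack.
 */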
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003689static void bnx2x_sp_task(struct work_struct *work)
3690{
Eilon Greenstein1cf167f2009-01-14 21:22:18 -08003691 struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003692 u16 status;
3693
3694 /* Return here if interrupt is disabled */
3695 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
Eilon Greenstein3196a882008-08-13 15:58:49 -07003696 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003697 return;
3698 }
3699
3700 status = bnx2x_update_dsb_idx(bp);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07003701/* if (status == 0) */
3702/* BNX2X_ERR("spurious slowpath interrupt!\n"); */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003703
Vladislav Zolotarovcdaa7cb2010-04-19 01:13:57 +00003704 DP(NETIF_MSG_INTR, "got a slowpath interrupt (status 0x%x)\n", status);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003705
Eliezer Tamir877e9aa2008-02-28 11:55:53 -08003706 /* HW attentions */
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003707 if (status & BNX2X_DEF_SB_ATT_IDX) {
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003708 bnx2x_attn_int(bp);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003709 status &= ~BNX2X_DEF_SB_ATT_IDX;
Vladislav Zolotarovcdaa7cb2010-04-19 01:13:57 +00003710 }
3711
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003712 /* SP events: STAT_QUERY and others */
3713 if (status & BNX2X_DEF_SB_IDX) {
3714
3715 /* Handle EQ completions */
3716 bnx2x_eq_int(bp);
3717
3718 bnx2x_ack_sb(bp, bp->igu_dsb_id, USTORM_ID,
3719 le16_to_cpu(bp->def_idx), IGU_INT_NOP, 1);
3720
3721 status &= ~BNX2X_DEF_SB_IDX;
Vladislav Zolotarovcdaa7cb2010-04-19 01:13:57 +00003722 }
3723
3724 if (unlikely(status))
3725 DP(NETIF_MSG_INTR, "got an unknown interrupt! (status 0x%x)\n",
3726 status);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003727
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003728 bnx2x_ack_sb(bp, bp->igu_dsb_id, ATTENTION_ID,
3729 le16_to_cpu(bp->def_att_idx), IGU_INT_ENABLE, 1);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003730}
3731
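/* Slow path MSI-X vector: ack-disables the default status block, gives
 * CNIC a chance to handle its events, and defers the real work to
 * bnx2x_sp_task.
 */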
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003732irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003733{
3734 struct net_device *dev = dev_instance;
3735 struct bnx2x *bp = netdev_priv(dev);
3736
3737 /* Return here if interrupt is disabled */
3738 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
Eilon Greenstein3196a882008-08-13 15:58:49 -07003739 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003740 return IRQ_HANDLED;
3741 }
3742
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003743 bnx2x_ack_sb(bp, bp->igu_dsb_id, USTORM_ID, 0,
3744 IGU_INT_DISABLE, 0);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003745
3746#ifdef BNX2X_STOP_ON_ERROR
3747 if (unlikely(bp->panic))
3748 return IRQ_HANDLED;
3749#endif
3750
Michael Chan993ac7b2009-10-10 13:46:56 +00003751#ifdef BCM_CNIC
3752 {
3753 struct cnic_ops *c_ops;
3754
3755 rcu_read_lock();
3756 c_ops = rcu_dereference(bp->cnic_ops);
3757 if (c_ops)
3758 c_ops->cnic_handler(bp->cnic_data, NULL);
3759 rcu_read_unlock();
3760 }
3761#endif
Eilon Greenstein1cf167f2009-01-14 21:22:18 -08003762 queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003763
3764 return IRQ_HANDLED;
3765}
3766
3767/* end of slow path */
3768
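/* Periodic timer: in poll mode drives the rings directly, maintains the
 * driver/MCP heartbeat (the pulse sequence numbers may differ by at most
 * one) and triggers a statistics update while the device is open.
 */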
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003769static void bnx2x_timer(unsigned long data)
3770{
3771 struct bnx2x *bp = (struct bnx2x *) data;
3772
3773 if (!netif_running(bp->dev))
3774 return;
3775
3776 if (atomic_read(&bp->intr_sem) != 0)
Eliezer Tamirf1410642008-02-28 11:51:50 -08003777 goto timer_restart;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003778
3779 if (poll) {
3780 struct bnx2x_fastpath *fp = &bp->fp[0];
3781 int rc;
3782
Eilon Greenstein7961f792009-03-02 07:59:31 +00003783 bnx2x_tx_int(fp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003784 rc = bnx2x_rx_int(fp, 1000);
3785 }
3786
Eilon Greenstein34f80b02008-06-23 20:33:01 -07003787 if (!BP_NOMCP(bp)) {
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003788 int mb_idx = BP_FW_MB_IDX(bp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003789 u32 drv_pulse;
3790 u32 mcp_pulse;
3791
3792 ++bp->fw_drv_pulse_wr_seq;
3793 bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
3794 /* TBD - add SYSTEM_TIME */
3795 drv_pulse = bp->fw_drv_pulse_wr_seq;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003796 SHMEM_WR(bp, func_mb[mb_idx].drv_pulse_mb, drv_pulse);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003797
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003798 mcp_pulse = (SHMEM_RD(bp, func_mb[mb_idx].mcp_pulse_mb) &
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003799 MCP_PULSE_SEQ_MASK);
3800 /* The delta between driver pulse and mcp response
3801 * should be 1 (before mcp response) or 0 (after mcp response)
3802 */
3803 if ((drv_pulse != mcp_pulse) &&
3804 (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
3805 /* someone lost a heartbeat... */
3806 BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
3807 drv_pulse, mcp_pulse);
3808 }
3809 }
3810
Eilon Greensteinf34d28e2009-10-15 00:18:08 -07003811 if (bp->state == BNX2X_STATE_OPEN)
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07003812 bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003813
Eliezer Tamirf1410642008-02-28 11:51:50 -08003814timer_restart:
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003815 mod_timer(&bp->timer, jiffies + bp->current_interval);
3816}
3817
3818/* end of Statistics */
3819
3820/* nic init */
3821
3822/*
3823 * nic init service functions
3824 */
3825
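/* helper: fill a device memory region - dword writes when len and addr
 * are dword-aligned, byte writes otherwise */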
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003826static inline void bnx2x_fill(struct bnx2x *bp, u32 addr, int fill, u32 len)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003827{
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003828 u32 i;
3829 if (!(len%4) && !(addr%4))
3830 for (i = 0; i < len; i += 4)
3831 REG_WR(bp, addr + i, fill);
3832 else
3833 for (i = 0; i < len; i++)
3834 REG_WR8(bp, addr + i, fill);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07003835
Eilon Greenstein34f80b02008-06-23 20:33:01 -07003836}
3837
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003838/* helper: writes FP SP data to FW - data_size in dwords */
3839static inline void bnx2x_wr_fp_sb_data(struct bnx2x *bp,
3840 int fw_sb_id,
3841 u32 *sb_data_p,
3842 u32 data_size)
Eilon Greenstein34f80b02008-06-23 20:33:01 -07003843{
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003844 int index;
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003845 for (index = 0; index < data_size; index++)
3846 REG_WR(bp, BAR_CSTRORM_INTMEM +
3847 CSTORM_STATUS_BLOCK_DATA_OFFSET(fw_sb_id) +
3848 sizeof(u32)*index,
3849 *(sb_data_p + index));
Eilon Greenstein34f80b02008-06-23 20:33:01 -07003850}
3851
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003852static inline void bnx2x_zero_fp_sb(struct bnx2x *bp, int fw_sb_id)
3853{
3854 u32 *sb_data_p;
3855 u32 data_size = 0;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003856 struct hc_status_block_data_e2 sb_data_e2;
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003857 struct hc_status_block_data_e1x sb_data_e1x;
3858
3859 /* disable the function first */
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003860 if (CHIP_IS_E2(bp)) {
3861 memset(&sb_data_e2, 0, sizeof(struct hc_status_block_data_e2));
3862 sb_data_e2.common.p_func.pf_id = HC_FUNCTION_DISABLED;
3863 sb_data_e2.common.p_func.vf_id = HC_FUNCTION_DISABLED;
3864 sb_data_e2.common.p_func.vf_valid = false;
3865 sb_data_p = (u32 *)&sb_data_e2;
3866 data_size = sizeof(struct hc_status_block_data_e2)/sizeof(u32);
3867 } else {
3868 memset(&sb_data_e1x, 0,
3869 sizeof(struct hc_status_block_data_e1x));
3870 sb_data_e1x.common.p_func.pf_id = HC_FUNCTION_DISABLED;
3871 sb_data_e1x.common.p_func.vf_id = HC_FUNCTION_DISABLED;
3872 sb_data_e1x.common.p_func.vf_valid = false;
3873 sb_data_p = (u32 *)&sb_data_e1x;
3874 data_size = sizeof(struct hc_status_block_data_e1x)/sizeof(u32);
3875 }
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003876 bnx2x_wr_fp_sb_data(bp, fw_sb_id, sb_data_p, data_size);
3877
3878 bnx2x_fill(bp, BAR_CSTRORM_INTMEM +
3879 CSTORM_STATUS_BLOCK_OFFSET(fw_sb_id), 0,
3880 CSTORM_STATUS_BLOCK_SIZE);
3881 bnx2x_fill(bp, BAR_CSTRORM_INTMEM +
3882 CSTORM_SYNC_BLOCK_OFFSET(fw_sb_id), 0,
3883 CSTORM_SYNC_BLOCK_SIZE);
3884}
3885
3886/* helper: writes SP SB data to FW */
3887static inline void bnx2x_wr_sp_sb_data(struct bnx2x *bp,
3888 struct hc_sp_status_block_data *sp_sb_data)
Eilon Greenstein34f80b02008-06-23 20:33:01 -07003889{
3890 int func = BP_FUNC(bp);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003891 int i;
3892 for (i = 0; i < sizeof(struct hc_sp_status_block_data)/sizeof(u32); i++)
3893 REG_WR(bp, BAR_CSTRORM_INTMEM +
3894 CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(func) +
3895 i*sizeof(u32),
3896 *((u32 *)sp_sb_data + i));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003897}
3898
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003899static inline void bnx2x_zero_sp_sb(struct bnx2x *bp)
3900{
3901 int func = BP_FUNC(bp);
3902 struct hc_sp_status_block_data sp_sb_data;
3903 memset(&sp_sb_data, 0, sizeof(struct hc_sp_status_block_data));
3904
3905 sp_sb_data.p_func.pf_id = HC_FUNCTION_DISABLED;
3906 sp_sb_data.p_func.vf_id = HC_FUNCTION_DISABLED;
3907 sp_sb_data.p_func.vf_valid = false;
3908
3909 bnx2x_wr_sp_sb_data(bp, &sp_sb_data);
3910
3911 bnx2x_fill(bp, BAR_CSTRORM_INTMEM +
3912 CSTORM_SP_STATUS_BLOCK_OFFSET(func), 0,
3913 CSTORM_SP_STATUS_BLOCK_SIZE);
3914 bnx2x_fill(bp, BAR_CSTRORM_INTMEM +
3915 CSTORM_SP_SYNC_BLOCK_OFFSET(func), 0,
3916 CSTORM_SP_SYNC_BLOCK_SIZE);
3917
3918}
3919
3920
3921static inline
3922void bnx2x_setup_ndsb_state_machine(struct hc_status_block_sm *hc_sm,
3923 int igu_sb_id, int igu_seg_id)
3924{
3925 hc_sm->igu_sb_id = igu_sb_id;
3926 hc_sm->igu_seg_id = igu_seg_id;
3927 hc_sm->timer_value = 0xFF;
3928 hc_sm->time_to_expire = 0xFFFFFFFF;
3929}
3930
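/* Initialize one fastpath status block in FW: point it at the host
 * buffer, set the pf/vf ownership and the RX/TX state machines, then
 * write the whole data structure to CSTORM.
 */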
3931void bnx2x_init_sb(struct bnx2x *bp, dma_addr_t mapping, int vfid,
3932 u8 vf_valid, int fw_sb_id, int igu_sb_id)
3933{
3934 int igu_seg_id;
3935
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003936 struct hc_status_block_data_e2 sb_data_e2;
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003937 struct hc_status_block_data_e1x sb_data_e1x;
3938 struct hc_status_block_sm *hc_sm_p;
3939 struct hc_index_data *hc_index_p;
3940 int data_size;
3941 u32 *sb_data_p;
3942
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003943 if (CHIP_INT_MODE_IS_BC(bp))
3944 igu_seg_id = HC_SEG_ACCESS_NORM;
3945 else
3946 igu_seg_id = IGU_SEG_ACCESS_NORM;
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003947
3948 bnx2x_zero_fp_sb(bp, fw_sb_id);
3949
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003950 if (CHIP_IS_E2(bp)) {
3951 memset(&sb_data_e2, 0, sizeof(struct hc_status_block_data_e2));
3952 sb_data_e2.common.p_func.pf_id = BP_FUNC(bp);
3953 sb_data_e2.common.p_func.vf_id = vfid;
3954 sb_data_e2.common.p_func.vf_valid = vf_valid;
3955 sb_data_e2.common.p_func.vnic_id = BP_VN(bp);
3956 sb_data_e2.common.same_igu_sb_1b = true;
3957 sb_data_e2.common.host_sb_addr.hi = U64_HI(mapping);
3958 sb_data_e2.common.host_sb_addr.lo = U64_LO(mapping);
3959 hc_sm_p = sb_data_e2.common.state_machine;
3960 hc_index_p = sb_data_e2.index_data;
3961 sb_data_p = (u32 *)&sb_data_e2;
3962 data_size = sizeof(struct hc_status_block_data_e2)/sizeof(u32);
3963 } else {
3964 memset(&sb_data_e1x, 0,
3965 sizeof(struct hc_status_block_data_e1x));
3966 sb_data_e1x.common.p_func.pf_id = BP_FUNC(bp);
3967 sb_data_e1x.common.p_func.vf_id = 0xff;
3968 sb_data_e1x.common.p_func.vf_valid = false;
3969 sb_data_e1x.common.p_func.vnic_id = BP_VN(bp);
3970 sb_data_e1x.common.same_igu_sb_1b = true;
3971 sb_data_e1x.common.host_sb_addr.hi = U64_HI(mapping);
3972 sb_data_e1x.common.host_sb_addr.lo = U64_LO(mapping);
3973 hc_sm_p = sb_data_e1x.common.state_machine;
3974 hc_index_p = sb_data_e1x.index_data;
3975 sb_data_p = (u32 *)&sb_data_e1x;
3976 data_size = sizeof(struct hc_status_block_data_e1x)/sizeof(u32);
3977 }
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003978
3979 bnx2x_setup_ndsb_state_machine(&hc_sm_p[SM_RX_ID],
3980 igu_sb_id, igu_seg_id);
3981 bnx2x_setup_ndsb_state_machine(&hc_sm_p[SM_TX_ID],
3982 igu_sb_id, igu_seg_id);
3983
3984 DP(NETIF_MSG_HW, "Init FW SB %d\n", fw_sb_id);
3985
3986	/* write indices to HW */
3987 bnx2x_wr_fp_sb_data(bp, fw_sb_id, sb_data_p, data_size);
3988}
3989
3990static void bnx2x_update_coalesce_sb_index(struct bnx2x *bp, u16 fw_sb_id,
3991 u8 sb_index, u8 disable, u16 usec)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003992{
Eilon Greenstein34f80b02008-06-23 20:33:01 -07003993 int port = BP_PORT(bp);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003994 u8 ticks = usec / BNX2X_BTR;
3995
3996 storm_memset_hc_timeout(bp, port, fw_sb_id, sb_index, ticks);
3997
3998 disable = disable ? 1 : (usec ? 0 : 1);
3999 storm_memset_hc_disable(bp, port, fw_sb_id, sb_index, disable);
4000}
4001
4002static void bnx2x_update_coalesce_sb(struct bnx2x *bp, u16 fw_sb_id,
4003 u16 tx_usec, u16 rx_usec)
4004{
4005 bnx2x_update_coalesce_sb_index(bp, fw_sb_id, U_SB_ETH_RX_CQ_INDEX,
4006 false, rx_usec);
4007 bnx2x_update_coalesce_sb_index(bp, fw_sb_id, C_SB_ETH_TX_CQ_INDEX,
4008 false, tx_usec);
4009}
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00004010
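/* Initialize the default (slow path) status block: attention segment,
 * per-group AEU signature masks, the attention message address in the
 * HC/IGU, and the SP SB data in CSTORM.
 */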
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004011static void bnx2x_init_def_sb(struct bnx2x *bp)
4012{
4013 struct host_sp_status_block *def_sb = bp->def_status_blk;
	dma_addr_t mapping = bp->def_status_blk_mapping;
	int igu_sp_sb_index;
	int igu_seg_id;
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	int reg_offset;
	u64 section;
	int index;
	struct hc_sp_status_block_data sp_sb_data;
	memset(&sp_sb_data, 0, sizeof(struct hc_sp_status_block_data));

	if (CHIP_INT_MODE_IS_BC(bp)) {
		igu_sp_sb_index = DEF_SB_IGU_ID;
		igu_seg_id = HC_SEG_ACCESS_DEF;
	} else {
		igu_sp_sb_index = bp->igu_dsb_id;
		igu_seg_id = IGU_SEG_ACCESS_DEF;
	}

	/* ATTN */
	section = ((u64)mapping) + offsetof(struct host_sp_status_block,
					    atten_status_block);
	def_sb->atten_status_block.status_block_id = igu_sp_sb_index;

	bp->attn_state = 0;

	reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
			     MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
	for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
		int sindex;
		/* take care of sig[0]..sig[3] */
		for (sindex = 0; sindex < 4; sindex++)
			bp->attn_group[index].sig[sindex] =
			    REG_RD(bp, reg_offset + sindex*0x4 + 0x10*index);

		if (CHIP_IS_E2(bp))
			/*
			 * enable5 is separate from the rest of the registers,
			 * and therefore the address skip is 4
			 * and not 16 between the different groups
			 */
			bp->attn_group[index].sig[4] = REG_RD(bp,
					reg_offset + 0x10 + 0x4*index);
		else
			bp->attn_group[index].sig[4] = 0;
	}

	if (bp->common.int_block == INT_BLOCK_HC) {
		reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
				     HC_REG_ATTN_MSG0_ADDR_L);

		REG_WR(bp, reg_offset, U64_LO(section));
		REG_WR(bp, reg_offset + 4, U64_HI(section));
	} else if (CHIP_IS_E2(bp)) {
		REG_WR(bp, IGU_REG_ATTN_MSG_ADDR_L, U64_LO(section));
		REG_WR(bp, IGU_REG_ATTN_MSG_ADDR_H, U64_HI(section));
	}

	section = ((u64)mapping) + offsetof(struct host_sp_status_block,
					    sp_sb);

	bnx2x_zero_sp_sb(bp);

	sp_sb_data.host_sb_addr.lo	= U64_LO(section);
	sp_sb_data.host_sb_addr.hi	= U64_HI(section);
	sp_sb_data.igu_sb_id		= igu_sp_sb_index;
	sp_sb_data.igu_seg_id		= igu_seg_id;
	sp_sb_data.p_func.pf_id		= func;
	sp_sb_data.p_func.vnic_id	= BP_VN(bp);
	sp_sb_data.p_func.vf_id		= 0xff;

	bnx2x_wr_sp_sb_data(bp, &sp_sb_data);

	bp->stats_pending = 0;
	bp->set_mac_pending = 0;

	bnx2x_ack_sb(bp, bp->igu_dsb_id, USTORM_ID, 0, IGU_INT_ENABLE, 0);
}

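/* Push the current rx/tx interrupt coalescing ticks into the FW status
 * block of every active queue.
 */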
void bnx2x_update_coalesce(struct bnx2x *bp)
{
	int i;

	for_each_queue(bp, i)
		bnx2x_update_coalesce_sb(bp, bp->fp[i].fw_sb_id,
					 bp->rx_ticks, bp->tx_ticks);
}

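/* Initialize the slow-path (SP) queue: the lock, the available-entries
 * credit and the producer pointers into the SPQ page.
 */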
static void bnx2x_init_sp_ring(struct bnx2x *bp)
{
	spin_lock_init(&bp->spq_lock);
	atomic_set(&bp->spq_left, MAX_SPQ_PENDING);

	bp->spq_prod_idx = 0;
	bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
	bp->spq_prod_bd = bp->spq;
	bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;
}

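/* Chain the event queue (EQ) pages into a ring: the last element of each
 * page points at the next page, then reset the consumer/producer indices.
 */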
static void bnx2x_init_eq_ring(struct bnx2x *bp)
{
	int i;
	for (i = 1; i <= NUM_EQ_PAGES; i++) {
		union event_ring_elem *elem =
			&bp->eq_ring[EQ_DESC_CNT_PAGE * i - 1];

		elem->next_page.addr.hi =
			cpu_to_le32(U64_HI(bp->eq_mapping +
				   BCM_PAGE_SIZE * (i % NUM_EQ_PAGES)));
		elem->next_page.addr.lo =
			cpu_to_le32(U64_LO(bp->eq_mapping +
				   BCM_PAGE_SIZE * (i % NUM_EQ_PAGES)));
	}
	bp->eq_cons = 0;
	bp->eq_prod = NUM_EQ_DESC;
	bp->eq_cons_sb = BNX2X_EQ_INDEX;
}

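/* Program the TSTORM RSS indirection table; entries are spread
 * round-robin over the client ids of the active queues.
 */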
4132static void bnx2x_init_ind_table(struct bnx2x *bp)
4133{
Eilon Greenstein26c8fa42009-01-14 21:29:55 -08004134 int func = BP_FUNC(bp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004135 int i;
4136
Eilon Greenstein555f6c72009-02-12 08:36:11 +00004137 if (bp->multi_mode == ETH_RSS_MODE_DISABLED)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004138 return;
4139
Eilon Greenstein555f6c72009-02-12 08:36:11 +00004140 DP(NETIF_MSG_IFUP,
4141 "Initializing indirection table multi_mode %d\n", bp->multi_mode);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004142 for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004143 REG_WR8(bp, BAR_TSTRORM_INTMEM +
Eilon Greenstein26c8fa42009-01-14 21:29:55 -08004144 TSTORM_INDIRECTION_TABLE_OFFSET(func) + i,
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +00004145 bp->fp->cl_id + (i % bp->num_queues));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004146}
4147
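/* Translate the driver rx_mode (none/normal/allmulti/promisc) into MAC
 * filter flags and a NIG LLH mask, and write both to the chip.
 */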
void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
{
	int mode = bp->rx_mode;
	u16 cl_id;

	/* All but management unicast packets should pass to the host as well */
	u32 llh_mask =
		NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_BRCST |
		NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_MLCST |
		NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_VLAN |
		NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_NO_VLAN;

	switch (mode) {
	case BNX2X_RX_MODE_NONE: /* no Rx */
		cl_id = BP_L_ID(bp);
		bnx2x_rxq_set_mac_filters(bp, cl_id, BNX2X_ACCEPT_NONE);
		break;

	case BNX2X_RX_MODE_NORMAL:
		cl_id = BP_L_ID(bp);
		bnx2x_rxq_set_mac_filters(bp, cl_id,
			BNX2X_ACCEPT_UNICAST |
			BNX2X_ACCEPT_BROADCAST |
			BNX2X_ACCEPT_MULTICAST);
		break;

	case BNX2X_RX_MODE_ALLMULTI:
		cl_id = BP_L_ID(bp);
		bnx2x_rxq_set_mac_filters(bp, cl_id,
			BNX2X_ACCEPT_UNICAST |
			BNX2X_ACCEPT_BROADCAST |
			BNX2X_ACCEPT_ALL_MULTICAST);
		break;

	case BNX2X_RX_MODE_PROMISC:
		cl_id = BP_L_ID(bp);
		bnx2x_rxq_set_mac_filters(bp, cl_id, BNX2X_PROMISCUOUS_MODE);

		/* pass management unicast packets as well */
		llh_mask |= NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_UNCST;
		break;

	default:
		BNX2X_ERR("BAD rx mode (%d)\n", mode);
		break;
	}

	REG_WR(bp,
	       BP_PORT(bp) ? NIG_REG_LLH1_BRB1_DRV_MASK :
			     NIG_REG_LLH0_BRB1_DRV_MASK,
	       llh_mask);

	DP(NETIF_MSG_IFUP, "rx mode %d\n"
		"drop_ucast 0x%x\ndrop_mcast 0x%x\ndrop_bcast 0x%x\n"
		"accp_ucast 0x%x\naccp_mcast 0x%x\naccp_bcast 0x%x\n", mode,
		bp->mac_filters.ucast_drop_all,
		bp->mac_filters.mcast_drop_all,
		bp->mac_filters.bcast_drop_all,
		bp->mac_filters.ucast_accept_all,
		bp->mac_filters.mcast_accept_all,
		bp->mac_filters.bcast_accept_all);

	storm_memset_mac_filters(bp, &bp->mac_filters, BP_FUNC(bp));
}

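/* Internal RAM initialization that runs once at the common (chip-wide)
 * phase: publish the MF mode to each storm, zero the USTORM aggregation
 * data and, on E2, select the IGU BC/NBC mode.
 */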
static void bnx2x_init_internal_common(struct bnx2x *bp)
{
	int i;

	if (!CHIP_IS_E1(bp)) {

		/* xstorm needs to know whether to add ovlan to packets or
		 * not; in switch-independent mode we write 0 here... */
		REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNCTION_MODE_OFFSET,
			bp->mf_mode);
		REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNCTION_MODE_OFFSET,
			bp->mf_mode);
		REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNCTION_MODE_OFFSET,
			bp->mf_mode);
		REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNCTION_MODE_OFFSET,
			bp->mf_mode);
	}

	/* Zero this manually as its initialization is
	   currently missing in the initTool */
	for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_AGG_DATA_OFFSET + i * 4, 0);
	if (CHIP_IS_E2(bp)) {
		REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_IGU_MODE_OFFSET,
			CHIP_INT_MODE_IS_BC(bp) ?
			HC_IGU_BC_MODE : HC_IGU_NBC_MODE);
	}
}

static void bnx2x_init_internal_port(struct bnx2x *bp)
{
	/* port */
}

static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
{
	switch (load_code) {
	case FW_MSG_CODE_DRV_LOAD_COMMON:
	case FW_MSG_CODE_DRV_LOAD_COMMON_CHIP:
		bnx2x_init_internal_common(bp);
		/* no break */

	case FW_MSG_CODE_DRV_LOAD_PORT:
		bnx2x_init_internal_port(bp);
		/* no break */

	case FW_MSG_CODE_DRV_LOAD_FUNCTION:
		/* internal memory per function is
		   initialized inside bnx2x_pf_init */
		break;

	default:
		BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
		break;
	}
}

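/* Assign a fastpath queue its client, FW and IGU status-block ids, and
 * bring up its status block in the FW/IGU.
 */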
static void bnx2x_init_fp_sb(struct bnx2x *bp, int fp_idx)
{
	struct bnx2x_fastpath *fp = &bp->fp[fp_idx];

	fp->state = BNX2X_FP_STATE_CLOSED;

	fp->index = fp->cid = fp_idx;
	fp->cl_id = BP_L_ID(bp) + fp_idx;
	fp->fw_sb_id = bp->base_fw_ndsb + fp->cl_id + CNIC_CONTEXT_USE;
	fp->igu_sb_id = bp->igu_base_sb + fp_idx + CNIC_CONTEXT_USE;
	/* qZone id equals the FW (per path) client id */
	fp->cl_qzone_id = fp->cl_id +
			  BP_PORT(bp)*(CHIP_IS_E2(bp) ? ETH_MAX_RX_CLIENTS_E2 :
				ETH_MAX_RX_CLIENTS_E1H);
	/* init shortcut */
	fp->ustorm_rx_prods_offset = CHIP_IS_E2(bp) ?
			    USTORM_RX_PRODS_E2_OFFSET(fp->cl_qzone_id) :
			    USTORM_RX_PRODS_E1X_OFFSET(BP_PORT(bp), fp->cl_id);
	/* Set up SB indices */
	fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
	fp->tx_cons_sb = BNX2X_TX_SB_INDEX;

	DP(NETIF_MSG_IFUP, "queue[%d]: bnx2x_init_sb(%p,%p) "
			   "cl_id %d  fw_sb %d  igu_sb %d\n",
	   fp_idx, bp, fp->status_blk.e1x_sb, fp->cl_id, fp->fw_sb_id,
	   fp->igu_sb_id);
	bnx2x_init_sb(bp, fp->status_blk_mapping, BNX2X_VF_ID_INVALID, false,
		      fp->fw_sb_id, fp->igu_sb_id);

	bnx2x_update_fpsb_idx(fp);
}

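/* Top-level NIC init: bring up all status blocks, rings and internal
 * memories, then enable interrupts and check the SPIO5 (fan failure)
 * attention.
 */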
void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
{
	int i;

	for_each_queue(bp, i)
		bnx2x_init_fp_sb(bp, i);
#ifdef BCM_CNIC

	bnx2x_init_sb(bp, bp->cnic_sb_mapping,
		      BNX2X_VF_ID_INVALID, false,
		      CNIC_SB_ID(bp), CNIC_IGU_SB_ID(bp));

#endif

	/* ensure status block indices were read */
	rmb();

	bnx2x_init_def_sb(bp);
	bnx2x_update_dsb_idx(bp);
	bnx2x_init_rx_rings(bp);
	bnx2x_init_tx_rings(bp);
	bnx2x_init_sp_ring(bp);
	bnx2x_init_eq_ring(bp);
	bnx2x_init_internal(bp, load_code);
	bnx2x_pf_init(bp);
	bnx2x_init_ind_table(bp);
	bnx2x_stats_init(bp);

	/* At this point, we are ready for interrupts */
	atomic_set(&bp->intr_sem, 0);

	/* flush all before enabling interrupts */
	mb();
	mmiowb();

	bnx2x_int_enable(bp);

	/* Check for SPIO5 */
	bnx2x_attn_int_deasserted0(bp,
		REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + BP_PORT(bp)*4) &
				   AEU_INPUTS_ATTN_BITS_SPIO5);
}

/* end of nic init */

/*
 * gzip service functions
 */

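/* Allocate the DMA-coherent buffer and the zlib stream/workspace used to
 * decompress firmware images.
 */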
4353static int bnx2x_gunzip_init(struct bnx2x *bp)
4354{
FUJITA Tomonori1a983142010-04-04 01:51:03 +00004355 bp->gunzip_buf = dma_alloc_coherent(&bp->pdev->dev, FW_BUF_SIZE,
4356 &bp->gunzip_mapping, GFP_KERNEL);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004357 if (bp->gunzip_buf == NULL)
4358 goto gunzip_nomem1;
4359
4360 bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
4361 if (bp->strm == NULL)
4362 goto gunzip_nomem2;
4363
4364 bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(),
4365 GFP_KERNEL);
4366 if (bp->strm->workspace == NULL)
4367 goto gunzip_nomem3;
4368
4369 return 0;
4370
4371gunzip_nomem3:
4372 kfree(bp->strm);
4373 bp->strm = NULL;
4374
4375gunzip_nomem2:
FUJITA Tomonori1a983142010-04-04 01:51:03 +00004376 dma_free_coherent(&bp->pdev->dev, FW_BUF_SIZE, bp->gunzip_buf,
4377 bp->gunzip_mapping);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004378 bp->gunzip_buf = NULL;
4379
4380gunzip_nomem1:
Vladislav Zolotarovcdaa7cb2010-04-19 01:13:57 +00004381 netdev_err(bp->dev, "Cannot allocate firmware buffer for"
4382 " un-compression\n");
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004383 return -ENOMEM;
4384}
4385
static void bnx2x_gunzip_end(struct bnx2x *bp)
{
	kfree(bp->strm->workspace);
	kfree(bp->strm);
	bp->strm = NULL;

	if (bp->gunzip_buf) {
		dma_free_coherent(&bp->pdev->dev, FW_BUF_SIZE, bp->gunzip_buf,
				  bp->gunzip_mapping);
		bp->gunzip_buf = NULL;
	}
}

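/* Inflate a gzipped firmware blob into bp->gunzip_buf; on success
 * bp->gunzip_outlen holds the decompressed length in 32-bit words.
 */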
static int bnx2x_gunzip(struct bnx2x *bp, const u8 *zbuf, int len)
{
	int n, rc;

	/* check gzip header */
	if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED)) {
		BNX2X_ERR("Bad gzip header\n");
		return -EINVAL;
	}

	n = 10;

#define FNAME				0x8

	if (zbuf[3] & FNAME)
		while ((zbuf[n++] != 0) && (n < len));

	bp->strm->next_in = (typeof(bp->strm->next_in))zbuf + n;
	bp->strm->avail_in = len - n;
	bp->strm->next_out = bp->gunzip_buf;
	bp->strm->avail_out = FW_BUF_SIZE;

	rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
	if (rc != Z_OK)
		return rc;

	rc = zlib_inflate(bp->strm, Z_FINISH);
	if ((rc != Z_OK) && (rc != Z_STREAM_END))
		netdev_err(bp->dev, "Firmware decompression error: %s\n",
			   bp->strm->msg);

	bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
	if (bp->gunzip_outlen & 0x3)
		netdev_err(bp->dev, "Firmware decompression error:"
			   " gunzip_outlen (%d) not aligned\n",
			   bp->gunzip_outlen);
	bp->gunzip_outlen >>= 2;

	zlib_inflateEnd(bp->strm);

	if (rc == Z_STREAM_END)
		return 0;

	return rc;
}

/* nic load/unload */

/*
 * General service functions
 */

/* send a NIG loopback debug packet */
static void bnx2x_lb_pckt(struct bnx2x *bp)
{
	u32 wb_write[3];

	/* Ethernet source and destination addresses */
	wb_write[0] = 0x55555555;
	wb_write[1] = 0x55555555;
	wb_write[2] = 0x20;		/* SOP */
	REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);

	/* NON-IP protocol */
	wb_write[0] = 0x09000000;
	wb_write[1] = 0x55555555;
	wb_write[2] = 0x10;		/* EOP, eop_bvalid = 0 */
	REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
}

/* some of the internal memories are not directly readable from the
 * driver; to test them we send debug packets
 */
static int bnx2x_int_mem_test(struct bnx2x *bp)
{
	int factor;
	int count, i;
	u32 val = 0;

	if (CHIP_REV_IS_FPGA(bp))
		factor = 120;
	else if (CHIP_REV_IS_EMUL(bp))
		factor = 200;
	else
		factor = 1;

	/* Disable inputs of parser neighbor blocks */
	REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
	REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
	REG_WR(bp, CFC_REG_DEBUG0, 0x1);
	REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);

	/* Write 0 to parser credits for CFC search request */
	REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);

	/* send Ethernet packet */
	bnx2x_lb_pckt(bp);

	/* TODO: should the NIG statistics be reset here? */
	/* Wait until NIG register shows 1 packet of size 0x10 */
	count = 1000 * factor;
	while (count) {

		bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
		val = *bnx2x_sp(bp, wb_data[0]);
		if (val == 0x10)
			break;

		msleep(10);
		count--;
	}
	if (val != 0x10) {
		BNX2X_ERR("NIG timeout  val = 0x%x\n", val);
		return -1;
	}

	/* Wait until PRS register shows 1 packet */
	count = 1000 * factor;
	while (count) {
		val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
		if (val == 1)
			break;

		msleep(10);
		count--;
	}
	if (val != 0x1) {
		BNX2X_ERR("PRS timeout  val = 0x%x\n", val);
		return -2;
	}

	/* Reset and init BRB, PRS */
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
	msleep(50);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
	msleep(50);
	bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);

	DP(NETIF_MSG_HW, "part2\n");

	/* Disable inputs of parser neighbor blocks */
	REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
	REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
	REG_WR(bp, CFC_REG_DEBUG0, 0x1);
	REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);

	/* Write 0 to parser credits for CFC search request */
	REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);

	/* send 10 Ethernet packets */
	for (i = 0; i < 10; i++)
		bnx2x_lb_pckt(bp);

	/* Wait until NIG register shows 10 + 1
	   packets of size 11*0x10 = 0xb0 */
	count = 1000 * factor;
	while (count) {

		bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
		val = *bnx2x_sp(bp, wb_data[0]);
		if (val == 0xb0)
			break;

		msleep(10);
		count--;
	}
	if (val != 0xb0) {
		BNX2X_ERR("NIG timeout  val = 0x%x\n", val);
		return -3;
	}

	/* Wait until PRS register shows 2 packets */
	val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
	if (val != 2)
		BNX2X_ERR("PRS timeout  val = 0x%x\n", val);

	/* Write 1 to parser credits for CFC search request */
	REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);

	/* Wait until PRS register shows 3 packets */
	msleep(10 * factor);
	/* Wait until NIG register shows 1 packet of size 0x10 */
	val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
	if (val != 3)
		BNX2X_ERR("PRS timeout  val = 0x%x\n", val);

	/* clear NIG EOP FIFO */
	for (i = 0; i < 11; i++)
		REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO);
	val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY);
	if (val != 1) {
		BNX2X_ERR("clear of NIG failed\n");
		return -4;
	}

	/* Reset and init BRB, PRS, NIG */
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
	msleep(50);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
	msleep(50);
	bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
#ifndef BCM_CNIC
	/* set NIC mode */
	REG_WR(bp, PRS_REG_NIC_MODE, 1);
#endif

	/* Enable inputs of parser neighbor blocks */
	REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
	REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
	REG_WR(bp, CFC_REG_DEBUG0, 0x0);
	REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x1);

	DP(NETIF_MSG_HW, "done\n");

	return 0; /* OK */
}

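/* Unmask the attention interrupts of the individual HW blocks (writing 0
 * unmasks); a few known-benign error sources are deliberately left
 * masked, e.g. the BRB read-length errors and PBF bits 3-4.
 */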
static void enable_blocks_attention(struct bnx2x *bp)
{
	REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
	if (CHIP_IS_E2(bp))
		REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0x40);
	else
		REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
	REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
	REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
	/*
	 * mask read length error interrupts in brb for parser
	 * (parsing unit and 'checksum and crc' unit)
	 * these errors are legal (PU reads fixed length and CAC can cause
	 * read length error on truncated packets)
	 */
	REG_WR(bp, BRB1_REG_BRB1_INT_MASK, 0xFC00);
	REG_WR(bp, QM_REG_QM_INT_MASK, 0);
	REG_WR(bp, TM_REG_TM_INT_MASK, 0);
	REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
	REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0);
	REG_WR(bp, XCM_REG_XCM_INT_MASK, 0);
/*	REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */
/*	REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */
	REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0);
	REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0);
	REG_WR(bp, UCM_REG_UCM_INT_MASK, 0);
/*	REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */
/*	REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */
	REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
	REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0);
	REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0);
	REG_WR(bp, CCM_REG_CCM_INT_MASK, 0);
/*	REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */
/*	REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */

	if (CHIP_REV_IS_FPGA(bp))
		REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x580000);
	else if (CHIP_IS_E2(bp))
		REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0,
			   (PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_OF
				| PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_AFT
				| PXP2_PXP2_INT_MASK_0_REG_PGL_PCIE_ATTN
				| PXP2_PXP2_INT_MASK_0_REG_PGL_READ_BLOCKED
				| PXP2_PXP2_INT_MASK_0_REG_PGL_WRITE_BLOCKED));
	else
		REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x480000);
	REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
	REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0);
	REG_WR(bp, TCM_REG_TCM_INT_MASK, 0);
/*	REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */
/*	REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0); */
	REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
	REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
/*	REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
	REG_WR(bp, PBF_REG_PBF_INT_MASK, 0x18);		/* bit 3,4 masked */
}

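/* Per-block parity mask values written by enable_blocks_parity(); bits
 * set in a mask stay disabled (see the per-entry bit notes below).
 */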
static const struct {
	u32 addr;
	u32 mask;
} bnx2x_parity_mask[] = {
	{PXP_REG_PXP_PRTY_MASK,		0x3ffffff},
	{PXP2_REG_PXP2_PRTY_MASK_0,	0xffffffff},
	{PXP2_REG_PXP2_PRTY_MASK_1,	0x7f},
	{HC_REG_HC_PRTY_MASK,		0x7},
	{MISC_REG_MISC_PRTY_MASK,	0x1},
	{QM_REG_QM_PRTY_MASK,		0x0},
	{DORQ_REG_DORQ_PRTY_MASK,	0x0},
	{GRCBASE_UPB + PB_REG_PB_PRTY_MASK, 0x0},
	{GRCBASE_XPB + PB_REG_PB_PRTY_MASK, 0x0},
	{SRC_REG_SRC_PRTY_MASK,		0x4},	/* bit 2 */
	{CDU_REG_CDU_PRTY_MASK,		0x0},
	{CFC_REG_CFC_PRTY_MASK,		0x0},
	{DBG_REG_DBG_PRTY_MASK,		0x0},
	{DMAE_REG_DMAE_PRTY_MASK,	0x0},
	{BRB1_REG_BRB1_PRTY_MASK,	0x0},
	{PRS_REG_PRS_PRTY_MASK,		(1<<6)},	/* bit 6 */
	{TSDM_REG_TSDM_PRTY_MASK,	0x18},	/* bit 3,4 */
	{CSDM_REG_CSDM_PRTY_MASK,	0x8},	/* bit 3 */
	{USDM_REG_USDM_PRTY_MASK,	0x38},	/* bit 3,4,5 */
	{XSDM_REG_XSDM_PRTY_MASK,	0x8},	/* bit 3 */
	{TSEM_REG_TSEM_PRTY_MASK_0,	0x0},
	{TSEM_REG_TSEM_PRTY_MASK_1,	0x0},
	{USEM_REG_USEM_PRTY_MASK_0,	0x0},
	{USEM_REG_USEM_PRTY_MASK_1,	0x0},
	{CSEM_REG_CSEM_PRTY_MASK_0,	0x0},
	{CSEM_REG_CSEM_PRTY_MASK_1,	0x0},
	{XSEM_REG_XSEM_PRTY_MASK_0,	0x0},
	{XSEM_REG_XSEM_PRTY_MASK_1,	0x0}
};

static void enable_blocks_parity(struct bnx2x *bp)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(bnx2x_parity_mask); i++)
		REG_WR(bp, bnx2x_parity_mask[i].addr,
		       bnx2x_parity_mask[i].mask);
}

static void bnx2x_reset_common(struct bnx2x *bp)
{
	/* reset_common */
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
	       0xd3ffff7f);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 0x1403);
}

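/* Derive the PXP read/write ordering from the PCIe DEVCTL register: the
 * write order from the max payload size and the read order from the max
 * read request size (bp->mrrs, when not -1, forces the read order).
 */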
static void bnx2x_init_pxp(struct bnx2x *bp)
{
	u16 devctl;
	int r_order, w_order;

	pci_read_config_word(bp->pdev,
			     bp->pcie_cap + PCI_EXP_DEVCTL, &devctl);
	DP(NETIF_MSG_HW, "read 0x%x from devctl\n", devctl);
	w_order = ((devctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5);
	if (bp->mrrs == -1)
		r_order = ((devctl & PCI_EXP_DEVCTL_READRQ) >> 12);
	else {
		DP(NETIF_MSG_HW, "force read order to %d\n", bp->mrrs);
		r_order = bp->mrrs;
	}

	bnx2x_init_pxp_arb(bp, r_order, w_order);
}

static void bnx2x_setup_fan_failure_detection(struct bnx2x *bp)
{
	int is_required;
	u32 val;
	int port;

	if (BP_NOMCP(bp))
		return;

	is_required = 0;
	val = SHMEM_RD(bp, dev_info.shared_hw_config.config2) &
	      SHARED_HW_CFG_FAN_FAILURE_MASK;

	if (val == SHARED_HW_CFG_FAN_FAILURE_ENABLED)
		is_required = 1;

	/*
	 * The fan failure mechanism is usually related to the PHY type since
	 * the power consumption of the board is affected by the PHY.
	 * Currently, fan is required for most designs with SFX7101, BCM8727
	 * and BCM8481.
	 */
	else if (val == SHARED_HW_CFG_FAN_FAILURE_PHY_TYPE)
		for (port = PORT_0; port < PORT_MAX; port++) {
			is_required |=
				bnx2x_fan_failure_det_req(
					bp,
					bp->common.shmem_base,
					bp->common.shmem2_base,
					port);
		}

	DP(NETIF_MSG_HW, "fan detection setting: %d\n", is_required);

	if (is_required == 0)
		return;

	/* Fan failure is indicated by SPIO 5 */
	bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_5,
		       MISC_REGISTERS_SPIO_INPUT_HI_Z);

	/* set to active low mode */
	val = REG_RD(bp, MISC_REG_SPIO_INT);
	val |= ((1 << MISC_REGISTERS_SPIO_5) <<
					MISC_REGISTERS_SPIO_INT_OLD_SET_POS);
	REG_WR(bp, MISC_REG_SPIO_INT, val);

	/* enable interrupt to signal the IGU */
	val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
	val |= (1 << MISC_REGISTERS_SPIO_5);
	REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
}

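/* Make subsequent register accesses execute on behalf of another
 * function by programming this function's PGL "pretend" register; a
 * read-back flushes the write before the caller proceeds.
 */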
static void bnx2x_pretend_func(struct bnx2x *bp, u8 pretend_func_num)
{
	u32 offset = 0;

	if (CHIP_IS_E1(bp))
		return;
	if (CHIP_IS_E1H(bp) && (pretend_func_num >= E1H_FUNC_MAX))
		return;

	switch (BP_ABS_FUNC(bp)) {
	case 0:
		offset = PXP2_REG_PGL_PRETEND_FUNC_F0;
		break;
	case 1:
		offset = PXP2_REG_PGL_PRETEND_FUNC_F1;
		break;
	case 2:
		offset = PXP2_REG_PGL_PRETEND_FUNC_F2;
		break;
	case 3:
		offset = PXP2_REG_PGL_PRETEND_FUNC_F3;
		break;
	case 4:
		offset = PXP2_REG_PGL_PRETEND_FUNC_F4;
		break;
	case 5:
		offset = PXP2_REG_PGL_PRETEND_FUNC_F5;
		break;
	case 6:
		offset = PXP2_REG_PGL_PRETEND_FUNC_F6;
		break;
	case 7:
		offset = PXP2_REG_PGL_PRETEND_FUNC_F7;
		break;
	default:
		return;
	}

	REG_WR(bp, offset, pretend_func_num);
	REG_RD(bp, offset);
	DP(NETIF_MSG_HW, "Pretending to func %d\n", pretend_func_num);
}

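/* Clear the function-enable bits (IGU, PGLUE master, CFC) so this PF
 * stops generating or accepting any HW traffic.
 */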
static void bnx2x_pf_disable(struct bnx2x *bp)
{
	u32 val = REG_RD(bp, IGU_REG_PF_CONFIGURATION);
	val &= ~IGU_PF_CONF_FUNC_EN;

	REG_WR(bp, IGU_REG_PF_CONFIGURATION, val);
	REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 0);
	REG_WR(bp, CFC_REG_WEAK_ENABLE_PF, 0);
}

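/* Common-phase HW init, performed by the first function loaded on the
 * chip (on E2: on the path): resets the chip and initializes every
 * shared block.
 */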
static int bnx2x_init_hw_common(struct bnx2x *bp, u32 load_code)
{
	u32 val, i;

	DP(BNX2X_MSG_MCP, "starting common init  func %d\n", BP_ABS_FUNC(bp));

	bnx2x_reset_common(bp);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 0xfffc);

	bnx2x_init_block(bp, MISC_BLOCK, COMMON_STAGE);
	if (!CHIP_IS_E1(bp))
		REG_WR(bp, MISC_REG_E1HMF_MODE, IS_MF(bp));

	if (CHIP_IS_E2(bp)) {
		u8 fid;

		/*
		 * In 4-port or 2-port mode we need to turn off master-enable
		 * for everyone; after that, turn it back on for self.
		 * So, we disregard multi-function or not, and always disable
		 * for all functions on the given path: this means 0,2,4,6 for
		 * path 0 and 1,3,5,7 for path 1.
		 */
		for (fid = BP_PATH(bp); fid < E2_FUNC_MAX*2; fid += 2) {
			if (fid == BP_ABS_FUNC(bp)) {
				REG_WR(bp,
				    PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER,
				    1);
				continue;
			}

			bnx2x_pretend_func(bp, fid);
			/* clear pf enable */
			bnx2x_pf_disable(bp);
			bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
		}
	}

	bnx2x_init_block(bp, PXP_BLOCK, COMMON_STAGE);
	if (CHIP_IS_E1(bp)) {
		/* enable HW interrupt from PXP on USDM overflow
		   bit 16 on INT_MASK_0 */
		REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
	}

	bnx2x_init_block(bp, PXP2_BLOCK, COMMON_STAGE);
	bnx2x_init_pxp(bp);

#ifdef __BIG_ENDIAN
	REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1);
	REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1);
	REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
	REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
	REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
	/* make sure this value is 0 */
	REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 0);

/*	REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */
	REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1);
	REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1);
	REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1);
	REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
#endif

	bnx2x_ilt_init_page_size(bp, INITOP_SET);

	if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp))
		REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1);

	/* let the HW do its magic ... */
	msleep(100);
	/* finish PXP init */
	val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE);
	if (val != 1) {
		BNX2X_ERR("PXP2 CFG failed\n");
		return -EBUSY;
	}
	val = REG_RD(bp, PXP2_REG_RD_INIT_DONE);
	if (val != 1) {
		BNX2X_ERR("PXP2 RD_INIT failed\n");
		return -EBUSY;
	}

	/* Timers bug workaround, E2 only. We need to set the entire ILT to
	 * have entries with value "0" and valid bit on.
	 * This needs to be done by the first PF that is loaded in a path
	 * (i.e. common phase)
	 */
	if (CHIP_IS_E2(bp)) {
		struct ilt_client_info ilt_cli;
		struct bnx2x_ilt ilt;
		memset(&ilt_cli, 0, sizeof(struct ilt_client_info));
		memset(&ilt, 0, sizeof(struct bnx2x_ilt));

		/* initialize dummy TM client */
		ilt_cli.start = 0;
		ilt_cli.end = ILT_NUM_PAGE_ENTRIES - 1;
		ilt_cli.client_num = ILT_CLIENT_TM;

		/* Step 1: set zeroes to all ilt page entries with valid bit on
		 * Step 2: set the timers first/last ilt entry to point
		 * to the entire range to prevent ILT range error for 3rd/4th
		 * vnic (this code assumes existence of the vnic)
		 *
		 * both steps performed by call to bnx2x_ilt_client_init_op()
		 * with dummy TM client
		 *
		 * we must use pretend since PXP2_REG_RQ_##blk##_FIRST_ILT
		 * and its brother are split registers
		 */
		bnx2x_pretend_func(bp, (BP_PATH(bp) + 6));
		bnx2x_ilt_client_init_op_ilt(bp, &ilt, &ilt_cli, INITOP_CLEAR);
		bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));

		REG_WR(bp, PXP2_REG_RQ_DRAM_ALIGN, BNX2X_PXP_DRAM_ALIGN);
		REG_WR(bp, PXP2_REG_RQ_DRAM_ALIGN_RD, BNX2X_PXP_DRAM_ALIGN);
		REG_WR(bp, PXP2_REG_RQ_DRAM_ALIGN_SEL, 1);
	}

	REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
	REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);

	if (CHIP_IS_E2(bp)) {
		int factor = CHIP_REV_IS_EMUL(bp) ? 1000 :
				(CHIP_REV_IS_FPGA(bp) ? 400 : 0);
		bnx2x_init_block(bp, PGLUE_B_BLOCK, COMMON_STAGE);

		bnx2x_init_block(bp, ATC_BLOCK, COMMON_STAGE);

		/* let the HW do its magic ... */
		do {
			msleep(200);
			val = REG_RD(bp, ATC_REG_ATC_INIT_DONE);
		} while (factor-- && (val != 1));

		if (val != 1) {
			BNX2X_ERR("ATC_INIT failed\n");
			return -EBUSY;
		}
	}

	bnx2x_init_block(bp, DMAE_BLOCK, COMMON_STAGE);

	/* clean the DMAE memory */
	bp->dmae_ready = 1;
	bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8);

	bnx2x_init_block(bp, TCM_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, UCM_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, CCM_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, XCM_BLOCK, COMMON_STAGE);

	bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3);
	bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3);
	bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3);
	bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);

	bnx2x_init_block(bp, QM_BLOCK, COMMON_STAGE);

	if (CHIP_MODE_IS_4_PORT(bp))
		bnx2x_init_block(bp, QM_4PORT_BLOCK, COMMON_STAGE);

	/* QM queues pointers table */
	bnx2x_qm_init_ptr_table(bp, bp->qm_cid_count, INITOP_SET);

	/* soft reset pulse */
	REG_WR(bp, QM_REG_SOFT_RESET, 1);
	REG_WR(bp, QM_REG_SOFT_RESET, 0);

#ifdef BCM_CNIC
	bnx2x_init_block(bp, TIMERS_BLOCK, COMMON_STAGE);
#endif

	bnx2x_init_block(bp, DQ_BLOCK, COMMON_STAGE);
	REG_WR(bp, DORQ_REG_DPM_CID_OFST, BNX2X_DB_SHIFT);

	if (!CHIP_REV_IS_SLOW(bp)) {
		/* enable hw interrupt from doorbell Q */
		REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
	}

	bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
	if (CHIP_MODE_IS_4_PORT(bp)) {
		REG_WR(bp, BRB1_REG_FULL_LB_XOFF_THRESHOLD, 248);
		REG_WR(bp, BRB1_REG_FULL_LB_XON_THRESHOLD, 328);
	}

	bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
	REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
#ifndef BCM_CNIC
	/* set NIC mode */
	REG_WR(bp, PRS_REG_NIC_MODE, 1);
#endif
	if (!CHIP_IS_E1(bp))
		REG_WR(bp, PRS_REG_E1HOV_MODE, IS_MF(bp));

	if (CHIP_IS_E2(bp)) {
		/* Bit-map indicating which L2 hdrs may appear after the
		   basic Ethernet header */
		int has_ovlan = IS_MF(bp);
		REG_WR(bp, PRS_REG_HDRS_AFTER_BASIC, (has_ovlan ? 7 : 6));
		REG_WR(bp, PRS_REG_MUST_HAVE_HDRS, (has_ovlan ? 1 : 0));
	}

	bnx2x_init_block(bp, TSDM_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, CSDM_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, USDM_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, XSDM_BLOCK, COMMON_STAGE);

	bnx2x_init_fill(bp, TSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
	bnx2x_init_fill(bp, USEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
	bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
	bnx2x_init_fill(bp, XSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));

	bnx2x_init_block(bp, TSEM_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, USEM_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, CSEM_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, XSEM_BLOCK, COMMON_STAGE);

	if (CHIP_MODE_IS_4_PORT(bp))
		bnx2x_init_block(bp, XSEM_4PORT_BLOCK, COMMON_STAGE);

	/* sync semi rtc */
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
	       0x80000000);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
	       0x80000000);

	bnx2x_init_block(bp, UPB_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, XPB_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, PBF_BLOCK, COMMON_STAGE);

	if (CHIP_IS_E2(bp)) {
		int has_ovlan = IS_MF(bp);
		REG_WR(bp, PBF_REG_HDRS_AFTER_BASIC, (has_ovlan ? 7 : 6));
		REG_WR(bp, PBF_REG_MUST_HAVE_HDRS, (has_ovlan ? 1 : 0));
	}

	REG_WR(bp, SRC_REG_SOFT_RST, 1);
	for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4)
		REG_WR(bp, i, random32());

	bnx2x_init_block(bp, SRCH_BLOCK, COMMON_STAGE);
#ifdef BCM_CNIC
	REG_WR(bp, SRC_REG_KEYSEARCH_0, 0x63285672);
	REG_WR(bp, SRC_REG_KEYSEARCH_1, 0x24b8f2cc);
	REG_WR(bp, SRC_REG_KEYSEARCH_2, 0x223aef9b);
	REG_WR(bp, SRC_REG_KEYSEARCH_3, 0x26001e3a);
	REG_WR(bp, SRC_REG_KEYSEARCH_4, 0x7ae91116);
	REG_WR(bp, SRC_REG_KEYSEARCH_5, 0x5ce5230b);
	REG_WR(bp, SRC_REG_KEYSEARCH_6, 0x298d8adf);
	REG_WR(bp, SRC_REG_KEYSEARCH_7, 0x6eb0ff09);
	REG_WR(bp, SRC_REG_KEYSEARCH_8, 0x1830f82f);
	REG_WR(bp, SRC_REG_KEYSEARCH_9, 0x01e46be7);
#endif
	REG_WR(bp, SRC_REG_SOFT_RST, 0);

	if (sizeof(union cdu_context) != 1024)
		/* we currently assume that a context is 1024 bytes */
		dev_alert(&bp->pdev->dev, "please adjust the size "
			  "of cdu_context(%ld)\n",
			  (long)sizeof(union cdu_context));

	bnx2x_init_block(bp, CDU_BLOCK, COMMON_STAGE);
	val = (4 << 24) + (0 << 12) + 1024;
	REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val);

	bnx2x_init_block(bp, CFC_BLOCK, COMMON_STAGE);
	REG_WR(bp, CFC_REG_INIT_REG, 0x7FF);
	/* enable context validation interrupt from CFC */
	REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);

	/* set the thresholds to prevent CFC/CDU race */
	REG_WR(bp, CFC_REG_DEBUG0, 0x20020000);

	bnx2x_init_block(bp, HC_BLOCK, COMMON_STAGE);

	if (CHIP_IS_E2(bp) && BP_NOMCP(bp))
		REG_WR(bp, IGU_REG_RESET_MEMORIES, 0x36);

	bnx2x_init_block(bp, IGU_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, MISC_AEU_BLOCK, COMMON_STAGE);

	bnx2x_init_block(bp, PXPCS_BLOCK, COMMON_STAGE);
	/* Reset PCIE errors for debug */
	REG_WR(bp, 0x2814, 0xffffffff);
	REG_WR(bp, 0x3820, 0xffffffff);

	if (CHIP_IS_E2(bp)) {
		REG_WR(bp, PCICFG_OFFSET + PXPCS_TL_CONTROL_5,
			   (PXPCS_TL_CONTROL_5_ERR_UNSPPORT1 |
				PXPCS_TL_CONTROL_5_ERR_UNSPPORT));
		REG_WR(bp, PCICFG_OFFSET + PXPCS_TL_FUNC345_STAT,
			   (PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT4 |
				PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT3 |
				PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT2));
		REG_WR(bp, PCICFG_OFFSET + PXPCS_TL_FUNC678_STAT,
			   (PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT7 |
				PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT6 |
				PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT5));
	}

	bnx2x_init_block(bp, EMAC0_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, EMAC1_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, DBU_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, DBG_BLOCK, COMMON_STAGE);

	bnx2x_init_block(bp, NIG_BLOCK, COMMON_STAGE);
	if (!CHIP_IS_E1(bp)) {
		REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_MF(bp));
		REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_MF(bp));
	}
	if (CHIP_IS_E2(bp)) {
		/* Bit-map indicating which L2 hdrs may appear after the
		   basic Ethernet header */
		REG_WR(bp, NIG_REG_P0_HDRS_AFTER_BASIC, (IS_MF(bp) ? 7 : 6));
	}

	if (CHIP_REV_IS_SLOW(bp))
		msleep(200);

	/* finish CFC init */
	val = reg_poll(bp, CFC_REG_LL_INIT_DONE, 1, 100, 10);
	if (val != 1) {
		BNX2X_ERR("CFC LL_INIT failed\n");
		return -EBUSY;
	}
	val = reg_poll(bp, CFC_REG_AC_INIT_DONE, 1, 100, 10);
	if (val != 1) {
		BNX2X_ERR("CFC AC_INIT failed\n");
		return -EBUSY;
	}
	val = reg_poll(bp, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
	if (val != 1) {
		BNX2X_ERR("CFC CAM_INIT failed\n");
		return -EBUSY;
	}
	REG_WR(bp, CFC_REG_DEBUG0, 0);

	if (CHIP_IS_E1(bp)) {
		/* read NIG statistic
		   to see if this is our first up since powerup */
		bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
		val = *bnx2x_sp(bp, wb_data[0]);

		/* do internal memory self test */
		if ((val == 0) && bnx2x_int_mem_test(bp)) {
			BNX2X_ERR("internal mem self test failed\n");
			return -EBUSY;
		}
	}

	bp->port.need_hw_lock = bnx2x_hw_lock_required(bp,
						       bp->common.shmem_base,
						       bp->common.shmem2_base);

	bnx2x_setup_fan_failure_detection(bp);

	/* clear PXP2 attentions */
	REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0);

	enable_blocks_attention(bp);
	if (CHIP_PARITY_SUPPORTED(bp))
		enable_blocks_parity(bp);

	if (!BP_NOMCP(bp)) {
		/* In E2 2-port mode, the same ext phy is used for both paths */
		if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) ||
		    CHIP_IS_E1x(bp)) {
			u32 shmem_base[2], shmem2_base[2];
			shmem_base[0] = bp->common.shmem_base;
			shmem2_base[0] = bp->common.shmem2_base;
			if (CHIP_IS_E2(bp)) {
				shmem_base[1] =
					SHMEM2_RD(bp, other_shmem_base_addr);
				shmem2_base[1] =
					SHMEM2_RD(bp, other_shmem2_base_addr);
			}
			bnx2x_acquire_phy_lock(bp);
			bnx2x_common_init_phy(bp, shmem_base, shmem2_base,
					      bp->common.chip_id);
			bnx2x_release_phy_lock(bp);
		}
	} else
		BNX2X_ERR("Bootcode is missing - can not initialize link\n");

	return 0;
}

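/* Port-phase HW init, run once per port: initializes the per-port
 * instances of the HW blocks and the port's pause thresholds.
 */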
Dmitry Kravkov523224a2010-10-06 03:23:26 +00005243static int bnx2x_init_hw_port(struct bnx2x *bp)
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005244{
5245 int port = BP_PORT(bp);
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07005246 int init_stage = port ? PORT1_STAGE : PORT0_STAGE;
Eilon Greenstein1c063282009-02-12 08:36:43 +00005247 u32 low, high;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005248 u32 val;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005249
Vladislav Zolotarovcdaa7cb2010-04-19 01:13:57 +00005250 DP(BNX2X_MSG_MCP, "starting port init port %d\n", port);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005251
5252 REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005253
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07005254 bnx2x_init_block(bp, PXP_BLOCK, init_stage);
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07005255 bnx2x_init_block(bp, PXP2_BLOCK, init_stage);
Eilon Greensteinca003922009-08-12 22:53:28 -07005256
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00005257 /* Timers bug workaround: disables the pf_master bit in pglue at
5258 * common phase, we need to enable it here before any dmae access are
5259 * attempted. Therefore we manually added the enable-master to the
5260 * port phase (it also happens in the function phase)
5261 */
5262 if (CHIP_IS_E2(bp))
5263 REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1);

	bnx2x_init_block(bp, TCM_BLOCK, init_stage);
	bnx2x_init_block(bp, UCM_BLOCK, init_stage);
	bnx2x_init_block(bp, CCM_BLOCK, init_stage);
	bnx2x_init_block(bp, XCM_BLOCK, init_stage);

	/* QM cid (connection) count */
	bnx2x_qm_init_cid_count(bp, bp->qm_cid_count, INITOP_SET);

#ifdef BCM_CNIC
	bnx2x_init_block(bp, TIMERS_BLOCK, init_stage);
	REG_WR(bp, TM_REG_LIN0_SCAN_TIME + port*4, 20);
	REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + port*4, 31);
#endif

	bnx2x_init_block(bp, DQ_BLOCK, init_stage);

	if (CHIP_MODE_IS_4_PORT(bp))
		bnx2x_init_block(bp, QM_4PORT_BLOCK, init_stage);

	if (CHIP_IS_E1(bp) || CHIP_IS_E1H(bp)) {
		bnx2x_init_block(bp, BRB1_BLOCK, init_stage);
		if (CHIP_REV_IS_SLOW(bp) && CHIP_IS_E1(bp)) {
			/* no pause for emulation and FPGA */
			low = 0;
			high = 513;
		} else {
			if (IS_MF(bp))
				low = ((bp->flags & ONE_PORT_FLAG) ? 160 : 246);
			else if (bp->dev->mtu > 4096) {
				if (bp->flags & ONE_PORT_FLAG)
					low = 160;
				else {
					val = bp->dev->mtu;
					/* (24*1024 + val*4)/256 */
					low = 96 + (val/64) +
					      ((val % 64) ? 1 : 0);
				}
			} else
				low = ((bp->flags & ONE_PORT_FLAG) ? 80 : 160);
			high = low + 56;	/* 14*1024/256 */
		}
		REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0 + port*4, low);
		REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0 + port*4, high);
	}

	if (CHIP_MODE_IS_4_PORT(bp)) {
		REG_WR(bp, BRB1_REG_PAUSE_0_XOFF_THRESHOLD_0 + port*8, 248);
		REG_WR(bp, BRB1_REG_PAUSE_0_XON_THRESHOLD_0 + port*8, 328);
		REG_WR(bp, (BP_PORT(bp) ? BRB1_REG_MAC_GUARANTIED_1 :
				BRB1_REG_MAC_GUARANTIED_0), 40);
	}

	bnx2x_init_block(bp, PRS_BLOCK, init_stage);

	bnx2x_init_block(bp, TSDM_BLOCK, init_stage);
	bnx2x_init_block(bp, CSDM_BLOCK, init_stage);
	bnx2x_init_block(bp, USDM_BLOCK, init_stage);
	bnx2x_init_block(bp, XSDM_BLOCK, init_stage);

	bnx2x_init_block(bp, TSEM_BLOCK, init_stage);
	bnx2x_init_block(bp, USEM_BLOCK, init_stage);
	bnx2x_init_block(bp, CSEM_BLOCK, init_stage);
	bnx2x_init_block(bp, XSEM_BLOCK, init_stage);
	if (CHIP_MODE_IS_4_PORT(bp))
		bnx2x_init_block(bp, XSEM_4PORT_BLOCK, init_stage);

	bnx2x_init_block(bp, UPB_BLOCK, init_stage);
	bnx2x_init_block(bp, XPB_BLOCK, init_stage);

	bnx2x_init_block(bp, PBF_BLOCK, init_stage);

	if (!CHIP_IS_E2(bp)) {
		/* configure PBF to work without PAUSE mtu 9000 */
		REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);

		/* update threshold */
		REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
		/* update init credit */
		REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);

		/* probe changes */
		REG_WR(bp, PBF_REG_INIT_P0 + port*4, 1);
		udelay(50);
		REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0);
	}

#ifdef BCM_CNIC
	bnx2x_init_block(bp, SRCH_BLOCK, init_stage);
#endif
	bnx2x_init_block(bp, CDU_BLOCK, init_stage);
	bnx2x_init_block(bp, CFC_BLOCK, init_stage);

	if (CHIP_IS_E1(bp)) {
		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
	}
	bnx2x_init_block(bp, HC_BLOCK, init_stage);

	bnx2x_init_block(bp, IGU_BLOCK, init_stage);

	bnx2x_init_block(bp, MISC_AEU_BLOCK, init_stage);
	/* init aeu_mask_attn_func_0/1:
	 * - SF mode: bits 3-7 are masked. only bits 0-2 are in use
	 * - MF mode: bit 3 is masked. bits 0-2 are in use as in SF
	 *            bits 4-7 are used for "per vn group attention" */
	REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4,
	       (IS_MF(bp) ? 0xF7 : 0x7));

	bnx2x_init_block(bp, PXPCS_BLOCK, init_stage);
	bnx2x_init_block(bp, EMAC0_BLOCK, init_stage);
	bnx2x_init_block(bp, EMAC1_BLOCK, init_stage);
	bnx2x_init_block(bp, DBU_BLOCK, init_stage);
	bnx2x_init_block(bp, DBG_BLOCK, init_stage);

	bnx2x_init_block(bp, NIG_BLOCK, init_stage);

	REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);

	if (!CHIP_IS_E1(bp)) {
		/* 0x2 disable mf_ov, 0x1 enable */
		REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
		       (IS_MF(bp) ? 0x1 : 0x2));

		if (CHIP_IS_E2(bp)) {
			val = 0;
			switch (bp->mf_mode) {
			case MULTI_FUNCTION_SD:
				val = 1;
				break;
			case MULTI_FUNCTION_SI:
				val = 2;
				break;
			}

			REG_WR(bp, (BP_PORT(bp) ? NIG_REG_LLH1_CLS_TYPE :
						  NIG_REG_LLH0_CLS_TYPE), val);
		}
		{
			REG_WR(bp, NIG_REG_LLFC_ENABLE_0 + port*4, 0);
			REG_WR(bp, NIG_REG_LLFC_OUT_EN_0 + port*4, 0);
			REG_WR(bp, NIG_REG_PAUSE_ENABLE_0 + port*4, 1);
		}
	}

	bnx2x_init_block(bp, MCP_BLOCK, init_stage);
	bnx2x_init_block(bp, DMAE_BLOCK, init_stage);
	bp->port.need_hw_lock = bnx2x_hw_lock_required(bp,
						       bp->common.shmem_base,
						       bp->common.shmem2_base);
	if (bnx2x_fan_failure_det_req(bp, bp->common.shmem_base,
				      bp->common.shmem2_base, port)) {
		u32 reg_addr = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
				       MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
		val = REG_RD(bp, reg_addr);
		val |= AEU_INPUTS_ATTN_BITS_SPIO5;
		REG_WR(bp, reg_addr, val);
	}
	bnx2x__link_reset(bp);

	return 0;
}

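/*
 * Illustrative sketch, not part of the driver flow: the BRB low pause
 * threshold computed in bnx2x_init_hw_port() above encodes
 * (24*1024 + mtu*4)/256 rounded up, in 256-byte units; for mtu = 9000
 * that is 96 + 140 + 1 = 237. The helper name below is hypothetical.
 */
static inline u32 bnx2x_brb_low_thresh(u32 mtu)
{
	/* 96 == 24*1024/256; every started 64 bytes of MTU adds one unit */
	return 96 + (mtu / 64) + ((mtu % 64) ? 1 : 0);
}
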
static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr)
{
	int reg;

	if (CHIP_IS_E1(bp))
		reg = PXP2_REG_RQ_ONCHIP_AT + index*8;
	else
		reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;

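	/* Each on-chip address table entry is 8 bytes wide (hence the
	 * index*8 stride above), so the 64-bit DMA address is written as
	 * a wide-bus pair; the ONCHIP_ADDR1()/ONCHIP_ADDR2() macros from
	 * bnx2x.h split the page-aligned address into the two 32-bit
	 * words the PXP2 expects.
	 */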
	bnx2x_wb_wr(bp, reg, ONCHIP_ADDR1(addr), ONCHIP_ADDR2(addr));
}

static inline void bnx2x_igu_clear_sb(struct bnx2x *bp, u8 idu_sb_id)
{
	bnx2x_igu_clear_sb_gen(bp, idu_sb_id, true /*PF*/);
}

static inline void bnx2x_clear_func_ilt(struct bnx2x *bp, u32 func)
{
	u32 i, base = FUNC_ILT_BASE(func);
	for (i = base; i < base + ILT_PER_FUNC; i++)
		bnx2x_ilt_wr(bp, i, 0);
}

static int bnx2x_init_hw_func(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	struct bnx2x_ilt *ilt = BP_ILT(bp);
	u16 cdu_ilt_start;
	u32 addr, val;
	u32 main_mem_base, main_mem_size, main_mem_prty_clr;
	int i, main_mem_width;

	DP(BNX2X_MSG_MCP, "starting func init func %d\n", func);

	/* set MSI reconfigure capability */
	if (bp->common.int_block == INT_BLOCK_HC) {
		addr = (port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0);
		val = REG_RD(bp, addr);
		val |= HC_CONFIG_0_REG_MSI_ATTN_EN_0;
		REG_WR(bp, addr, val);
	}

	ilt = BP_ILT(bp);
	cdu_ilt_start = ilt->clients[ILT_CLIENT_CDU].start;

	for (i = 0; i < L2_ILT_LINES(bp); i++) {
		ilt->lines[cdu_ilt_start + i].page =
			bp->context.vcxt + (ILT_PAGE_CIDS * i);
		ilt->lines[cdu_ilt_start + i].page_mapping =
			bp->context.cxt_mapping + (CDU_ILT_PAGE_SZ * i);
		/* cdu ilt pages are allocated manually so there's no need
		 * to set the size */
	}
	bnx2x_ilt_init_op(bp, INITOP_SET);

#ifdef BCM_CNIC
	bnx2x_src_init_t2(bp, bp->t2, bp->t2_mapping, SRC_CONN_NUM);

	/* T1 hash bits value determines the T1 number of entries */
	REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + port*4, SRC_HASH_BITS);
#endif

#ifndef BCM_CNIC
	/* set NIC mode */
	REG_WR(bp, PRS_REG_NIC_MODE, 1);
#endif  /* BCM_CNIC */

	if (CHIP_IS_E2(bp)) {
		u32 pf_conf = IGU_PF_CONF_FUNC_EN;

		/* Turn on a single ISR mode in IGU if the driver is going
		 * to use INT#x or MSI
		 */
		if (!(bp->flags & USING_MSIX_FLAG))
			pf_conf |= IGU_PF_CONF_SINGLE_ISR_EN;
		/*
		 * Timers bug workaround: function init part.
		 * Need to wait 20msec after initializing ILT,
		 * needed to make sure there are no requests in
		 * one of the PXP internal queues with "old" ILT addresses
		 */
		msleep(20);
		/*
		 * Master enable - needed due to WB DMAE writes performed
		 * before this register is re-initialized as part of the
		 * regular function init
		 */
		REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1);
		/* Enable the function in IGU */
		REG_WR(bp, IGU_REG_PF_CONFIGURATION, pf_conf);
	}

	bp->dmae_ready = 1;

	bnx2x_init_block(bp, PGLUE_B_BLOCK, FUNC0_STAGE + func);

	if (CHIP_IS_E2(bp))
		REG_WR(bp, PGLUE_B_REG_WAS_ERROR_PF_7_0_CLR, func);

	bnx2x_init_block(bp, MISC_BLOCK, FUNC0_STAGE + func);
	bnx2x_init_block(bp, TCM_BLOCK, FUNC0_STAGE + func);
	bnx2x_init_block(bp, UCM_BLOCK, FUNC0_STAGE + func);
	bnx2x_init_block(bp, CCM_BLOCK, FUNC0_STAGE + func);
	bnx2x_init_block(bp, XCM_BLOCK, FUNC0_STAGE + func);
	bnx2x_init_block(bp, TSEM_BLOCK, FUNC0_STAGE + func);
	bnx2x_init_block(bp, USEM_BLOCK, FUNC0_STAGE + func);
	bnx2x_init_block(bp, CSEM_BLOCK, FUNC0_STAGE + func);
	bnx2x_init_block(bp, XSEM_BLOCK, FUNC0_STAGE + func);

	if (CHIP_IS_E2(bp)) {
		REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_PATH_ID_OFFSET,
		       BP_PATH(bp));
		REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_PATH_ID_OFFSET,
		       BP_PATH(bp));
	}

	if (CHIP_MODE_IS_4_PORT(bp))
		bnx2x_init_block(bp, XSEM_4PORT_BLOCK, FUNC0_STAGE + func);

	if (CHIP_IS_E2(bp))
		REG_WR(bp, QM_REG_PF_EN, 1);

	bnx2x_init_block(bp, QM_BLOCK, FUNC0_STAGE + func);

	if (CHIP_MODE_IS_4_PORT(bp))
		bnx2x_init_block(bp, QM_4PORT_BLOCK, FUNC0_STAGE + func);

	bnx2x_init_block(bp, TIMERS_BLOCK, FUNC0_STAGE + func);
	bnx2x_init_block(bp, DQ_BLOCK, FUNC0_STAGE + func);
	bnx2x_init_block(bp, BRB1_BLOCK, FUNC0_STAGE + func);
	bnx2x_init_block(bp, PRS_BLOCK, FUNC0_STAGE + func);
	bnx2x_init_block(bp, TSDM_BLOCK, FUNC0_STAGE + func);
	bnx2x_init_block(bp, CSDM_BLOCK, FUNC0_STAGE + func);
	bnx2x_init_block(bp, USDM_BLOCK, FUNC0_STAGE + func);
	bnx2x_init_block(bp, XSDM_BLOCK, FUNC0_STAGE + func);
	bnx2x_init_block(bp, UPB_BLOCK, FUNC0_STAGE + func);
	bnx2x_init_block(bp, XPB_BLOCK, FUNC0_STAGE + func);
	bnx2x_init_block(bp, PBF_BLOCK, FUNC0_STAGE + func);
	if (CHIP_IS_E2(bp))
		REG_WR(bp, PBF_REG_DISABLE_PF, 0);

	bnx2x_init_block(bp, CDU_BLOCK, FUNC0_STAGE + func);

	bnx2x_init_block(bp, CFC_BLOCK, FUNC0_STAGE + func);

	if (CHIP_IS_E2(bp))
		REG_WR(bp, CFC_REG_WEAK_ENABLE_PF, 1);

	if (IS_MF(bp)) {
		REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
		REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, bp->mf_ov);
	}

	bnx2x_init_block(bp, MISC_AEU_BLOCK, FUNC0_STAGE + func);

	/* HC init per function */
	if (bp->common.int_block == INT_BLOCK_HC) {
		if (CHIP_IS_E1H(bp)) {
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);

			REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
			REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
		}
		bnx2x_init_block(bp, HC_BLOCK, FUNC0_STAGE + func);

	} else {
		int num_segs, sb_idx, prod_offset;

		REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);

		if (CHIP_IS_E2(bp)) {
			REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, 0);
			REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, 0);
		}

		bnx2x_init_block(bp, IGU_BLOCK, FUNC0_STAGE + func);

		if (CHIP_IS_E2(bp)) {
			int dsb_idx = 0;
			/**
			 * Producer memory:
			 * E2 mode: address 0-135 match to the mapping memory;
			 * 136 - PF0 default prod; 137 - PF1 default prod;
			 * 138 - PF2 default prod; 139 - PF3 default prod;
			 * 140 - PF0 attn prod;    141 - PF1 attn prod;
			 * 142 - PF2 attn prod;    143 - PF3 attn prod;
			 * 144-147 reserved.
			 *
			 * E1.5 mode - In backward compatible mode;
			 * for non default SB; each even line in the memory
			 * holds the U producer and each odd line hold
			 * the C producer. The first 128 producers are for
			 * NDSB (PF0 - 0-31; PF1 - 32-63 and so on). The last 20
			 * producers are for the DSB for each PF.
			 * Each PF has five segments: (the order inside each
			 * segment is PF0; PF1; PF2; PF3) - 128-131 U prods;
			 * 132-135 C prods; 136-139 X prods; 140-143 T prods;
			 * 144-147 attn prods;
			 */
			/* non-default-status-blocks */
			num_segs = CHIP_INT_MODE_IS_BC(bp) ?
				IGU_BC_NDSB_NUM_SEGS : IGU_NORM_NDSB_NUM_SEGS;
			for (sb_idx = 0; sb_idx < bp->igu_sb_cnt; sb_idx++) {
				prod_offset = (bp->igu_base_sb + sb_idx) *
					num_segs;

				for (i = 0; i < num_segs; i++) {
					addr = IGU_REG_PROD_CONS_MEMORY +
							(prod_offset + i) * 4;
					REG_WR(bp, addr, 0);
				}
				/* send consumer update with value 0 */
				bnx2x_ack_sb(bp, bp->igu_base_sb + sb_idx,
					     USTORM_ID, 0, IGU_INT_NOP, 1);
				bnx2x_igu_clear_sb(bp,
						   bp->igu_base_sb + sb_idx);
			}

			/* default-status-blocks */
			num_segs = CHIP_INT_MODE_IS_BC(bp) ?
				IGU_BC_DSB_NUM_SEGS : IGU_NORM_DSB_NUM_SEGS;

			if (CHIP_MODE_IS_4_PORT(bp))
				dsb_idx = BP_FUNC(bp);
			else
				dsb_idx = BP_E1HVN(bp);

			prod_offset = (CHIP_INT_MODE_IS_BC(bp) ?
				       IGU_BC_BASE_DSB_PROD + dsb_idx :
				       IGU_NORM_BASE_DSB_PROD + dsb_idx);

			for (i = 0; i < (num_segs * E1HVN_MAX);
			     i += E1HVN_MAX) {
				addr = IGU_REG_PROD_CONS_MEMORY +
							(prod_offset + i)*4;
				REG_WR(bp, addr, 0);
			}
			/* send consumer update with 0 */
			if (CHIP_INT_MODE_IS_BC(bp)) {
				bnx2x_ack_sb(bp, bp->igu_dsb_id,
					     USTORM_ID, 0, IGU_INT_NOP, 1);
				bnx2x_ack_sb(bp, bp->igu_dsb_id,
					     CSTORM_ID, 0, IGU_INT_NOP, 1);
				bnx2x_ack_sb(bp, bp->igu_dsb_id,
					     XSTORM_ID, 0, IGU_INT_NOP, 1);
				bnx2x_ack_sb(bp, bp->igu_dsb_id,
					     TSTORM_ID, 0, IGU_INT_NOP, 1);
				bnx2x_ack_sb(bp, bp->igu_dsb_id,
					     ATTENTION_ID, 0, IGU_INT_NOP, 1);
			} else {
				bnx2x_ack_sb(bp, bp->igu_dsb_id,
					     USTORM_ID, 0, IGU_INT_NOP, 1);
				bnx2x_ack_sb(bp, bp->igu_dsb_id,
					     ATTENTION_ID, 0, IGU_INT_NOP, 1);
			}
			bnx2x_igu_clear_sb(bp, bp->igu_dsb_id);

			/* !!! these should become driver const once
			   rf-tool supports split-68 const */
			REG_WR(bp, IGU_REG_SB_INT_BEFORE_MASK_LSB, 0);
			REG_WR(bp, IGU_REG_SB_INT_BEFORE_MASK_MSB, 0);
			REG_WR(bp, IGU_REG_SB_MASK_LSB, 0);
			REG_WR(bp, IGU_REG_SB_MASK_MSB, 0);
			REG_WR(bp, IGU_REG_PBA_STATUS_LSB, 0);
			REG_WR(bp, IGU_REG_PBA_STATUS_MSB, 0);
		}
	}

	/* Reset PCIE errors for debug */
	REG_WR(bp, 0x2114, 0xffffffff);
	REG_WR(bp, 0x2120, 0xffffffff);

	bnx2x_init_block(bp, EMAC0_BLOCK, FUNC0_STAGE + func);
	bnx2x_init_block(bp, EMAC1_BLOCK, FUNC0_STAGE + func);
	bnx2x_init_block(bp, DBU_BLOCK, FUNC0_STAGE + func);
	bnx2x_init_block(bp, DBG_BLOCK, FUNC0_STAGE + func);
	bnx2x_init_block(bp, MCP_BLOCK, FUNC0_STAGE + func);
	bnx2x_init_block(bp, DMAE_BLOCK, FUNC0_STAGE + func);

	if (CHIP_IS_E1x(bp)) {
		main_mem_size = HC_REG_MAIN_MEMORY_SIZE / 2; /*dwords*/
		main_mem_base = HC_REG_MAIN_MEMORY +
				BP_PORT(bp) * (main_mem_size * 4);
		main_mem_prty_clr = HC_REG_HC_PRTY_STS_CLR;
		main_mem_width = 8;

		val = REG_RD(bp, main_mem_prty_clr);
		if (val)
			DP(BNX2X_MSG_MCP, "Hmmm... Parity errors in HC "
					  "block during function init (0x%x)!\n",
			   val);

		/* Clear "false" parity errors in MSI-X table */
		for (i = main_mem_base;
		     i < main_mem_base + main_mem_size * 4;
		     i += main_mem_width) {
			bnx2x_read_dmae(bp, i, main_mem_width / 4);
			bnx2x_write_dmae(bp, bnx2x_sp_mapping(bp, wb_data),
					 i, main_mem_width / 4);
		}
		/* Clear HC parity attention */
		REG_RD(bp, main_mem_prty_clr);
	}

	bnx2x_phy_probe(&bp->link_params);

	return 0;
}

int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
{
	int rc = 0;

	DP(BNX2X_MSG_MCP, "function %d load_code %x\n",
	   BP_ABS_FUNC(bp), load_code);

	bp->dmae_ready = 0;
	mutex_init(&bp->dmae_mutex);
	rc = bnx2x_gunzip_init(bp);
	if (rc)
		return rc;

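	/* The load codes cascade on purpose: a COMMON load falls through
	 * to the PORT stage and a PORT load falls through to the FUNCTION
	 * stage, so the first driver instance on a chip (or port) also
	 * performs every lower-level init stage.
	 */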
	switch (load_code) {
	case FW_MSG_CODE_DRV_LOAD_COMMON:
	case FW_MSG_CODE_DRV_LOAD_COMMON_CHIP:
		rc = bnx2x_init_hw_common(bp, load_code);
		if (rc)
			goto init_hw_err;
		/* no break */

	case FW_MSG_CODE_DRV_LOAD_PORT:
		rc = bnx2x_init_hw_port(bp);
		if (rc)
			goto init_hw_err;
		/* no break */

	case FW_MSG_CODE_DRV_LOAD_FUNCTION:
		rc = bnx2x_init_hw_func(bp);
		if (rc)
			goto init_hw_err;
		break;

	default:
		BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
		break;
	}

	if (!BP_NOMCP(bp)) {
		int mb_idx = BP_FW_MB_IDX(bp);

		bp->fw_drv_pulse_wr_seq =
		   (SHMEM_RD(bp, func_mb[mb_idx].drv_pulse_mb) &
		    DRV_PULSE_SEQ_MASK);
		DP(BNX2X_MSG_MCP, "drv_pulse 0x%x\n", bp->fw_drv_pulse_wr_seq);
	}

init_hw_err:
	bnx2x_gunzip_end(bp);

	return rc;
}

void bnx2x_free_mem(struct bnx2x *bp)
{

#define BNX2X_PCI_FREE(x, y, size) \
	do { \
		if (x) { \
			dma_free_coherent(&bp->pdev->dev, size, (void *)x, y); \
			x = NULL; \
			y = 0; \
		} \
	} while (0)

#define BNX2X_FREE(x) \
	do { \
		if (x) { \
			kfree((void *)x); \
			x = NULL; \
		} \
	} while (0)

	int i;

	/* fastpath */
	/* Common */
	for_each_queue(bp, i) {
		/* status blocks */
		if (CHIP_IS_E2(bp))
			BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk.e2_sb),
				       bnx2x_fp(bp, i, status_blk_mapping),
				       sizeof(struct host_hc_status_block_e2));
		else
			BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk.e1x_sb),
				       bnx2x_fp(bp, i, status_blk_mapping),
				       sizeof(struct host_hc_status_block_e1x));
	}
	/* Rx */
	for_each_queue(bp, i) {

		/* fastpath rx rings: rx_buf rx_desc rx_comp */
		BNX2X_FREE(bnx2x_fp(bp, i, rx_buf_ring));
		BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_desc_ring),
			       bnx2x_fp(bp, i, rx_desc_mapping),
			       sizeof(struct eth_rx_bd) * NUM_RX_BD);

		BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_comp_ring),
			       bnx2x_fp(bp, i, rx_comp_mapping),
			       sizeof(struct eth_fast_path_rx_cqe) *
			       NUM_RCQ_BD);

		/* SGE ring */
		BNX2X_FREE(bnx2x_fp(bp, i, rx_page_ring));
		BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_sge_ring),
			       bnx2x_fp(bp, i, rx_sge_mapping),
			       BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
	}
	/* Tx */
	for_each_queue(bp, i) {

		/* fastpath tx rings: tx_buf tx_desc */
		BNX2X_FREE(bnx2x_fp(bp, i, tx_buf_ring));
		BNX2X_PCI_FREE(bnx2x_fp(bp, i, tx_desc_ring),
			       bnx2x_fp(bp, i, tx_desc_mapping),
			       sizeof(union eth_tx_bd_types) * NUM_TX_BD);
	}
	/* end of fastpath */

	BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
		       sizeof(struct host_sp_status_block));

	BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
		       sizeof(struct bnx2x_slowpath));

	BNX2X_PCI_FREE(bp->context.vcxt, bp->context.cxt_mapping,
		       bp->context.size);

	bnx2x_ilt_mem_op(bp, ILT_MEMOP_FREE);

	BNX2X_FREE(bp->ilt->lines);

#ifdef BCM_CNIC
	if (CHIP_IS_E2(bp))
		BNX2X_PCI_FREE(bp->cnic_sb.e2_sb, bp->cnic_sb_mapping,
			       sizeof(struct host_hc_status_block_e2));
	else
		BNX2X_PCI_FREE(bp->cnic_sb.e1x_sb, bp->cnic_sb_mapping,
			       sizeof(struct host_hc_status_block_e1x));

	BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, SRC_T2_SZ);
#endif

	BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE);

	BNX2X_PCI_FREE(bp->eq_ring, bp->eq_mapping,
		       BCM_PAGE_SIZE * NUM_EQ_PAGES);

#undef BNX2X_PCI_FREE
#undef BNX2X_FREE
}

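/* Cache pointers to the status block index arrays in the fastpath
 * structure, so fastpath code can read them without branching on the
 * chip type (the E2 and E1x status blocks differ in layout).
 */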
static inline void set_sb_shortcuts(struct bnx2x *bp, int index)
{
	union host_hc_status_block status_blk = bnx2x_fp(bp, index, status_blk);
	if (CHIP_IS_E2(bp)) {
		bnx2x_fp(bp, index, sb_index_values) =
			(__le16 *)status_blk.e2_sb->sb.index_values;
		bnx2x_fp(bp, index, sb_running_index) =
			(__le16 *)status_blk.e2_sb->sb.running_index;
	} else {
		bnx2x_fp(bp, index, sb_index_values) =
			(__le16 *)status_blk.e1x_sb->sb.index_values;
		bnx2x_fp(bp, index, sb_running_index) =
			(__le16 *)status_blk.e1x_sb->sb.running_index;
	}
}

int bnx2x_alloc_mem(struct bnx2x *bp)
{
#define BNX2X_PCI_ALLOC(x, y, size) \
	do { \
		x = dma_alloc_coherent(&bp->pdev->dev, size, y, GFP_KERNEL); \
		if (x == NULL) \
			goto alloc_mem_err; \
		memset(x, 0, size); \
	} while (0)

#define BNX2X_ALLOC(x, size) \
	do { \
		x = kzalloc(size, GFP_KERNEL); \
		if (x == NULL) \
			goto alloc_mem_err; \
	} while (0)
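
	/* Both helpers jump to alloc_mem_err on failure, so a single
	 * cleanup path (bnx2x_free_mem() under that label) unwinds any
	 * partial allocation.
	 */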

	int i;

	/* fastpath */
	/* Common */
	for_each_queue(bp, i) {
		union host_hc_status_block *sb = &bnx2x_fp(bp, i, status_blk);
		bnx2x_fp(bp, i, bp) = bp;
		/* status blocks */
		if (CHIP_IS_E2(bp))
			BNX2X_PCI_ALLOC(sb->e2_sb,
				&bnx2x_fp(bp, i, status_blk_mapping),
				sizeof(struct host_hc_status_block_e2));
		else
			BNX2X_PCI_ALLOC(sb->e1x_sb,
				&bnx2x_fp(bp, i, status_blk_mapping),
				sizeof(struct host_hc_status_block_e1x));

		set_sb_shortcuts(bp, i);
	}
	/* Rx */
	for_each_queue(bp, i) {

		/* fastpath rx rings: rx_buf rx_desc rx_comp */
		BNX2X_ALLOC(bnx2x_fp(bp, i, rx_buf_ring),
			    sizeof(struct sw_rx_bd) * NUM_RX_BD);
		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_desc_ring),
				&bnx2x_fp(bp, i, rx_desc_mapping),
				sizeof(struct eth_rx_bd) * NUM_RX_BD);

		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_comp_ring),
				&bnx2x_fp(bp, i, rx_comp_mapping),
				sizeof(struct eth_fast_path_rx_cqe) *
				NUM_RCQ_BD);

		/* SGE ring */
		BNX2X_ALLOC(bnx2x_fp(bp, i, rx_page_ring),
			    sizeof(struct sw_rx_page) * NUM_RX_SGE);
		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_sge_ring),
				&bnx2x_fp(bp, i, rx_sge_mapping),
				BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
	}
	/* Tx */
	for_each_queue(bp, i) {

		/* fastpath tx rings: tx_buf tx_desc */
		BNX2X_ALLOC(bnx2x_fp(bp, i, tx_buf_ring),
			    sizeof(struct sw_tx_bd) * NUM_TX_BD);
		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, tx_desc_ring),
				&bnx2x_fp(bp, i, tx_desc_mapping),
				sizeof(union eth_tx_bd_types) * NUM_TX_BD);
	}
	/* end of fastpath */

#ifdef BCM_CNIC
	if (CHIP_IS_E2(bp))
		BNX2X_PCI_ALLOC(bp->cnic_sb.e2_sb, &bp->cnic_sb_mapping,
				sizeof(struct host_hc_status_block_e2));
	else
		BNX2X_PCI_ALLOC(bp->cnic_sb.e1x_sb, &bp->cnic_sb_mapping,
				sizeof(struct host_hc_status_block_e1x));

	/* allocate searcher T2 table */
	BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, SRC_T2_SZ);
#endif

	BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping,
			sizeof(struct host_sp_status_block));

	BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping,
			sizeof(struct bnx2x_slowpath));

	bp->context.size = sizeof(union cdu_context) * bp->l2_cid_count;

	BNX2X_PCI_ALLOC(bp->context.vcxt, &bp->context.cxt_mapping,
			bp->context.size);

	BNX2X_ALLOC(bp->ilt->lines, sizeof(struct ilt_line) * ILT_MAX_LINES);

	if (bnx2x_ilt_mem_op(bp, ILT_MEMOP_ALLOC))
		goto alloc_mem_err;

	/* Slow path ring */
	BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE);

	/* EQ */
	BNX2X_PCI_ALLOC(bp->eq_ring, &bp->eq_mapping,
			BCM_PAGE_SIZE * NUM_EQ_PAGES);
	return 0;

alloc_mem_err:
	bnx2x_free_mem(bp);
	return -ENOMEM;

#undef BNX2X_PCI_ALLOC
#undef BNX2X_ALLOC
}

/*
 * Init service functions
 */
int bnx2x_func_start(struct bnx2x *bp)
{
	bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_FUNCTION_START, 0, 0, 0, 1);

	/* Wait for completion */
	return bnx2x_wait_ramrod(bp, BNX2X_STATE_FUNC_STARTED, 0, &(bp->state),
				 WAIT_RAMROD_COMMON);
}

int bnx2x_func_stop(struct bnx2x *bp)
{
	bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_FUNCTION_STOP, 0, 0, 0, 1);

	/* Wait for completion */
	return bnx2x_wait_ramrod(bp, BNX2X_STATE_CLOSING_WAIT4_UNLOAD,
				 0, &(bp->state), WAIT_RAMROD_COMMON);
}

/**
 * Sets a MAC in a CAM for a few L2 Clients for E1x chips
 *
 * @param bp driver descriptor
 * @param set set or clear an entry (1 or 0)
 * @param mac pointer to a buffer containing a MAC
 * @param cl_bit_vec bit vector of clients to register a MAC for
 * @param cam_offset offset in a CAM to use
 * @param is_bcast is the set MAC a broadcast address (for E1 only)
 */
static void bnx2x_set_mac_addr_gen(struct bnx2x *bp, int set, u8 *mac,
				   u32 cl_bit_vec, u8 cam_offset,
				   u8 is_bcast)
{
	struct mac_configuration_cmd *config =
		(struct mac_configuration_cmd *)bnx2x_sp(bp, mac_config);
	int ramrod_flags = WAIT_RAMROD_COMMON;

	bp->set_mac_pending = 1;
	smp_wmb();

	config->hdr.length = 1;
	config->hdr.offset = cam_offset;
	config->hdr.client_id = 0xff;
	config->hdr.reserved1 = 0;

	/* primary MAC */
	config->config_table[0].msb_mac_addr =
					swab16(*(u16 *)&mac[0]);
	config->config_table[0].middle_mac_addr =
					swab16(*(u16 *)&mac[2]);
	config->config_table[0].lsb_mac_addr =
					swab16(*(u16 *)&mac[4]);
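
	/* Worked example: for MAC 00:11:22:33:44:55 on a little-endian
	 * host, *(u16 *)&mac[0] reads 0x1100 and swab16() yields the
	 * 0x0011 the firmware expects in msb_mac_addr; the middle and
	 * lsb words follow the same pattern.
	 */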
	config->config_table[0].clients_bit_vector =
					cpu_to_le32(cl_bit_vec);
	config->config_table[0].vlan_id = 0;
	config->config_table[0].pf_id = BP_FUNC(bp);
	if (set)
		SET_FLAG(config->config_table[0].flags,
			 MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
			 T_ETH_MAC_COMMAND_SET);
	else
		SET_FLAG(config->config_table[0].flags,
			 MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
			 T_ETH_MAC_COMMAND_INVALIDATE);

	if (is_bcast)
		SET_FLAG(config->config_table[0].flags,
			 MAC_CONFIGURATION_ENTRY_BROADCAST, 1);

	DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x) PF_ID %d CLID mask %d\n",
	   (set ? "setting" : "clearing"),
	   config->config_table[0].msb_mac_addr,
	   config->config_table[0].middle_mac_addr,
	   config->config_table[0].lsb_mac_addr, BP_FUNC(bp), cl_bit_vec);

	bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_SET_MAC, 0,
		      U64_HI(bnx2x_sp_mapping(bp, mac_config)),
		      U64_LO(bnx2x_sp_mapping(bp, mac_config)), 1);

	/* Wait for a completion */
	bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, ramrod_flags);
}

int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
		      int *state_p, int flags)
{
	/* can take a while if any port is running */
	int cnt = 5000;
	u8 poll = flags & WAIT_RAMROD_POLL;
	u8 common = flags & WAIT_RAMROD_COMMON;

	DP(NETIF_MSG_IFUP, "%s for state to become %x on IDX [%d]\n",
	   poll ? "polling" : "waiting", state, idx);

	might_sleep();
	while (cnt--) {
		if (poll) {
			if (common)
				bnx2x_eq_int(bp);
			else {
				bnx2x_rx_int(bp->fp, 10);
				/* if index is different from 0
				 * the reply for some commands will
				 * be on the non default queue
				 */
				if (idx)
					bnx2x_rx_int(&bp->fp[idx], 10);
			}
		}

		mb(); /* state is changed by bnx2x_sp_event() */
		if (*state_p == state) {
#ifdef BNX2X_STOP_ON_ERROR
			DP(NETIF_MSG_IFUP, "exit (cnt %d)\n", 5000 - cnt);
#endif
			return 0;
		}

		msleep(1);

		if (bp->panic)
			return -EIO;
	}

	/* timeout! */
	BNX2X_ERR("timeout %s for state %x on IDX [%d]\n",
		  poll ? "polling" : "waiting", state, idx);
#ifdef BNX2X_STOP_ON_ERROR
	bnx2x_panic();
#endif

	return -EBUSY;
}

u8 bnx2x_e1h_cam_offset(struct bnx2x *bp, u8 rel_offset)
{
	if (CHIP_IS_E1H(bp))
		return E1H_FUNC_MAX * rel_offset + BP_FUNC(bp);
	else if (CHIP_MODE_IS_4_PORT(bp))
		return BP_FUNC(bp) * 32 + rel_offset;
	else
		return BP_VN(bp) * 32 + rel_offset;
}

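/* Worked example (illustrative): on an E1H chip, with E1H_FUNC_MAX == 8,
 * function 3 asking for relative line 1 lands on CAM entry 8*1 + 3 = 11;
 * on later chips every function (or VN) instead owns a contiguous window
 * of 32 entries. bnx2x_set_eth_mac() below and, under BCM_CNIC,
 * bnx2x_set_iscsi_eth_mac_addr() are thin wrappers that feed such
 * offsets to bnx2x_set_mac_addr_gen().
 */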
void bnx2x_set_eth_mac(struct bnx2x *bp, int set)
{
	u8 cam_offset = (CHIP_IS_E1(bp) ? (BP_PORT(bp) ? 32 : 0) :
			 bnx2x_e1h_cam_offset(bp, CAM_ETH_LINE));

	/* networking MAC */
	bnx2x_set_mac_addr_gen(bp, set, bp->dev->dev_addr,
			       (1 << bp->fp->cl_id), cam_offset, 0);

	if (CHIP_IS_E1(bp)) {
		/* broadcast MAC */
		u8 bcast[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
		bnx2x_set_mac_addr_gen(bp, set, bcast, 0, cam_offset + 1, 1);
	}
}

static void bnx2x_set_e1_mc_list(struct bnx2x *bp, u8 offset)
{
	int i = 0, old;
	struct net_device *dev = bp->dev;
	struct netdev_hw_addr *ha;
	struct mac_configuration_cmd *config_cmd = bnx2x_sp(bp, mcast_config);
	dma_addr_t config_cmd_map = bnx2x_sp_mapping(bp, mcast_config);

	netdev_for_each_mc_addr(ha, dev) {
		/* copy mac */
		config_cmd->config_table[i].msb_mac_addr =
			swab16(*(u16 *)&bnx2x_mc_addr(ha)[0]);
		config_cmd->config_table[i].middle_mac_addr =
			swab16(*(u16 *)&bnx2x_mc_addr(ha)[2]);
		config_cmd->config_table[i].lsb_mac_addr =
			swab16(*(u16 *)&bnx2x_mc_addr(ha)[4]);

		config_cmd->config_table[i].vlan_id = 0;
		config_cmd->config_table[i].pf_id = BP_FUNC(bp);
		config_cmd->config_table[i].clients_bit_vector =
			cpu_to_le32(1 << BP_L_ID(bp));

		SET_FLAG(config_cmd->config_table[i].flags,
			 MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
			 T_ETH_MAC_COMMAND_SET);

		DP(NETIF_MSG_IFUP,
		   "setting MCAST[%d] (%04x:%04x:%04x)\n", i,
		   config_cmd->config_table[i].msb_mac_addr,
		   config_cmd->config_table[i].middle_mac_addr,
		   config_cmd->config_table[i].lsb_mac_addr);
		i++;
	}
	old = config_cmd->hdr.length;
	if (old > i) {
		for (; i < old; i++) {
			if (CAM_IS_INVALID(config_cmd->
					   config_table[i])) {
				/* already invalidated */
				break;
			}
			/* invalidate */
			SET_FLAG(config_cmd->config_table[i].flags,
				 MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
				 T_ETH_MAC_COMMAND_INVALIDATE);
		}
	}

	config_cmd->hdr.length = i;
	config_cmd->hdr.offset = offset;
	config_cmd->hdr.client_id = 0xff;
	config_cmd->hdr.reserved1 = 0;

	bp->set_mac_pending = 1;
	smp_wmb();

	bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_SET_MAC, 0,
		      U64_HI(config_cmd_map), U64_LO(config_cmd_map), 1);
}

static void bnx2x_invlidate_e1_mc_list(struct bnx2x *bp)
{
	int i;
	struct mac_configuration_cmd *config_cmd = bnx2x_sp(bp, mcast_config);
	dma_addr_t config_cmd_map = bnx2x_sp_mapping(bp, mcast_config);
	int ramrod_flags = WAIT_RAMROD_COMMON;

	bp->set_mac_pending = 1;
	smp_wmb();

	for (i = 0; i < config_cmd->hdr.length; i++)
		SET_FLAG(config_cmd->config_table[i].flags,
			 MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
			 T_ETH_MAC_COMMAND_INVALIDATE);

	bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_SET_MAC, 0,
		      U64_HI(config_cmd_map), U64_LO(config_cmd_map), 1);

	/* Wait for a completion */
	bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, ramrod_flags);
}

#ifdef BCM_CNIC
/**
 * Set iSCSI MAC(s) at the next entries in the CAM after the ETH
 * MAC(s). This function will wait until the ramrod completion
 * returns.
 *
 * @param bp driver handle
 * @param set set or clear the CAM entry
 *
 * @return 0 on success, -ENODEV if the ramrod doesn't return.
 */
int bnx2x_set_iscsi_eth_mac_addr(struct bnx2x *bp, int set)
{
	u8 cam_offset = (CHIP_IS_E1(bp) ? ((BP_PORT(bp) ? 32 : 0) + 2) :
			 bnx2x_e1h_cam_offset(bp, CAM_ISCSI_ETH_LINE));
	u32 iscsi_l2_cl_id = BNX2X_ISCSI_ETH_CL_ID;
	u32 cl_bit_vec = (1 << iscsi_l2_cl_id);

	/* Send a SET_MAC ramrod */
	bnx2x_set_mac_addr_gen(bp, set, bp->iscsi_mac, cl_bit_vec,
			       cam_offset, 0);
	return 0;
}
#endif

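/* Translate the driver's queue setup (struct bnx2x_client_init_params)
 * into the firmware's client_init_ramrod_data layout; the general, Rx,
 * Tx and flow-control sections are filled field by field below.
 */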
static void bnx2x_fill_cl_init_data(struct bnx2x *bp,
				    struct bnx2x_client_init_params *params,
				    u8 activate,
				    struct client_init_ramrod_data *data)
{
	/* Clear the buffer */
	memset(data, 0, sizeof(*data));

	/* general */
	data->general.client_id = params->rxq_params.cl_id;
	data->general.statistics_counter_id = params->rxq_params.stat_id;
	data->general.statistics_en_flg =
		(params->rxq_params.flags & QUEUE_FLG_STATS) ? 1 : 0;
	data->general.activate_flg = activate;
	data->general.sp_client_id = params->rxq_params.spcl_id;

	/* Rx data */
	data->rx.tpa_en_flg =
		(params->rxq_params.flags & QUEUE_FLG_TPA) ? 1 : 0;
	data->rx.vmqueue_mode_en_flg = 0;
	data->rx.cache_line_alignment_log_size =
		params->rxq_params.cache_line_log;
	data->rx.enable_dynamic_hc =
		(params->rxq_params.flags & QUEUE_FLG_DHC) ? 1 : 0;
	data->rx.max_sges_for_packet = params->rxq_params.max_sges_pkt;
	data->rx.client_qzone_id = params->rxq_params.cl_qzone_id;
	data->rx.max_agg_size = params->rxq_params.tpa_agg_sz;

	/* We don't set drop flags */
	data->rx.drop_ip_cs_err_flg = 0;
	data->rx.drop_tcp_cs_err_flg = 0;
	data->rx.drop_ttl0_flg = 0;
	data->rx.drop_udp_cs_err_flg = 0;

	data->rx.inner_vlan_removal_enable_flg =
		(params->rxq_params.flags & QUEUE_FLG_VLAN) ? 1 : 0;
	data->rx.outer_vlan_removal_enable_flg =
		(params->rxq_params.flags & QUEUE_FLG_OV) ? 1 : 0;
	data->rx.status_block_id = params->rxq_params.fw_sb_id;
	data->rx.rx_sb_index_number = params->rxq_params.sb_cq_index;
	data->rx.bd_buff_size = cpu_to_le16(params->rxq_params.buf_sz);
	data->rx.sge_buff_size = cpu_to_le16(params->rxq_params.sge_buf_sz);
	data->rx.mtu = cpu_to_le16(params->rxq_params.mtu);
	data->rx.bd_page_base.lo =
		cpu_to_le32(U64_LO(params->rxq_params.dscr_map));
	data->rx.bd_page_base.hi =
		cpu_to_le32(U64_HI(params->rxq_params.dscr_map));
	data->rx.sge_page_base.lo =
		cpu_to_le32(U64_LO(params->rxq_params.sge_map));
	data->rx.sge_page_base.hi =
		cpu_to_le32(U64_HI(params->rxq_params.sge_map));
	data->rx.cqe_page_base.lo =
		cpu_to_le32(U64_LO(params->rxq_params.rcq_map));
	data->rx.cqe_page_base.hi =
		cpu_to_le32(U64_HI(params->rxq_params.rcq_map));
	data->rx.is_leading_rss =
		(params->ramrod_params.flags & CLIENT_IS_LEADING_RSS) ? 1 : 0;
	data->rx.is_approx_mcast = data->rx.is_leading_rss;

	/* Tx data */
	data->tx.enforce_security_flg = 0; /* VF specific */
	data->tx.tx_status_block_id = params->txq_params.fw_sb_id;
	data->tx.tx_sb_index_number = params->txq_params.sb_cq_index;
	data->tx.mtu = 0; /* VF specific */
	data->tx.tx_bd_page_base.lo =
		cpu_to_le32(U64_LO(params->txq_params.dscr_map));
	data->tx.tx_bd_page_base.hi =
		cpu_to_le32(U64_HI(params->txq_params.dscr_map));

	/* flow control data */
	data->fc.cqe_pause_thr_low = cpu_to_le16(params->pause.rcq_th_lo);
	data->fc.cqe_pause_thr_high = cpu_to_le16(params->pause.rcq_th_hi);
	data->fc.bd_pause_thr_low = cpu_to_le16(params->pause.bd_th_lo);
	data->fc.bd_pause_thr_high = cpu_to_le16(params->pause.bd_th_hi);
	data->fc.sge_pause_thr_low = cpu_to_le16(params->pause.sge_th_lo);
	data->fc.sge_pause_thr_high = cpu_to_le16(params->pause.sge_th_hi);
	data->fc.rx_cos_mask = cpu_to_le16(params->pause.pri_map);

	data->fc.safc_group_num = params->txq_params.cos;
	data->fc.safc_group_en_flg =
		(params->txq_params.flags & QUEUE_FLG_COS) ? 1 : 0;
	data->fc.traffic_type = LLFC_TRAFFIC_TYPE_NW;
}

static inline void bnx2x_set_ctx_validation(struct eth_context *cxt, u32 cid)
{
	/* ustorm cxt validation */
	cxt->ustorm_ag_context.cdu_usage =
		CDU_RSRVD_VALUE_TYPE_A(cid, CDU_REGION_NUMBER_UCM_AG,
				       ETH_CONNECTION_TYPE);
	/* xcontext validation */
	cxt->xstorm_ag_context.cdu_reserved =
		CDU_RSRVD_VALUE_TYPE_A(cid, CDU_REGION_NUMBER_XCM_AG,
				       ETH_CONNECTION_TYPE);
}

int bnx2x_setup_fw_client(struct bnx2x *bp,
			  struct bnx2x_client_init_params *params,
			  u8 activate,
			  struct client_init_ramrod_data *data,
			  dma_addr_t data_mapping)
{
	u16 hc_usec;
	int ramrod = RAMROD_CMD_ID_ETH_CLIENT_SETUP;
	int ramrod_flags = 0, rc;

	/* HC and context validation values */
	hc_usec = params->txq_params.hc_rate ?
		1000000 / params->txq_params.hc_rate : 0;
	bnx2x_update_coalesce_sb_index(bp,
			params->txq_params.fw_sb_id,
			params->txq_params.sb_cq_index,
			!(params->txq_params.flags & QUEUE_FLG_HC),
			hc_usec);

	*(params->ramrod_params.pstate) = BNX2X_FP_STATE_OPENING;

	hc_usec = params->rxq_params.hc_rate ?
		1000000 / params->rxq_params.hc_rate : 0;
	bnx2x_update_coalesce_sb_index(bp,
			params->rxq_params.fw_sb_id,
			params->rxq_params.sb_cq_index,
			!(params->rxq_params.flags & QUEUE_FLG_HC),
			hc_usec);

	bnx2x_set_ctx_validation(params->rxq_params.cxt,
				 params->rxq_params.cid);

	/* zero stats */
	if (params->txq_params.flags & QUEUE_FLG_STATS)
		storm_memset_xstats_zero(bp, BP_PORT(bp),
					 params->txq_params.stat_id);

	if (params->rxq_params.flags & QUEUE_FLG_STATS) {
		storm_memset_ustats_zero(bp, BP_PORT(bp),
					 params->rxq_params.stat_id);
		storm_memset_tstats_zero(bp, BP_PORT(bp),
					 params->rxq_params.stat_id);
	}

	/* Fill the ramrod data */
	bnx2x_fill_cl_init_data(bp, params, activate, data);

	/* SETUP ramrod.
	 *
	 * bnx2x_sp_post() takes a spin_lock, so no explicit memory
	 * barrier other than mmiowb() is needed to impose a proper
	 * ordering of memory operations.
	 */
	mmiowb();

	bnx2x_sp_post(bp, ramrod, params->ramrod_params.cid,
		      U64_HI(data_mapping), U64_LO(data_mapping), 0);

	/* Wait for completion */
	rc = bnx2x_wait_ramrod(bp, params->ramrod_params.state,
			       params->ramrod_params.index,
			       params->ramrod_params.pstate,
			       ramrod_flags);
	return rc;
}

/**
 * Configure interrupt mode according to the current configuration.
 * In case of MSI-X it will also try to enable MSI-X.
 *
 * @param bp
 *
 * @return int
 */
static int __devinit bnx2x_set_int_mode(struct bnx2x *bp)
{
	int rc = 0;

	switch (bp->int_mode) {
	case INT_MODE_MSI:
		bnx2x_enable_msi(bp);
		/* falling through... */
	case INT_MODE_INTx:
		bp->num_queues = 1;
		DP(NETIF_MSG_IFUP, "set number of queues to 1\n");
		break;
	default:
		/* Set number of queues according to bp->multi_mode value */
		bnx2x_set_num_queues(bp);

		DP(NETIF_MSG_IFUP, "set number of queues to %d\n",
		   bp->num_queues);

		/* if we can't use MSI-X we only need one fp,
		 * so try to enable MSI-X with the requested number of fp's
		 * and fall back to MSI or legacy INTx with one fp
		 */
		rc = bnx2x_enable_msix(bp);
		if (rc) {
			/* failed to enable MSI-X */
			if (bp->multi_mode)
				DP(NETIF_MSG_IFUP,
				   "Multi requested but failed to "
				   "enable MSI-X (%d), "
				   "set number of queues to %d\n",
				   bp->num_queues, 1);
			bp->num_queues = 1;

			if (!(bp->flags & DISABLE_MSI_FLAG))
				bnx2x_enable_msi(bp);
		}

		break;
	}

	return rc;
}

/* must be called prior to any HW initializations */
static inline u16 bnx2x_cid_ilt_lines(struct bnx2x *bp)
{
	return L2_ILT_LINES(bp);
}

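/* Lay out this function's ILT window. The order is fixed: CDU context
 * lines first, then QM queue lines, then (under BCM_CNIC) SRC and TM
 * lines; without CNIC the SRC and TM clients are marked to be skipped.
 */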
void bnx2x_ilt_set_info(struct bnx2x *bp)
{
	struct ilt_client_info *ilt_client;
	struct bnx2x_ilt *ilt = BP_ILT(bp);
	u16 line = 0;

	ilt->start_line = FUNC_ILT_BASE(BP_FUNC(bp));
	DP(BNX2X_MSG_SP, "ilt starts at line %d\n", ilt->start_line);

	/* CDU */
	ilt_client = &ilt->clients[ILT_CLIENT_CDU];
	ilt_client->client_num = ILT_CLIENT_CDU;
	ilt_client->page_size = CDU_ILT_PAGE_SZ;
	ilt_client->flags = ILT_CLIENT_SKIP_MEM;
	ilt_client->start = line;
	line += L2_ILT_LINES(bp);
#ifdef BCM_CNIC
	line += CNIC_ILT_LINES;
#endif
	ilt_client->end = line - 1;

	DP(BNX2X_MSG_SP, "ilt client[CDU]: start %d, end %d, psz 0x%x, "
			 "flags 0x%x, hw psz %d\n",
	   ilt_client->start,
	   ilt_client->end,
	   ilt_client->page_size,
	   ilt_client->flags,
	   ilog2(ilt_client->page_size >> 12));

	/* QM */
	if (QM_INIT(bp->qm_cid_count)) {
		ilt_client = &ilt->clients[ILT_CLIENT_QM];
		ilt_client->client_num = ILT_CLIENT_QM;
		ilt_client->page_size = QM_ILT_PAGE_SZ;
		ilt_client->flags = 0;
		ilt_client->start = line;

		/* 4 bytes for each cid */
		line += DIV_ROUND_UP(bp->qm_cid_count * QM_QUEUES_PER_FUNC * 4,
				     QM_ILT_PAGE_SZ);

		ilt_client->end = line - 1;

		DP(BNX2X_MSG_SP, "ilt client[QM]: start %d, end %d, psz 0x%x, "
				 "flags 0x%x, hw psz %d\n",
		   ilt_client->start,
		   ilt_client->end,
		   ilt_client->page_size,
		   ilt_client->flags,
		   ilog2(ilt_client->page_size >> 12));

	}
	/* SRC */
	ilt_client = &ilt->clients[ILT_CLIENT_SRC];
#ifdef BCM_CNIC
	ilt_client->client_num = ILT_CLIENT_SRC;
	ilt_client->page_size = SRC_ILT_PAGE_SZ;
	ilt_client->flags = 0;
	ilt_client->start = line;
	line += SRC_ILT_LINES;
	ilt_client->end = line - 1;

	DP(BNX2X_MSG_SP, "ilt client[SRC]: start %d, end %d, psz 0x%x, "
			 "flags 0x%x, hw psz %d\n",
	   ilt_client->start,
	   ilt_client->end,
	   ilt_client->page_size,
	   ilt_client->flags,
	   ilog2(ilt_client->page_size >> 12));

#else
	ilt_client->flags = (ILT_CLIENT_SKIP_INIT | ILT_CLIENT_SKIP_MEM);
#endif

	/* TM */
	ilt_client = &ilt->clients[ILT_CLIENT_TM];
#ifdef BCM_CNIC
	ilt_client->client_num = ILT_CLIENT_TM;
	ilt_client->page_size = TM_ILT_PAGE_SZ;
	ilt_client->flags = 0;
	ilt_client->start = line;
	line += TM_ILT_LINES;
	ilt_client->end = line - 1;

	DP(BNX2X_MSG_SP, "ilt client[TM]: start %d, end %d, psz 0x%x, "
			 "flags 0x%x, hw psz %d\n",
	   ilt_client->start,
	   ilt_client->end,
	   ilt_client->page_size,
	   ilt_client->flags,
	   ilog2(ilt_client->page_size >> 12));

#else
	ilt_client->flags = (ILT_CLIENT_SKIP_INIT | ILT_CLIENT_SKIP_MEM);
#endif
}

int bnx2x_setup_client(struct bnx2x *bp, struct bnx2x_fastpath *fp,
		       int is_leading)
{
	struct bnx2x_client_init_params params = { {0} };
	int rc;

	bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, 0,
		     IGU_INT_ENABLE, 0);

	params.ramrod_params.pstate = &fp->state;
	params.ramrod_params.state = BNX2X_FP_STATE_OPEN;
	params.ramrod_params.index = fp->index;
	params.ramrod_params.cid = fp->cid;

	if (is_leading)
		params.ramrod_params.flags |= CLIENT_IS_LEADING_RSS;

	bnx2x_pf_rx_cl_prep(bp, fp, &params.pause, &params.rxq_params);

	bnx2x_pf_tx_cl_prep(bp, fp, &params.txq_params);

	rc = bnx2x_setup_fw_client(bp, &params, 1,
				   bnx2x_sp(bp, client_init_data),
				   bnx2x_sp_mapping(bp, client_init_data));
	return rc;
}

6636int bnx2x_stop_fw_client(struct bnx2x *bp, struct bnx2x_client_ramrod_params *p)
6637{
6638 int rc;
6639
6640 int poll_flag = p->poll ? WAIT_RAMROD_POLL : 0;
6641
Eliezer Tamirc14423f2008-02-28 11:49:42 -08006642 /* halt the connection */
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006643 *p->pstate = BNX2X_FP_STATE_HALTING;
6644 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, p->cid, 0,
6645 p->cl_id, 0);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006646
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006647 /* Wait for completion */
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006648 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, p->index,
6649 p->pstate, poll_flag);
Eliezer Tamirc14423f2008-02-28 11:49:42 -08006650 if (rc) /* timeout */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006651 return rc;
6652
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006653 *p->pstate = BNX2X_FP_STATE_TERMINATING;
6654 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_TERMINATE, p->cid, 0,
6655 p->cl_id, 0);
6656 /* Wait for completion */
6657 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_TERMINATED, p->index,
6658 p->pstate, poll_flag);
6659 if (rc) /* timeout */
6660 return rc;
6661
6662
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006663 /* delete cfc entry */
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006664 bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_CFC_DEL, p->cid, 0, 0, 1);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006665
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006666 /* Wait for completion */
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006667 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_CLOSED, p->index,
6668 p->pstate, WAIT_RAMROD_COMMON);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006669 return rc;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006670}
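/*
 * A client is thus torn down by three synchronous ramrods: HALT stops
 * traffic on the connection, TERMINATE flushes it in the FW and
 * CFC_DEL releases the connection's CFC resources; each step is polled
 * to completion through bnx2x_wait_ramrod() before the next is issued.
 */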
6671
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006672static int bnx2x_stop_client(struct bnx2x *bp, int index)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006673{
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006674 struct bnx2x_client_ramrod_params client_stop = {0};
6675 struct bnx2x_fastpath *fp = &bp->fp[index];
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006676
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006677 client_stop.index = index;
6678 client_stop.cid = fp->cid;
6679 client_stop.cl_id = fp->cl_id;
6680 client_stop.pstate = &(fp->state);
6681 client_stop.poll = 0;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006682
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006683 return bnx2x_stop_fw_client(bp, &client_stop);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006684}
6685
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006686
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006687static void bnx2x_reset_func(struct bnx2x *bp)
6688{
6689 int port = BP_PORT(bp);
6690 int func = BP_FUNC(bp);
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00006691 int i;
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006692 int pfunc_offset_fp = offsetof(struct hc_sb_data, p_func) +
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00006693 (CHIP_IS_E2(bp) ?
6694 offsetof(struct hc_status_block_data_e2, common) :
6695 offsetof(struct hc_status_block_data_e1x, common));
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006696 int pfunc_offset_sp = offsetof(struct hc_sp_status_block_data, p_func);
6697 int pfid_offset = offsetof(struct pci_entity, pf_id);
6698
6699 /* Disable the function in the FW */
6700 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(func), 0);
6701 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNC_EN_OFFSET(func), 0);
6702 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNC_EN_OFFSET(func), 0);
6703 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNC_EN_OFFSET(func), 0);
6704
6705 /* FP SBs */
6706 for_each_queue(bp, i) {
6707 struct bnx2x_fastpath *fp = &bp->fp[i];
6708 REG_WR8(bp,
6709 BAR_CSTRORM_INTMEM +
6710 CSTORM_STATUS_BLOCK_DATA_OFFSET(fp->fw_sb_id)
6711 + pfunc_offset_fp + pfid_offset,
6712 HC_FUNCTION_DISABLED);
6713 }
6714
6715 /* SP SB */
6716 REG_WR8(bp,
6717 BAR_CSTRORM_INTMEM +
6718 CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(func) +
6719 pfunc_offset_sp + pfid_offset,
6720 HC_FUNCTION_DISABLED);
6721
6722
6723 for (i = 0; i < XSTORM_SPQ_DATA_SIZE / 4; i++)
 6724 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_DATA_OFFSET(func) +
 6725 i * 4, 0);
Eliezer Tamir49d66772008-02-28 11:53:13 -08006726
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006727 /* Configure IGU */
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00006728 if (bp->common.int_block == INT_BLOCK_HC) {
6729 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
6730 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
6731 } else {
6732 REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, 0);
6733 REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, 0);
6734 }
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006735
Michael Chan37b091b2009-10-10 13:46:55 +00006736#ifdef BCM_CNIC
6737 /* Disable Timer scan */
6738 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
6739 /*
6740 * Wait for at least 10ms and up to 2 second for the timers scan to
6741 * complete
6742 */
6743 for (i = 0; i < 200; i++) {
6744 msleep(10);
6745 if (!REG_RD(bp, TM_REG_LIN0_SCAN_ON + port*4))
6746 break;
6747 }
6748#endif
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006749 /* Clear ILT */
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00006750 bnx2x_clear_func_ilt(bp, func);
6751
6752 /* Timers workaround bug for E2: if this is vnic-3,
 6753 * we need to set the entire ilt range for these timers.
6754 */
6755 if (CHIP_IS_E2(bp) && BP_VN(bp) == 3) {
6756 struct ilt_client_info ilt_cli;
6757 /* use dummy TM client */
6758 memset(&ilt_cli, 0, sizeof(struct ilt_client_info));
6759 ilt_cli.start = 0;
6760 ilt_cli.end = ILT_NUM_PAGE_ENTRIES - 1;
6761 ilt_cli.client_num = ILT_CLIENT_TM;
6762
6763 bnx2x_ilt_boundry_init_op(bp, &ilt_cli, 0, INITOP_CLEAR);
6764 }
6765
 6766 /* this assumes that reset_port() is called before reset_func() */
6767 if (CHIP_IS_E2(bp))
6768 bnx2x_pf_disable(bp);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006769
6770 bp->dmae_ready = 0;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006771}
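/*
 * The ordering above is deliberate: the function is first disabled in
 * the FW and in its status blocks, the HC/IGU edge registers are then
 * cleared, and only afterwards are the ILT entries released, so late
 * ramrods or interrupts cannot reference mappings that are already gone.
 */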
6772
6773static void bnx2x_reset_port(struct bnx2x *bp)
6774{
6775 int port = BP_PORT(bp);
6776 u32 val;
6777
6778 REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
6779
6780 /* Do not rcv packets to BRB */
6781 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
6782 /* Do not direct rcv packets that are not for MCP to the BRB */
6783 REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
6784 NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
6785
6786 /* Configure AEU */
6787 REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);
6788
6789 msleep(100);
6790 /* Check for BRB port occupancy */
6791 val = REG_RD(bp, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4);
6792 if (val)
6793 DP(NETIF_MSG_IFDOWN,
Eilon Greenstein33471622008-08-13 15:59:08 -07006794 "BRB1 is not empty, %d blocks are occupied\n", val);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006795
6796 /* TODO: Close Doorbell port? */
6797}
6798
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006799static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code)
6800{
6801 DP(BNX2X_MSG_MCP, "function %d reset_code %x\n",
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00006802 BP_ABS_FUNC(bp), reset_code);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006803
6804 switch (reset_code) {
6805 case FW_MSG_CODE_DRV_UNLOAD_COMMON:
6806 bnx2x_reset_port(bp);
6807 bnx2x_reset_func(bp);
6808 bnx2x_reset_common(bp);
6809 break;
6810
6811 case FW_MSG_CODE_DRV_UNLOAD_PORT:
6812 bnx2x_reset_port(bp);
6813 bnx2x_reset_func(bp);
6814 break;
6815
6816 case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
6817 bnx2x_reset_func(bp);
6818 break;
6819
6820 default:
6821 BNX2X_ERR("Unknown reset_code (0x%x) from MCP\n", reset_code);
6822 break;
6823 }
6824}
6825
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00006826void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006827{
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07006828 int port = BP_PORT(bp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006829 u32 reset_code = 0;
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07006830 int i, cnt, rc;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006831
Eilon Greenstein555f6c72009-02-12 08:36:11 +00006832 /* Wait until tx fastpath tasks complete */
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +00006833 for_each_queue(bp, i) {
Eliezer Tamir228241e2008-02-28 11:56:57 -08006834 struct bnx2x_fastpath *fp = &bp->fp[i];
6835
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006836 cnt = 1000;
Vladislav Zolotarove8b5fc52009-01-26 12:36:42 -08006837 while (bnx2x_has_tx_work_unload(fp)) {
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07006838
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006839 if (!cnt) {
6840 BNX2X_ERR("timeout waiting for queue[%d]\n",
6841 i);
6842#ifdef BNX2X_STOP_ON_ERROR
6843 bnx2x_panic();
6844 return -EBUSY;
6845#else
6846 break;
6847#endif
6848 }
6849 cnt--;
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07006850 msleep(1);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006851 }
Eliezer Tamir228241e2008-02-28 11:56:57 -08006852 }
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07006853 /* Give HW time to discard old tx messages */
6854 msleep(1);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006855
Yitchak Gertner65abd742008-08-25 15:26:24 -07006856 if (CHIP_IS_E1(bp)) {
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006857 /* invalidate mc list,
6858 * wait and poll (interrupts are off)
6859 */
6860 bnx2x_invlidate_e1_mc_list(bp);
6861 bnx2x_set_eth_mac(bp, 0);
Yitchak Gertner65abd742008-08-25 15:26:24 -07006862
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006863 } else {
Yitchak Gertner65abd742008-08-25 15:26:24 -07006864 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
6865
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006866 bnx2x_set_eth_mac(bp, 0);
Yitchak Gertner65abd742008-08-25 15:26:24 -07006867
6868 for (i = 0; i < MC_HASH_SIZE; i++)
6869 REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);
6870 }
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006871
Michael Chan993ac7b2009-10-10 13:46:56 +00006872#ifdef BCM_CNIC
6873 /* Clear iSCSI L2 MAC */
6874 mutex_lock(&bp->cnic_mutex);
6875 if (bp->cnic_flags & BNX2X_CNIC_FLAG_MAC_SET) {
6876 bnx2x_set_iscsi_eth_mac_addr(bp, 0);
6877 bp->cnic_flags &= ~BNX2X_CNIC_FLAG_MAC_SET;
6878 }
6879 mutex_unlock(&bp->cnic_mutex);
6880#endif
Yitchak Gertner65abd742008-08-25 15:26:24 -07006881
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07006882 if (unload_mode == UNLOAD_NORMAL)
6883 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
Eliezer Tamir228241e2008-02-28 11:56:57 -08006884
Eilon Greenstein7d0446c2009-07-29 00:20:10 +00006885 else if (bp->flags & NO_WOL_FLAG)
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07006886 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07006887
Eilon Greenstein7d0446c2009-07-29 00:20:10 +00006888 else if (bp->wol) {
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07006889 u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006890 u8 *mac_addr = bp->dev->dev_addr;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006891 u32 val;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006892 /* The mac address is written to entries 1-4 to
6893 preserve entry 0 which is used by the PMF */
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07006894 u8 entry = (BP_E1HVN(bp) + 1)*8;
6895
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006896 val = (mac_addr[0] << 8) | mac_addr[1];
Eilon Greenstein3196a882008-08-13 15:58:49 -07006897 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006898
6899 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
6900 (mac_addr[4] << 8) | mac_addr[5];
Eilon Greenstein3196a882008-08-13 15:58:49 -07006901 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006902
6903 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;
Eliezer Tamir228241e2008-02-28 11:56:57 -08006904
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006905 } else
6906 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
6907
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006908 /* Close multi and leading connections;
 6909 completions for ramrods are collected in a synchronous way */
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006910 for_each_queue(bp, i)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006911
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006912 if (bnx2x_stop_client(bp, i))
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07006913#ifdef BNX2X_STOP_ON_ERROR
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006914 return;
6915#else
6916 goto unload_error;
6917#endif
6918
6919 rc = bnx2x_func_stop(bp);
6920 if (rc) {
6921 BNX2X_ERR("Function stop failed!\n");
6922#ifdef BNX2X_STOP_ON_ERROR
6923 return;
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07006924#else
6925 goto unload_error;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006926#endif
Eliezer Tamir228241e2008-02-28 11:56:57 -08006927 }
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006928#ifndef BNX2X_STOP_ON_ERROR
Eliezer Tamir228241e2008-02-28 11:56:57 -08006929unload_error:
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006930#endif
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006931 if (!BP_NOMCP(bp))
Yaniv Rosnera22f0782010-09-07 11:41:20 +00006932 reset_code = bnx2x_fw_command(bp, reset_code, 0);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006933 else {
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00006934 DP(NETIF_MSG_IFDOWN, "NO MCP - load counts[%d] "
6935 "%d, %d, %d\n", BP_PATH(bp),
6936 load_count[BP_PATH(bp)][0],
6937 load_count[BP_PATH(bp)][1],
6938 load_count[BP_PATH(bp)][2]);
6939 load_count[BP_PATH(bp)][0]--;
6940 load_count[BP_PATH(bp)][1 + port]--;
6941 DP(NETIF_MSG_IFDOWN, "NO MCP - new load counts[%d] "
6942 "%d, %d, %d\n", BP_PATH(bp),
6943 load_count[BP_PATH(bp)][0], load_count[BP_PATH(bp)][1],
6944 load_count[BP_PATH(bp)][2]);
6945 if (load_count[BP_PATH(bp)][0] == 0)
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006946 reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00006947 else if (load_count[BP_PATH(bp)][1 + port] == 0)
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006948 reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT;
6949 else
6950 reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION;
6951 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006952
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006953 if ((reset_code == FW_MSG_CODE_DRV_UNLOAD_COMMON) ||
6954 (reset_code == FW_MSG_CODE_DRV_UNLOAD_PORT))
6955 bnx2x__link_reset(bp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006956
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006957 /* Disable HW interrupts, NAPI */
6958 bnx2x_netif_stop(bp, 1);
6959
6960 /* Release IRQs */
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00006961 bnx2x_free_irq(bp);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006962
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006963 /* Reset the chip */
Eliezer Tamir228241e2008-02-28 11:56:57 -08006964 bnx2x_reset_chip(bp, reset_code);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006965
6966 /* Report UNLOAD_DONE to MCP */
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006967 if (!BP_NOMCP(bp))
Yaniv Rosnera22f0782010-09-07 11:41:20 +00006968 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
Eilon Greenstein356e2382009-02-12 08:38:32 +00006969
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00006970}
6971
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00006972void bnx2x_disable_close_the_gate(struct bnx2x *bp)
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00006973{
6974 u32 val;
6975
6976 DP(NETIF_MSG_HW, "Disabling \"close the gates\"\n");
6977
6978 if (CHIP_IS_E1(bp)) {
6979 int port = BP_PORT(bp);
6980 u32 addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
6981 MISC_REG_AEU_MASK_ATTN_FUNC_0;
6982
6983 val = REG_RD(bp, addr);
6984 val &= ~(0x300);
6985 REG_WR(bp, addr, val);
6986 } else if (CHIP_IS_E1H(bp)) {
6987 val = REG_RD(bp, MISC_REG_AEU_GENERAL_MASK);
6988 val &= ~(MISC_AEU_GENERAL_MASK_REG_AEU_PXP_CLOSE_MASK |
6989 MISC_AEU_GENERAL_MASK_REG_AEU_NIG_CLOSE_MASK);
6990 REG_WR(bp, MISC_REG_AEU_GENERAL_MASK, val);
6991 }
6992}
6993
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00006994/* Close gates #2, #3 and #4: */
6995static void bnx2x_set_234_gates(struct bnx2x *bp, bool close)
6996{
6997 u32 val, addr;
6998
6999 /* Gates #2 and #4a are closed/opened for "not E1" only */
7000 if (!CHIP_IS_E1(bp)) {
7001 /* #4 */
7002 val = REG_RD(bp, PXP_REG_HST_DISCARD_DOORBELLS);
7003 REG_WR(bp, PXP_REG_HST_DISCARD_DOORBELLS,
7004 close ? (val | 0x1) : (val & (~(u32)1)));
7005 /* #2 */
7006 val = REG_RD(bp, PXP_REG_HST_DISCARD_INTERNAL_WRITES);
7007 REG_WR(bp, PXP_REG_HST_DISCARD_INTERNAL_WRITES,
7008 close ? (val | 0x1) : (val & (~(u32)1)));
7009 }
7010
7011 /* #3 */
7012 addr = BP_PORT(bp) ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
7013 val = REG_RD(bp, addr);
7014 REG_WR(bp, addr, (!close) ? (val | 0x1) : (val & (~(u32)1)));
7015
7016 DP(NETIF_MSG_HW, "%s gates #2, #3 and #4\n",
7017 close ? "closing" : "opening");
7018 mmiowb();
7019}
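/*
 * "Closing the gates" isolates the chip ahead of a process-kill reset:
 * gate #4 makes the PXP discard host doorbells, gate #2 makes it
 * discard internal writes, and gate #3 toggles a bit in HC_REG_CONFIG_x
 * whose polarity is inverted relative to the other two (the bit is set
 * to open the gate).
 */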
7020
7021#define SHARED_MF_CLP_MAGIC 0x80000000 /* `magic' bit */
7022
7023static void bnx2x_clp_reset_prep(struct bnx2x *bp, u32 *magic_val)
7024{
7025 /* Do some magic... */
7026 u32 val = MF_CFG_RD(bp, shared_mf_config.clp_mb);
7027 *magic_val = val & SHARED_MF_CLP_MAGIC;
7028 MF_CFG_WR(bp, shared_mf_config.clp_mb, val | SHARED_MF_CLP_MAGIC);
7029}
7030
7031/* Restore the value of the `magic' bit.
7032 *
 7033 * @param bp Driver handle.
7034 * @param magic_val Old value of the `magic' bit.
7035 */
7036static void bnx2x_clp_reset_done(struct bnx2x *bp, u32 magic_val)
7037{
7038 /* Restore the `magic' bit value... */
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00007039 u32 val = MF_CFG_RD(bp, shared_mf_config.clp_mb);
7040 MF_CFG_WR(bp, shared_mf_config.clp_mb,
7041 (val & (~SHARED_MF_CLP_MAGIC)) | magic_val);
7042}
7043
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00007044/**
7045 * Prepares for MCP reset: takes care of CLP configurations.
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00007046 *
7047 * @param bp
7048 * @param magic_val Old value of 'magic' bit.
7049 */
7050static void bnx2x_reset_mcp_prep(struct bnx2x *bp, u32 *magic_val)
7051{
7052 u32 shmem;
7053 u32 validity_offset;
7054
7055 DP(NETIF_MSG_HW, "Starting\n");
7056
7057 /* Set `magic' bit in order to save MF config */
7058 if (!CHIP_IS_E1(bp))
7059 bnx2x_clp_reset_prep(bp, magic_val);
7060
7061 /* Get shmem offset */
7062 shmem = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
7063 validity_offset = offsetof(struct shmem_region, validity_map[0]);
7064
7065 /* Clear validity map flags */
7066 if (shmem > 0)
7067 REG_WR(bp, shmem + validity_offset, 0);
7068}
7069
7070#define MCP_TIMEOUT 5000 /* 5 seconds (in ms) */
7071#define MCP_ONE_TIMEOUT 100 /* 100 ms */
7072
7073/* Waits for MCP_ONE_TIMEOUT or MCP_ONE_TIMEOUT*10,
7074 * depending on the HW type.
7075 *
7076 * @param bp
7077 */
7078static inline void bnx2x_mcp_wait_one(struct bnx2x *bp)
7079{
7080 /* special handling for emulation and FPGA,
7081 wait 10 times longer */
7082 if (CHIP_REV_IS_SLOW(bp))
7083 msleep(MCP_ONE_TIMEOUT*10);
7084 else
7085 msleep(MCP_ONE_TIMEOUT);
7086}
7087
7088static int bnx2x_reset_mcp_comp(struct bnx2x *bp, u32 magic_val)
7089{
7090 u32 shmem, cnt, validity_offset, val;
7091 int rc = 0;
7092
7093 msleep(100);
7094
7095 /* Get shmem offset */
7096 shmem = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
7097 if (shmem == 0) {
7098 BNX2X_ERR("Shmem 0 return failure\n");
7099 rc = -ENOTTY;
7100 goto exit_lbl;
7101 }
7102
7103 validity_offset = offsetof(struct shmem_region, validity_map[0]);
7104
7105 /* Wait for MCP to come up */
7106 for (cnt = 0; cnt < (MCP_TIMEOUT / MCP_ONE_TIMEOUT); cnt++) {
 7107 /* TBD: it's best to check the validity map of the last port;
 7108 * currently this checks port 0.
7109 */
7110 val = REG_RD(bp, shmem + validity_offset);
7111 DP(NETIF_MSG_HW, "shmem 0x%x validity map(0x%x)=0x%x\n", shmem,
7112 shmem + validity_offset, val);
7113
7114 /* check that shared memory is valid. */
7115 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
7116 == (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
7117 break;
7118
7119 bnx2x_mcp_wait_one(bp);
7120 }
7121
7122 DP(NETIF_MSG_HW, "Cnt=%d Shmem validity map 0x%x\n", cnt, val);
7123
7124 /* Check that shared memory is valid. This indicates that MCP is up. */
7125 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) !=
7126 (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) {
 7127 BNX2X_ERR("Shmem signature not present. MCP is not up!\n");
7128 rc = -ENOTTY;
7129 goto exit_lbl;
7130 }
7131
7132exit_lbl:
7133 /* Restore the `magic' bit value */
7134 if (!CHIP_IS_E1(bp))
7135 bnx2x_clp_reset_done(bp, magic_val);
7136
7137 return rc;
7138}
7139
7140static void bnx2x_pxp_prep(struct bnx2x *bp)
7141{
7142 if (!CHIP_IS_E1(bp)) {
7143 REG_WR(bp, PXP2_REG_RD_START_INIT, 0);
7144 REG_WR(bp, PXP2_REG_RQ_RBC_DONE, 0);
7145 REG_WR(bp, PXP2_REG_RQ_CFG_DONE, 0);
7146 mmiowb();
7147 }
7148}
7149
7150/*
7151 * Reset the whole chip except for:
7152 * - PCIE core
7153 * - PCI Glue, PSWHST, PXP/PXP2 RF (all controlled by
7154 * one reset bit)
7155 * - IGU
7156 * - MISC (including AEU)
7157 * - GRC
7158 * - RBCN, RBCP
7159 */
7160static void bnx2x_process_kill_chip_reset(struct bnx2x *bp)
7161{
7162 u32 not_reset_mask1, reset_mask1, not_reset_mask2, reset_mask2;
7163
7164 not_reset_mask1 =
7165 MISC_REGISTERS_RESET_REG_1_RST_HC |
7166 MISC_REGISTERS_RESET_REG_1_RST_PXPV |
7167 MISC_REGISTERS_RESET_REG_1_RST_PXP;
7168
7169 not_reset_mask2 =
7170 MISC_REGISTERS_RESET_REG_2_RST_MDIO |
7171 MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE |
7172 MISC_REGISTERS_RESET_REG_2_RST_EMAC1_HARD_CORE |
7173 MISC_REGISTERS_RESET_REG_2_RST_MISC_CORE |
7174 MISC_REGISTERS_RESET_REG_2_RST_RBCN |
7175 MISC_REGISTERS_RESET_REG_2_RST_GRC |
7176 MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_REG_HARD_CORE |
7177 MISC_REGISTERS_RESET_REG_2_RST_MCP_N_HARD_CORE_RST_B;
7178
7179 reset_mask1 = 0xffffffff;
7180
7181 if (CHIP_IS_E1(bp))
7182 reset_mask2 = 0xffff;
7183 else
7184 reset_mask2 = 0x1ffff;
7185
7186 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
7187 reset_mask1 & (~not_reset_mask1));
7188 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
7189 reset_mask2 & (~not_reset_mask2));
7190
7191 barrier();
7192 mmiowb();
7193
7194 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, reset_mask1);
7195 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, reset_mask2);
7196 mmiowb();
7197}
7198
7199static int bnx2x_process_kill(struct bnx2x *bp)
7200{
7201 int cnt = 1000;
7202 u32 val = 0;
7203 u32 sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1, pgl_exp_rom2;
7204
7205
7206 /* Empty the Tetris buffer, wait for 1s */
7207 do {
7208 sr_cnt = REG_RD(bp, PXP2_REG_RD_SR_CNT);
7209 blk_cnt = REG_RD(bp, PXP2_REG_RD_BLK_CNT);
7210 port_is_idle_0 = REG_RD(bp, PXP2_REG_RD_PORT_IS_IDLE_0);
7211 port_is_idle_1 = REG_RD(bp, PXP2_REG_RD_PORT_IS_IDLE_1);
7212 pgl_exp_rom2 = REG_RD(bp, PXP2_REG_PGL_EXP_ROM2);
7213 if ((sr_cnt == 0x7e) && (blk_cnt == 0xa0) &&
7214 ((port_is_idle_0 & 0x1) == 0x1) &&
7215 ((port_is_idle_1 & 0x1) == 0x1) &&
7216 (pgl_exp_rom2 == 0xffffffff))
7217 break;
7218 msleep(1);
7219 } while (cnt-- > 0);
7220
7221 if (cnt <= 0) {
7222 DP(NETIF_MSG_HW, "Tetris buffer didn't get empty or there"
7223 " are still"
7224 " outstanding read requests after 1s!\n");
7225 DP(NETIF_MSG_HW, "sr_cnt=0x%08x, blk_cnt=0x%08x,"
7226 " port_is_idle_0=0x%08x,"
7227 " port_is_idle_1=0x%08x, pgl_exp_rom2=0x%08x\n",
7228 sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1,
7229 pgl_exp_rom2);
7230 return -EAGAIN;
7231 }
7232
7233 barrier();
7234
7235 /* Close gates #2, #3 and #4 */
7236 bnx2x_set_234_gates(bp, true);
7237
7238 /* TBD: Indicate that "process kill" is in progress to MCP */
7239
7240 /* Clear "unprepared" bit */
7241 REG_WR(bp, MISC_REG_UNPREPARED, 0);
7242 barrier();
7243
7244 /* Make sure all is written to the chip before the reset */
7245 mmiowb();
7246
7247 /* Wait for 1ms to empty GLUE and PCI-E core queues,
7248 * PSWHST, GRC and PSWRD Tetris buffer.
7249 */
7250 msleep(1);
7251
7252 /* Prepare to chip reset: */
7253 /* MCP */
7254 bnx2x_reset_mcp_prep(bp, &val);
7255
7256 /* PXP */
7257 bnx2x_pxp_prep(bp);
7258 barrier();
7259
7260 /* reset the chip */
7261 bnx2x_process_kill_chip_reset(bp);
7262 barrier();
7263
7264 /* Recover after reset: */
7265 /* MCP */
7266 if (bnx2x_reset_mcp_comp(bp, val))
7267 return -EAGAIN;
7268
7269 /* PXP */
7270 bnx2x_pxp_prep(bp);
7271
7272 /* Open the gates #2, #3 and #4 */
7273 bnx2x_set_234_gates(bp, false);
7274
7275 /* TBD: IGU/AEU preparation bring back the AEU/IGU to a
7276 * reset state, re-enable attentions. */
7277
7278 return 0;
7279}
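/*
 * In short, process kill is: drain the PXP Tetris buffer, close gates
 * #2-#4, clear the "unprepared" bit, prepare the MCP (CLP magic) and
 * PXP, pulse the reset registers, then verify through the shmem
 * validity signature that the MCP came back before reopening the gates.
 */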
7280
7281static int bnx2x_leader_reset(struct bnx2x *bp)
7282{
7283 int rc = 0;
7284 /* Try to recover after the failure */
7285 if (bnx2x_process_kill(bp)) {
 7286 printk(KERN_ERR "%s: Something bad has happened! Aii!\n",
7287 bp->dev->name);
7288 rc = -EAGAIN;
7289 goto exit_leader_reset;
7290 }
7291
7292 /* Clear "reset is in progress" bit and update the driver state */
7293 bnx2x_set_reset_done(bp);
7294 bp->recovery_state = BNX2X_RECOVERY_DONE;
7295
7296exit_leader_reset:
7297 bp->is_leader = 0;
7298 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESERVED_08);
7299 smp_wmb();
7300 return rc;
7301}
7302
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00007303/* Assumption: runs under rtnl lock. This together with the fact
 7304 * that it's called only from bnx2x_reset_task() ensures that it
7305 * will never be called when netif_running(bp->dev) is false.
7306 */
7307static void bnx2x_parity_recover(struct bnx2x *bp)
7308{
7309 DP(NETIF_MSG_HW, "Handling parity\n");
7310 while (1) {
7311 switch (bp->recovery_state) {
7312 case BNX2X_RECOVERY_INIT:
7313 DP(NETIF_MSG_HW, "State is BNX2X_RECOVERY_INIT\n");
7314 /* Try to get a LEADER_LOCK HW lock */
7315 if (bnx2x_trylock_hw_lock(bp,
7316 HW_LOCK_RESOURCE_RESERVED_08))
7317 bp->is_leader = 1;
7318
7319 /* Stop the driver */
7320 /* If interface has been removed - break */
7321 if (bnx2x_nic_unload(bp, UNLOAD_RECOVERY))
7322 return;
7323
7324 bp->recovery_state = BNX2X_RECOVERY_WAIT;
7325 /* Ensure "is_leader" and "recovery_state"
7326 * update values are seen on other CPUs
7327 */
7328 smp_wmb();
7329 break;
7330
7331 case BNX2X_RECOVERY_WAIT:
7332 DP(NETIF_MSG_HW, "State is BNX2X_RECOVERY_WAIT\n");
7333 if (bp->is_leader) {
7334 u32 load_counter = bnx2x_get_load_cnt(bp);
7335 if (load_counter) {
7336 /* Wait until all other functions get
7337 * down.
7338 */
7339 schedule_delayed_work(&bp->reset_task,
7340 HZ/10);
7341 return;
7342 } else {
7343 /* If all other functions got down -
7344 * try to bring the chip back to
7345 * normal. In any case it's an exit
7346 * point for a leader.
7347 */
7348 if (bnx2x_leader_reset(bp) ||
7349 bnx2x_nic_load(bp, LOAD_NORMAL)) {
 7350 printk(KERN_ERR "%s: Recovery "
7351 "has failed. Power cycle is "
7352 "needed.\n", bp->dev->name);
7353 /* Disconnect this device */
7354 netif_device_detach(bp->dev);
7355 /* Block ifup for all function
7356 * of this ASIC until
7357 * "process kill" or power
7358 * cycle.
7359 */
7360 bnx2x_set_reset_in_progress(bp);
7361 /* Shut down the power */
7362 bnx2x_set_power_state(bp,
7363 PCI_D3hot);
7364 return;
7365 }
7366
7367 return;
7368 }
7369 } else { /* non-leader */
7370 if (!bnx2x_reset_is_done(bp)) {
7371 /* Try to get a LEADER_LOCK HW lock as
7372 * long as a former leader may have
7373 * been unloaded by the user or
 7374 * released leadership for another
 7375 * reason.
7376 */
7377 if (bnx2x_trylock_hw_lock(bp,
7378 HW_LOCK_RESOURCE_RESERVED_08)) {
7379 /* I'm a leader now! Restart a
7380 * switch case.
7381 */
7382 bp->is_leader = 1;
7383 break;
7384 }
7385
7386 schedule_delayed_work(&bp->reset_task,
7387 HZ/10);
7388 return;
7389
7390 } else { /* A leader has completed
7391 * the "process kill". It's an exit
7392 * point for a non-leader.
7393 */
7394 bnx2x_nic_load(bp, LOAD_NORMAL);
7395 bp->recovery_state =
7396 BNX2X_RECOVERY_DONE;
7397 smp_wmb();
7398 return;
7399 }
7400 }
7401 default:
7402 return;
7403 }
7404 }
7405}
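/*
 * Recovery is a small leader-election state machine: the first function
 * to grab HW lock RESERVED_08 becomes the leader, every function
 * unloads, the leader waits for the global load count to drop to zero
 * and then runs the process-kill reset, while non-leaders poll (or
 * inherit leadership if the lock is freed) and reload once
 * bnx2x_reset_is_done() reports completion.
 */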
7406
7407/* bnx2x_nic_unload() flushes the bnx2x_wq, thus reset task is
7408 * scheduled on a general queue in order to prevent a dead lock.
7409 */
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007410static void bnx2x_reset_task(struct work_struct *work)
7411{
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00007412 struct bnx2x *bp = container_of(work, struct bnx2x, reset_task.work);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007413
7414#ifdef BNX2X_STOP_ON_ERROR
7415 BNX2X_ERR("reset task called but STOP_ON_ERROR defined"
7416 " so reset not done to allow debug dump,\n"
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00007417 KERN_ERR " you will need to reboot when done\n");
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007418 return;
7419#endif
7420
7421 rtnl_lock();
7422
7423 if (!netif_running(bp->dev))
7424 goto reset_task_exit;
7425
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00007426 if (unlikely(bp->recovery_state != BNX2X_RECOVERY_DONE))
7427 bnx2x_parity_recover(bp);
7428 else {
7429 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
7430 bnx2x_nic_load(bp, LOAD_NORMAL);
7431 }
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007432
7433reset_task_exit:
7434 rtnl_unlock();
7435}
7436
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007437/* end of nic load/unload */
7438
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007439/*
7440 * Init service functions
7441 */
7442
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00007443u32 bnx2x_get_pretend_reg(struct bnx2x *bp)
Eilon Greensteinf1ef27e2009-02-12 08:36:23 +00007444{
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00007445 u32 base = PXP2_REG_PGL_PRETEND_FUNC_F0;
7446 u32 stride = PXP2_REG_PGL_PRETEND_FUNC_F1 - base;
7447 return base + (BP_ABS_FUNC(bp)) * stride;
Eilon Greensteinf1ef27e2009-02-12 08:36:23 +00007448}
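/*
 * Writing a pf number into the pretend register makes the chip treat
 * subsequent GRC accesses from this function as if they came from that
 * function; bnx2x_undi_int_disable_e1h() below uses this to disable
 * interrupts on behalf of function 0.
 */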
7449
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00007450static void bnx2x_undi_int_disable_e1h(struct bnx2x *bp)
Eilon Greensteinf1ef27e2009-02-12 08:36:23 +00007451{
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00007452 u32 reg = bnx2x_get_pretend_reg(bp);
Eilon Greensteinf1ef27e2009-02-12 08:36:23 +00007453
7454 /* Flush all outstanding writes */
7455 mmiowb();
7456
7457 /* Pretend to be function 0 */
7458 REG_WR(bp, reg, 0);
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00007459 REG_RD(bp, reg); /* Flush the GRC transaction (in the chip) */
Eilon Greensteinf1ef27e2009-02-12 08:36:23 +00007460
7461 /* From now we are in the "like-E1" mode */
7462 bnx2x_int_disable(bp);
7463
7464 /* Flush all outstanding writes */
7465 mmiowb();
7466
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00007467 /* Restore the original function */
7468 REG_WR(bp, reg, BP_ABS_FUNC(bp));
7469 REG_RD(bp, reg);
Eilon Greensteinf1ef27e2009-02-12 08:36:23 +00007470}
7471
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00007472static inline void bnx2x_undi_int_disable(struct bnx2x *bp)
Eilon Greensteinf1ef27e2009-02-12 08:36:23 +00007473{
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00007474 if (CHIP_IS_E1(bp))
Eilon Greensteinf1ef27e2009-02-12 08:36:23 +00007475 bnx2x_int_disable(bp);
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00007476 else
7477 bnx2x_undi_int_disable_e1h(bp);
Eilon Greensteinf1ef27e2009-02-12 08:36:23 +00007478}
7479
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007480static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007481{
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007482 u32 val;
7483
7484 /* Check if there is any driver already loaded */
7485 val = REG_RD(bp, MISC_REG_UNPREPARED);
7486 if (val == 0x1) {
7487 /* Check if it is the UNDI driver
7488 * UNDI driver initializes CID offset for normal bell to 0x7
7489 */
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07007490 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007491 val = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
7492 if (val == 0x7) {
7493 u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00007494 /* save our pf_num */
7495 int orig_pf_num = bp->pf_num;
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07007496 u32 swap_en;
7497 u32 swap_val;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007498
Eilon Greensteinb4661732009-01-14 06:43:56 +00007499 /* clear the UNDI indication */
7500 REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0);
7501
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007502 BNX2X_DEV_INFO("UNDI is active! reset device\n");
7503
7504 /* try unload UNDI on port 0 */
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00007505 bp->pf_num = 0;
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07007506 bp->fw_seq =
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00007507 (SHMEM_RD(bp, func_mb[bp->pf_num].drv_mb_header) &
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07007508 DRV_MSG_SEQ_NUMBER_MASK);
Yaniv Rosnera22f0782010-09-07 11:41:20 +00007509 reset_code = bnx2x_fw_command(bp, reset_code, 0);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007510
7511 /* if UNDI is loaded on the other port */
7512 if (reset_code != FW_MSG_CODE_DRV_UNLOAD_COMMON) {
7513
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07007514 /* send "DONE" for previous unload */
Yaniv Rosnera22f0782010-09-07 11:41:20 +00007515 bnx2x_fw_command(bp,
7516 DRV_MSG_CODE_UNLOAD_DONE, 0);
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07007517
7518 /* unload UNDI on port 1 */
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00007519 bp->pf_num = 1;
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07007520 bp->fw_seq =
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00007521 (SHMEM_RD(bp, func_mb[bp->pf_num].drv_mb_header) &
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07007522 DRV_MSG_SEQ_NUMBER_MASK);
7523 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007524
Yaniv Rosnera22f0782010-09-07 11:41:20 +00007525 bnx2x_fw_command(bp, reset_code, 0);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007526 }
7527
Eilon Greensteinb4661732009-01-14 06:43:56 +00007528 /* now it's safe to release the lock */
7529 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
7530
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00007531 bnx2x_undi_int_disable(bp);
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07007532
7533 /* close input traffic and wait for it */
7534 /* Do not rcv packets to BRB */
7535 REG_WR(bp,
7536 (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_DRV_MASK :
7537 NIG_REG_LLH0_BRB1_DRV_MASK), 0x0);
7538 /* Do not direct rcv packets that are not for MCP to
7539 * the BRB */
7540 REG_WR(bp,
7541 (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_NOT_MCP :
7542 NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
7543 /* clear AEU */
7544 REG_WR(bp,
7545 (BP_PORT(bp) ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
7546 MISC_REG_AEU_MASK_ATTN_FUNC_0), 0);
7547 msleep(10);
7548
7549 /* save NIG port swap info */
7550 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
7551 swap_en = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007552 /* reset device */
7553 REG_WR(bp,
7554 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07007555 0xd3ffffff);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007556 REG_WR(bp,
7557 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
7558 0x1403);
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07007559 /* take the NIG out of reset and restore swap values */
7560 REG_WR(bp,
7561 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
7562 MISC_REGISTERS_RESET_REG_1_RST_NIG);
7563 REG_WR(bp, NIG_REG_PORT_SWAP, swap_val);
7564 REG_WR(bp, NIG_REG_STRAP_OVERRIDE, swap_en);
7565
7566 /* send unload done to the MCP */
Yaniv Rosnera22f0782010-09-07 11:41:20 +00007567 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07007568
7569 /* restore our func and fw_seq */
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00007570 bp->pf_num = orig_pf_num;
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07007571 bp->fw_seq =
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00007572 (SHMEM_RD(bp, func_mb[bp->pf_num].drv_mb_header) &
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07007573 DRV_MSG_SEQ_NUMBER_MASK);
Eilon Greensteinb4661732009-01-14 06:43:56 +00007574 } else
7575 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007576 }
7577}
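/*
 * In short: if a pre-boot (UNDI) driver left the device initialized
 * (doorbell CID offset reads 0x7), issue WOL_DIS unload requests for
 * both ports, silence its interrupts, block BRB input traffic, reset
 * the chip while preserving the NIG port-swap straps, report
 * UNLOAD_DONE, and restore our own pf_num and fw_seq; all of this runs
 * before this driver initializes the HW.
 */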
7578
7579static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
7580{
7581 u32 val, val2, val3, val4, id;
Eilon Greenstein72ce58c2008-08-13 15:52:46 -07007582 u16 pmc;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007583
7584 /* Get the chip revision id and number. */
7585 /* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
7586 val = REG_RD(bp, MISC_REG_CHIP_NUM);
7587 id = ((val & 0xffff) << 16);
7588 val = REG_RD(bp, MISC_REG_CHIP_REV);
7589 id |= ((val & 0xf) << 12);
7590 val = REG_RD(bp, MISC_REG_CHIP_METAL);
7591 id |= ((val & 0xff) << 4);
Eilon Greenstein5a40e082009-01-14 06:44:04 +00007592 val = REG_RD(bp, MISC_REG_BOND_ID);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007593 id |= (val & 0xf);
7594 bp->common.chip_id = id;
Dmitry Kravkov523224a2010-10-06 03:23:26 +00007595
7596 /* Set doorbell size */
7597 bp->db_size = (1 << BNX2X_DB_SHIFT);
7598
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00007599 if (CHIP_IS_E2(bp)) {
7600 val = REG_RD(bp, MISC_REG_PORT4MODE_EN_OVWR);
7601 if ((val & 1) == 0)
7602 val = REG_RD(bp, MISC_REG_PORT4MODE_EN);
7603 else
7604 val = (val >> 1) & 1;
7605 BNX2X_DEV_INFO("chip is in %s\n", val ? "4_PORT_MODE" :
7606 "2_PORT_MODE");
7607 bp->common.chip_port_mode = val ? CHIP_4_PORT_MODE :
7608 CHIP_2_PORT_MODE;
7609
7610 if (CHIP_MODE_IS_4_PORT(bp))
7611 bp->pfid = (bp->pf_num >> 1); /* 0..3 */
7612 else
7613 bp->pfid = (bp->pf_num & 0x6); /* 0, 2, 4, 6 */
7614 } else {
7615 bp->common.chip_port_mode = CHIP_PORT_MODE_NONE; /* N/A */
7616 bp->pfid = bp->pf_num; /* 0..7 */
7617 }
7618
Dmitry Kravkov523224a2010-10-06 03:23:26 +00007619 /*
7620 * set base FW non-default (fast path) status block id, this value is
7621 * used to initialize the fw_sb_id saved on the fp/queue structure to
7622 * determine the id used by the FW.
7623 */
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00007624 if (CHIP_IS_E1x(bp))
7625 bp->base_fw_ndsb = BP_PORT(bp) * FP_SB_MAX_E1x;
7626 else /* E2 */
7627 bp->base_fw_ndsb = BP_PORT(bp) * FP_SB_MAX_E2;
7628
7629 bp->link_params.chip_id = bp->common.chip_id;
7630 BNX2X_DEV_INFO("chip ID is 0x%x\n", id);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00007631
Eilon Greenstein1c063282009-02-12 08:36:43 +00007632 val = (REG_RD(bp, 0x2874) & 0x55);
7633 if ((bp->common.chip_id & 0x1) ||
7634 (CHIP_IS_E1(bp) && val) || (CHIP_IS_E1H(bp) && (val == 0x55))) {
7635 bp->flags |= ONE_PORT_FLAG;
7636 BNX2X_DEV_INFO("single port device\n");
7637 }
7638
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007639 val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4);
7640 bp->common.flash_size = (NVRAM_1MB_SIZE <<
7641 (val & MCPR_NVM_CFG4_FLASH_SIZE));
7642 BNX2X_DEV_INFO("flash_size 0x%x (%d)\n",
7643 bp->common.flash_size, bp->common.flash_size);
7644
7645 bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00007646 bp->common.shmem2_base = REG_RD(bp, (BP_PATH(bp) ?
7647 MISC_REG_GENERIC_CR_1 :
7648 MISC_REG_GENERIC_CR_0));
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007649 bp->link_params.shmem_base = bp->common.shmem_base;
Yaniv Rosnera22f0782010-09-07 11:41:20 +00007650 bp->link_params.shmem2_base = bp->common.shmem2_base;
Eilon Greenstein2691d512009-08-12 08:22:08 +00007651 BNX2X_DEV_INFO("shmem offset 0x%x shmem2 offset 0x%x\n",
7652 bp->common.shmem_base, bp->common.shmem2_base);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007653
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00007654 if (!bp->common.shmem_base) {
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007655 BNX2X_DEV_INFO("MCP not active\n");
7656 bp->flags |= NO_MCP_FLAG;
7657 return;
7658 }
7659
7660 val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
7661 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
7662 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00007663 BNX2X_ERR("BAD MCP validity signature\n");
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007664
7665 bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config);
Eilon Greenstein35b19ba2009-02-12 08:36:47 +00007666 BNX2X_DEV_INFO("hw_config 0x%08x\n", bp->common.hw_config);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007667
7668 bp->link_params.hw_led_mode = ((bp->common.hw_config &
7669 SHARED_HW_CFG_LED_MODE_MASK) >>
7670 SHARED_HW_CFG_LED_MODE_SHIFT);
7671
Eilon Greensteinc2c8b032009-02-12 08:37:14 +00007672 bp->link_params.feature_config_flags = 0;
7673 val = SHMEM_RD(bp, dev_info.shared_feature_config.config);
7674 if (val & SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_ENABLED)
7675 bp->link_params.feature_config_flags |=
7676 FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
7677 else
7678 bp->link_params.feature_config_flags &=
7679 ~FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
7680
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007681 val = SHMEM_RD(bp, dev_info.bc_rev) >> 8;
7682 bp->common.bc_ver = val;
7683 BNX2X_DEV_INFO("bc_ver %X\n", val);
7684 if (val < BNX2X_BC_VER) {
 7685 /* for now only warn;
 7686 * later we might need to enforce this */
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00007687 BNX2X_ERR("This driver needs bc_ver %X but found %X, "
7688 "please upgrade BC\n", BNX2X_BC_VER, val);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007689 }
Eilon Greenstein4d295db2009-07-21 05:47:47 +00007690 bp->link_params.feature_config_flags |=
Yaniv Rosnera22f0782010-09-07 11:41:20 +00007691 (val >= REQ_BC_VER_4_VRFY_FIRST_PHY_OPT_MDL) ?
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00007692 FEATURE_CONFIG_BC_SUPPORTS_OPT_MDL_VRFY : 0;
7693
Yaniv Rosnera22f0782010-09-07 11:41:20 +00007694 bp->link_params.feature_config_flags |=
7695 (val >= REQ_BC_VER_4_VRFY_SPECIFIC_PHY_OPT_MDL) ?
7696 FEATURE_CONFIG_BC_SUPPORTS_DUAL_PHY_OPT_MDL_VRFY : 0;
Eilon Greenstein72ce58c2008-08-13 15:52:46 -07007697
7698 if (BP_E1HVN(bp) == 0) {
7699 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_PMC, &pmc);
7700 bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG;
7701 } else {
7702 /* no WOL capability for E1HVN != 0 */
7703 bp->flags |= NO_WOL_FLAG;
7704 }
7705 BNX2X_DEV_INFO("%sWoL capable\n",
Eilon Greensteinf5372252009-02-12 08:38:30 +00007706 (bp->flags & NO_WOL_FLAG) ? "not " : "");
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007707
7708 val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num);
7709 val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]);
7710 val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]);
7711 val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]);
7712
Vladislav Zolotarovcdaa7cb2010-04-19 01:13:57 +00007713 dev_info(&bp->pdev->dev, "part number %X-%X-%X-%X\n",
7714 val, val2, val3, val4);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007715}
7716
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00007717#define IGU_FID(val) GET_FIELD((val), IGU_REG_MAPPING_MEMORY_FID)
7718#define IGU_VEC(val) GET_FIELD((val), IGU_REG_MAPPING_MEMORY_VECTOR)
7719
7720static void __devinit bnx2x_get_igu_cam_info(struct bnx2x *bp)
7721{
7722 int pfid = BP_FUNC(bp);
7723 int vn = BP_E1HVN(bp);
7724 int igu_sb_id;
7725 u32 val;
7726 u8 fid;
7727
7728 bp->igu_base_sb = 0xff;
7729 bp->igu_sb_cnt = 0;
7730 if (CHIP_INT_MODE_IS_BC(bp)) {
7731 bp->igu_sb_cnt = min_t(u8, FP_SB_MAX_E1x,
7732 bp->l2_cid_count);
7733
7734 bp->igu_base_sb = (CHIP_MODE_IS_4_PORT(bp) ? pfid : vn) *
7735 FP_SB_MAX_E1x;
7736
7737 bp->igu_dsb_id = E1HVN_MAX * FP_SB_MAX_E1x +
7738 (CHIP_MODE_IS_4_PORT(bp) ? pfid : vn);
7739
7740 return;
7741 }
7742
7743 /* IGU in normal mode - read CAM */
7744 for (igu_sb_id = 0; igu_sb_id < IGU_REG_MAPPING_MEMORY_SIZE;
7745 igu_sb_id++) {
7746 val = REG_RD(bp, IGU_REG_MAPPING_MEMORY + igu_sb_id * 4);
7747 if (!(val & IGU_REG_MAPPING_MEMORY_VALID))
7748 continue;
7749 fid = IGU_FID(val);
7750 if ((fid & IGU_FID_ENCODE_IS_PF)) {
7751 if ((fid & IGU_FID_PF_NUM_MASK) != pfid)
7752 continue;
7753 if (IGU_VEC(val) == 0)
7754 /* default status block */
7755 bp->igu_dsb_id = igu_sb_id;
7756 else {
7757 if (bp->igu_base_sb == 0xff)
7758 bp->igu_base_sb = igu_sb_id;
7759 bp->igu_sb_cnt++;
7760 }
7761 }
7762 }
7763 bp->igu_sb_cnt = min_t(u8, bp->igu_sb_cnt, bp->l2_cid_count);
7764 if (bp->igu_sb_cnt == 0)
7765 BNX2X_ERR("CAM configuration error\n");
7766}
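/*
 * In normal (non-backward-compatible) IGU mode the CAM is scanned entry
 * by entry: each valid entry carrying this PF's id contributes either
 * the default status block (vector 0) or one more fastpath status
 * block, with the final count clipped to the L2 CID count.
 */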
7767
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007768static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp,
7769 u32 switch_cfg)
7770{
Yaniv Rosnera22f0782010-09-07 11:41:20 +00007771 int cfg_size = 0, idx, port = BP_PORT(bp);
7772
7773 /* Aggregation of supported attributes of all external phys */
7774 bp->port.supported[0] = 0;
7775 bp->port.supported[1] = 0;
Yaniv Rosnerb7737c92010-09-07 11:40:54 +00007776 switch (bp->link_params.num_phys) {
7777 case 1:
Yaniv Rosnera22f0782010-09-07 11:41:20 +00007778 bp->port.supported[0] = bp->link_params.phy[INT_PHY].supported;
7779 cfg_size = 1;
7780 break;
Yaniv Rosnerb7737c92010-09-07 11:40:54 +00007781 case 2:
Yaniv Rosnera22f0782010-09-07 11:41:20 +00007782 bp->port.supported[0] = bp->link_params.phy[EXT_PHY1].supported;
7783 cfg_size = 1;
7784 break;
7785 case 3:
7786 if (bp->link_params.multi_phy_config &
7787 PORT_HW_CFG_PHY_SWAPPED_ENABLED) {
7788 bp->port.supported[1] =
7789 bp->link_params.phy[EXT_PHY1].supported;
7790 bp->port.supported[0] =
7791 bp->link_params.phy[EXT_PHY2].supported;
7792 } else {
7793 bp->port.supported[0] =
7794 bp->link_params.phy[EXT_PHY1].supported;
7795 bp->port.supported[1] =
7796 bp->link_params.phy[EXT_PHY2].supported;
7797 }
7798 cfg_size = 2;
7799 break;
Yaniv Rosnerb7737c92010-09-07 11:40:54 +00007800 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007801
Yaniv Rosnera22f0782010-09-07 11:41:20 +00007802 if (!(bp->port.supported[0] || bp->port.supported[1])) {
Yaniv Rosnerb7737c92010-09-07 11:40:54 +00007803 BNX2X_ERR("NVRAM config error. BAD phy config. "
Yaniv Rosnera22f0782010-09-07 11:41:20 +00007804 "PHY1 config 0x%x, PHY2 config 0x%x\n",
Yaniv Rosnerb7737c92010-09-07 11:40:54 +00007805 SHMEM_RD(bp,
Yaniv Rosnera22f0782010-09-07 11:41:20 +00007806 dev_info.port_hw_config[port].external_phy_config),
7807 SHMEM_RD(bp,
7808 dev_info.port_hw_config[port].external_phy_config2));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007809 return;
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00007810 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007811
Yaniv Rosnerb7737c92010-09-07 11:40:54 +00007812 switch (switch_cfg) {
7813 case SWITCH_CFG_1G:
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007814 bp->port.phy_addr = REG_RD(bp, NIG_REG_SERDES0_CTRL_PHY_ADDR +
7815 port*0x10);
7816 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007817 break;
7818
7819 case SWITCH_CFG_10G:
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007820 bp->port.phy_addr = REG_RD(bp, NIG_REG_XGXS0_CTRL_PHY_ADDR +
7821 port*0x18);
7822 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007823 break;
7824
7825 default:
7826 BNX2X_ERR("BAD switch_cfg link_config 0x%x\n",
Yaniv Rosnera22f0782010-09-07 11:41:20 +00007827 bp->port.link_config[0]);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007828 return;
7829 }
Yaniv Rosnera22f0782010-09-07 11:41:20 +00007830 /* mask what we support according to speed_cap_mask per configuration */
7831 for (idx = 0; idx < cfg_size; idx++) {
7832 if (!(bp->link_params.speed_cap_mask[idx] &
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07007833 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF))
Yaniv Rosnera22f0782010-09-07 11:41:20 +00007834 bp->port.supported[idx] &= ~SUPPORTED_10baseT_Half;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007835
Yaniv Rosnera22f0782010-09-07 11:41:20 +00007836 if (!(bp->link_params.speed_cap_mask[idx] &
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07007837 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL))
Yaniv Rosnera22f0782010-09-07 11:41:20 +00007838 bp->port.supported[idx] &= ~SUPPORTED_10baseT_Full;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007839
Yaniv Rosnera22f0782010-09-07 11:41:20 +00007840 if (!(bp->link_params.speed_cap_mask[idx] &
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07007841 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))
Yaniv Rosnera22f0782010-09-07 11:41:20 +00007842 bp->port.supported[idx] &= ~SUPPORTED_100baseT_Half;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007843
Yaniv Rosnera22f0782010-09-07 11:41:20 +00007844 if (!(bp->link_params.speed_cap_mask[idx] &
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07007845 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL))
Yaniv Rosnera22f0782010-09-07 11:41:20 +00007846 bp->port.supported[idx] &= ~SUPPORTED_100baseT_Full;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007847
Yaniv Rosnera22f0782010-09-07 11:41:20 +00007848 if (!(bp->link_params.speed_cap_mask[idx] &
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07007849 PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))
Yaniv Rosnera22f0782010-09-07 11:41:20 +00007850 bp->port.supported[idx] &= ~(SUPPORTED_1000baseT_Half |
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00007851 SUPPORTED_1000baseT_Full);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007852
Yaniv Rosnera22f0782010-09-07 11:41:20 +00007853 if (!(bp->link_params.speed_cap_mask[idx] &
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07007854 PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
Yaniv Rosnera22f0782010-09-07 11:41:20 +00007855 bp->port.supported[idx] &= ~SUPPORTED_2500baseX_Full;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007856
Yaniv Rosnera22f0782010-09-07 11:41:20 +00007857 if (!(bp->link_params.speed_cap_mask[idx] &
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07007858 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G))
Yaniv Rosnera22f0782010-09-07 11:41:20 +00007859 bp->port.supported[idx] &= ~SUPPORTED_10000baseT_Full;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007860
Yaniv Rosnera22f0782010-09-07 11:41:20 +00007861 }
7862
7863 BNX2X_DEV_INFO("supported 0x%x 0x%x\n", bp->port.supported[0],
7864 bp->port.supported[1]);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007865}
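/*
 * bp->port.supported[0]/[1] now describe what each external PHY
 * configuration can do, already clipped by the NVRAM speed_cap_mask;
 * bnx2x_link_settings_requested() below intersects this with the
 * requested link_config from NVRAM.
 */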
7866
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007867static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007868{
Yaniv Rosnera22f0782010-09-07 11:41:20 +00007869 u32 link_config, idx, cfg_size = 0;
7870 bp->port.advertising[0] = 0;
7871 bp->port.advertising[1] = 0;
7872 switch (bp->link_params.num_phys) {
7873 case 1:
7874 case 2:
7875 cfg_size = 1;
7876 break;
7877 case 3:
7878 cfg_size = 2;
7879 break;
7880 }
7881 for (idx = 0; idx < cfg_size; idx++) {
7882 bp->link_params.req_duplex[idx] = DUPLEX_FULL;
7883 link_config = bp->port.link_config[idx];
7884 switch (link_config & PORT_FEATURE_LINK_SPEED_MASK) {
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00007885 case PORT_FEATURE_LINK_SPEED_AUTO:
Yaniv Rosnera22f0782010-09-07 11:41:20 +00007886 if (bp->port.supported[idx] & SUPPORTED_Autoneg) {
7887 bp->link_params.req_line_speed[idx] =
7888 SPEED_AUTO_NEG;
7889 bp->port.advertising[idx] |=
7890 bp->port.supported[idx];
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00007891 } else {
7892 /* force 10G, no AN */
Yaniv Rosnera22f0782010-09-07 11:41:20 +00007893 bp->link_params.req_line_speed[idx] =
7894 SPEED_10000;
7895 bp->port.advertising[idx] |=
7896 (ADVERTISED_10000baseT_Full |
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00007897 ADVERTISED_FIBRE);
Yaniv Rosnera22f0782010-09-07 11:41:20 +00007898 continue;
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00007899 }
7900 break;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007901
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00007902 case PORT_FEATURE_LINK_SPEED_10M_FULL:
Yaniv Rosnera22f0782010-09-07 11:41:20 +00007903 if (bp->port.supported[idx] & SUPPORTED_10baseT_Full) {
7904 bp->link_params.req_line_speed[idx] =
7905 SPEED_10;
7906 bp->port.advertising[idx] |=
7907 (ADVERTISED_10baseT_Full |
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00007908 ADVERTISED_TP);
7909 } else {
7910 BNX2X_ERROR("NVRAM config error. "
7911 "Invalid link_config 0x%x"
7912 " speed_cap_mask 0x%x\n",
7913 link_config,
Yaniv Rosnera22f0782010-09-07 11:41:20 +00007914 bp->link_params.speed_cap_mask[idx]);
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00007915 return;
7916 }
7917 break;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007918
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00007919 case PORT_FEATURE_LINK_SPEED_10M_HALF:
Yaniv Rosnera22f0782010-09-07 11:41:20 +00007920 if (bp->port.supported[idx] & SUPPORTED_10baseT_Half) {
				bp->link_params.req_line_speed[idx] =
							SPEED_10;
				bp->link_params.req_duplex[idx] =
							DUPLEX_HALF;
				bp->port.advertising[idx] |=
						(ADVERTISED_10baseT_Half |
							ADVERTISED_TP);
			} else {
				BNX2X_ERROR("NVRAM config error. "
					    "Invalid link_config 0x%x"
					    " speed_cap_mask 0x%x\n",
					    link_config,
				    bp->link_params.speed_cap_mask[idx]);
				return;
			}
			break;

		case PORT_FEATURE_LINK_SPEED_100M_FULL:
			if (bp->port.supported[idx] &
			    SUPPORTED_100baseT_Full) {
				bp->link_params.req_line_speed[idx] =
							SPEED_100;
				bp->port.advertising[idx] |=
						(ADVERTISED_100baseT_Full |
							ADVERTISED_TP);
			} else {
				BNX2X_ERROR("NVRAM config error. "
					    "Invalid link_config 0x%x"
					    " speed_cap_mask 0x%x\n",
					    link_config,
				    bp->link_params.speed_cap_mask[idx]);
				return;
			}
			break;

		case PORT_FEATURE_LINK_SPEED_100M_HALF:
			if (bp->port.supported[idx] &
			    SUPPORTED_100baseT_Half) {
				bp->link_params.req_line_speed[idx] =
							SPEED_100;
				bp->link_params.req_duplex[idx] =
							DUPLEX_HALF;
				bp->port.advertising[idx] |=
						(ADVERTISED_100baseT_Half |
							ADVERTISED_TP);
			} else {
				BNX2X_ERROR("NVRAM config error. "
					    "Invalid link_config 0x%x"
					    " speed_cap_mask 0x%x\n",
					    link_config,
				    bp->link_params.speed_cap_mask[idx]);
				return;
			}
			break;

		case PORT_FEATURE_LINK_SPEED_1G:
			if (bp->port.supported[idx] &
			    SUPPORTED_1000baseT_Full) {
				bp->link_params.req_line_speed[idx] =
							SPEED_1000;
				bp->port.advertising[idx] |=
						(ADVERTISED_1000baseT_Full |
							ADVERTISED_TP);
			} else {
				BNX2X_ERROR("NVRAM config error. "
					    "Invalid link_config 0x%x"
					    " speed_cap_mask 0x%x\n",
					    link_config,
				    bp->link_params.speed_cap_mask[idx]);
				return;
			}
			break;

		case PORT_FEATURE_LINK_SPEED_2_5G:
			if (bp->port.supported[idx] &
			    SUPPORTED_2500baseX_Full) {
				bp->link_params.req_line_speed[idx] =
							SPEED_2500;
				bp->port.advertising[idx] |=
						(ADVERTISED_2500baseX_Full |
							ADVERTISED_TP);
			} else {
				BNX2X_ERROR("NVRAM config error. "
					    "Invalid link_config 0x%x"
					    " speed_cap_mask 0x%x\n",
					    link_config,
				    bp->link_params.speed_cap_mask[idx]);
				return;
			}
			break;

		case PORT_FEATURE_LINK_SPEED_10G_CX4:
		case PORT_FEATURE_LINK_SPEED_10G_KX4:
		case PORT_FEATURE_LINK_SPEED_10G_KR:
			if (bp->port.supported[idx] &
			    SUPPORTED_10000baseT_Full) {
				bp->link_params.req_line_speed[idx] =
							SPEED_10000;
				bp->port.advertising[idx] |=
						(ADVERTISED_10000baseT_Full |
							ADVERTISED_FIBRE);
			} else {
				BNX2X_ERROR("NVRAM config error. "
					    "Invalid link_config 0x%x"
					    " speed_cap_mask 0x%x\n",
					    link_config,
				    bp->link_params.speed_cap_mask[idx]);
				return;
			}
			break;

		default:
			BNX2X_ERROR("NVRAM config error. "
				    "BAD link speed link_config 0x%x\n",
				    link_config);
			bp->link_params.req_line_speed[idx] =
						SPEED_AUTO_NEG;
			bp->port.advertising[idx] =
						bp->port.supported[idx];
			break;
		}

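		/* If flow-control autonegotiation was requested but the
		 * PHY cannot autonegotiate, fall back to no flow control
		 * (handled just below).
		 */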
		bp->link_params.req_flow_ctrl[idx] = (link_config &
					      PORT_FEATURE_FLOW_CONTROL_MASK);
		if ((bp->link_params.req_flow_ctrl[idx] ==
		     BNX2X_FLOW_CTRL_AUTO) &&
		    !(bp->port.supported[idx] & SUPPORTED_Autoneg)) {
			bp->link_params.req_flow_ctrl[idx] =
						BNX2X_FLOW_CTRL_NONE;
		}

		BNX2X_DEV_INFO("req_line_speed %d req_duplex %d req_flow_ctrl"
			       " 0x%x advertising 0x%x\n",
			       bp->link_params.req_line_speed[idx],
			       bp->link_params.req_duplex[idx],
			       bp->link_params.req_flow_ctrl[idx],
			       bp->port.advertising[idx]);
	}
}

static void __devinit bnx2x_set_mac_buf(u8 *mac_buf, u32 mac_lo, u16 mac_hi)
{
	mac_hi = cpu_to_be16(mac_hi);
	mac_lo = cpu_to_be32(mac_lo);
	memcpy(mac_buf, &mac_hi, sizeof(mac_hi));
	memcpy(mac_buf + sizeof(mac_hi), &mac_lo, sizeof(mac_lo));
}
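
/* Example for bnx2x_set_mac_buf() above, with illustrative values:
 * mac_hi = 0x0050 and mac_lo = 0xc2001234 yield mac_buf[] =
 * {0x00, 0x50, 0xc2, 0x00, 0x12, 0x34}, i.e. the NVRAM upper-16/lower-32
 * bit split is reassembled into on-wire (big endian) MAC byte order.
 */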

static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 val, val2;
	u32 config;
	u32 ext_phy_type, ext_phy_config;

	bp->link_params.bp = bp;
	bp->link_params.port = port;

	bp->link_params.lane_config =
		SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config);

	bp->link_params.speed_cap_mask[0] =
		SHMEM_RD(bp,
			 dev_info.port_hw_config[port].speed_capability_mask);
	bp->link_params.speed_cap_mask[1] =
		SHMEM_RD(bp,
			 dev_info.port_hw_config[port].speed_capability_mask2);
	bp->port.link_config[0] =
		SHMEM_RD(bp, dev_info.port_feature_config[port].link_config);

	bp->port.link_config[1] =
		SHMEM_RD(bp, dev_info.port_feature_config[port].link_config2);

	bp->link_params.multi_phy_config =
		SHMEM_RD(bp, dev_info.port_hw_config[port].multi_phy_config);
	/* If the device is capable of WoL, set the default state according
	 * to the HW
	 */
	config = SHMEM_RD(bp, dev_info.port_feature_config[port].config);
	bp->wol = (!(bp->flags & NO_WOL_FLAG) &&
		   (config & PORT_FEATURE_WOL_ENABLED));

	BNX2X_DEV_INFO("lane_config 0x%08x "
		       "speed_cap_mask0 0x%08x link_config0 0x%08x\n",
		       bp->link_params.lane_config,
		       bp->link_params.speed_cap_mask[0],
		       bp->port.link_config[0]);

	bp->link_params.switch_cfg = (bp->port.link_config[0] &
				      PORT_FEATURE_CONNECTED_SWITCH_MASK);
	bnx2x_phy_probe(&bp->link_params);
	bnx2x_link_settings_supported(bp, bp->link_params.switch_cfg);

	bnx2x_link_settings_requested(bp);

	/*
	 * If connected directly, work with the internal PHY, otherwise, work
	 * with the external PHY
	 */
	ext_phy_config =
		SHMEM_RD(bp,
			 dev_info.port_hw_config[port].external_phy_config);
	ext_phy_type = XGXS_EXT_PHY_TYPE(ext_phy_config);
	if (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT)
		bp->mdio.prtad = bp->port.phy_addr;

	else if ((ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE) &&
		 (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN))
		bp->mdio.prtad =
			XGXS_EXT_PHY_ADDR(ext_phy_config);

	val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
	val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
	bnx2x_set_mac_buf(bp->dev->dev_addr, val, val2);
	memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
	memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);

#ifdef BCM_CNIC
	val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].iscsi_mac_upper);
	val = SHMEM_RD(bp, dev_info.port_hw_config[port].iscsi_mac_lower);
	bnx2x_set_mac_buf(bp->iscsi_mac, val, val2);
#endif
}

static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
{
	int func = BP_ABS_FUNC(bp);
	int vn;
	u32 val, val2;
	int rc = 0;

	bnx2x_get_common_hwinfo(bp);

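	/* E1x chips use the HC (host coalescing) block for interrupt
	 * handling; E2 uses the IGU.  In backward-compatible mode the IGU
	 * presents an HC-like interface to the driver.
	 */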
	if (CHIP_IS_E1x(bp)) {
		bp->common.int_block = INT_BLOCK_HC;

		bp->igu_dsb_id = DEF_SB_IGU_ID;
		bp->igu_base_sb = 0;
		bp->igu_sb_cnt = min_t(u8, FP_SB_MAX_E1x, bp->l2_cid_count);
	} else {
		bp->common.int_block = INT_BLOCK_IGU;
		val = REG_RD(bp, IGU_REG_BLOCK_CONFIGURATION);
		if (val & IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN) {
			DP(NETIF_MSG_PROBE, "IGU Backward Compatible Mode\n");
			bp->common.int_block |= INT_BLOCK_MODE_BW_COMP;
		} else
			DP(NETIF_MSG_PROBE, "IGU Normal Mode\n");

		bnx2x_get_igu_cam_info(bp);
	}
	DP(NETIF_MSG_PROBE, "igu_dsb_id %d igu_base_sb %d igu_sb_cnt %d\n",
	   bp->igu_dsb_id, bp->igu_base_sb, bp->igu_sb_cnt);

	/*
	 * Initialize MF configuration
	 */

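	/* In multi-function (MF) mode several PCI functions share one
	 * physical port; the per-function outer-VLAN (OV) tag read below
	 * is what separates their traffic on the wire.
	 */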
	bp->mf_ov = 0;
	bp->mf_mode = 0;
	vn = BP_E1HVN(bp);
	if (!CHIP_IS_E1(bp) && !BP_NOMCP(bp)) {
		if (SHMEM2_HAS(bp, mf_cfg_addr))
			bp->common.mf_cfg_base = SHMEM2_RD(bp, mf_cfg_addr);
		else
			bp->common.mf_cfg_base = bp->common.shmem_base +
				offsetof(struct shmem_region, func_mb) +
				E1H_FUNC_MAX * sizeof(struct drv_func_mb);
		bp->mf_config[vn] =
			MF_CFG_RD(bp, func_mf_config[func].config);

		val = (MF_CFG_RD(bp, func_mf_config[FUNC_0].e1hov_tag) &
		       FUNC_MF_CFG_E1HOV_TAG_MASK);
		if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT)
			bp->mf_mode = 1;
		BNX2X_DEV_INFO("%s function mode\n",
			       IS_MF(bp) ? "multi" : "single");

		if (IS_MF(bp)) {
			val = (MF_CFG_RD(bp, func_mf_config[func].
					 e1hov_tag) &
			       FUNC_MF_CFG_E1HOV_TAG_MASK);
			if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
				bp->mf_ov = val;
				BNX2X_DEV_INFO("MF OV for func %d is %d "
					       "(0x%04x)\n",
					       func, bp->mf_ov, bp->mf_ov);
			} else {
				BNX2X_ERROR("No valid MF OV for func %d,"
					    " aborting\n", func);
				rc = -EPERM;
			}
		} else {
			if (BP_VN(bp)) {
				BNX2X_ERROR("VN %d in single function mode,"
					    " aborting\n", BP_E1HVN(bp));
				rc = -EPERM;
			}
		}
	}

	/* adjust igu_sb_cnt to MF for E1x */
	if (CHIP_IS_E1x(bp) && IS_MF(bp))
		bp->igu_sb_cnt /= E1HVN_MAX;

	/*
	 * adjust E2 sb count: to be removed once the FW supports
	 * more than 16 L2 clients
	 */
#define MAX_L2_CLIENTS 16
	if (CHIP_IS_E2(bp))
		bp->igu_sb_cnt = min_t(u8, bp->igu_sb_cnt,
				       MAX_L2_CLIENTS / (IS_MF(bp) ? 4 : 1));

	if (!BP_NOMCP(bp)) {
		bnx2x_get_port_hwinfo(bp);

		bp->fw_seq =
		    (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) &
		     DRV_MSG_SEQ_NUMBER_MASK);
		BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
	}

	if (IS_MF(bp)) {
		val2 = MF_CFG_RD(bp, func_mf_config[func].mac_upper);
		val = MF_CFG_RD(bp, func_mf_config[func].mac_lower);
		if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) &&
		    (val != FUNC_MF_CFG_LOWERMAC_DEFAULT)) {
			bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
			bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
			bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
			bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
			bp->dev->dev_addr[4] = (u8)(val >> 8 & 0xff);
			bp->dev->dev_addr[5] = (u8)(val & 0xff);
			memcpy(bp->link_params.mac_addr, bp->dev->dev_addr,
			       ETH_ALEN);
			memcpy(bp->dev->perm_addr, bp->dev->dev_addr,
			       ETH_ALEN);
		}

		return rc;
	}

	if (BP_NOMCP(bp)) {
		/* only supposed to happen on emulation/FPGA */
		BNX2X_ERROR("warning: random MAC workaround active\n");
		random_ether_addr(bp->dev->dev_addr);
		memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
	}

	return rc;
}

static void __devinit bnx2x_read_fwinfo(struct bnx2x *bp)
{
	int cnt, i, block_end, rodi;
	char vpd_data[BNX2X_VPD_LEN+1];
	char str_id_reg[VENDOR_ID_LEN+1];
	char str_id_cap[VENDOR_ID_LEN+1];
	u8 len;

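	/* Walk the PCI VPD read-only section: locate the LRDT RO-data tag,
	 * then look for the manufacturer-ID keyword and, for the vendor
	 * handled below, the vendor-specific firmware version string.
	 */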
	cnt = pci_read_vpd(bp->pdev, 0, BNX2X_VPD_LEN, vpd_data);
	memset(bp->fw_ver, 0, sizeof(bp->fw_ver));

	if (cnt < BNX2X_VPD_LEN)
		goto out_not_found;

	i = pci_vpd_find_tag(vpd_data, 0, BNX2X_VPD_LEN,
			     PCI_VPD_LRDT_RO_DATA);
	if (i < 0)
		goto out_not_found;

	block_end = i + PCI_VPD_LRDT_TAG_SIZE +
		    pci_vpd_lrdt_size(&vpd_data[i]);

	i += PCI_VPD_LRDT_TAG_SIZE;

	if (block_end > BNX2X_VPD_LEN)
		goto out_not_found;

	rodi = pci_vpd_find_info_keyword(vpd_data, i, block_end,
					 PCI_VPD_RO_KEYWORD_MFR_ID);
	if (rodi < 0)
		goto out_not_found;

	len = pci_vpd_info_field_size(&vpd_data[rodi]);

	if (len != VENDOR_ID_LEN)
		goto out_not_found;

	rodi += PCI_VPD_INFO_FLD_HDR_SIZE;

	/* vendor specific info */
	snprintf(str_id_reg, VENDOR_ID_LEN + 1, "%04x", PCI_VENDOR_ID_DELL);
	snprintf(str_id_cap, VENDOR_ID_LEN + 1, "%04X", PCI_VENDOR_ID_DELL);
	if (!strncmp(str_id_reg, &vpd_data[rodi], VENDOR_ID_LEN) ||
	    !strncmp(str_id_cap, &vpd_data[rodi], VENDOR_ID_LEN)) {

		rodi = pci_vpd_find_info_keyword(vpd_data, i, block_end,
						 PCI_VPD_RO_KEYWORD_VENDOR0);
		if (rodi >= 0) {
			len = pci_vpd_info_field_size(&vpd_data[rodi]);

			rodi += PCI_VPD_INFO_FLD_HDR_SIZE;

			if (len < 32 && (len + rodi) <= BNX2X_VPD_LEN) {
				memcpy(bp->fw_ver, &vpd_data[rodi], len);
				bp->fw_ver[len] = ' ';
			}
		}
		return;
	}
out_not_found:
	return;
}

static int __devinit bnx2x_init_bp(struct bnx2x *bp)
{
	int func;
	int timer_interval;
	int rc;

	/* Disable interrupt handling until HW is initialized */
	atomic_set(&bp->intr_sem, 1);
	smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */

	mutex_init(&bp->port.phy_mutex);
	mutex_init(&bp->fw_mb_mutex);
	spin_lock_init(&bp->stats_lock);
#ifdef BCM_CNIC
	mutex_init(&bp->cnic_mutex);
#endif

	INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
	INIT_DELAYED_WORK(&bp->reset_task, bnx2x_reset_task);

	rc = bnx2x_get_hwinfo(bp);

	if (!rc)
		rc = bnx2x_alloc_mem_bp(bp);

	bnx2x_read_fwinfo(bp);

	func = BP_FUNC(bp);

	/* need to reset chip if undi was active */
	if (!BP_NOMCP(bp))
		bnx2x_undi_unload(bp);

	if (CHIP_REV_IS_FPGA(bp))
		dev_err(&bp->pdev->dev, "FPGA detected\n");

	if (BP_NOMCP(bp) && (func == 0))
		dev_err(&bp->pdev->dev, "MCP disabled, "
					"must load devices in order!\n");

	/* Set multi queue mode */
	if ((multi_mode != ETH_RSS_MODE_DISABLED) &&
	    ((int_mode == INT_MODE_INTx) || (int_mode == INT_MODE_MSI))) {
		dev_err(&bp->pdev->dev, "Multi disabled since int_mode "
					"requested is not MSI-X\n");
		multi_mode = ETH_RSS_MODE_DISABLED;
	}
	bp->multi_mode = multi_mode;
	bp->int_mode = int_mode;

	bp->dev->features |= NETIF_F_GRO;

	/* Set TPA flags */
	if (disable_tpa) {
		bp->flags &= ~TPA_ENABLE_FLAG;
		bp->dev->features &= ~NETIF_F_LRO;
	} else {
		bp->flags |= TPA_ENABLE_FLAG;
		bp->dev->features |= NETIF_F_LRO;
	}
	bp->disable_tpa = disable_tpa;

	if (CHIP_IS_E1(bp))
		bp->dropless_fc = 0;
	else
		bp->dropless_fc = dropless_fc;

	bp->mrrs = mrrs;

	bp->tx_ring_size = MAX_TX_AVAIL;

	bp->rx_csum = 1;

	/* make sure that the numbers are in the right granularity */
	bp->tx_ticks = (50 / BNX2X_BTR) * BNX2X_BTR;
	bp->rx_ticks = (25 / BNX2X_BTR) * BNX2X_BTR;

	timer_interval = (CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ);
	bp->current_interval = (poll ? poll : timer_interval);

	init_timer(&bp->timer);
	bp->timer.expires = jiffies + bp->current_interval;
	bp->timer.data = (unsigned long) bp;
	bp->timer.function = bnx2x_timer;

	return rc;
}


/****************************************************************************
* General service functions
****************************************************************************/

/* called with rtnl_lock */
static int bnx2x_open(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	netif_carrier_off(dev);

	bnx2x_set_power_state(bp, PCI_D0);

	if (!bnx2x_reset_is_done(bp)) {
		do {
			/* Reset MCP mailbox sequence if there is ongoing
			 * recovery
			 */
			bp->fw_seq = 0;

			/* If it's the first function to load and reset-done
			 * is still not cleared, a previous recovery may not
			 * have completed.  We don't check the attention state
			 * here because it may have already been cleared by a
			 * "common" reset, but we shall proceed with "process
			 * kill" anyway.
			 */
			if ((bnx2x_get_load_cnt(bp) == 0) &&
			    bnx2x_trylock_hw_lock(bp,
					HW_LOCK_RESOURCE_RESERVED_08) &&
			    (!bnx2x_leader_reset(bp))) {
				DP(NETIF_MSG_HW, "Recovered in open\n");
				break;
			}

			bnx2x_set_power_state(bp, PCI_D3hot);

			printk(KERN_ERR "%s: Recovery flow hasn't been "
			       "properly completed yet. Try again later. "
			       "If you still see this message after a few "
			       "retries then power cycle is required.\n",
			       bp->dev->name);

			return -EAGAIN;
		} while (0);
	}

	bp->recovery_state = BNX2X_RECOVERY_DONE;

	return bnx2x_nic_load(bp, LOAD_OPEN);
}

/* called with rtnl_lock */
static int bnx2x_close(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	/* Unload the driver, release IRQs */
	bnx2x_nic_unload(bp, UNLOAD_CLOSE);
	bnx2x_set_power_state(bp, PCI_D3hot);

	return 0;
}

/* called with netif_tx_lock from dev_mcast.c */
void bnx2x_set_rx_mode(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);
	u32 rx_mode = BNX2X_RX_MODE_NORMAL;
	int port = BP_PORT(bp);

	if (bp->state != BNX2X_STATE_OPEN) {
		DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
		return;
	}

	DP(NETIF_MSG_IFUP, "dev->flags = %x\n", dev->flags);

	if (dev->flags & IFF_PROMISC)
		rx_mode = BNX2X_RX_MODE_PROMISC;
	else if ((dev->flags & IFF_ALLMULTI) ||
		 ((netdev_mc_count(dev) > BNX2X_MAX_MULTICAST) &&
		  CHIP_IS_E1(bp)))
		rx_mode = BNX2X_RX_MODE_ALLMULTI;
	else { /* some multicasts */
		if (CHIP_IS_E1(bp)) {
			/*
			 * set mc list, do not wait as wait implies sleep
			 * and set_rx_mode can be invoked from non-sleepable
			 * context
			 */
			u8 offset = (CHIP_REV_IS_SLOW(bp) ?
				     BNX2X_MAX_EMUL_MULTI*(1 + port) :
				     BNX2X_MAX_MULTICAST*(1 + port));

			bnx2x_set_e1_mc_list(bp, offset);
		} else { /* E1H */
			/* Accept one or more multicasts */
			struct netdev_hw_addr *ha;
			u32 mc_filter[MC_HASH_SIZE];
			u32 crc, bit, regidx;
			int i;

			memset(mc_filter, 0, 4 * MC_HASH_SIZE);

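			/* Bin hash: the top byte of the CRC32c of each
			 * multicast MAC selects one of 256 filter bits,
			 * spread across the MC_HASH_SIZE 32-bit registers.
			 */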
			netdev_for_each_mc_addr(ha, dev) {
				DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n",
				   bnx2x_mc_addr(ha));

				crc = crc32c_le(0, bnx2x_mc_addr(ha),
						ETH_ALEN);
				bit = (crc >> 24) & 0xff;
				regidx = bit >> 5;
				bit &= 0x1f;
				mc_filter[regidx] |= (1 << bit);
			}

			for (i = 0; i < MC_HASH_SIZE; i++)
				REG_WR(bp, MC_HASH_OFFSET(bp, i),
				       mc_filter[i]);
		}
	}

	bp->rx_mode = rx_mode;
	bnx2x_set_storm_rx_mode(bp);
}

/* called with rtnl_lock */
static int bnx2x_mdio_read(struct net_device *netdev, int prtad,
			   int devad, u16 addr)
{
	struct bnx2x *bp = netdev_priv(netdev);
	u16 value;
	int rc;

	DP(NETIF_MSG_LINK, "mdio_read: prtad 0x%x, devad 0x%x, addr 0x%x\n",
	   prtad, devad, addr);

	/* The HW expects different devad if CL22 is used */
	devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;

	bnx2x_acquire_phy_lock(bp);
	rc = bnx2x_phy_read(&bp->link_params, prtad, devad, addr, &value);
	bnx2x_release_phy_lock(bp);
	DP(NETIF_MSG_LINK, "mdio_read_val 0x%x rc = 0x%x\n", value, rc);

	if (!rc)
		rc = value;
	return rc;
}

/* called with rtnl_lock */
static int bnx2x_mdio_write(struct net_device *netdev, int prtad, int devad,
			    u16 addr, u16 value)
{
	struct bnx2x *bp = netdev_priv(netdev);
	int rc;

	DP(NETIF_MSG_LINK, "mdio_write: prtad 0x%x, devad 0x%x, addr 0x%x,"
	   " value 0x%x\n", prtad, devad, addr, value);

	/* The HW expects different devad if CL22 is used */
	devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;

	bnx2x_acquire_phy_lock(bp);
	rc = bnx2x_phy_write(&bp->link_params, prtad, devad, addr, value);
	bnx2x_release_phy_lock(bp);
	return rc;
}

/* called with rtnl_lock */
static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct bnx2x *bp = netdev_priv(dev);
	struct mii_ioctl_data *mdio = if_mii(ifr);

	DP(NETIF_MSG_LINK, "ioctl: phy id 0x%x, reg 0x%x, val_in 0x%x\n",
	   mdio->phy_id, mdio->reg_num, mdio->val_in);

	if (!netif_running(dev))
		return -EAGAIN;

	return mdio_mii_ioctl(&bp->mdio, mdio, cmd);
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void poll_bnx2x(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	disable_irq(bp->pdev->irq);
	bnx2x_interrupt(bp->pdev->irq, dev);
	enable_irq(bp->pdev->irq);
}
#endif

static const struct net_device_ops bnx2x_netdev_ops = {
	.ndo_open		= bnx2x_open,
	.ndo_stop		= bnx2x_close,
	.ndo_start_xmit		= bnx2x_start_xmit,
	.ndo_set_multicast_list	= bnx2x_set_rx_mode,
	.ndo_set_mac_address	= bnx2x_change_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_do_ioctl		= bnx2x_ioctl,
	.ndo_change_mtu		= bnx2x_change_mtu,
	.ndo_tx_timeout		= bnx2x_tx_timeout,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= poll_bnx2x,
#endif
};

static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
				    struct net_device *dev)
{
	struct bnx2x *bp;
	int rc;

	SET_NETDEV_DEV(dev, &pdev->dev);
	bp = netdev_priv(dev);

	bp->dev = dev;
	bp->pdev = pdev;
	bp->flags = 0;
	bp->pf_num = PCI_FUNC(pdev->devfn);

	rc = pci_enable_device(pdev);
	if (rc) {
		dev_err(&bp->pdev->dev,
			"Cannot enable PCI device, aborting\n");
		goto err_out;
	}

	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		dev_err(&bp->pdev->dev,
			"Cannot find PCI device base address, aborting\n");
		rc = -ENODEV;
		goto err_out_disable;
	}

	if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
		dev_err(&bp->pdev->dev, "Cannot find second PCI device"
			" base address, aborting\n");
		rc = -ENODEV;
		goto err_out_disable;
	}

	if (atomic_read(&pdev->enable_cnt) == 1) {
		rc = pci_request_regions(pdev, DRV_MODULE_NAME);
		if (rc) {
			dev_err(&bp->pdev->dev,
				"Cannot obtain PCI resources, aborting\n");
			goto err_out_disable;
		}

		pci_set_master(pdev);
		pci_save_state(pdev);
	}

	bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
	if (bp->pm_cap == 0) {
		dev_err(&bp->pdev->dev,
			"Cannot find power management capability, aborting\n");
		rc = -EIO;
		goto err_out_release;
	}

	bp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
	if (bp->pcie_cap == 0) {
		dev_err(&bp->pdev->dev,
			"Cannot find PCI Express capability, aborting\n");
		rc = -EIO;
		goto err_out_release;
	}

	if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) == 0) {
		bp->flags |= USING_DAC_FLAG;
		if (dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64)) != 0) {
			dev_err(&bp->pdev->dev, "dma_set_coherent_mask"
				" failed, aborting\n");
			rc = -EIO;
			goto err_out_release;
		}

	} else if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)) != 0) {
		dev_err(&bp->pdev->dev,
			"System does not support DMA, aborting\n");
		rc = -EIO;
		goto err_out_release;
	}

	dev->mem_start = pci_resource_start(pdev, 0);
	dev->base_addr = dev->mem_start;
	dev->mem_end = pci_resource_end(pdev, 0);

	dev->irq = pdev->irq;

	bp->regview = pci_ioremap_bar(pdev, 0);
	if (!bp->regview) {
		dev_err(&bp->pdev->dev,
			"Cannot map register space, aborting\n");
		rc = -ENOMEM;
		goto err_out_release;
	}

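	/* Map no more doorbell space than BAR 2 actually provides */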
	bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
					min_t(u64, BNX2X_DB_SIZE(bp),
					      pci_resource_len(pdev, 2)));
	if (!bp->doorbells) {
		dev_err(&bp->pdev->dev,
			"Cannot map doorbell space, aborting\n");
		rc = -ENOMEM;
		goto err_out_unmap;
	}

	bnx2x_set_power_state(bp, PCI_D0);

	/* clean indirect addresses */
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);
	REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0 + BP_PORT(bp)*16, 0);
	REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0 + BP_PORT(bp)*16, 0);
	REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0 + BP_PORT(bp)*16, 0);
	REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0 + BP_PORT(bp)*16, 0);

	/* Reset the load counter */
	bnx2x_clear_load_cnt(bp);

	dev->watchdog_timeo = TX_TIMEOUT;

	dev->netdev_ops = &bnx2x_netdev_ops;
	bnx2x_set_ethtool_ops(dev);
	dev->features |= NETIF_F_SG;
	dev->features |= NETIF_F_HW_CSUM;
	if (bp->flags & USING_DAC_FLAG)
		dev->features |= NETIF_F_HIGHDMA;
	dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
	dev->features |= NETIF_F_TSO6;
	dev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX);

	dev->vlan_features |= NETIF_F_SG;
	dev->vlan_features |= NETIF_F_HW_CSUM;
	if (bp->flags & USING_DAC_FLAG)
		dev->vlan_features |= NETIF_F_HIGHDMA;
	dev->vlan_features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
	dev->vlan_features |= NETIF_F_TSO6;

	/* get_port_hwinfo() will set prtad and mmds properly */
	bp->mdio.prtad = MDIO_PRTAD_NONE;
	bp->mdio.mmds = 0;
	bp->mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
	bp->mdio.dev = dev;
	bp->mdio.mdio_read = bnx2x_mdio_read;
	bp->mdio.mdio_write = bnx2x_mdio_write;

	return 0;

err_out_unmap:
	if (bp->regview) {
		iounmap(bp->regview);
		bp->regview = NULL;
	}
	if (bp->doorbells) {
		iounmap(bp->doorbells);
		bp->doorbells = NULL;
	}

err_out_release:
	if (atomic_read(&pdev->enable_cnt) == 1)
		pci_release_regions(pdev);

err_out_disable:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);

err_out:
	return rc;
}

static void __devinit bnx2x_get_pcie_width_speed(struct bnx2x *bp,
						 int *width, int *speed)
{
	u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);

	*width = (val & PCICFG_LINK_WIDTH) >> PCICFG_LINK_WIDTH_SHIFT;

	/* return value of 1=2.5GHz 2=5GHz */
	*speed = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT;
}

static int bnx2x_check_firmware(struct bnx2x *bp)
{
	const struct firmware *firmware = bp->firmware;
	struct bnx2x_fw_file_hdr *fw_hdr;
	struct bnx2x_fw_file_section *sections;
	u32 offset, len, num_ops;
	u16 *ops_offsets;
	int i;
	const u8 *fw_ver;

	if (firmware->size < sizeof(struct bnx2x_fw_file_hdr))
		return -EINVAL;

	fw_hdr = (struct bnx2x_fw_file_hdr *)firmware->data;
	sections = (struct bnx2x_fw_file_section *)fw_hdr;

	/* Make sure none of the offsets and sizes make us read beyond
	 * the end of the firmware data */
	for (i = 0; i < sizeof(*fw_hdr) / sizeof(*sections); i++) {
		offset = be32_to_cpu(sections[i].offset);
		len = be32_to_cpu(sections[i].len);
		if (offset + len > firmware->size) {
			dev_err(&bp->pdev->dev,
				"Section %d length is out of bounds\n", i);
			return -EINVAL;
		}
	}

	/* Likewise for the init_ops offsets */
	offset = be32_to_cpu(fw_hdr->init_ops_offsets.offset);
	ops_offsets = (u16 *)(firmware->data + offset);
	num_ops = be32_to_cpu(fw_hdr->init_ops.len) / sizeof(struct raw_op);

	for (i = 0; i < be32_to_cpu(fw_hdr->init_ops_offsets.len) / 2; i++) {
		if (be16_to_cpu(ops_offsets[i]) > num_ops) {
			dev_err(&bp->pdev->dev,
				"Section offset %d is out of bounds\n", i);
			return -EINVAL;
		}
	}

	/* Check FW version */
	offset = be32_to_cpu(fw_hdr->fw_version.offset);
	fw_ver = firmware->data + offset;
	if ((fw_ver[0] != BCM_5710_FW_MAJOR_VERSION) ||
	    (fw_ver[1] != BCM_5710_FW_MINOR_VERSION) ||
	    (fw_ver[2] != BCM_5710_FW_REVISION_VERSION) ||
	    (fw_ver[3] != BCM_5710_FW_ENGINEERING_VERSION)) {
		dev_err(&bp->pdev->dev,
			"Bad FW version:%d.%d.%d.%d. Should be %d.%d.%d.%d\n",
			fw_ver[0], fw_ver[1], fw_ver[2],
			fw_ver[3], BCM_5710_FW_MAJOR_VERSION,
			BCM_5710_FW_MINOR_VERSION,
			BCM_5710_FW_REVISION_VERSION,
			BCM_5710_FW_ENGINEERING_VERSION);
		return -EINVAL;
	}

	return 0;
}

static inline void be32_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
{
	const __be32 *source = (const __be32 *)_source;
	u32 *target = (u32 *)_target;
	u32 i;

	for (i = 0; i < n/4; i++)
		target[i] = be32_to_cpu(source[i]);
}

/*
   Ops array is stored in the following format:
   {op(8bit), offset(24bit, big endian), data(32bit, big endian)}
 */
static inline void bnx2x_prep_ops(const u8 *_source, u8 *_target, u32 n)
{
	const __be32 *source = (const __be32 *)_source;
	struct raw_op *target = (struct raw_op *)_target;
	u32 i, j, tmp;

	for (i = 0, j = 0; i < n/8; i++, j += 2) {
		tmp = be32_to_cpu(source[j]);
		target[i].op = (tmp >> 24) & 0xff;
		target[i].offset = tmp & 0xffffff;
		target[i].raw_data = be32_to_cpu(source[j + 1]);
	}
}

/**
 * IRO array is stored in the following format:
 * {base(24bit), m1(16bit), m2(16bit), m3(16bit), size(16bit) }
 */
static inline void bnx2x_prep_iro(const u8 *_source, u8 *_target, u32 n)
{
	const __be32 *source = (const __be32 *)_source;
	struct iro *target = (struct iro *)_target;
	u32 i, j, tmp;

	for (i = 0, j = 0; i < n/sizeof(struct iro); i++) {
		target[i].base = be32_to_cpu(source[j]);
		j++;
		tmp = be32_to_cpu(source[j]);
		target[i].m1 = (tmp >> 16) & 0xffff;
		target[i].m2 = tmp & 0xffff;
		j++;
		tmp = be32_to_cpu(source[j]);
		target[i].m3 = (tmp >> 16) & 0xffff;
		target[i].size = tmp & 0xffff;
		j++;
	}
}

static inline void be16_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
{
	const __be16 *source = (const __be16 *)_source;
	u16 *target = (u16 *)_target;
	u32 i;

	for (i = 0; i < n/2; i++)
		target[i] = be16_to_cpu(source[i]);
}

#define BNX2X_ALLOC_AND_SET(arr, lbl, func) \
do { \
	u32 len = be32_to_cpu(fw_hdr->arr.len); \
	bp->arr = kmalloc(len, GFP_KERNEL); \
	if (!bp->arr) { \
		pr_err("Failed to allocate %d bytes for "#arr"\n", len); \
		goto lbl; \
	} \
	func(bp->firmware->data + be32_to_cpu(fw_hdr->arr.offset), \
	     (u8 *)bp->arr, len); \
} while (0)
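
/* Note: BNX2X_ALLOC_AND_SET expects 'bp' and 'fw_hdr' in the caller's
 * scope; it copies the named firmware section through the given byte-swap
 * helper and jumps to the given error label on allocation failure.
 */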

int bnx2x_init_firmware(struct bnx2x *bp)
{
	const char *fw_file_name;
	struct bnx2x_fw_file_hdr *fw_hdr;
	int rc;

	if (CHIP_IS_E1(bp))
		fw_file_name = FW_FILE_NAME_E1;
	else if (CHIP_IS_E1H(bp))
		fw_file_name = FW_FILE_NAME_E1H;
	else if (CHIP_IS_E2(bp))
		fw_file_name = FW_FILE_NAME_E2;
	else {
		BNX2X_ERR("Unsupported chip revision\n");
		return -EINVAL;
	}

	BNX2X_DEV_INFO("Loading %s\n", fw_file_name);

	rc = request_firmware(&bp->firmware, fw_file_name, &bp->pdev->dev);
	if (rc) {
		BNX2X_ERR("Can't load firmware file %s\n", fw_file_name);
		goto request_firmware_exit;
	}

	rc = bnx2x_check_firmware(bp);
	if (rc) {
		BNX2X_ERR("Corrupt firmware file %s\n", fw_file_name);
		goto request_firmware_exit;
	}

	fw_hdr = (struct bnx2x_fw_file_hdr *)bp->firmware->data;

	/* Initialize the pointers to the init arrays */
	/* Blob */
	BNX2X_ALLOC_AND_SET(init_data, request_firmware_exit, be32_to_cpu_n);

	/* Opcodes */
	BNX2X_ALLOC_AND_SET(init_ops, init_ops_alloc_err, bnx2x_prep_ops);

	/* Offsets */
	BNX2X_ALLOC_AND_SET(init_ops_offsets, init_offsets_alloc_err,
			    be16_to_cpu_n);

	/* STORMs firmware */
	INIT_TSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
			be32_to_cpu(fw_hdr->tsem_int_table_data.offset);
	INIT_TSEM_PRAM_DATA(bp) = bp->firmware->data +
			be32_to_cpu(fw_hdr->tsem_pram_data.offset);
	INIT_USEM_INT_TABLE_DATA(bp) = bp->firmware->data +
			be32_to_cpu(fw_hdr->usem_int_table_data.offset);
	INIT_USEM_PRAM_DATA(bp) = bp->firmware->data +
			be32_to_cpu(fw_hdr->usem_pram_data.offset);
	INIT_XSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
			be32_to_cpu(fw_hdr->xsem_int_table_data.offset);
	INIT_XSEM_PRAM_DATA(bp) = bp->firmware->data +
			be32_to_cpu(fw_hdr->xsem_pram_data.offset);
	INIT_CSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
			be32_to_cpu(fw_hdr->csem_int_table_data.offset);
	INIT_CSEM_PRAM_DATA(bp) = bp->firmware->data +
			be32_to_cpu(fw_hdr->csem_pram_data.offset);
	/* IRO */
	BNX2X_ALLOC_AND_SET(iro_arr, iro_alloc_err, bnx2x_prep_iro);

	return 0;

iro_alloc_err:
	kfree(bp->init_ops_offsets);
init_offsets_alloc_err:
	kfree(bp->init_ops);
init_ops_alloc_err:
	kfree(bp->init_data);
request_firmware_exit:
	release_firmware(bp->firmware);

	return rc;
}

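/* Round the L2 CID count (plus CNIC CIDs, if configured) up to the queue
 * manager's allocation granularity.  E.g. with a QM_CID_ROUND of 1024
 * (illustrative value), 48 CIDs would be rounded up to 1024.
 */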
static inline int bnx2x_set_qm_cid_count(struct bnx2x *bp, int l2_cid_count)
{
	int cid_count = L2_FP_COUNT(l2_cid_count);

#ifdef BCM_CNIC
	cid_count += CNIC_CID_MAX;
#endif
	return roundup(cid_count, QM_CID_ROUND);
}

static int __devinit bnx2x_init_one(struct pci_dev *pdev,
				    const struct pci_device_id *ent)
{
	struct net_device *dev = NULL;
	struct bnx2x *bp;
	int pcie_width, pcie_speed;
	int rc, cid_count;

	switch (ent->driver_data) {
	case BCM57710:
	case BCM57711:
	case BCM57711E:
		cid_count = FP_SB_MAX_E1x;
		break;

	case BCM57712:
	case BCM57712E:
		cid_count = FP_SB_MAX_E2;
		break;

	default:
		pr_err("Unknown board_type (%ld), aborting\n",
		       ent->driver_data);
		return -ENODEV;
	}

	cid_count += CNIC_CONTEXT_USE;

	/* dev zeroed in init_etherdev */
	dev = alloc_etherdev_mq(sizeof(*bp), cid_count);
	if (!dev) {
		dev_err(&pdev->dev, "Cannot allocate net device\n");
		return -ENOMEM;
	}

	bp = netdev_priv(dev);
	bp->msg_enable = debug;

	pci_set_drvdata(pdev, dev);

	bp->l2_cid_count = cid_count;

	rc = bnx2x_init_dev(pdev, dev);
	if (rc < 0) {
		free_netdev(dev);
		return rc;
	}

	rc = bnx2x_init_bp(bp);
	if (rc)
		goto init_one_exit;

	/* calc qm_cid_count */
	bp->qm_cid_count = bnx2x_set_qm_cid_count(bp, cid_count);

	rc = register_netdev(dev);
	if (rc) {
		dev_err(&pdev->dev, "Cannot register net device\n");
		goto init_one_exit;
	}

	/* Configure interrupt mode: try to enable MSI-X/MSI if
	 * needed, set bp->num_queues appropriately.
	 */
9101 bnx2x_set_int_mode(bp);
9102
9103 /* Add all NAPI objects */
9104 bnx2x_add_all_napi(bp);
9105
Eilon Greenstein37f9ce62009-08-12 08:23:34 +00009106 bnx2x_get_pcie_width_speed(bp, &pcie_width, &pcie_speed);
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00009107
Vladislav Zolotarovcdaa7cb2010-04-19 01:13:57 +00009108 netdev_info(dev, "%s (%c%d) PCI-E x%d %s found at mem %lx,"
9109 " IRQ %d, ", board_info[ent->driver_data].name,
9110 (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00009111 pcie_width,
9112 ((!CHIP_IS_E2(bp) && pcie_speed == 2) ||
9113 (CHIP_IS_E2(bp) && pcie_speed == 1)) ?
9114 "5GHz (Gen2)" : "2.5GHz",
Vladislav Zolotarovcdaa7cb2010-04-19 01:13:57 +00009115 dev->base_addr, bp->pdev->irq);
9116 pr_cont("node addr %pM\n", dev->dev_addr);
Eilon Greensteinc0162012009-03-02 08:01:05 +00009117
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009118 return 0;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07009119
9120init_one_exit:
9121 if (bp->regview)
9122 iounmap(bp->regview);
9123
9124 if (bp->doorbells)
9125 iounmap(bp->doorbells);
9126
9127 free_netdev(dev);
9128
9129 if (atomic_read(&pdev->enable_cnt) == 1)
9130 pci_release_regions(pdev);
9131
9132 pci_disable_device(pdev);
9133 pci_set_drvdata(pdev, NULL);
9134
9135 return rc;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009136}
9137
9138static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
9139{
9140 struct net_device *dev = pci_get_drvdata(pdev);
Eliezer Tamir228241e2008-02-28 11:56:57 -08009141 struct bnx2x *bp;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009142
Eliezer Tamir228241e2008-02-28 11:56:57 -08009143 if (!dev) {
Vladislav Zolotarovcdaa7cb2010-04-19 01:13:57 +00009144 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
Eliezer Tamir228241e2008-02-28 11:56:57 -08009145 return;
9146 }
Eliezer Tamir228241e2008-02-28 11:56:57 -08009147 bp = netdev_priv(dev);
9148
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009149 unregister_netdev(dev);
9150
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00009151 /* Delete all NAPI objects */
9152 bnx2x_del_all_napi(bp);
9153
9154 /* Disable MSI/MSI-X */
9155 bnx2x_disable_msi(bp);
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00009156
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00009157 /* Make sure RESET task is not scheduled before continuing */
9158 cancel_delayed_work_sync(&bp->reset_task);
9159
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009160 if (bp->regview)
9161 iounmap(bp->regview);
9162
9163 if (bp->doorbells)
9164 iounmap(bp->doorbells);
9165
Dmitry Kravkov523224a2010-10-06 03:23:26 +00009166 bnx2x_free_mem_bp(bp);
9167
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009168 free_netdev(dev);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07009169
9170 if (atomic_read(&pdev->enable_cnt) == 1)
9171 pci_release_regions(pdev);
9172
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009173 pci_disable_device(pdev);
9174 pci_set_drvdata(pdev, NULL);
9175}
9176
Yitchak Gertnerf8ef6e42008-09-09 05:07:25 -07009177static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
9178{
9179 int i;
9180
9181 bp->state = BNX2X_STATE_ERROR;
9182
9183 bp->rx_mode = BNX2X_RX_MODE_NONE;
9184
9185 bnx2x_netif_stop(bp, 0);
Stanislaw Gruszkac89af1a2010-05-17 17:35:38 -07009186 netif_carrier_off(bp->dev);
Yitchak Gertnerf8ef6e42008-09-09 05:07:25 -07009187
9188 del_timer_sync(&bp->timer);
9189 bp->stats_state = STATS_STATE_DISABLED;
9190 DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");
9191
9192 /* Release IRQs */
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00009193 bnx2x_free_irq(bp);
Yitchak Gertnerf8ef6e42008-09-09 05:07:25 -07009194
Yitchak Gertnerf8ef6e42008-09-09 05:07:25 -07009195 /* Free SKBs, SGEs, TPA pool and driver internals */
9196 bnx2x_free_skbs(bp);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00009197
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +00009198 for_each_queue(bp, i)
Yitchak Gertnerf8ef6e42008-09-09 05:07:25 -07009199 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00009200
Yitchak Gertnerf8ef6e42008-09-09 05:07:25 -07009201 bnx2x_free_mem(bp);
9202
9203 bp->state = BNX2X_STATE_CLOSED;
9204
Yitchak Gertnerf8ef6e42008-09-09 05:07:25 -07009205 return 0;
9206}
9207
9208static void bnx2x_eeh_recover(struct bnx2x *bp)
9209{
9210 u32 val;
9211
9212 mutex_init(&bp->port.phy_mutex);
9213
9214 bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
9215 bp->link_params.shmem_base = bp->common.shmem_base;
9216 BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);
9217
9218 if (!bp->common.shmem_base ||
9219 (bp->common.shmem_base < 0xA0000) ||
9220 (bp->common.shmem_base >= 0xC0000)) {
9221 BNX2X_DEV_INFO("MCP not active\n");
9222 bp->flags |= NO_MCP_FLAG;
9223 return;
9224 }
9225
9226 val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
9227 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
9228 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
9229 BNX2X_ERR("BAD MCP validity signature\n");
9230
9231 if (!BP_NOMCP(bp)) {
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00009232 bp->fw_seq =
9233 (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) &
9234 DRV_MSG_SEQ_NUMBER_MASK);
Yitchak Gertnerf8ef6e42008-09-09 05:07:25 -07009235 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
9236 }
9237}
9238
/**
 * bnx2x_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t bnx2x_io_error_detected(struct pci_dev *pdev,
						pci_channel_state_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp = netdev_priv(dev);

	rtnl_lock();

	netif_device_detach(dev);

	if (state == pci_channel_io_perm_failure) {
		rtnl_unlock();
		return PCI_ERS_RESULT_DISCONNECT;
	}

	if (netif_running(dev))
		bnx2x_eeh_nic_unload(bp);

	pci_disable_device(pdev);

	rtnl_unlock();

	/* Request a slot reset */
	return PCI_ERS_RESULT_NEED_RESET;
}

/**
 * bnx2x_io_slot_reset - called after the PCI bus has been reset
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.
 */
static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp = netdev_priv(dev);

	rtnl_lock();

	if (pci_enable_device(pdev)) {
		dev_err(&pdev->dev,
			"Cannot re-enable PCI device after reset\n");
		rtnl_unlock();
		return PCI_ERS_RESULT_DISCONNECT;
	}

	pci_set_master(pdev);
	pci_restore_state(pdev);

	if (netif_running(dev))
		bnx2x_set_power_state(bp, PCI_D0);

	rtnl_unlock();

	return PCI_ERS_RESULT_RECOVERED;
}

/**
 * bnx2x_io_resume - called when traffic can start flowing again
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * it's OK to resume normal operation.
 */
static void bnx2x_io_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp = netdev_priv(dev);

	if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
		printk(KERN_ERR "Handling parity error recovery. Try again later\n");
		return;
	}

	rtnl_lock();

	bnx2x_eeh_recover(bp);

	if (netif_running(dev))
		bnx2x_nic_load(bp, LOAD_NORMAL);

	netif_device_attach(dev);

	rtnl_unlock();
}

static struct pci_error_handlers bnx2x_err_handler = {
	.error_detected = bnx2x_io_error_detected,
	.slot_reset     = bnx2x_io_slot_reset,
	.resume         = bnx2x_io_resume,
};

static struct pci_driver bnx2x_pci_driver = {
	.name        = DRV_MODULE_NAME,
	.id_table    = bnx2x_pci_tbl,
	.probe       = bnx2x_init_one,
	.remove      = __devexit_p(bnx2x_remove_one),
	.suspend     = bnx2x_suspend,
	.resume      = bnx2x_resume,
	.err_handler = &bnx2x_err_handler,
};

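/* Create the single-threaded slow-path workqueue before registering the
 * PCI driver, so it is available as soon as a device is probed.
 */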
static int __init bnx2x_init(void)
{
	int ret;

	pr_info("%s", version);

	bnx2x_wq = create_singlethread_workqueue("bnx2x");
	if (bnx2x_wq == NULL) {
		pr_err("Cannot create workqueue\n");
		return -ENOMEM;
	}

	ret = pci_register_driver(&bnx2x_pci_driver);
	if (ret) {
		pr_err("Cannot register driver\n");
		destroy_workqueue(bnx2x_wq);
	}
	return ret;
}

static void __exit bnx2x_cleanup(void)
{
	pci_unregister_driver(&bnx2x_pci_driver);

	destroy_workqueue(bnx2x_wq);
}

module_init(bnx2x_init);
module_exit(bnx2x_cleanup);

#ifdef BCM_CNIC

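/*
 * The functions below implement the interface between bnx2x and the
 * cnic driver, which handles offloaded (e.g. iSCSI) connections on
 * top of this device.
 */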
/* count denotes the number of new completions we have seen */
static void bnx2x_cnic_sp_post(struct bnx2x *bp, int count)
{
	struct eth_spe *spe;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return;
#endif

	spin_lock_bh(&bp->spq_lock);
	BUG_ON(bp->cnic_spq_pending < count);
	bp->cnic_spq_pending -= count;

	for (; bp->cnic_kwq_pending; bp->cnic_kwq_pending--) {
		u16 type = (le16_to_cpu(bp->cnic_kwq_cons->hdr.type)
				& SPE_HDR_CONN_TYPE) >>
				SPE_HDR_CONN_TYPE_SHIFT;

		/* Set validation for iSCSI L2 client before sending SETUP
		 * ramrod
		 */
		if (type == ETH_CONNECTION_TYPE) {
			u8 cmd = (le32_to_cpu(bp->cnic_kwq_cons->
					      hdr.conn_and_cmd_data) >>
				  SPE_HDR_CMD_ID_SHIFT) & 0xff;

			if (cmd == RAMROD_CMD_ID_ETH_CLIENT_SETUP)
				bnx2x_set_ctx_validation(&bp->context.
					vcxt[BNX2X_ISCSI_ETH_CID].eth,
					HW_CID(bp, BNX2X_ISCSI_ETH_CID));
		}

		/* There may be no more than 8 L2 and COMMON SPEs and no more
		 * than 8 L5 SPEs in flight.
		 */
		if ((type == NONE_CONNECTION_TYPE) ||
		    (type == ETH_CONNECTION_TYPE)) {
			if (!atomic_read(&bp->spq_left))
				break;
			else
				atomic_dec(&bp->spq_left);
		} else if (type == ISCSI_CONNECTION_TYPE) {
			if (bp->cnic_spq_pending >=
			    bp->cnic_eth_dev.max_kwqe_pending)
				break;
			else
				bp->cnic_spq_pending++;
		} else {
			BNX2X_ERR("Unknown SPE type: %d\n", type);
			bnx2x_panic();
			break;
		}

		spe = bnx2x_sp_get_next(bp);
		*spe = *bp->cnic_kwq_cons;

		DP(NETIF_MSG_TIMER, "pending on SPQ %d, on KWQ %d count %d\n",
		   bp->cnic_spq_pending, bp->cnic_kwq_pending, count);

		if (bp->cnic_kwq_cons == bp->cnic_kwq_last)
			bp->cnic_kwq_cons = bp->cnic_kwq;
		else
			bp->cnic_kwq_cons++;
	}
	bnx2x_sp_prod_update(bp);
	spin_unlock_bh(&bp->spq_lock);
}

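/*
 * Queue up to @count 16-byte kwqes from cnic on the local kwq ring;
 * they are drained onto the hardware slow-path queue by
 * bnx2x_cnic_sp_post() as credits allow.  Returns the number of
 * entries actually queued.
 */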
static int bnx2x_cnic_sp_queue(struct net_device *dev,
			       struct kwqe_16 *kwqes[], u32 count)
{
	struct bnx2x *bp = netdev_priv(dev);
	int i;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return -EIO;
#endif

	spin_lock_bh(&bp->spq_lock);

	for (i = 0; i < count; i++) {
		struct eth_spe *spe = (struct eth_spe *)kwqes[i];

		if (bp->cnic_kwq_pending == MAX_SP_DESC_CNT)
			break;

		*bp->cnic_kwq_prod = *spe;

		bp->cnic_kwq_pending++;

		DP(NETIF_MSG_TIMER, "L5 SPQE %x %x %x:%x pos %d\n",
		   spe->hdr.conn_and_cmd_data, spe->hdr.type,
		   spe->data.update_data_addr.hi,
		   spe->data.update_data_addr.lo,
		   bp->cnic_kwq_pending);

		if (bp->cnic_kwq_prod == bp->cnic_kwq_last)
			bp->cnic_kwq_prod = bp->cnic_kwq;
		else
			bp->cnic_kwq_prod++;
	}

	spin_unlock_bh(&bp->spq_lock);

	if (bp->cnic_spq_pending < bp->cnic_eth_dev.max_kwqe_pending)
		bnx2x_cnic_sp_post(bp, 0);

	return i;
}

static int bnx2x_cnic_ctl_send(struct bnx2x *bp, struct cnic_ctl_info *ctl)
{
	struct cnic_ops *c_ops;
	int rc = 0;

	mutex_lock(&bp->cnic_mutex);
	c_ops = bp->cnic_ops;
	if (c_ops)
		rc = c_ops->cnic_ctl(bp->cnic_data, ctl);
	mutex_unlock(&bp->cnic_mutex);

	return rc;
}

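/*
 * Same as bnx2x_cnic_ctl_send() but safe for softirq context: the
 * cnic_ops pointer is sampled under RCU instead of the (sleeping)
 * cnic_mutex.
 */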
static int bnx2x_cnic_ctl_send_bh(struct bnx2x *bp, struct cnic_ctl_info *ctl)
{
	struct cnic_ops *c_ops;
	int rc = 0;

	rcu_read_lock();
	c_ops = rcu_dereference(bp->cnic_ops);
	if (c_ops)
		rc = c_ops->cnic_ctl(bp->cnic_data, ctl);
	rcu_read_unlock();

	return rc;
}

/*
 * for commands that have no data
 */
int bnx2x_cnic_notify(struct bnx2x *bp, int cmd)
{
	struct cnic_ctl_info ctl = {0};

	ctl.cmd = cmd;

	return bnx2x_cnic_ctl_send(bp, &ctl);
}

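/*
 * Completion for a CNIC-owned connection: notify cnic first, then
 * kick bnx2x_cnic_sp_post() to drain any kwqes still pending.
 */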
static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid)
{
	struct cnic_ctl_info ctl;

	/* first we tell CNIC and only then we count this as a completion */
	ctl.cmd = CNIC_CTL_COMPLETION_CMD;
	ctl.data.comp.cid = cid;

	bnx2x_cnic_ctl_send_bh(bp, &ctl);
	bnx2x_cnic_sp_post(bp, 0);
}

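/*
 * Control entry point called by cnic: dispatches context-table
 * writes, slow-path queue credit returns and iSCSI L2 ring
 * start/stop requests.
 */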
static int bnx2x_drv_ctl(struct net_device *dev, struct drv_ctl_info *ctl)
{
	struct bnx2x *bp = netdev_priv(dev);
	int rc = 0;

	switch (ctl->cmd) {
	case DRV_CTL_CTXTBL_WR_CMD: {
		u32 index = ctl->data.io.offset;
		dma_addr_t addr = ctl->data.io.dma_addr;

		bnx2x_ilt_wr(bp, index, addr);
		break;
	}

	case DRV_CTL_RET_L5_SPQ_CREDIT_CMD: {
		int count = ctl->data.credit.credit_count;

		bnx2x_cnic_sp_post(bp, count);
		break;
	}

	/* rtnl_lock is held. */
	case DRV_CTL_START_L2_CMD: {
		u32 cli = ctl->data.ring.client_id;

		/* Set iSCSI MAC address */
		bnx2x_set_iscsi_eth_mac_addr(bp, 1);

		mmiowb();
		barrier();

		/* Start accepting on the iSCSI L2 ring. Accept all multicasts
		 * because it's the only way for the UIO client to accept
		 * them: in non-promiscuous mode only one client per function
		 * (the leading one, in our case) receives multicast packets.
		 */
		bnx2x_rxq_set_mac_filters(bp, cli,
					  BNX2X_ACCEPT_UNICAST |
					  BNX2X_ACCEPT_BROADCAST |
					  BNX2X_ACCEPT_ALL_MULTICAST);
		storm_memset_mac_filters(bp, &bp->mac_filters, BP_FUNC(bp));

		break;
	}

	/* rtnl_lock is held. */
	case DRV_CTL_STOP_L2_CMD: {
		u32 cli = ctl->data.ring.client_id;

		/* Stop accepting on the iSCSI L2 ring */
		bnx2x_rxq_set_mac_filters(bp, cli, BNX2X_ACCEPT_NONE);
		storm_memset_mac_filters(bp, &bp->mac_filters, BP_FUNC(bp));

		mmiowb();
		barrier();

		/* Unset the iSCSI L2 MAC */
		bnx2x_set_iscsi_eth_mac_addr(bp, 0);
		break;
	}
	case DRV_CTL_RET_L2_SPQ_CREDIT_CMD: {
		int count = ctl->data.credit.credit_count;

		smp_mb__before_atomic_inc();
		atomic_add(count, &bp->spq_left);
		smp_mb__after_atomic_inc();
		break;
	}

	default:
		BNX2X_ERR("unknown command %x\n", ctl->cmd);
		rc = -EINVAL;
	}

	return rc;
}

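/*
 * Export IRQ and status-block information to cnic.  Under MSI-X,
 * entry 1 of the MSI-X table is handed to cnic; two IRQ slots are
 * always described (the CNIC status block and the default one).
 */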
void bnx2x_setup_cnic_irq_info(struct bnx2x *bp)
{
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	if (bp->flags & USING_MSIX_FLAG) {
		cp->drv_state |= CNIC_DRV_STATE_USING_MSIX;
		cp->irq_arr[0].irq_flags |= CNIC_IRQ_FL_MSIX;
		cp->irq_arr[0].vector = bp->msix_table[1].vector;
	} else {
		cp->drv_state &= ~CNIC_DRV_STATE_USING_MSIX;
		cp->irq_arr[0].irq_flags &= ~CNIC_IRQ_FL_MSIX;
	}
	if (CHIP_IS_E2(bp))
		cp->irq_arr[0].status_blk = (void *)bp->cnic_sb.e2_sb;
	else
		cp->irq_arr[0].status_blk = (void *)bp->cnic_sb.e1x_sb;

	cp->irq_arr[0].status_blk_num = CNIC_SB_ID(bp);
	cp->irq_arr[0].status_blk_num2 = CNIC_IGU_SB_ID(bp);
	cp->irq_arr[1].status_blk = bp->def_status_blk;
	cp->irq_arr[1].status_blk_num = DEF_SB_ID;
	cp->irq_arr[1].status_blk_num2 = DEF_SB_IGU_ID;

	cp->num_irq = 2;
}

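/*
 * Called by cnic to attach to this device.  A single page is
 * allocated for the local kwq ring (MAX_SP_DESC_CNT entries) and the
 * ops/data pointers are published with rcu_assign_pointer() so the
 * lockless readers above see a fully initialised state.
 */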
static int bnx2x_register_cnic(struct net_device *dev, struct cnic_ops *ops,
			       void *data)
{
	struct bnx2x *bp = netdev_priv(dev);
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	if (ops == NULL)
		return -EINVAL;

	if (atomic_read(&bp->intr_sem) != 0)
		return -EBUSY;

	bp->cnic_kwq = kzalloc(PAGE_SIZE, GFP_KERNEL);
	if (!bp->cnic_kwq)
		return -ENOMEM;

	bp->cnic_kwq_cons = bp->cnic_kwq;
	bp->cnic_kwq_prod = bp->cnic_kwq;
	bp->cnic_kwq_last = bp->cnic_kwq + MAX_SP_DESC_CNT;

	bp->cnic_spq_pending = 0;
	bp->cnic_kwq_pending = 0;

	bp->cnic_data = data;

	cp->num_irq = 0;
	cp->drv_state = CNIC_DRV_STATE_REGD;
	cp->iro_arr = bp->iro_arr;

	bnx2x_setup_cnic_irq_info(bp);

	rcu_assign_pointer(bp->cnic_ops, ops);

	return 0;
}

static int bnx2x_unregister_cnic(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	mutex_lock(&bp->cnic_mutex);
	if (bp->cnic_flags & BNX2X_CNIC_FLAG_MAC_SET) {
		bp->cnic_flags &= ~BNX2X_CNIC_FLAG_MAC_SET;
		bnx2x_set_iscsi_eth_mac_addr(bp, 0);
	}
	cp->drv_state = 0;
	rcu_assign_pointer(bp->cnic_ops, NULL);
	mutex_unlock(&bp->cnic_mutex);
	synchronize_rcu();
	kfree(bp->cnic_kwq);
	bp->cnic_kwq = NULL;

	return 0;
}

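/*
 * Probe entry point used by cnic to discover this device: fills in
 * the cnic_eth_dev structure with chip info, BAR mappings, context
 * table geometry and the driver callbacks registered above.
 */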
struct cnic_eth_dev *bnx2x_cnic_probe(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	cp->drv_owner = THIS_MODULE;
	cp->chip_id = CHIP_ID(bp);
	cp->pdev = bp->pdev;
	cp->io_base = bp->regview;
	cp->io_base2 = bp->doorbells;
	cp->max_kwqe_pending = 8;
	cp->ctx_blk_size = CDU_ILT_PAGE_SZ;
	cp->ctx_tbl_offset = FUNC_ILT_BASE(BP_FUNC(bp)) +
			     bnx2x_cid_ilt_lines(bp);
	cp->ctx_tbl_len = CNIC_ILT_LINES;
	cp->starting_cid = bnx2x_cid_ilt_lines(bp) * ILT_PAGE_CIDS;
	cp->drv_submit_kwqes_16 = bnx2x_cnic_sp_queue;
	cp->drv_ctl = bnx2x_drv_ctl;
	cp->drv_register_cnic = bnx2x_register_cnic;
	cp->drv_unregister_cnic = bnx2x_unregister_cnic;
	cp->iscsi_l2_client_id = BNX2X_ISCSI_ETH_CL_ID;
	cp->iscsi_l2_cid = BNX2X_ISCSI_ETH_CID;

	DP(BNX2X_MSG_SP, "page_size %d, tbl_offset %d, tbl_lines %d, "
			 "starting cid %d\n",
	   cp->ctx_blk_size,
	   cp->ctx_tbl_offset,
	   cp->ctx_tbl_len,
	   cp->starting_cid);
	return cp;
}
EXPORT_SYMBOL(bnx2x_cnic_probe);

#endif /* BCM_CNIC */