/*
 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
 * All rights reserved
 * www.brocade.com
 *
 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License (GPL) Version 2 as
 * published by the Free Software Foundation
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */

#include "bfad_drv.h"
#include "bfad_im.h"
#include "bfa_ioc.h"
#include "bfi_reg.h"
#include "bfa_defs.h"
#include "bfa_defs_svc.h"

BFA_TRC_FILE(CNA, IOC);

/*
 * IOC local definitions
 */
#define BFA_IOC_TOV		3000	/* msecs */
#define BFA_IOC_HWSEM_TOV	500	/* msecs */
#define BFA_IOC_HB_TOV		500	/* msecs */
#define BFA_IOC_TOV_RECOVER	BFA_IOC_HB_TOV
#define BFA_IOC_POLL_TOV	BFA_TIMER_FREQ

#define bfa_ioc_timer_start(__ioc)					\
	bfa_timer_begin((__ioc)->timer_mod, &(__ioc)->ioc_timer,	\
			bfa_ioc_timeout, (__ioc), BFA_IOC_TOV)
#define bfa_ioc_timer_stop(__ioc)	bfa_timer_stop(&(__ioc)->ioc_timer)

#define bfa_hb_timer_start(__ioc)					\
	bfa_timer_begin((__ioc)->timer_mod, &(__ioc)->hb_timer,	\
			bfa_ioc_hb_check, (__ioc), BFA_IOC_HB_TOV)
#define bfa_hb_timer_stop(__ioc)	bfa_timer_stop(&(__ioc)->hb_timer)

#define BFA_DBG_FWTRC_OFF(_fn)	(BFI_IOC_TRC_OFF + BFA_DBG_FWTRC_LEN * (_fn))

/*
 * ASIC specific macros: see bfa_hw_cb.c and bfa_hw_ct.c for details.
 */

#define bfa_ioc_firmware_lock(__ioc)			\
			((__ioc)->ioc_hwif->ioc_firmware_lock(__ioc))
#define bfa_ioc_firmware_unlock(__ioc)			\
			((__ioc)->ioc_hwif->ioc_firmware_unlock(__ioc))
#define bfa_ioc_reg_init(__ioc) ((__ioc)->ioc_hwif->ioc_reg_init(__ioc))
#define bfa_ioc_map_port(__ioc) ((__ioc)->ioc_hwif->ioc_map_port(__ioc))
#define bfa_ioc_notify_fail(__ioc)			\
			((__ioc)->ioc_hwif->ioc_notify_fail(__ioc))
#define bfa_ioc_sync_start(__ioc)			\
			((__ioc)->ioc_hwif->ioc_sync_start(__ioc))
#define bfa_ioc_sync_join(__ioc)			\
			((__ioc)->ioc_hwif->ioc_sync_join(__ioc))
#define bfa_ioc_sync_leave(__ioc)			\
			((__ioc)->ioc_hwif->ioc_sync_leave(__ioc))
#define bfa_ioc_sync_ack(__ioc)				\
			((__ioc)->ioc_hwif->ioc_sync_ack(__ioc))
#define bfa_ioc_sync_complete(__ioc)			\
			((__ioc)->ioc_hwif->ioc_sync_complete(__ioc))

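/*
 * A mailbox command is pending if the s/w command queue is non-empty or
 * the h/w mailbox register still holds an unacknowledged command.
 */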
#define bfa_ioc_mbox_cmd_pending(__ioc)		\
			(!list_empty(&((__ioc)->mbox_mod.cmd_q)) || \
			readl((__ioc)->ioc_regs.hfn_mbox_cmd))

bfa_boolean_t bfa_auto_recover = BFA_TRUE;

/*
 * forward declarations
 */
static void bfa_ioc_hw_sem_get(struct bfa_ioc_s *ioc);
static void bfa_ioc_hwinit(struct bfa_ioc_s *ioc, bfa_boolean_t force);
static void bfa_ioc_timeout(void *ioc);
static void bfa_ioc_poll_fwinit(struct bfa_ioc_s *ioc);
static void bfa_ioc_send_enable(struct bfa_ioc_s *ioc);
static void bfa_ioc_send_disable(struct bfa_ioc_s *ioc);
static void bfa_ioc_send_getattr(struct bfa_ioc_s *ioc);
static void bfa_ioc_hb_monitor(struct bfa_ioc_s *ioc);
static void bfa_ioc_mbox_poll(struct bfa_ioc_s *ioc);
static void bfa_ioc_mbox_flush(struct bfa_ioc_s *ioc);
static void bfa_ioc_recover(struct bfa_ioc_s *ioc);
static void bfa_ioc_event_notify(struct bfa_ioc_s *ioc,
				enum bfa_ioc_event_e event);
static void bfa_ioc_disable_comp(struct bfa_ioc_s *ioc);
static void bfa_ioc_lpu_stop(struct bfa_ioc_s *ioc);
static void bfa_ioc_fail_notify(struct bfa_ioc_s *ioc);
static void bfa_ioc_pf_fwmismatch(struct bfa_ioc_s *ioc);

/*
 * IOC state machine definitions/declarations
 */
enum ioc_event {
	IOC_E_RESET		= 1,	/*  IOC reset request		*/
	IOC_E_ENABLE		= 2,	/*  IOC enable request		*/
	IOC_E_DISABLE		= 3,	/*  IOC disable request		*/
	IOC_E_DETACH		= 4,	/*  driver detach cleanup	*/
	IOC_E_ENABLED		= 5,	/*  f/w enabled			*/
	IOC_E_FWRSP_GETATTR	= 6,	/*  IOC get attribute response	*/
	IOC_E_DISABLED		= 7,	/*  f/w disabled		*/
	IOC_E_PFFAILED		= 8,	/*  failure notice by iocpf sm	*/
	IOC_E_HBFAIL		= 9,	/*  heartbeat failure		*/
	IOC_E_HWERROR		= 10,	/*  hardware error interrupt	*/
	IOC_E_TIMEOUT		= 11,	/*  timeout			*/
	IOC_E_HWFAILED		= 12,	/*  PCI mapping failure notice	*/
};

bfa_fsm_state_decl(bfa_ioc, uninit, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, reset, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, enabling, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, getattr, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, op, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, fail_retry, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, fail, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, disabling, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, disabled, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, hwfail, struct bfa_ioc_s, enum ioc_event);

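/*
 * Mapping of IOC state machine handlers to the externally visible
 * BFA_IOC_* states, used when reporting the current IOC state.
 */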
static struct bfa_sm_table_s ioc_sm_table[] = {
	{BFA_SM(bfa_ioc_sm_uninit), BFA_IOC_UNINIT},
	{BFA_SM(bfa_ioc_sm_reset), BFA_IOC_RESET},
	{BFA_SM(bfa_ioc_sm_enabling), BFA_IOC_ENABLING},
	{BFA_SM(bfa_ioc_sm_getattr), BFA_IOC_GETATTR},
	{BFA_SM(bfa_ioc_sm_op), BFA_IOC_OPERATIONAL},
	{BFA_SM(bfa_ioc_sm_fail_retry), BFA_IOC_INITFAIL},
	{BFA_SM(bfa_ioc_sm_fail), BFA_IOC_FAIL},
	{BFA_SM(bfa_ioc_sm_disabling), BFA_IOC_DISABLING},
	{BFA_SM(bfa_ioc_sm_disabled), BFA_IOC_DISABLED},
	{BFA_SM(bfa_ioc_sm_hwfail), BFA_IOC_HWFAIL},
};

/*
 * IOCPF state machine definitions/declarations
 */

#define bfa_iocpf_timer_start(__ioc)					\
	bfa_timer_begin((__ioc)->timer_mod, &(__ioc)->ioc_timer,	\
			bfa_iocpf_timeout, (__ioc), BFA_IOC_TOV)
#define bfa_iocpf_timer_stop(__ioc)	bfa_timer_stop(&(__ioc)->ioc_timer)

#define bfa_iocpf_poll_timer_start(__ioc)				\
	bfa_timer_begin((__ioc)->timer_mod, &(__ioc)->ioc_timer,	\
			bfa_iocpf_poll_timeout, (__ioc), BFA_IOC_POLL_TOV)

#define bfa_sem_timer_start(__ioc)					\
	bfa_timer_begin((__ioc)->timer_mod, &(__ioc)->sem_timer,	\
			bfa_iocpf_sem_timeout, (__ioc), BFA_IOC_HWSEM_TOV)
#define bfa_sem_timer_stop(__ioc)	bfa_timer_stop(&(__ioc)->sem_timer)

/*
 * Forward declarations for iocpf state machine
 */
static void bfa_iocpf_timeout(void *ioc_arg);
static void bfa_iocpf_sem_timeout(void *ioc_arg);
static void bfa_iocpf_poll_timeout(void *ioc_arg);

/*
 * IOCPF state machine events
 */
enum iocpf_event {
	IOCPF_E_ENABLE		= 1,	/*  IOCPF enable request	*/
	IOCPF_E_DISABLE		= 2,	/*  IOCPF disable request	*/
	IOCPF_E_STOP		= 3,	/*  stop on driver detach	*/
	IOCPF_E_FWREADY		= 4,	/*  f/w initialization done	*/
	IOCPF_E_FWRSP_ENABLE	= 5,	/*  enable f/w response		*/
	IOCPF_E_FWRSP_DISABLE	= 6,	/*  disable f/w response	*/
	IOCPF_E_FAIL		= 7,	/*  failure notice by ioc sm	*/
	IOCPF_E_INITFAIL	= 8,	/*  init fail notice by ioc sm	*/
	IOCPF_E_GETATTRFAIL	= 9,	/*  init fail notice by ioc sm	*/
	IOCPF_E_SEMLOCKED	= 10,	/*  h/w semaphore is locked	*/
	IOCPF_E_TIMEOUT		= 11,	/*  f/w response timeout	*/
	IOCPF_E_SEM_ERROR	= 12,	/*  h/w sem mapping error	*/
};

/*
 * IOCPF states
 */
enum bfa_iocpf_state {
	BFA_IOCPF_RESET		= 1,	/*  IOC is in reset state	*/
	BFA_IOCPF_SEMWAIT	= 2,	/*  Waiting for IOC h/w semaphore */
	BFA_IOCPF_HWINIT	= 3,	/*  IOC h/w is being initialized */
	BFA_IOCPF_READY		= 4,	/*  IOCPF is initialized	*/
	BFA_IOCPF_INITFAIL	= 5,	/*  IOCPF failed		*/
	BFA_IOCPF_FAIL		= 6,	/*  IOCPF failed		*/
	BFA_IOCPF_DISABLING	= 7,	/*  IOCPF is being disabled	*/
	BFA_IOCPF_DISABLED	= 8,	/*  IOCPF is disabled		*/
	BFA_IOCPF_FWMISMATCH	= 9,	/*  IOC f/w different from drivers */
};

bfa_fsm_state_decl(bfa_iocpf, reset, struct bfa_iocpf_s, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, fwcheck, struct bfa_iocpf_s, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, mismatch, struct bfa_iocpf_s, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, semwait, struct bfa_iocpf_s, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, hwinit, struct bfa_iocpf_s, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, enabling, struct bfa_iocpf_s, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, ready, struct bfa_iocpf_s, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, initfail_sync, struct bfa_iocpf_s,
						enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, initfail, struct bfa_iocpf_s, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, fail_sync, struct bfa_iocpf_s, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, fail, struct bfa_iocpf_s, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, disabling, struct bfa_iocpf_s, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, disabling_sync, struct bfa_iocpf_s,
						enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, disabled, struct bfa_iocpf_s, enum iocpf_event);

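/*
 * Mapping of IOCPF state machine handlers to the externally visible
 * BFA_IOCPF_* states.
 */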
static struct bfa_sm_table_s iocpf_sm_table[] = {
	{BFA_SM(bfa_iocpf_sm_reset), BFA_IOCPF_RESET},
	{BFA_SM(bfa_iocpf_sm_fwcheck), BFA_IOCPF_FWMISMATCH},
	{BFA_SM(bfa_iocpf_sm_mismatch), BFA_IOCPF_FWMISMATCH},
	{BFA_SM(bfa_iocpf_sm_semwait), BFA_IOCPF_SEMWAIT},
	{BFA_SM(bfa_iocpf_sm_hwinit), BFA_IOCPF_HWINIT},
	{BFA_SM(bfa_iocpf_sm_enabling), BFA_IOCPF_HWINIT},
	{BFA_SM(bfa_iocpf_sm_ready), BFA_IOCPF_READY},
	{BFA_SM(bfa_iocpf_sm_initfail_sync), BFA_IOCPF_INITFAIL},
	{BFA_SM(bfa_iocpf_sm_initfail), BFA_IOCPF_INITFAIL},
	{BFA_SM(bfa_iocpf_sm_fail_sync), BFA_IOCPF_FAIL},
	{BFA_SM(bfa_iocpf_sm_fail), BFA_IOCPF_FAIL},
	{BFA_SM(bfa_iocpf_sm_disabling), BFA_IOCPF_DISABLING},
	{BFA_SM(bfa_iocpf_sm_disabling_sync), BFA_IOCPF_DISABLING},
	{BFA_SM(bfa_iocpf_sm_disabled), BFA_IOCPF_DISABLED},
};

/*
 * IOC State Machine
 */

/*
 * Beginning state. IOC uninit state.
 */

static void
bfa_ioc_sm_uninit_entry(struct bfa_ioc_s *ioc)
{
}

/*
 * IOC is in uninit state.
 */
static void
bfa_ioc_sm_uninit(struct bfa_ioc_s *ioc, enum ioc_event event)
{
	bfa_trc(ioc, event);

	switch (event) {
	case IOC_E_RESET:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_reset);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}
/*
 * Reset entry actions -- initialize state machine
 */
static void
bfa_ioc_sm_reset_entry(struct bfa_ioc_s *ioc)
{
	bfa_fsm_set_state(&ioc->iocpf, bfa_iocpf_sm_reset);
}

/*
 * IOC is in reset state.
 */
static void
bfa_ioc_sm_reset(struct bfa_ioc_s *ioc, enum ioc_event event)
{
	bfa_trc(ioc, event);

	switch (event) {
	case IOC_E_ENABLE:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_enabling);
		break;

	case IOC_E_DISABLE:
		bfa_ioc_disable_comp(ioc);
		break;

	case IOC_E_DETACH:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}


static void
bfa_ioc_sm_enabling_entry(struct bfa_ioc_s *ioc)
{
	bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_ENABLE);
}

/*
 * Host IOC function is being enabled, awaiting response from firmware.
 * Semaphore is acquired.
 */
static void
bfa_ioc_sm_enabling(struct bfa_ioc_s *ioc, enum ioc_event event)
{
	bfa_trc(ioc, event);

	switch (event) {
	case IOC_E_ENABLED:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_getattr);
		break;

	case IOC_E_PFFAILED:
		/* !!! fall through !!! */
	case IOC_E_HWERROR:
		ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_fail);
		if (event != IOC_E_PFFAILED)
			bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_INITFAIL);
		break;

	case IOC_E_HWFAILED:
		ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_hwfail);
		break;

	case IOC_E_DISABLE:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
		break;

	case IOC_E_DETACH:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
		bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_STOP);
		break;

	case IOC_E_ENABLE:
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}


static void
bfa_ioc_sm_getattr_entry(struct bfa_ioc_s *ioc)
{
	bfa_ioc_timer_start(ioc);
	bfa_ioc_send_getattr(ioc);
}

/*
 * IOC configuration in progress. Timer is active.
 */
static void
bfa_ioc_sm_getattr(struct bfa_ioc_s *ioc, enum ioc_event event)
{
	bfa_trc(ioc, event);

	switch (event) {
	case IOC_E_FWRSP_GETATTR:
		bfa_ioc_timer_stop(ioc);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_op);
		break;

	case IOC_E_PFFAILED:
	case IOC_E_HWERROR:
		bfa_ioc_timer_stop(ioc);
		/* !!! fall through !!! */
	case IOC_E_TIMEOUT:
		ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_fail);
		if (event != IOC_E_PFFAILED)
			bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_GETATTRFAIL);
		break;

	case IOC_E_DISABLE:
		bfa_ioc_timer_stop(ioc);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
		break;

	case IOC_E_ENABLE:
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}

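/*
 * IOC is operational: notify registered modules, start heartbeat
 * monitoring and report the enable through the driver log and AEN.
 */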
static void
bfa_ioc_sm_op_entry(struct bfa_ioc_s *ioc)
{
	struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad;

	ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_OK);
	bfa_ioc_event_notify(ioc, BFA_IOC_E_ENABLED);
	bfa_ioc_hb_monitor(ioc);
	BFA_LOG(KERN_INFO, bfad, bfa_log_level, "IOC enabled\n");
	bfa_ioc_aen_post(ioc, BFA_IOC_AEN_ENABLE);
}

static void
bfa_ioc_sm_op(struct bfa_ioc_s *ioc, enum ioc_event event)
{
	bfa_trc(ioc, event);

	switch (event) {
	case IOC_E_ENABLE:
		break;

	case IOC_E_DISABLE:
		bfa_hb_timer_stop(ioc);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
		break;

	case IOC_E_PFFAILED:
	case IOC_E_HWERROR:
		bfa_hb_timer_stop(ioc);
		/* !!! fall through !!! */
	case IOC_E_HBFAIL:
		if (ioc->iocpf.auto_recover)
			bfa_fsm_set_state(ioc, bfa_ioc_sm_fail_retry);
		else
			bfa_fsm_set_state(ioc, bfa_ioc_sm_fail);

		bfa_ioc_fail_notify(ioc);

		if (event != IOC_E_PFFAILED)
			bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_FAIL);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}


static void
bfa_ioc_sm_disabling_entry(struct bfa_ioc_s *ioc)
{
	struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad;
	bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_DISABLE);
	BFA_LOG(KERN_INFO, bfad, bfa_log_level, "IOC disabled\n");
	bfa_ioc_aen_post(ioc, BFA_IOC_AEN_DISABLE);
}

/*
 * IOC is being disabled
 */
static void
bfa_ioc_sm_disabling(struct bfa_ioc_s *ioc, enum ioc_event event)
{
	bfa_trc(ioc, event);

	switch (event) {
	case IOC_E_DISABLED:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled);
		break;

	case IOC_E_HWERROR:
		/*
		 * No state change. Will move to disabled state
		 * after iocpf sm completes failure processing and
		 * moves to disabled state.
		 */
		bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_FAIL);
		break;

	case IOC_E_HWFAILED:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_hwfail);
		bfa_ioc_disable_comp(ioc);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}

/*
 * IOC disable completion entry.
 */
static void
bfa_ioc_sm_disabled_entry(struct bfa_ioc_s *ioc)
{
	bfa_ioc_disable_comp(ioc);
}

static void
bfa_ioc_sm_disabled(struct bfa_ioc_s *ioc, enum ioc_event event)
{
	bfa_trc(ioc, event);

	switch (event) {
	case IOC_E_ENABLE:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_enabling);
		break;

	case IOC_E_DISABLE:
		ioc->cbfn->disable_cbfn(ioc->bfa);
		break;

	case IOC_E_DETACH:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
		bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_STOP);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}


static void
bfa_ioc_sm_fail_retry_entry(struct bfa_ioc_s *ioc)
{
	bfa_trc(ioc, 0);
}

/*
 * Hardware initialization retry.
 */
static void
bfa_ioc_sm_fail_retry(struct bfa_ioc_s *ioc, enum ioc_event event)
{
	bfa_trc(ioc, event);

	switch (event) {
	case IOC_E_ENABLED:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_getattr);
		break;

	case IOC_E_PFFAILED:
	case IOC_E_HWERROR:
		/*
		 * Initialization retry failed.
		 */
		ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_fail);
		if (event != IOC_E_PFFAILED)
			bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_INITFAIL);
		break;

	case IOC_E_HWFAILED:
		ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_hwfail);
		break;

	case IOC_E_ENABLE:
		break;

	case IOC_E_DISABLE:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
		break;

	case IOC_E_DETACH:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
		bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_STOP);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}


static void
bfa_ioc_sm_fail_entry(struct bfa_ioc_s *ioc)
{
	bfa_trc(ioc, 0);
}

/*
 * IOC failure.
 */
static void
bfa_ioc_sm_fail(struct bfa_ioc_s *ioc, enum ioc_event event)
{
	bfa_trc(ioc, event);

	switch (event) {

	case IOC_E_ENABLE:
		ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
		break;

	case IOC_E_DISABLE:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
		break;

	case IOC_E_DETACH:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
		bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_STOP);
		break;

	case IOC_E_HWERROR:
	case IOC_E_HWFAILED:
		/*
		 * HB failure / HW error notification, ignore.
		 */
		break;
	default:
		bfa_sm_fault(ioc, event);
	}
}

static void
bfa_ioc_sm_hwfail_entry(struct bfa_ioc_s *ioc)
{
	bfa_trc(ioc, 0);
}

static void
bfa_ioc_sm_hwfail(struct bfa_ioc_s *ioc, enum ioc_event event)
{
	bfa_trc(ioc, event);

	switch (event) {
	case IOC_E_ENABLE:
		ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
		break;

	case IOC_E_DISABLE:
		ioc->cbfn->disable_cbfn(ioc->bfa);
		break;

	case IOC_E_DETACH:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
		break;

	case IOC_E_HWERROR:
		/* Ignore - already in hwfail state */
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}

/*
 * IOCPF State Machine
 */

/*
 * Reset entry actions -- initialize state machine
 */
static void
bfa_iocpf_sm_reset_entry(struct bfa_iocpf_s *iocpf)
{
	iocpf->fw_mismatch_notified = BFA_FALSE;
	iocpf->auto_recover = bfa_auto_recover;
}

/*
 * Beginning state. IOC is in reset state.
 */
static void
bfa_iocpf_sm_reset(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
	struct bfa_ioc_s *ioc = iocpf->ioc;

	bfa_trc(ioc, event);

	switch (event) {
	case IOCPF_E_ENABLE:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fwcheck);
		break;

	case IOCPF_E_STOP:
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}

/*
 * Semaphore should be acquired for version check.
 */
static void
bfa_iocpf_sm_fwcheck_entry(struct bfa_iocpf_s *iocpf)
{
	struct bfi_ioc_image_hdr_s	fwhdr;
	u32	r32, fwstate, pgnum, pgoff, loff = 0;
	int	i;

	/*
	 * Spin on init semaphore to serialize.
	 */
	r32 = readl(iocpf->ioc->ioc_regs.ioc_init_sem_reg);
	while (r32 & 0x1) {
		udelay(20);
		r32 = readl(iocpf->ioc->ioc_regs.ioc_init_sem_reg);
	}

	/* h/w sem init */
	fwstate = readl(iocpf->ioc->ioc_regs.ioc_fwstate);
	if (fwstate == BFI_IOC_UNINIT) {
		writel(1, iocpf->ioc->ioc_regs.ioc_init_sem_reg);
		goto sem_get;
	}

	bfa_ioc_fwver_get(iocpf->ioc, &fwhdr);

	if (swab32(fwhdr.exec) == BFI_FWBOOT_TYPE_NORMAL) {
		writel(1, iocpf->ioc->ioc_regs.ioc_init_sem_reg);
		goto sem_get;
	}

	/*
	 * Clear fwver hdr
	 */
	pgnum = PSS_SMEM_PGNUM(iocpf->ioc->ioc_regs.smem_pg0, loff);
	pgoff = PSS_SMEM_PGOFF(loff);
	writel(pgnum, iocpf->ioc->ioc_regs.host_page_num_fn);

	for (i = 0; i < sizeof(struct bfi_ioc_image_hdr_s) / sizeof(u32); i++) {
		bfa_mem_write(iocpf->ioc->ioc_regs.smem_page_start, loff, 0);
		loff += sizeof(u32);
	}

	bfa_trc(iocpf->ioc, fwstate);
	bfa_trc(iocpf->ioc, swab32(fwhdr.exec));
	writel(BFI_IOC_UNINIT, iocpf->ioc->ioc_regs.ioc_fwstate);
	writel(BFI_IOC_UNINIT, iocpf->ioc->ioc_regs.alt_ioc_fwstate);

	/*
	 * Unlock the hw semaphore. Should be here only once per boot.
	 */
	readl(iocpf->ioc->ioc_regs.ioc_sem_reg);
	writel(1, iocpf->ioc->ioc_regs.ioc_sem_reg);

	/*
	 * unlock init semaphore.
	 */
	writel(1, iocpf->ioc->ioc_regs.ioc_init_sem_reg);

sem_get:
	bfa_ioc_hw_sem_get(iocpf->ioc);
}

/*
 * Awaiting h/w semaphore to continue with version check.
 */
static void
bfa_iocpf_sm_fwcheck(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
	struct bfa_ioc_s *ioc = iocpf->ioc;

	bfa_trc(ioc, event);

	switch (event) {
	case IOCPF_E_SEMLOCKED:
		if (bfa_ioc_firmware_lock(ioc)) {
			if (bfa_ioc_sync_start(ioc)) {
				bfa_ioc_sync_join(ioc);
				bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit);
			} else {
				bfa_ioc_firmware_unlock(ioc);
				writel(1, ioc->ioc_regs.ioc_sem_reg);
				bfa_sem_timer_start(ioc);
			}
		} else {
			writel(1, ioc->ioc_regs.ioc_sem_reg);
			bfa_fsm_set_state(iocpf, bfa_iocpf_sm_mismatch);
		}
		break;

	case IOCPF_E_SEM_ERROR:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
		bfa_fsm_send_event(ioc, IOC_E_HWFAILED);
		break;

	case IOCPF_E_DISABLE:
		bfa_sem_timer_stop(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
		bfa_fsm_send_event(ioc, IOC_E_DISABLED);
		break;

	case IOCPF_E_STOP:
		bfa_sem_timer_stop(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}

/*
 * Notify enable completion callback.
 */
static void
bfa_iocpf_sm_mismatch_entry(struct bfa_iocpf_s *iocpf)
{
	/*
	 * Call only the first time sm enters fwmismatch state.
	 */
	if (iocpf->fw_mismatch_notified == BFA_FALSE)
		bfa_ioc_pf_fwmismatch(iocpf->ioc);

	iocpf->fw_mismatch_notified = BFA_TRUE;
	bfa_iocpf_timer_start(iocpf->ioc);
}

/*
 * Awaiting firmware version match.
 */
static void
bfa_iocpf_sm_mismatch(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
	struct bfa_ioc_s *ioc = iocpf->ioc;

	bfa_trc(ioc, event);

	switch (event) {
	case IOCPF_E_TIMEOUT:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fwcheck);
		break;

	case IOCPF_E_DISABLE:
		bfa_iocpf_timer_stop(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
		bfa_fsm_send_event(ioc, IOC_E_DISABLED);
		break;

	case IOCPF_E_STOP:
		bfa_iocpf_timer_stop(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}

/*
 * Request for semaphore.
 */
static void
bfa_iocpf_sm_semwait_entry(struct bfa_iocpf_s *iocpf)
{
	bfa_ioc_hw_sem_get(iocpf->ioc);
}

/*
 * Awaiting semaphore for h/w initialization.
 */
static void
bfa_iocpf_sm_semwait(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
	struct bfa_ioc_s *ioc = iocpf->ioc;

	bfa_trc(ioc, event);

	switch (event) {
	case IOCPF_E_SEMLOCKED:
		if (bfa_ioc_sync_complete(ioc)) {
			bfa_ioc_sync_join(ioc);
			bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit);
		} else {
			writel(1, ioc->ioc_regs.ioc_sem_reg);
			bfa_sem_timer_start(ioc);
		}
		break;

	case IOCPF_E_SEM_ERROR:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
		bfa_fsm_send_event(ioc, IOC_E_HWFAILED);
		break;

	case IOCPF_E_DISABLE:
		bfa_sem_timer_stop(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}

static void
bfa_iocpf_sm_hwinit_entry(struct bfa_iocpf_s *iocpf)
{
	iocpf->poll_time = 0;
	bfa_ioc_hwinit(iocpf->ioc, BFA_FALSE);
}

/*
 * Hardware is being initialized. Interrupts are enabled.
 * Holding hardware semaphore lock.
 */
static void
bfa_iocpf_sm_hwinit(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
	struct bfa_ioc_s *ioc = iocpf->ioc;

	bfa_trc(ioc, event);

	switch (event) {
	case IOCPF_E_FWREADY:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_enabling);
		break;

	case IOCPF_E_TIMEOUT:
		writel(1, ioc->ioc_regs.ioc_sem_reg);
		bfa_fsm_send_event(ioc, IOC_E_PFFAILED);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail_sync);
		break;

	case IOCPF_E_DISABLE:
		bfa_iocpf_timer_stop(ioc);
		bfa_ioc_sync_leave(ioc);
		writel(1, ioc->ioc_regs.ioc_sem_reg);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}

static void
bfa_iocpf_sm_enabling_entry(struct bfa_iocpf_s *iocpf)
{
	bfa_iocpf_timer_start(iocpf->ioc);
	/*
	 * Enable Interrupts before sending fw IOC ENABLE cmd.
	 */
	iocpf->ioc->cbfn->reset_cbfn(iocpf->ioc->bfa);
	bfa_ioc_send_enable(iocpf->ioc);
}

/*
 * Host IOC function is being enabled, awaiting response from firmware.
 * Semaphore is acquired.
 */
static void
bfa_iocpf_sm_enabling(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
	struct bfa_ioc_s *ioc = iocpf->ioc;

	bfa_trc(ioc, event);

	switch (event) {
	case IOCPF_E_FWRSP_ENABLE:
		bfa_iocpf_timer_stop(ioc);
		writel(1, ioc->ioc_regs.ioc_sem_reg);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_ready);
		break;

	case IOCPF_E_INITFAIL:
		bfa_iocpf_timer_stop(ioc);
		/*
		 * !!! fall through !!!
		 */

	case IOCPF_E_TIMEOUT:
		writel(1, ioc->ioc_regs.ioc_sem_reg);
		if (event == IOCPF_E_TIMEOUT)
			bfa_fsm_send_event(ioc, IOC_E_PFFAILED);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail_sync);
		break;

	case IOCPF_E_DISABLE:
		bfa_iocpf_timer_stop(ioc);
		writel(1, ioc->ioc_regs.ioc_sem_reg);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}

static void
bfa_iocpf_sm_ready_entry(struct bfa_iocpf_s *iocpf)
{
	bfa_fsm_send_event(iocpf->ioc, IOC_E_ENABLED);
}

static void
bfa_iocpf_sm_ready(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
	struct bfa_ioc_s *ioc = iocpf->ioc;

	bfa_trc(ioc, event);

	switch (event) {
	case IOCPF_E_DISABLE:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling);
		break;

	case IOCPF_E_GETATTRFAIL:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail_sync);
		break;

	case IOCPF_E_FAIL:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail_sync);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}

static void
bfa_iocpf_sm_disabling_entry(struct bfa_iocpf_s *iocpf)
{
	bfa_iocpf_timer_start(iocpf->ioc);
	bfa_ioc_send_disable(iocpf->ioc);
}

/*
 * IOC is being disabled
 */
static void
bfa_iocpf_sm_disabling(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
	struct bfa_ioc_s *ioc = iocpf->ioc;

	bfa_trc(ioc, event);

	switch (event) {
	case IOCPF_E_FWRSP_DISABLE:
		bfa_iocpf_timer_stop(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
		break;

	case IOCPF_E_FAIL:
		bfa_iocpf_timer_stop(ioc);
		/*
		 * !!! fall through !!!
		 */

	case IOCPF_E_TIMEOUT:
		writel(BFI_IOC_FAIL, ioc->ioc_regs.ioc_fwstate);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
		break;

	case IOCPF_E_FWRSP_ENABLE:
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}

static void
bfa_iocpf_sm_disabling_sync_entry(struct bfa_iocpf_s *iocpf)
{
	bfa_ioc_hw_sem_get(iocpf->ioc);
}

/*
 * IOC hb ack request is being removed.
 */
static void
bfa_iocpf_sm_disabling_sync(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
	struct bfa_ioc_s *ioc = iocpf->ioc;

	bfa_trc(ioc, event);

	switch (event) {
	case IOCPF_E_SEMLOCKED:
		bfa_ioc_sync_leave(ioc);
		writel(1, ioc->ioc_regs.ioc_sem_reg);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
		break;

	case IOCPF_E_SEM_ERROR:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
		bfa_fsm_send_event(ioc, IOC_E_HWFAILED);
		break;

	case IOCPF_E_FAIL:
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}

/*
 * IOC disable completion entry.
 */
static void
bfa_iocpf_sm_disabled_entry(struct bfa_iocpf_s *iocpf)
{
	bfa_ioc_mbox_flush(iocpf->ioc);
	bfa_fsm_send_event(iocpf->ioc, IOC_E_DISABLED);
}

static void
bfa_iocpf_sm_disabled(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
	struct bfa_ioc_s *ioc = iocpf->ioc;

	bfa_trc(ioc, event);

	switch (event) {
	case IOCPF_E_ENABLE:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_semwait);
		break;

	case IOCPF_E_STOP:
		bfa_ioc_firmware_unlock(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}

static void
bfa_iocpf_sm_initfail_sync_entry(struct bfa_iocpf_s *iocpf)
{
	bfa_ioc_debug_save_ftrc(iocpf->ioc);
	bfa_ioc_hw_sem_get(iocpf->ioc);
}

/*
 * Hardware initialization failed.
 */
static void
bfa_iocpf_sm_initfail_sync(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
	struct bfa_ioc_s *ioc = iocpf->ioc;

	bfa_trc(ioc, event);

	switch (event) {
	case IOCPF_E_SEMLOCKED:
		bfa_ioc_notify_fail(ioc);
		bfa_ioc_sync_leave(ioc);
		writel(BFI_IOC_FAIL, ioc->ioc_regs.ioc_fwstate);
		writel(1, ioc->ioc_regs.ioc_sem_reg);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail);
		break;

	case IOCPF_E_SEM_ERROR:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
		bfa_fsm_send_event(ioc, IOC_E_HWFAILED);
		break;

	case IOCPF_E_DISABLE:
		bfa_sem_timer_stop(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
		break;

	case IOCPF_E_STOP:
		bfa_sem_timer_stop(ioc);
		bfa_ioc_firmware_unlock(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
		break;

	case IOCPF_E_FAIL:
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}

static void
bfa_iocpf_sm_initfail_entry(struct bfa_iocpf_s *iocpf)
{
	bfa_trc(iocpf->ioc, 0);
}

/*
 * Hardware initialization failed.
 */
static void
bfa_iocpf_sm_initfail(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
	struct bfa_ioc_s *ioc = iocpf->ioc;

	bfa_trc(ioc, event);

	switch (event) {
	case IOCPF_E_DISABLE:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
		break;

	case IOCPF_E_STOP:
		bfa_ioc_firmware_unlock(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}

static void
bfa_iocpf_sm_fail_sync_entry(struct bfa_iocpf_s *iocpf)
{
	/*
	 * Mark IOC as failed in hardware and stop firmware.
	 */
	bfa_ioc_lpu_stop(iocpf->ioc);

	/*
	 * Flush any queued up mailbox requests.
	 */
	bfa_ioc_mbox_flush(iocpf->ioc);

	bfa_ioc_hw_sem_get(iocpf->ioc);
}

static void
bfa_iocpf_sm_fail_sync(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
	struct bfa_ioc_s *ioc = iocpf->ioc;

	bfa_trc(ioc, event);

	switch (event) {
	case IOCPF_E_SEMLOCKED:
		bfa_ioc_sync_ack(ioc);
		bfa_ioc_notify_fail(ioc);
		if (!iocpf->auto_recover) {
			bfa_ioc_sync_leave(ioc);
			writel(BFI_IOC_FAIL, ioc->ioc_regs.ioc_fwstate);
			writel(1, ioc->ioc_regs.ioc_sem_reg);
			bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
		} else {
			if (bfa_ioc_sync_complete(ioc))
				bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit);
			else {
				writel(1, ioc->ioc_regs.ioc_sem_reg);
				bfa_fsm_set_state(iocpf, bfa_iocpf_sm_semwait);
			}
		}
		break;

	case IOCPF_E_SEM_ERROR:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
		bfa_fsm_send_event(ioc, IOC_E_HWFAILED);
		break;

	case IOCPF_E_DISABLE:
		bfa_sem_timer_stop(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
		break;

	case IOCPF_E_FAIL:
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}

static void
bfa_iocpf_sm_fail_entry(struct bfa_iocpf_s *iocpf)
{
	bfa_trc(iocpf->ioc, 0);
}

/*
 * IOC is in failed state.
 */
static void
bfa_iocpf_sm_fail(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
	struct bfa_ioc_s *ioc = iocpf->ioc;

	bfa_trc(ioc, event);

	switch (event) {
	case IOCPF_E_DISABLE:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}

/*
 * BFA IOC private functions
 */

/*
 * Notify common modules registered for notification.
 */
static void
bfa_ioc_event_notify(struct bfa_ioc_s *ioc, enum bfa_ioc_event_e event)
{
	struct bfa_ioc_notify_s	*notify;
	struct list_head	*qe;

	list_for_each(qe, &ioc->notify_q) {
		notify = (struct bfa_ioc_notify_s *)qe;
		notify->cbfn(notify->cbarg, event);
	}
}

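/*
 * Complete IOC disable: invoke the driver's disable callback and
 * notify registered modules that the IOC is disabled.
 */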
static void
bfa_ioc_disable_comp(struct bfa_ioc_s *ioc)
{
	ioc->cbfn->disable_cbfn(ioc->bfa);
	bfa_ioc_event_notify(ioc, BFA_IOC_E_DISABLED);
}

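/*
 * Try to acquire the semaphore at @sem_reg, spinning up to
 * BFA_SEM_SPINCNT times with a 2 microsecond delay between reads.
 * Returns BFA_TRUE if the semaphore was obtained.
 */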
bfa_boolean_t
bfa_ioc_sem_get(void __iomem *sem_reg)
{
	u32 r32;
	int cnt = 0;
#define BFA_SEM_SPINCNT	3000

	r32 = readl(sem_reg);

	while ((r32 & 1) && (cnt < BFA_SEM_SPINCNT)) {
		cnt++;
		udelay(2);
		r32 = readl(sem_reg);
	}

	if (!(r32 & 1))
		return BFA_TRUE;

	return BFA_FALSE;
}

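/*
 * Attempt to acquire the IOC h/w semaphore. On success the IOCPF state
 * machine is driven with IOCPF_E_SEMLOCKED; otherwise the semaphore
 * timer is started to retry the acquisition later.
 */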
Jing Huang7725ccf2009-09-23 17:46:15 -07001335static void
1336bfa_ioc_hw_sem_get(struct bfa_ioc_s *ioc)
1337{
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001338 u32 r32;
Jing Huang7725ccf2009-09-23 17:46:15 -07001339
Jing Huang5fbe25c2010-10-18 17:17:23 -07001340 /*
Jing Huang7725ccf2009-09-23 17:46:15 -07001341 * First read to the semaphore register will return 0, subsequent reads
Krishna Gudipati0a20de42010-03-05 19:34:20 -08001342 * will return 1. Semaphore is released by writing 1 to the register
Jing Huang7725ccf2009-09-23 17:46:15 -07001343 */
Jing Huang53440262010-10-18 17:12:29 -07001344 r32 = readl(ioc->ioc_regs.ioc_sem_reg);
Krishna Gudipati5a0adae2011-06-24 20:22:56 -07001345 if (r32 == ~0) {
1346 WARN_ON(r32 == ~0);
1347 bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_SEM_ERROR);
1348 return;
1349 }
Krishna Gudipati11189202011-06-13 15:50:35 -07001350 if (!(r32 & 1)) {
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001351 bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_SEMLOCKED);
Jing Huang7725ccf2009-09-23 17:46:15 -07001352 return;
1353 }
1354
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001355 bfa_sem_timer_start(ioc);
Jing Huang7725ccf2009-09-23 17:46:15 -07001356}
1357
Jing Huang5fbe25c2010-10-18 17:17:23 -07001358/*
Jing Huang7725ccf2009-09-23 17:46:15 -07001359 * Initialize LPU local memory (aka secondary memory / SRAM)
1360 */
1361static void
1362bfa_ioc_lmem_init(struct bfa_ioc_s *ioc)
1363{
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001364 u32 pss_ctl;
1365 int i;
Jing Huang7725ccf2009-09-23 17:46:15 -07001366#define PSS_LMEM_INIT_TIME 10000
1367
Jing Huang53440262010-10-18 17:12:29 -07001368 pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg);
Jing Huang7725ccf2009-09-23 17:46:15 -07001369 pss_ctl &= ~__PSS_LMEM_RESET;
1370 pss_ctl |= __PSS_LMEM_INIT_EN;
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001371
1372 /*
1373 * i2c workaround 12.5khz clock
1374 */
1375 pss_ctl |= __PSS_I2C_CLK_DIV(3UL);
Jing Huang53440262010-10-18 17:12:29 -07001376 writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);
Jing Huang7725ccf2009-09-23 17:46:15 -07001377
Jing Huang5fbe25c2010-10-18 17:17:23 -07001378 /*
Jing Huang7725ccf2009-09-23 17:46:15 -07001379 * wait for memory initialization to be complete
1380 */
1381 i = 0;
1382 do {
Jing Huang53440262010-10-18 17:12:29 -07001383 pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg);
Jing Huang7725ccf2009-09-23 17:46:15 -07001384 i++;
1385 } while (!(pss_ctl & __PSS_LMEM_INIT_DONE) && (i < PSS_LMEM_INIT_TIME));
1386
Jing Huang5fbe25c2010-10-18 17:17:23 -07001387 /*
Jing Huang7725ccf2009-09-23 17:46:15 -07001388 * If memory initialization is not successful, IOC timeout will catch
1389 * such failures.
1390 */
Jing Huangd4b671c2010-12-26 21:46:35 -08001391 WARN_ON(!(pss_ctl & __PSS_LMEM_INIT_DONE));
Jing Huang7725ccf2009-09-23 17:46:15 -07001392 bfa_trc(ioc, pss_ctl);
1393
1394 pss_ctl &= ~(__PSS_LMEM_INIT_DONE | __PSS_LMEM_INIT_EN);
Jing Huang53440262010-10-18 17:12:29 -07001395 writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);
Jing Huang7725ccf2009-09-23 17:46:15 -07001396}
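/*
 * Sketch of the bounded-poll pattern used by bfa_ioc_lmem_init() above: spin
 * on a status bit for a fixed number of iterations instead of blocking
 * forever, and let the IOC timeout catch the failure case. The helper and
 * its names are illustrative only; kernel types from this file are assumed.
 */
#define SKETCH_POLL_LIMIT	10000

static int
sketch_poll_for_bit(u32 (*read_status)(void), u32 done_bit)
{
	u32 r32;
	int i = 0;

	do {
		r32 = read_status();
		i++;
	} while (!(r32 & done_bit) && (i < SKETCH_POLL_LIMIT));

	return (r32 & done_bit) != 0;	/* 1 if the bit came up within budget */
}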
1397
1398static void
1399bfa_ioc_lpu_start(struct bfa_ioc_s *ioc)
1400{
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001401 u32 pss_ctl;
Jing Huang7725ccf2009-09-23 17:46:15 -07001402
Jing Huang5fbe25c2010-10-18 17:17:23 -07001403 /*
Jing Huang7725ccf2009-09-23 17:46:15 -07001404 * Take processor out of reset.
1405 */
Jing Huang53440262010-10-18 17:12:29 -07001406 pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg);
Jing Huang7725ccf2009-09-23 17:46:15 -07001407 pss_ctl &= ~__PSS_LPU0_RESET;
1408
Jing Huang53440262010-10-18 17:12:29 -07001409 writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);
Jing Huang7725ccf2009-09-23 17:46:15 -07001410}
1411
1412static void
1413bfa_ioc_lpu_stop(struct bfa_ioc_s *ioc)
1414{
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001415 u32 pss_ctl;
Jing Huang7725ccf2009-09-23 17:46:15 -07001416
Jing Huang5fbe25c2010-10-18 17:17:23 -07001417 /*
Jing Huang7725ccf2009-09-23 17:46:15 -07001418 * Put processors in reset.
1419 */
Jing Huang53440262010-10-18 17:12:29 -07001420 pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg);
Jing Huang7725ccf2009-09-23 17:46:15 -07001421 pss_ctl |= (__PSS_LPU0_RESET | __PSS_LPU1_RESET);
1422
Jing Huang53440262010-10-18 17:12:29 -07001423 writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);
Jing Huang7725ccf2009-09-23 17:46:15 -07001424}
1425
Jing Huang5fbe25c2010-10-18 17:17:23 -07001426/*
Jing Huang7725ccf2009-09-23 17:46:15 -07001427	 * Get the image header (version) of the firmware loaded in IOC SMEM.
1428 */
Krishna Gudipati0a20de42010-03-05 19:34:20 -08001429void
Jing Huang7725ccf2009-09-23 17:46:15 -07001430bfa_ioc_fwver_get(struct bfa_ioc_s *ioc, struct bfi_ioc_image_hdr_s *fwhdr)
1431{
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001432 u32 pgnum, pgoff;
1433 u32 loff = 0;
1434 int i;
1435 u32 *fwsig = (u32 *) fwhdr;
Jing Huang7725ccf2009-09-23 17:46:15 -07001436
Maggie Zhangf7f738122010-12-09 19:08:43 -08001437 pgnum = PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, loff);
1438 pgoff = PSS_SMEM_PGOFF(loff);
Jing Huang53440262010-10-18 17:12:29 -07001439 writel(pgnum, ioc->ioc_regs.host_page_num_fn);
Jing Huang7725ccf2009-09-23 17:46:15 -07001440
1441 for (i = 0; i < (sizeof(struct bfi_ioc_image_hdr_s) / sizeof(u32));
1442 i++) {
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001443 fwsig[i] =
1444 bfa_mem_read(ioc->ioc_regs.smem_page_start, loff);
Jing Huang7725ccf2009-09-23 17:46:15 -07001445 loff += sizeof(u32);
1446 }
1447}
1448
Jing Huang5fbe25c2010-10-18 17:17:23 -07001449/*
Jing Huang7725ccf2009-09-23 17:46:15 -07001450	 * Returns TRUE if the given firmware header's md5sum matches the driver's firmware.
1451 */
Krishna Gudipati0a20de42010-03-05 19:34:20 -08001452bfa_boolean_t
Jing Huang7725ccf2009-09-23 17:46:15 -07001453bfa_ioc_fwver_cmp(struct bfa_ioc_s *ioc, struct bfi_ioc_image_hdr_s *fwhdr)
1454{
1455 struct bfi_ioc_image_hdr_s *drv_fwhdr;
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001456 int i;
Jing Huang7725ccf2009-09-23 17:46:15 -07001457
Jing Huang293f82d2010-07-08 19:45:20 -07001458 drv_fwhdr = (struct bfi_ioc_image_hdr_s *)
Krishna Gudipati11189202011-06-13 15:50:35 -07001459 bfa_cb_image_get_chunk(bfa_ioc_asic_gen(ioc), 0);
Jing Huang7725ccf2009-09-23 17:46:15 -07001460
1461 for (i = 0; i < BFI_IOC_MD5SUM_SZ; i++) {
Krishna Gudipati881c1b32012-08-22 19:52:02 -07001462 if (fwhdr->md5sum[i] != cpu_to_le32(drv_fwhdr->md5sum[i])) {
Jing Huang7725ccf2009-09-23 17:46:15 -07001463 bfa_trc(ioc, i);
1464 bfa_trc(ioc, fwhdr->md5sum[i]);
1465 bfa_trc(ioc, drv_fwhdr->md5sum[i]);
1466 return BFA_FALSE;
1467 }
1468 }
1469
1470 bfa_trc(ioc, fwhdr->md5sum[0]);
1471 return BFA_TRUE;
1472}
1473
Jing Huang5fbe25c2010-10-18 17:17:23 -07001474/*
Jing Huang7725ccf2009-09-23 17:46:15 -07001475 * Return true if current running version is valid. Firmware signature and
1476 * execution context (driver/bios) must match.
1477 */
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001478static bfa_boolean_t
1479bfa_ioc_fwver_valid(struct bfa_ioc_s *ioc, u32 boot_env)
Jing Huang7725ccf2009-09-23 17:46:15 -07001480{
1481 struct bfi_ioc_image_hdr_s fwhdr, *drv_fwhdr;
1482
Jing Huang7725ccf2009-09-23 17:46:15 -07001483 bfa_ioc_fwver_get(ioc, &fwhdr);
Jing Huang293f82d2010-07-08 19:45:20 -07001484 drv_fwhdr = (struct bfi_ioc_image_hdr_s *)
Krishna Gudipati11189202011-06-13 15:50:35 -07001485 bfa_cb_image_get_chunk(bfa_ioc_asic_gen(ioc), 0);
Jing Huang7725ccf2009-09-23 17:46:15 -07001486
Krishna Gudipati881c1b32012-08-22 19:52:02 -07001487 if (fwhdr.signature != cpu_to_le32(drv_fwhdr->signature)) {
Jing Huang7725ccf2009-09-23 17:46:15 -07001488 bfa_trc(ioc, fwhdr.signature);
1489 bfa_trc(ioc, drv_fwhdr->signature);
1490 return BFA_FALSE;
1491 }
1492
Krishna Gudipati11189202011-06-13 15:50:35 -07001493 if (swab32(fwhdr.bootenv) != boot_env) {
1494 bfa_trc(ioc, fwhdr.bootenv);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001495 bfa_trc(ioc, boot_env);
Jing Huang7725ccf2009-09-23 17:46:15 -07001496 return BFA_FALSE;
1497 }
1498
1499 return bfa_ioc_fwver_cmp(ioc, &fwhdr);
1500}
1501
Jing Huang5fbe25c2010-10-18 17:17:23 -07001502/*
Jing Huang7725ccf2009-09-23 17:46:15 -07001503 * Conditionally flush any pending message from firmware at start.
1504 */
1505static void
1506bfa_ioc_msgflush(struct bfa_ioc_s *ioc)
1507{
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001508 u32 r32;
Jing Huang7725ccf2009-09-23 17:46:15 -07001509
Jing Huang53440262010-10-18 17:12:29 -07001510 r32 = readl(ioc->ioc_regs.lpu_mbox_cmd);
Jing Huang7725ccf2009-09-23 17:46:15 -07001511 if (r32)
Jing Huang53440262010-10-18 17:12:29 -07001512 writel(1, ioc->ioc_regs.lpu_mbox_cmd);
Jing Huang7725ccf2009-09-23 17:46:15 -07001513}
1514
Jing Huang7725ccf2009-09-23 17:46:15 -07001515static void
1516bfa_ioc_hwinit(struct bfa_ioc_s *ioc, bfa_boolean_t force)
1517{
1518 enum bfi_ioc_state ioc_fwstate;
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001519 bfa_boolean_t fwvalid;
1520 u32 boot_type;
1521 u32 boot_env;
Jing Huang7725ccf2009-09-23 17:46:15 -07001522
Jing Huang53440262010-10-18 17:12:29 -07001523 ioc_fwstate = readl(ioc->ioc_regs.ioc_fwstate);
Jing Huang7725ccf2009-09-23 17:46:15 -07001524
1525 if (force)
1526 ioc_fwstate = BFI_IOC_UNINIT;
1527
1528 bfa_trc(ioc, ioc_fwstate);
1529
Krishna Gudipati11189202011-06-13 15:50:35 -07001530 boot_type = BFI_FWBOOT_TYPE_NORMAL;
1531 boot_env = BFI_FWBOOT_ENV_OS;
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001532
Jing Huang5fbe25c2010-10-18 17:17:23 -07001533 /*
Jing Huang7725ccf2009-09-23 17:46:15 -07001534 * check if firmware is valid
1535 */
1536 fwvalid = (ioc_fwstate == BFI_IOC_UNINIT) ?
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001537 BFA_FALSE : bfa_ioc_fwver_valid(ioc, boot_env);
Jing Huang7725ccf2009-09-23 17:46:15 -07001538
1539 if (!fwvalid) {
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001540 bfa_ioc_boot(ioc, boot_type, boot_env);
Krishna Gudipati8b070b42011-06-13 15:52:40 -07001541 bfa_ioc_poll_fwinit(ioc);
Jing Huang7725ccf2009-09-23 17:46:15 -07001542 return;
1543 }
1544
Jing Huang5fbe25c2010-10-18 17:17:23 -07001545 /*
Jing Huang7725ccf2009-09-23 17:46:15 -07001546	 * If hardware initialization is in progress (initialized by the other
1547	 * IOC), just wait for an initialization completion interrupt.
1548 */
1549 if (ioc_fwstate == BFI_IOC_INITING) {
Krishna Gudipati775c7742011-06-13 15:52:12 -07001550 bfa_ioc_poll_fwinit(ioc);
Jing Huang7725ccf2009-09-23 17:46:15 -07001551 return;
1552 }
1553
Jing Huang5fbe25c2010-10-18 17:17:23 -07001554 /*
Jing Huang7725ccf2009-09-23 17:46:15 -07001555	 * If the IOC function is disabled and the firmware version is the same,
1556	 * just re-enable the IOC.
Jing Huang07b28382010-07-08 19:59:24 -07001557	 *
1558	 * When running from option ROM, the IOC must not be in the operational
1559	 * state. With convergence, however, the IOC will already be operational
1560	 * when the second driver is loaded.
Jing Huang7725ccf2009-09-23 17:46:15 -07001561 */
Jing Huang8f4bfad2010-12-26 21:50:10 -08001562 if (ioc_fwstate == BFI_IOC_DISABLED || ioc_fwstate == BFI_IOC_OP) {
Jing Huang7725ccf2009-09-23 17:46:15 -07001563
Jing Huang5fbe25c2010-10-18 17:17:23 -07001564 /*
Jing Huang7725ccf2009-09-23 17:46:15 -07001565 * When using MSI-X any pending firmware ready event should
1566 * be flushed. Otherwise MSI-X interrupts are not delivered.
1567 */
1568 bfa_ioc_msgflush(ioc);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001569 bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_FWREADY);
Jing Huang7725ccf2009-09-23 17:46:15 -07001570 return;
1571 }
1572
Jing Huang5fbe25c2010-10-18 17:17:23 -07001573 /*
Jing Huang7725ccf2009-09-23 17:46:15 -07001574 * Initialize the h/w for any other states.
1575 */
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001576 bfa_ioc_boot(ioc, boot_type, boot_env);
Krishna Gudipati8b070b42011-06-13 15:52:40 -07001577 bfa_ioc_poll_fwinit(ioc);
Jing Huang7725ccf2009-09-23 17:46:15 -07001578}
1579
1580static void
1581bfa_ioc_timeout(void *ioc_arg)
1582{
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001583 struct bfa_ioc_s *ioc = (struct bfa_ioc_s *) ioc_arg;
Jing Huang7725ccf2009-09-23 17:46:15 -07001584
1585 bfa_trc(ioc, 0);
1586 bfa_fsm_send_event(ioc, IOC_E_TIMEOUT);
1587}
1588
1589void
1590bfa_ioc_mbox_send(struct bfa_ioc_s *ioc, void *ioc_msg, int len)
1591{
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001592 u32 *msgp = (u32 *) ioc_msg;
1593 u32 i;
Jing Huang7725ccf2009-09-23 17:46:15 -07001594
1595 bfa_trc(ioc, msgp[0]);
1596 bfa_trc(ioc, len);
1597
Jing Huangd4b671c2010-12-26 21:46:35 -08001598 WARN_ON(len > BFI_IOC_MSGLEN_MAX);
Jing Huang7725ccf2009-09-23 17:46:15 -07001599
1600 /*
1601 * first write msg to mailbox registers
1602 */
1603 for (i = 0; i < len / sizeof(u32); i++)
Jing Huang53440262010-10-18 17:12:29 -07001604 writel(cpu_to_le32(msgp[i]),
1605 ioc->ioc_regs.hfn_mbox + i * sizeof(u32));
Jing Huang7725ccf2009-09-23 17:46:15 -07001606
1607 for (; i < BFI_IOC_MSGLEN_MAX / sizeof(u32); i++)
Jing Huang53440262010-10-18 17:12:29 -07001608 writel(0, ioc->ioc_regs.hfn_mbox + i * sizeof(u32));
Jing Huang7725ccf2009-09-23 17:46:15 -07001609
1610 /*
1611 * write 1 to mailbox CMD to trigger LPU event
1612 */
Jing Huang53440262010-10-18 17:12:29 -07001613 writel(1, ioc->ioc_regs.hfn_mbox_cmd);
1614 (void) readl(ioc->ioc_regs.hfn_mbox_cmd);
Jing Huang7725ccf2009-09-23 17:46:15 -07001615}
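/*
 * Sketch of the mailbox hand-off above, with a plain array standing in for
 * the memory-mapped hfn_mbox window and a flag for the CMD doorbell. The
 * word count and all "sketch_" names are assumptions for illustration.
 */
#define SKETCH_MBOX_WORDS	8	/* the driver uses BFI_IOC_MSGLEN_MAX / sizeof(u32) */

static u32 sketch_mbox[SKETCH_MBOX_WORDS];
static u32 sketch_mbox_doorbell;

static void
sketch_mbox_send(const u32 *msg, u32 len)
{
	u32 i;

	/* copy the message words; the driver converts each with cpu_to_le32() */
	for (i = 0; i < len / sizeof(u32) && i < SKETCH_MBOX_WORDS; i++)
		sketch_mbox[i] = msg[i];

	/* zero-pad the remaining words so stale data is never interpreted */
	for (; i < SKETCH_MBOX_WORDS; i++)
		sketch_mbox[i] = 0;

	/* finally "ring the doorbell" so the LPU fetches the message */
	sketch_mbox_doorbell = 1;
}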
1616
1617static void
1618bfa_ioc_send_enable(struct bfa_ioc_s *ioc)
1619{
1620 struct bfi_ioc_ctrl_req_s enable_req;
Maggie Zhangf16a1752010-12-09 19:12:32 -08001621 struct timeval tv;
Jing Huang7725ccf2009-09-23 17:46:15 -07001622
1623 bfi_h2i_set(enable_req.mh, BFI_MC_IOC, BFI_IOC_H2I_ENABLE_REQ,
1624 bfa_ioc_portid(ioc));
Krishna Gudipatid37779f2011-06-13 15:42:10 -07001625 enable_req.clscode = cpu_to_be16(ioc->clscode);
Maggie Zhangf16a1752010-12-09 19:12:32 -08001626 do_gettimeofday(&tv);
Jing Huangba816ea2010-10-18 17:10:50 -07001627 enable_req.tv_sec = be32_to_cpu(tv.tv_sec);
Jing Huang7725ccf2009-09-23 17:46:15 -07001628 bfa_ioc_mbox_send(ioc, &enable_req, sizeof(struct bfi_ioc_ctrl_req_s));
1629}
1630
1631static void
1632bfa_ioc_send_disable(struct bfa_ioc_s *ioc)
1633{
1634 struct bfi_ioc_ctrl_req_s disable_req;
1635
1636 bfi_h2i_set(disable_req.mh, BFI_MC_IOC, BFI_IOC_H2I_DISABLE_REQ,
1637 bfa_ioc_portid(ioc));
1638 bfa_ioc_mbox_send(ioc, &disable_req, sizeof(struct bfi_ioc_ctrl_req_s));
1639}
1640
1641static void
1642bfa_ioc_send_getattr(struct bfa_ioc_s *ioc)
1643{
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001644 struct bfi_ioc_getattr_req_s attr_req;
Jing Huang7725ccf2009-09-23 17:46:15 -07001645
1646 bfi_h2i_set(attr_req.mh, BFI_MC_IOC, BFI_IOC_H2I_GETATTR_REQ,
1647 bfa_ioc_portid(ioc));
1648 bfa_dma_be_addr_set(attr_req.attr_addr, ioc->attr_dma.pa);
1649 bfa_ioc_mbox_send(ioc, &attr_req, sizeof(attr_req));
1650}
1651
1652static void
1653bfa_ioc_hb_check(void *cbarg)
1654{
Krishna Gudipati0a20de42010-03-05 19:34:20 -08001655 struct bfa_ioc_s *ioc = cbarg;
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001656 u32 hb_count;
Jing Huang7725ccf2009-09-23 17:46:15 -07001657
Jing Huang53440262010-10-18 17:12:29 -07001658 hb_count = readl(ioc->ioc_regs.heartbeat);
Jing Huang7725ccf2009-09-23 17:46:15 -07001659 if (ioc->hb_count == hb_count) {
Jing Huang7725ccf2009-09-23 17:46:15 -07001660 bfa_ioc_recover(ioc);
1661 return;
Krishna Gudipati0a20de42010-03-05 19:34:20 -08001662 } else {
1663 ioc->hb_count = hb_count;
Jing Huang7725ccf2009-09-23 17:46:15 -07001664 }
1665
1666 bfa_ioc_mbox_poll(ioc);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001667 bfa_hb_timer_start(ioc);
Jing Huang7725ccf2009-09-23 17:46:15 -07001668}
1669
1670static void
1671bfa_ioc_hb_monitor(struct bfa_ioc_s *ioc)
1672{
Jing Huang53440262010-10-18 17:12:29 -07001673 ioc->hb_count = readl(ioc->ioc_regs.heartbeat);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001674 bfa_hb_timer_start(ioc);
Jing Huang7725ccf2009-09-23 17:46:15 -07001675}
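/*
 * Sketch of the heartbeat test above: firmware increments a counter every
 * interval, so two equal samples in a row mean the firmware has stalled.
 * The helper name is illustrative.
 */
static bfa_boolean_t
sketch_hb_alive(u32 *last_count, u32 current_count)
{
	bfa_boolean_t alive = (*last_count != current_count) ?
				BFA_TRUE : BFA_FALSE;

	*last_count = current_count;	/* remember this sample for the next check */
	return alive;
}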
1676
Jing Huang5fbe25c2010-10-18 17:17:23 -07001677/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001678 * Initiate a full firmware download.
Jing Huang7725ccf2009-09-23 17:46:15 -07001679 */
1680static void
1681bfa_ioc_download_fw(struct bfa_ioc_s *ioc, u32 boot_type,
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001682 u32 boot_env)
Jing Huang7725ccf2009-09-23 17:46:15 -07001683{
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001684 u32 *fwimg;
1685 u32 pgnum, pgoff;
1686 u32 loff = 0;
1687 u32 chunkno = 0;
1688 u32 i;
Krishna Gudipati11189202011-06-13 15:50:35 -07001689 u32 asicmode;
Jing Huang7725ccf2009-09-23 17:46:15 -07001690
Krishna Gudipati11189202011-06-13 15:50:35 -07001691 bfa_trc(ioc, bfa_cb_image_get_size(bfa_ioc_asic_gen(ioc)));
1692 fwimg = bfa_cb_image_get_chunk(bfa_ioc_asic_gen(ioc), chunkno);
Jing Huang7725ccf2009-09-23 17:46:15 -07001693
Maggie Zhangf7f738122010-12-09 19:08:43 -08001694 pgnum = PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, loff);
1695 pgoff = PSS_SMEM_PGOFF(loff);
Jing Huang7725ccf2009-09-23 17:46:15 -07001696
Jing Huang53440262010-10-18 17:12:29 -07001697 writel(pgnum, ioc->ioc_regs.host_page_num_fn);
Jing Huang7725ccf2009-09-23 17:46:15 -07001698
Krishna Gudipati11189202011-06-13 15:50:35 -07001699 for (i = 0; i < bfa_cb_image_get_size(bfa_ioc_asic_gen(ioc)); i++) {
Jing Huang7725ccf2009-09-23 17:46:15 -07001700
Krishna Gudipati0a20de42010-03-05 19:34:20 -08001701 if (BFA_IOC_FLASH_CHUNK_NO(i) != chunkno) {
1702 chunkno = BFA_IOC_FLASH_CHUNK_NO(i);
Krishna Gudipati11189202011-06-13 15:50:35 -07001703 fwimg = bfa_cb_image_get_chunk(bfa_ioc_asic_gen(ioc),
Krishna Gudipati0a20de42010-03-05 19:34:20 -08001704 BFA_IOC_FLASH_CHUNK_ADDR(chunkno));
Jing Huang7725ccf2009-09-23 17:46:15 -07001705 }
1706
Jing Huang5fbe25c2010-10-18 17:17:23 -07001707 /*
Jing Huang7725ccf2009-09-23 17:46:15 -07001708 * write smem
1709 */
1710 bfa_mem_write(ioc->ioc_regs.smem_page_start, loff,
Krishna Gudipati881c1b32012-08-22 19:52:02 -07001711 cpu_to_le32(fwimg[BFA_IOC_FLASH_OFFSET_IN_CHUNK(i)]));
Jing Huang7725ccf2009-09-23 17:46:15 -07001712
1713 loff += sizeof(u32);
1714
Jing Huang5fbe25c2010-10-18 17:17:23 -07001715 /*
Jing Huang7725ccf2009-09-23 17:46:15 -07001716 * handle page offset wrap around
1717 */
1718 loff = PSS_SMEM_PGOFF(loff);
1719 if (loff == 0) {
1720 pgnum++;
Jing Huang53440262010-10-18 17:12:29 -07001721 writel(pgnum, ioc->ioc_regs.host_page_num_fn);
Jing Huang7725ccf2009-09-23 17:46:15 -07001722 }
1723 }
1724
Maggie Zhangf7f738122010-12-09 19:08:43 -08001725 writel(PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, 0),
1726 ioc->ioc_regs.host_page_num_fn);
Krishna Gudipati13cc20c2010-03-05 19:37:29 -08001727
1728 /*
Krishna Gudipati11189202011-06-13 15:50:35 -07001729 * Set boot type and device mode at the end.
1730 */
1731 asicmode = BFI_FWBOOT_DEVMODE(ioc->asic_gen, ioc->asic_mode,
1732 ioc->port0_mode, ioc->port1_mode);
1733 bfa_mem_write(ioc->ioc_regs.smem_page_start, BFI_FWBOOT_DEVMODE_OFF,
1734 swab32(asicmode));
1735 bfa_mem_write(ioc->ioc_regs.smem_page_start, BFI_FWBOOT_TYPE_OFF,
Jing Huang53440262010-10-18 17:12:29 -07001736 swab32(boot_type));
Krishna Gudipati11189202011-06-13 15:50:35 -07001737 bfa_mem_write(ioc->ioc_regs.smem_page_start, BFI_FWBOOT_ENV_OFF,
Jing Huang53440262010-10-18 17:12:29 -07001738 swab32(boot_env));
Jing Huang7725ccf2009-09-23 17:46:15 -07001739}
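/*
 * Sketch of the paged SMEM walk used by the download loop above: a linear
 * offset is split into a page number and an in-page offset, and the page
 * register is re-programmed whenever the offset wraps back to 0. The page
 * size below is an assumption for illustration; the driver derives it from
 * the PSS_SMEM_PGNUM()/PSS_SMEM_PGOFF() macros.
 */
#define SKETCH_SMEM_PG_SIZE	0x8000

static u32
sketch_smem_pages_touched(u32 nwords)
{
	u32 loff = 0, pgnum = 0, i;

	for (i = 0; i < nwords; i++) {
		/* ... access one word at (pgnum, loff) here ... */
		loff += sizeof(u32);
		loff &= (SKETCH_SMEM_PG_SIZE - 1);	/* offset within the page */
		if (loff == 0)
			pgnum++;			/* crossed a page boundary */
	}

	return pgnum + 1;	/* number of pages the walk touched */
}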
1740
Jing Huang7725ccf2009-09-23 17:46:15 -07001741
Jing Huang5fbe25c2010-10-18 17:17:23 -07001742/*
Jing Huang7725ccf2009-09-23 17:46:15 -07001743 * Update BFA configuration from firmware configuration.
1744 */
1745static void
1746bfa_ioc_getattr_reply(struct bfa_ioc_s *ioc)
1747{
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001748 struct bfi_ioc_attr_s *attr = ioc->attr;
Jing Huang7725ccf2009-09-23 17:46:15 -07001749
Jing Huangba816ea2010-10-18 17:10:50 -07001750 attr->adapter_prop = be32_to_cpu(attr->adapter_prop);
1751 attr->card_type = be32_to_cpu(attr->card_type);
1752 attr->maxfrsize = be16_to_cpu(attr->maxfrsize);
Krishna Gudipati5a0adae2011-06-24 20:22:56 -07001753 ioc->fcmode = (attr->port_mode == BFI_PORT_MODE_FC);
Jing Huang7725ccf2009-09-23 17:46:15 -07001754
1755 bfa_fsm_send_event(ioc, IOC_E_FWRSP_GETATTR);
1756}
1757
Jing Huang5fbe25c2010-10-18 17:17:23 -07001758/*
Jing Huang7725ccf2009-09-23 17:46:15 -07001759 * Attach time initialization of mbox logic.
1760 */
1761static void
1762bfa_ioc_mbox_attach(struct bfa_ioc_s *ioc)
1763{
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001764 struct bfa_ioc_mbox_mod_s *mod = &ioc->mbox_mod;
1765 int mc;
Jing Huang7725ccf2009-09-23 17:46:15 -07001766
1767 INIT_LIST_HEAD(&mod->cmd_q);
1768 for (mc = 0; mc < BFI_MC_MAX; mc++) {
1769 mod->mbhdlr[mc].cbfn = NULL;
1770 mod->mbhdlr[mc].cbarg = ioc->bfa;
1771 }
1772}
1773
Jing Huang5fbe25c2010-10-18 17:17:23 -07001774/*
Jing Huang7725ccf2009-09-23 17:46:15 -07001775 * Mbox poll timer -- restarts any pending mailbox requests.
1776 */
1777static void
1778bfa_ioc_mbox_poll(struct bfa_ioc_s *ioc)
1779{
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001780 struct bfa_ioc_mbox_mod_s *mod = &ioc->mbox_mod;
1781 struct bfa_mbox_cmd_s *cmd;
1782 u32 stat;
Jing Huang7725ccf2009-09-23 17:46:15 -07001783
Jing Huang5fbe25c2010-10-18 17:17:23 -07001784 /*
Jing Huang7725ccf2009-09-23 17:46:15 -07001785 * If no command pending, do nothing
1786 */
1787 if (list_empty(&mod->cmd_q))
1788 return;
1789
Jing Huang5fbe25c2010-10-18 17:17:23 -07001790 /*
Jing Huang7725ccf2009-09-23 17:46:15 -07001791 * If previous command is not yet fetched by firmware, do nothing
1792 */
Jing Huang53440262010-10-18 17:12:29 -07001793 stat = readl(ioc->ioc_regs.hfn_mbox_cmd);
Jing Huang7725ccf2009-09-23 17:46:15 -07001794 if (stat)
1795 return;
1796
Jing Huang5fbe25c2010-10-18 17:17:23 -07001797 /*
Jing Huang7725ccf2009-09-23 17:46:15 -07001798 * Enqueue command to firmware.
1799 */
1800 bfa_q_deq(&mod->cmd_q, &cmd);
1801 bfa_ioc_mbox_send(ioc, cmd->msg, sizeof(cmd->msg));
1802}
1803
Jing Huang5fbe25c2010-10-18 17:17:23 -07001804/*
Jing Huang7725ccf2009-09-23 17:46:15 -07001805 * Cleanup any pending requests.
1806 */
1807static void
Krishna Gudipati8b070b42011-06-13 15:52:40 -07001808bfa_ioc_mbox_flush(struct bfa_ioc_s *ioc)
Jing Huang7725ccf2009-09-23 17:46:15 -07001809{
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001810 struct bfa_ioc_mbox_mod_s *mod = &ioc->mbox_mod;
1811 struct bfa_mbox_cmd_s *cmd;
Jing Huang7725ccf2009-09-23 17:46:15 -07001812
1813 while (!list_empty(&mod->cmd_q))
1814 bfa_q_deq(&mod->cmd_q, &cmd);
1815}
1816
Jing Huang5fbe25c2010-10-18 17:17:23 -07001817/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001818 * Read data from SMEM to host through PCI memmap
1819 *
1820 * @param[in] ioc memory for IOC
1821 * @param[in] tbuf app memory to store data from smem
1822 * @param[in] soff smem offset
1823	 * @param[in]	sz	number of bytes to read from smem
Jing Huang7725ccf2009-09-23 17:46:15 -07001824 */
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001825static bfa_status_t
1826bfa_ioc_smem_read(struct bfa_ioc_s *ioc, void *tbuf, u32 soff, u32 sz)
1827{
Maggie50444a32010-11-29 18:26:32 -08001828 u32 pgnum, loff;
1829 __be32 r32;
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001830 int i, len;
1831 u32 *buf = tbuf;
1832
Maggie Zhangf7f738122010-12-09 19:08:43 -08001833 pgnum = PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, soff);
1834 loff = PSS_SMEM_PGOFF(soff);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001835 bfa_trc(ioc, pgnum);
1836 bfa_trc(ioc, loff);
1837 bfa_trc(ioc, sz);
1838
1839 /*
1840 * Hold semaphore to serialize pll init and fwtrc.
1841 */
1842 if (BFA_FALSE == bfa_ioc_sem_get(ioc->ioc_regs.ioc_init_sem_reg)) {
1843 bfa_trc(ioc, 0);
1844 return BFA_STATUS_FAILED;
1845 }
1846
Jing Huang53440262010-10-18 17:12:29 -07001847 writel(pgnum, ioc->ioc_regs.host_page_num_fn);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001848
1849 len = sz/sizeof(u32);
1850 bfa_trc(ioc, len);
1851 for (i = 0; i < len; i++) {
1852 r32 = bfa_mem_read(ioc->ioc_regs.smem_page_start, loff);
Jing Huangba816ea2010-10-18 17:10:50 -07001853 buf[i] = be32_to_cpu(r32);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001854 loff += sizeof(u32);
1855
Jing Huang5fbe25c2010-10-18 17:17:23 -07001856 /*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001857 * handle page offset wrap around
1858 */
1859 loff = PSS_SMEM_PGOFF(loff);
1860 if (loff == 0) {
1861 pgnum++;
Jing Huang53440262010-10-18 17:12:29 -07001862 writel(pgnum, ioc->ioc_regs.host_page_num_fn);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001863 }
1864 }
Maggie Zhangf7f738122010-12-09 19:08:43 -08001865 writel(PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, 0),
1866 ioc->ioc_regs.host_page_num_fn);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001867 /*
1868 * release semaphore.
1869 */
Krishna Gudipati5a0adae2011-06-24 20:22:56 -07001870 readl(ioc->ioc_regs.ioc_init_sem_reg);
Maggie Zhangf7f738122010-12-09 19:08:43 -08001871 writel(1, ioc->ioc_regs.ioc_init_sem_reg);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001872
1873 bfa_trc(ioc, pgnum);
1874 return BFA_STATUS_OK;
1875}
1876
Jing Huang5fbe25c2010-10-18 17:17:23 -07001877/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001878 * Clear SMEM data from host through PCI memmap
1879 *
1880 * @param[in] ioc memory for IOC
1881 * @param[in] soff smem offset
1882	 * @param[in]	sz	number of bytes to clear in smem
1883 */
1884static bfa_status_t
1885bfa_ioc_smem_clr(struct bfa_ioc_s *ioc, u32 soff, u32 sz)
1886{
1887 int i, len;
1888 u32 pgnum, loff;
1889
Maggie Zhangf7f738122010-12-09 19:08:43 -08001890 pgnum = PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, soff);
1891 loff = PSS_SMEM_PGOFF(soff);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001892 bfa_trc(ioc, pgnum);
1893 bfa_trc(ioc, loff);
1894 bfa_trc(ioc, sz);
1895
1896 /*
1897 * Hold semaphore to serialize pll init and fwtrc.
1898 */
1899 if (BFA_FALSE == bfa_ioc_sem_get(ioc->ioc_regs.ioc_init_sem_reg)) {
1900 bfa_trc(ioc, 0);
1901 return BFA_STATUS_FAILED;
1902 }
1903
Jing Huang53440262010-10-18 17:12:29 -07001904 writel(pgnum, ioc->ioc_regs.host_page_num_fn);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001905
1906 len = sz/sizeof(u32); /* len in words */
1907 bfa_trc(ioc, len);
1908 for (i = 0; i < len; i++) {
1909 bfa_mem_write(ioc->ioc_regs.smem_page_start, loff, 0);
1910 loff += sizeof(u32);
1911
Jing Huang5fbe25c2010-10-18 17:17:23 -07001912 /*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001913 * handle page offset wrap around
1914 */
1915 loff = PSS_SMEM_PGOFF(loff);
1916 if (loff == 0) {
1917 pgnum++;
Jing Huang53440262010-10-18 17:12:29 -07001918 writel(pgnum, ioc->ioc_regs.host_page_num_fn);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001919 }
1920 }
Maggie Zhangf7f738122010-12-09 19:08:43 -08001921 writel(PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, 0),
1922 ioc->ioc_regs.host_page_num_fn);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001923
1924 /*
1925 * release semaphore.
1926 */
Krishna Gudipati5a0adae2011-06-24 20:22:56 -07001927 readl(ioc->ioc_regs.ioc_init_sem_reg);
Maggie Zhangf7f738122010-12-09 19:08:43 -08001928 writel(1, ioc->ioc_regs.ioc_init_sem_reg);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001929 bfa_trc(ioc, pgnum);
1930 return BFA_STATUS_OK;
1931}
1932
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001933static void
Krishna Gudipati4e78efe2010-12-13 16:16:09 -08001934bfa_ioc_fail_notify(struct bfa_ioc_s *ioc)
1935{
Krishna Gudipati4e78efe2010-12-13 16:16:09 -08001936 struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad;
1937
Jing Huang8f4bfad2010-12-26 21:50:10 -08001938 /*
Krishna Gudipati4e78efe2010-12-13 16:16:09 -08001939 * Notify driver and common modules registered for notification.
1940 */
1941 ioc->cbfn->hbfail_cbfn(ioc->bfa);
Krishna Gudipatid37779f2011-06-13 15:42:10 -07001942 bfa_ioc_event_notify(ioc, BFA_IOC_E_FAILED);
Krishna Gudipati4e78efe2010-12-13 16:16:09 -08001943
1944 bfa_ioc_debug_save_ftrc(ioc);
1945
1946 BFA_LOG(KERN_CRIT, bfad, bfa_log_level,
1947 "Heart Beat of IOC has failed\n");
Krishna Gudipati7826f302011-07-20 16:59:13 -07001948 bfa_ioc_aen_post(ioc, BFA_IOC_AEN_HBFAIL);
Krishna Gudipati4e78efe2010-12-13 16:16:09 -08001949
1950}
1951
1952static void
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001953bfa_ioc_pf_fwmismatch(struct bfa_ioc_s *ioc)
1954{
1955 struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad;
Jing Huang5fbe25c2010-10-18 17:17:23 -07001956 /*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001957 * Provide enable completion callback.
1958 */
1959 ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
Jing Huang88166242010-12-09 17:11:53 -08001960 BFA_LOG(KERN_WARNING, bfad, bfa_log_level,
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001961 "Running firmware version is incompatible "
1962 "with the driver version\n");
Krishna Gudipati7826f302011-07-20 16:59:13 -07001963 bfa_ioc_aen_post(ioc, BFA_IOC_AEN_FWMISMATCH);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001964}
1965
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001966bfa_status_t
1967bfa_ioc_pll_init(struct bfa_ioc_s *ioc)
1968{
1969
1970 /*
1971 * Hold semaphore so that nobody can access the chip during init.
1972 */
1973 bfa_ioc_sem_get(ioc->ioc_regs.ioc_init_sem_reg);
1974
1975 bfa_ioc_pll_init_asic(ioc);
1976
1977 ioc->pllinit = BFA_TRUE;
Krishna Gudipati89196782012-03-13 17:38:56 -07001978
1979 /*
1980 * Initialize LMEM
1981 */
1982 bfa_ioc_lmem_init(ioc);
1983
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001984 /*
1985 * release semaphore.
1986 */
Krishna Gudipati5a0adae2011-06-24 20:22:56 -07001987 readl(ioc->ioc_regs.ioc_init_sem_reg);
Maggie Zhangf7f738122010-12-09 19:08:43 -08001988 writel(1, ioc->ioc_regs.ioc_init_sem_reg);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001989
1990 return BFA_STATUS_OK;
1991}
Jing Huang7725ccf2009-09-23 17:46:15 -07001992
Jing Huang5fbe25c2010-10-18 17:17:23 -07001993/*
Jing Huang7725ccf2009-09-23 17:46:15 -07001994 * Interface used by diag module to do firmware boot with memory test
1995 * as the entry vector.
1996 */
1997void
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001998bfa_ioc_boot(struct bfa_ioc_s *ioc, u32 boot_type, u32 boot_env)
Jing Huang7725ccf2009-09-23 17:46:15 -07001999{
Jing Huang7725ccf2009-09-23 17:46:15 -07002000 bfa_ioc_stats(ioc, ioc_boots);
2001
2002 if (bfa_ioc_pll_init(ioc) != BFA_STATUS_OK)
2003 return;
2004
Jing Huang5fbe25c2010-10-18 17:17:23 -07002005 /*
Jing Huang7725ccf2009-09-23 17:46:15 -07002006 * Initialize IOC state of all functions on a chip reset.
2007 */
Krishna Gudipati11189202011-06-13 15:50:35 -07002008 if (boot_type == BFI_FWBOOT_TYPE_MEMTEST) {
2009 writel(BFI_IOC_MEMTEST, ioc->ioc_regs.ioc_fwstate);
2010 writel(BFI_IOC_MEMTEST, ioc->ioc_regs.alt_ioc_fwstate);
Jing Huang7725ccf2009-09-23 17:46:15 -07002011 } else {
Krishna Gudipati11189202011-06-13 15:50:35 -07002012 writel(BFI_IOC_INITING, ioc->ioc_regs.ioc_fwstate);
2013 writel(BFI_IOC_INITING, ioc->ioc_regs.alt_ioc_fwstate);
Jing Huang7725ccf2009-09-23 17:46:15 -07002014 }
2015
Jing Huang07b28382010-07-08 19:59:24 -07002016 bfa_ioc_msgflush(ioc);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002017 bfa_ioc_download_fw(ioc, boot_type, boot_env);
Jing Huang7725ccf2009-09-23 17:46:15 -07002018 bfa_ioc_lpu_start(ioc);
2019}
2020
Jing Huang5fbe25c2010-10-18 17:17:23 -07002021/*
Jing Huang7725ccf2009-09-23 17:46:15 -07002022 * Enable/disable IOC failure auto recovery.
2023 */
2024void
2025bfa_ioc_auto_recover(bfa_boolean_t auto_recover)
2026{
Krishna Gudipati2f9b8852010-03-03 17:42:51 -08002027 bfa_auto_recover = auto_recover;
Jing Huang7725ccf2009-09-23 17:46:15 -07002028}
2029
2030
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002031
Jing Huang7725ccf2009-09-23 17:46:15 -07002032bfa_boolean_t
2033bfa_ioc_is_operational(struct bfa_ioc_s *ioc)
2034{
2035 return bfa_fsm_cmp_state(ioc, bfa_ioc_sm_op);
2036}
2037
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002038bfa_boolean_t
2039bfa_ioc_is_initialized(struct bfa_ioc_s *ioc)
2040{
Jing Huang53440262010-10-18 17:12:29 -07002041 u32 r32 = readl(ioc->ioc_regs.ioc_fwstate);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002042
2043 return ((r32 != BFI_IOC_UNINIT) &&
2044 (r32 != BFI_IOC_INITING) &&
2045 (r32 != BFI_IOC_MEMTEST));
2046}
2047
Krishna Gudipati11189202011-06-13 15:50:35 -07002048bfa_boolean_t
Jing Huang7725ccf2009-09-23 17:46:15 -07002049bfa_ioc_msgget(struct bfa_ioc_s *ioc, void *mbmsg)
2050{
Maggie50444a32010-11-29 18:26:32 -08002051 __be32 *msgp = mbmsg;
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002052 u32 r32;
2053 int i;
Jing Huang7725ccf2009-09-23 17:46:15 -07002054
Krishna Gudipati11189202011-06-13 15:50:35 -07002055 r32 = readl(ioc->ioc_regs.lpu_mbox_cmd);
2056 if ((r32 & 1) == 0)
2057 return BFA_FALSE;
2058
Jing Huang5fbe25c2010-10-18 17:17:23 -07002059 /*
Jing Huang7725ccf2009-09-23 17:46:15 -07002060 * read the MBOX msg
2061 */
2062 for (i = 0; i < (sizeof(union bfi_ioc_i2h_msg_u) / sizeof(u32));
2063 i++) {
Jing Huang53440262010-10-18 17:12:29 -07002064 r32 = readl(ioc->ioc_regs.lpu_mbox +
Jing Huang7725ccf2009-09-23 17:46:15 -07002065 i * sizeof(u32));
Jing Huangba816ea2010-10-18 17:10:50 -07002066 msgp[i] = cpu_to_be32(r32);
Jing Huang7725ccf2009-09-23 17:46:15 -07002067 }
2068
Jing Huang5fbe25c2010-10-18 17:17:23 -07002069 /*
Jing Huang7725ccf2009-09-23 17:46:15 -07002070 * turn off mailbox interrupt by clearing mailbox status
2071 */
Jing Huang53440262010-10-18 17:12:29 -07002072 writel(1, ioc->ioc_regs.lpu_mbox_cmd);
2073 readl(ioc->ioc_regs.lpu_mbox_cmd);
Krishna Gudipati11189202011-06-13 15:50:35 -07002074
2075 return BFA_TRUE;
Jing Huang7725ccf2009-09-23 17:46:15 -07002076}
2077
2078void
2079bfa_ioc_isr(struct bfa_ioc_s *ioc, struct bfi_mbmsg_s *m)
2080{
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002081 union bfi_ioc_i2h_msg_u *msg;
2082 struct bfa_iocpf_s *iocpf = &ioc->iocpf;
Jing Huang7725ccf2009-09-23 17:46:15 -07002083
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002084 msg = (union bfi_ioc_i2h_msg_u *) m;
Jing Huang7725ccf2009-09-23 17:46:15 -07002085
2086 bfa_ioc_stats(ioc, ioc_isrs);
2087
2088 switch (msg->mh.msg_id) {
2089 case BFI_IOC_I2H_HBEAT:
2090 break;
2091
Jing Huang7725ccf2009-09-23 17:46:15 -07002092 case BFI_IOC_I2H_ENABLE_REPLY:
Krishna Gudipati1a4d8e12011-06-24 20:22:28 -07002093 ioc->port_mode = ioc->port_mode_cfg =
2094 (enum bfa_mode_s)msg->fw_event.port_mode;
2095 ioc->ad_cap_bm = msg->fw_event.cap_bm;
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002096 bfa_fsm_send_event(iocpf, IOCPF_E_FWRSP_ENABLE);
Jing Huang7725ccf2009-09-23 17:46:15 -07002097 break;
2098
2099 case BFI_IOC_I2H_DISABLE_REPLY:
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002100 bfa_fsm_send_event(iocpf, IOCPF_E_FWRSP_DISABLE);
Jing Huang7725ccf2009-09-23 17:46:15 -07002101 break;
2102
2103 case BFI_IOC_I2H_GETATTR_REPLY:
2104 bfa_ioc_getattr_reply(ioc);
2105 break;
2106
2107 default:
2108 bfa_trc(ioc, msg->mh.msg_id);
Jing Huangd4b671c2010-12-26 21:46:35 -08002109 WARN_ON(1);
Jing Huang7725ccf2009-09-23 17:46:15 -07002110 }
2111}
2112
Jing Huang5fbe25c2010-10-18 17:17:23 -07002113/*
Jing Huang7725ccf2009-09-23 17:46:15 -07002114 * IOC attach time initialization and setup.
2115 *
2116 * @param[in] ioc memory for IOC
2117 * @param[in] bfa driver instance structure
Jing Huang7725ccf2009-09-23 17:46:15 -07002118 */
2119void
2120bfa_ioc_attach(struct bfa_ioc_s *ioc, void *bfa, struct bfa_ioc_cbfn_s *cbfn,
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002121 struct bfa_timer_mod_s *timer_mod)
Jing Huang7725ccf2009-09-23 17:46:15 -07002122{
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002123 ioc->bfa = bfa;
2124 ioc->cbfn = cbfn;
2125 ioc->timer_mod = timer_mod;
2126 ioc->fcmode = BFA_FALSE;
2127 ioc->pllinit = BFA_FALSE;
Jing Huang7725ccf2009-09-23 17:46:15 -07002128 ioc->dbg_fwsave_once = BFA_TRUE;
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002129 ioc->iocpf.ioc = ioc;
Jing Huang7725ccf2009-09-23 17:46:15 -07002130
2131 bfa_ioc_mbox_attach(ioc);
Krishna Gudipatid37779f2011-06-13 15:42:10 -07002132 INIT_LIST_HEAD(&ioc->notify_q);
Jing Huang7725ccf2009-09-23 17:46:15 -07002133
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002134 bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
2135 bfa_fsm_send_event(ioc, IOC_E_RESET);
Jing Huang7725ccf2009-09-23 17:46:15 -07002136}
2137
Jing Huang5fbe25c2010-10-18 17:17:23 -07002138/*
Jing Huang7725ccf2009-09-23 17:46:15 -07002139 * Driver detach time IOC cleanup.
2140 */
2141void
2142bfa_ioc_detach(struct bfa_ioc_s *ioc)
2143{
2144 bfa_fsm_send_event(ioc, IOC_E_DETACH);
Krishna Gudipati3350d982011-06-24 20:28:37 -07002145 INIT_LIST_HEAD(&ioc->notify_q);
Jing Huang7725ccf2009-09-23 17:46:15 -07002146}
2147
Jing Huang5fbe25c2010-10-18 17:17:23 -07002148/*
Jing Huang7725ccf2009-09-23 17:46:15 -07002149 * Setup IOC PCI properties.
2150 *
2151 * @param[in] pcidev PCI device information for this IOC
2152 */
2153void
2154bfa_ioc_pci_init(struct bfa_ioc_s *ioc, struct bfa_pcidev_s *pcidev,
Krishna Gudipatid37779f2011-06-13 15:42:10 -07002155 enum bfi_pcifn_class clscode)
Jing Huang7725ccf2009-09-23 17:46:15 -07002156{
Krishna Gudipatid37779f2011-06-13 15:42:10 -07002157 ioc->clscode = clscode;
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002158 ioc->pcidev = *pcidev;
Krishna Gudipati11189202011-06-13 15:50:35 -07002159
2160 /*
2161 * Initialize IOC and device personality
2162 */
2163 ioc->port0_mode = ioc->port1_mode = BFI_PORT_MODE_FC;
2164 ioc->asic_mode = BFI_ASIC_MODE_FC;
2165
2166 switch (pcidev->device_id) {
2167 case BFA_PCI_DEVICE_ID_FC_8G1P:
2168 case BFA_PCI_DEVICE_ID_FC_8G2P:
2169 ioc->asic_gen = BFI_ASIC_GEN_CB;
Krishna Gudipati1a4d8e12011-06-24 20:22:28 -07002170 ioc->fcmode = BFA_TRUE;
2171 ioc->port_mode = ioc->port_mode_cfg = BFA_MODE_HBA;
2172 ioc->ad_cap_bm = BFA_CM_HBA;
Krishna Gudipati11189202011-06-13 15:50:35 -07002173 break;
2174
2175 case BFA_PCI_DEVICE_ID_CT:
2176 ioc->asic_gen = BFI_ASIC_GEN_CT;
2177 ioc->port0_mode = ioc->port1_mode = BFI_PORT_MODE_ETH;
2178 ioc->asic_mode = BFI_ASIC_MODE_ETH;
Krishna Gudipati1a4d8e12011-06-24 20:22:28 -07002179 ioc->port_mode = ioc->port_mode_cfg = BFA_MODE_CNA;
2180 ioc->ad_cap_bm = BFA_CM_CNA;
Krishna Gudipati11189202011-06-13 15:50:35 -07002181 break;
2182
2183 case BFA_PCI_DEVICE_ID_CT_FC:
2184 ioc->asic_gen = BFI_ASIC_GEN_CT;
Krishna Gudipati1a4d8e12011-06-24 20:22:28 -07002185 ioc->fcmode = BFA_TRUE;
2186 ioc->port_mode = ioc->port_mode_cfg = BFA_MODE_HBA;
2187 ioc->ad_cap_bm = BFA_CM_HBA;
Krishna Gudipati11189202011-06-13 15:50:35 -07002188 break;
2189
2190 case BFA_PCI_DEVICE_ID_CT2:
2191 ioc->asic_gen = BFI_ASIC_GEN_CT2;
Krishna Gudipati1a4d8e12011-06-24 20:22:28 -07002192 if (clscode == BFI_PCIFN_CLASS_FC &&
2193 pcidev->ssid == BFA_PCI_CT2_SSID_FC) {
Krishna Gudipati11189202011-06-13 15:50:35 -07002194 ioc->asic_mode = BFI_ASIC_MODE_FC16;
Krishna Gudipati1a4d8e12011-06-24 20:22:28 -07002195 ioc->fcmode = BFA_TRUE;
2196 ioc->port_mode = ioc->port_mode_cfg = BFA_MODE_HBA;
2197 ioc->ad_cap_bm = BFA_CM_HBA;
2198 } else {
Krishna Gudipati11189202011-06-13 15:50:35 -07002199 ioc->port0_mode = ioc->port1_mode = BFI_PORT_MODE_ETH;
Krishna Gudipati1a4d8e12011-06-24 20:22:28 -07002200 ioc->asic_mode = BFI_ASIC_MODE_ETH;
2201 if (pcidev->ssid == BFA_PCI_CT2_SSID_FCoE) {
2202 ioc->port_mode =
2203 ioc->port_mode_cfg = BFA_MODE_CNA;
2204 ioc->ad_cap_bm = BFA_CM_CNA;
2205 } else {
2206 ioc->port_mode =
2207 ioc->port_mode_cfg = BFA_MODE_NIC;
2208 ioc->ad_cap_bm = BFA_CM_NIC;
2209 }
Krishna Gudipati11189202011-06-13 15:50:35 -07002210 }
2211 break;
2212
2213 default:
2214 WARN_ON(1);
2215 }
Jing Huang7725ccf2009-09-23 17:46:15 -07002216
Jing Huang5fbe25c2010-10-18 17:17:23 -07002217 /*
Krishna Gudipati0a20de42010-03-05 19:34:20 -08002218 * Set asic specific interfaces. See bfa_ioc_cb.c and bfa_ioc_ct.c
2219 */
Krishna Gudipati11189202011-06-13 15:50:35 -07002220 if (ioc->asic_gen == BFI_ASIC_GEN_CB)
Krishna Gudipati0a20de42010-03-05 19:34:20 -08002221 bfa_ioc_set_cb_hwif(ioc);
Krishna Gudipati11189202011-06-13 15:50:35 -07002222 else if (ioc->asic_gen == BFI_ASIC_GEN_CT)
2223 bfa_ioc_set_ct_hwif(ioc);
2224 else {
2225 WARN_ON(ioc->asic_gen != BFI_ASIC_GEN_CT2);
2226 bfa_ioc_set_ct2_hwif(ioc);
2227 bfa_ioc_ct2_poweron(ioc);
2228 }
Krishna Gudipati0a20de42010-03-05 19:34:20 -08002229
Jing Huang7725ccf2009-09-23 17:46:15 -07002230 bfa_ioc_map_port(ioc);
2231 bfa_ioc_reg_init(ioc);
2232}
2233
Jing Huang5fbe25c2010-10-18 17:17:23 -07002234/*
Jing Huang7725ccf2009-09-23 17:46:15 -07002235 * Initialize IOC dma memory
2236 *
2237 * @param[in] dm_kva kernel virtual address of IOC dma memory
2238 * @param[in] dm_pa physical address of IOC dma memory
2239 */
2240void
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002241bfa_ioc_mem_claim(struct bfa_ioc_s *ioc, u8 *dm_kva, u64 dm_pa)
Jing Huang7725ccf2009-09-23 17:46:15 -07002242{
Jing Huang5fbe25c2010-10-18 17:17:23 -07002243 /*
Jing Huang7725ccf2009-09-23 17:46:15 -07002244 * dma memory for firmware attribute
2245 */
2246 ioc->attr_dma.kva = dm_kva;
2247 ioc->attr_dma.pa = dm_pa;
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002248 ioc->attr = (struct bfi_ioc_attr_s *) dm_kva;
Jing Huang7725ccf2009-09-23 17:46:15 -07002249}
2250
Jing Huang7725ccf2009-09-23 17:46:15 -07002251void
2252bfa_ioc_enable(struct bfa_ioc_s *ioc)
2253{
2254 bfa_ioc_stats(ioc, ioc_enables);
2255 ioc->dbg_fwsave_once = BFA_TRUE;
2256
2257 bfa_fsm_send_event(ioc, IOC_E_ENABLE);
2258}
2259
2260void
2261bfa_ioc_disable(struct bfa_ioc_s *ioc)
2262{
2263 bfa_ioc_stats(ioc, ioc_disables);
2264 bfa_fsm_send_event(ioc, IOC_E_DISABLE);
2265}
2266
Krishna Gudipati881c1b32012-08-22 19:52:02 -07002267void
2268bfa_ioc_suspend(struct bfa_ioc_s *ioc)
2269{
2270 ioc->dbg_fwsave_once = BFA_TRUE;
2271 bfa_fsm_send_event(ioc, IOC_E_HWERROR);
2272}
Jing Huang7725ccf2009-09-23 17:46:15 -07002273
Jing Huang5fbe25c2010-10-18 17:17:23 -07002274/*
Jing Huang7725ccf2009-09-23 17:46:15 -07002275	 * Initialize memory for saving firmware trace. The driver must initialize
2276	 * trace memory before calling bfa_ioc_enable().
2277 */
2278void
2279bfa_ioc_debug_memclaim(struct bfa_ioc_s *ioc, void *dbg_fwsave)
2280{
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002281 ioc->dbg_fwsave = dbg_fwsave;
Krishna Gudipati881c1b32012-08-22 19:52:02 -07002282 ioc->dbg_fwsave_len = BFA_DBG_FWTRC_LEN;
Jing Huang7725ccf2009-09-23 17:46:15 -07002283}
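/*
 * Usage sketch (assumed ordering): how a driver instance might bring an IOC
 * up with the APIs in this file. Error handling, locking and the real bfad
 * wiring are omitted, and the exact sequence in the driver proper may differ.
 */
static void
sketch_ioc_bringup(struct bfa_ioc_s *ioc, void *bfa,
		   struct bfa_ioc_cbfn_s *cbfn,
		   struct bfa_timer_mod_s *timer_mod,
		   struct bfa_pcidev_s *pcidev,
		   enum bfi_pcifn_class clscode,
		   u8 *dm_kva, u64 dm_pa, void *dbg_fwsave)
{
	bfa_ioc_attach(ioc, bfa, cbfn, timer_mod);	/* FSM reset, mbox attach */
	bfa_ioc_pci_init(ioc, pcidev, clscode);		/* pick ASIC specific hwif */
	bfa_ioc_mem_claim(ioc, dm_kva, dm_pa);		/* DMA area for attributes */
	bfa_ioc_debug_memclaim(ioc, dbg_fwsave);	/* buffer for saved fw trace */
	bfa_ioc_enable(ioc);				/* kicks off IOC_E_ENABLE */
}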
2284
Jing Huang5fbe25c2010-10-18 17:17:23 -07002285/*
Jing Huang7725ccf2009-09-23 17:46:15 -07002286 * Register mailbox message handler functions
2287 *
2288 * @param[in] ioc IOC instance
2289 * @param[in] mcfuncs message class handler functions
2290 */
2291void
2292bfa_ioc_mbox_register(struct bfa_ioc_s *ioc, bfa_ioc_mbox_mcfunc_t *mcfuncs)
2293{
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002294 struct bfa_ioc_mbox_mod_s *mod = &ioc->mbox_mod;
2295 int mc;
Jing Huang7725ccf2009-09-23 17:46:15 -07002296
2297 for (mc = 0; mc < BFI_MC_MAX; mc++)
2298 mod->mbhdlr[mc].cbfn = mcfuncs[mc];
2299}
2300
Jing Huang5fbe25c2010-10-18 17:17:23 -07002301/*
Jing Huang7725ccf2009-09-23 17:46:15 -07002302 * Register mailbox message handler function, to be called by common modules
2303 */
2304void
2305bfa_ioc_mbox_regisr(struct bfa_ioc_s *ioc, enum bfi_mclass mc,
2306 bfa_ioc_mbox_mcfunc_t cbfn, void *cbarg)
2307{
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002308 struct bfa_ioc_mbox_mod_s *mod = &ioc->mbox_mod;
Jing Huang7725ccf2009-09-23 17:46:15 -07002309
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002310 mod->mbhdlr[mc].cbfn = cbfn;
2311 mod->mbhdlr[mc].cbarg = cbarg;
Jing Huang7725ccf2009-09-23 17:46:15 -07002312}
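/*
 * Usage sketch: how a common module might hook its mailbox class with
 * bfa_ioc_mbox_regisr(). The handler body is illustrative, and BFI_MC_PORT
 * is assumed to be the class the caller owns.
 */
static void
sketch_port_isr(void *cbarg, struct bfi_mbmsg_s *m)
{
	/* decode *m and update the module state pointed to by cbarg */
}

static void
sketch_port_register(struct bfa_ioc_s *ioc, void *port)
{
	bfa_ioc_mbox_regisr(ioc, BFI_MC_PORT, sketch_port_isr, port);
}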
2313
Jing Huang5fbe25c2010-10-18 17:17:23 -07002314/*
Jing Huang7725ccf2009-09-23 17:46:15 -07002315	 * Queue a mailbox command request to firmware. The command is queued if
2316	 * the mailbox is busy. It is the caller's responsibility to serialize.
2317 *
2318 * @param[in] ioc IOC instance
2319	 * @param[in]	cmd	Mailbox command
2320 */
2321void
2322bfa_ioc_mbox_queue(struct bfa_ioc_s *ioc, struct bfa_mbox_cmd_s *cmd)
2323{
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002324 struct bfa_ioc_mbox_mod_s *mod = &ioc->mbox_mod;
2325 u32 stat;
Jing Huang7725ccf2009-09-23 17:46:15 -07002326
Jing Huang5fbe25c2010-10-18 17:17:23 -07002327 /*
Jing Huang7725ccf2009-09-23 17:46:15 -07002328 * If a previous command is pending, queue new command
2329 */
2330 if (!list_empty(&mod->cmd_q)) {
2331 list_add_tail(&cmd->qe, &mod->cmd_q);
2332 return;
2333 }
2334
Jing Huang5fbe25c2010-10-18 17:17:23 -07002335 /*
Jing Huang7725ccf2009-09-23 17:46:15 -07002336 * If mailbox is busy, queue command for poll timer
2337 */
Jing Huang53440262010-10-18 17:12:29 -07002338 stat = readl(ioc->ioc_regs.hfn_mbox_cmd);
Jing Huang7725ccf2009-09-23 17:46:15 -07002339 if (stat) {
2340 list_add_tail(&cmd->qe, &mod->cmd_q);
2341 return;
2342 }
2343
Jing Huang5fbe25c2010-10-18 17:17:23 -07002344 /*
Jing Huang7725ccf2009-09-23 17:46:15 -07002345 * mailbox is free -- queue command to firmware
2346 */
2347 bfa_ioc_mbox_send(ioc, cmd->msg, sizeof(cmd->msg));
2348}
2349
Jing Huang5fbe25c2010-10-18 17:17:23 -07002350/*
Jing Huang7725ccf2009-09-23 17:46:15 -07002351 * Handle mailbox interrupts
2352 */
2353void
2354bfa_ioc_mbox_isr(struct bfa_ioc_s *ioc)
2355{
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002356 struct bfa_ioc_mbox_mod_s *mod = &ioc->mbox_mod;
2357 struct bfi_mbmsg_s m;
2358 int mc;
Jing Huang7725ccf2009-09-23 17:46:15 -07002359
Krishna Gudipati8b070b42011-06-13 15:52:40 -07002360 if (bfa_ioc_msgget(ioc, &m)) {
2361 /*
2362 * Treat IOC message class as special.
2363 */
2364 mc = m.mh.msg_class;
2365 if (mc == BFI_MC_IOC) {
2366 bfa_ioc_isr(ioc, &m);
2367 return;
2368 }
Jing Huang7725ccf2009-09-23 17:46:15 -07002369
Dan Carpenterfffa6922012-06-27 11:59:36 +03002370 if ((mc >= BFI_MC_MAX) || (mod->mbhdlr[mc].cbfn == NULL))
Krishna Gudipati8b070b42011-06-13 15:52:40 -07002371 return;
2372
2373 mod->mbhdlr[mc].cbfn(mod->mbhdlr[mc].cbarg, &m);
Jing Huang7725ccf2009-09-23 17:46:15 -07002374 }
2375
Krishna Gudipati8b070b42011-06-13 15:52:40 -07002376 bfa_ioc_lpu_read_stat(ioc);
Jing Huang7725ccf2009-09-23 17:46:15 -07002377
Krishna Gudipati8b070b42011-06-13 15:52:40 -07002378 /*
2379 * Try to send pending mailbox commands
2380 */
2381 bfa_ioc_mbox_poll(ioc);
Jing Huang7725ccf2009-09-23 17:46:15 -07002382}
2383
2384void
2385bfa_ioc_error_isr(struct bfa_ioc_s *ioc)
2386{
Krishna Gudipati5a0adae2011-06-24 20:22:56 -07002387 bfa_ioc_stats(ioc, ioc_hbfails);
2388 ioc->stats.hb_count = ioc->hb_count;
Jing Huang7725ccf2009-09-23 17:46:15 -07002389 bfa_fsm_send_event(ioc, IOC_E_HWERROR);
2390}
2391
Jing Huang5fbe25c2010-10-18 17:17:23 -07002392/*
Jing Huang7725ccf2009-09-23 17:46:15 -07002393 * return true if IOC is disabled
2394 */
2395bfa_boolean_t
2396bfa_ioc_is_disabled(struct bfa_ioc_s *ioc)
2397{
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002398 return bfa_fsm_cmp_state(ioc, bfa_ioc_sm_disabling) ||
2399 bfa_fsm_cmp_state(ioc, bfa_ioc_sm_disabled);
Jing Huang7725ccf2009-09-23 17:46:15 -07002400}
2401
Jing Huang5fbe25c2010-10-18 17:17:23 -07002402/*
Jing Huang7725ccf2009-09-23 17:46:15 -07002403 * return true if IOC firmware is different.
2404 */
2405bfa_boolean_t
2406bfa_ioc_fw_mismatch(struct bfa_ioc_s *ioc)
2407{
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002408 return bfa_fsm_cmp_state(ioc, bfa_ioc_sm_reset) ||
2409 bfa_fsm_cmp_state(&ioc->iocpf, bfa_iocpf_sm_fwcheck) ||
2410 bfa_fsm_cmp_state(&ioc->iocpf, bfa_iocpf_sm_mismatch);
Jing Huang7725ccf2009-09-23 17:46:15 -07002411}
2412
2413#define bfa_ioc_state_disabled(__sm) \
2414 (((__sm) == BFI_IOC_UNINIT) || \
2415 ((__sm) == BFI_IOC_INITING) || \
2416 ((__sm) == BFI_IOC_HWINIT) || \
2417 ((__sm) == BFI_IOC_DISABLED) || \
Krishna Gudipati0a20de42010-03-05 19:34:20 -08002418 ((__sm) == BFI_IOC_FAIL) || \
Jing Huang7725ccf2009-09-23 17:46:15 -07002419 ((__sm) == BFI_IOC_CFG_DISABLED))
2420
Jing Huang5fbe25c2010-10-18 17:17:23 -07002421/*
Jing Huang7725ccf2009-09-23 17:46:15 -07002422 * Check if adapter is disabled -- both IOCs should be in a disabled
2423 * state.
2424 */
2425bfa_boolean_t
2426bfa_ioc_adapter_is_disabled(struct bfa_ioc_s *ioc)
2427{
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002428 u32 ioc_state;
Jing Huang7725ccf2009-09-23 17:46:15 -07002429
2430 if (!bfa_fsm_cmp_state(ioc, bfa_ioc_sm_disabled))
2431 return BFA_FALSE;
2432
Krishna Gudipati11189202011-06-13 15:50:35 -07002433 ioc_state = readl(ioc->ioc_regs.ioc_fwstate);
Jing Huang7725ccf2009-09-23 17:46:15 -07002434 if (!bfa_ioc_state_disabled(ioc_state))
2435 return BFA_FALSE;
2436
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002437 if (ioc->pcidev.device_id != BFA_PCI_DEVICE_ID_FC_8G1P) {
Krishna Gudipati11189202011-06-13 15:50:35 -07002438 ioc_state = readl(ioc->ioc_regs.alt_ioc_fwstate);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002439 if (!bfa_ioc_state_disabled(ioc_state))
2440 return BFA_FALSE;
2441 }
Jing Huang7725ccf2009-09-23 17:46:15 -07002442
2443 return BFA_TRUE;
2444}
2445
Jing Huang8f4bfad2010-12-26 21:50:10 -08002446/*
Krishna Gudipatif1d584d2010-12-13 16:17:11 -08002447 * Reset IOC fwstate registers.
2448 */
2449void
2450bfa_ioc_reset_fwstate(struct bfa_ioc_s *ioc)
2451{
2452 writel(BFI_IOC_UNINIT, ioc->ioc_regs.ioc_fwstate);
2453 writel(BFI_IOC_UNINIT, ioc->ioc_regs.alt_ioc_fwstate);
2454}
2455
Jing Huang7725ccf2009-09-23 17:46:15 -07002456#define BFA_MFG_NAME "Brocade"
2457void
2458bfa_ioc_get_adapter_attr(struct bfa_ioc_s *ioc,
2459 struct bfa_adapter_attr_s *ad_attr)
2460{
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002461 struct bfi_ioc_attr_s *ioc_attr;
Jing Huang7725ccf2009-09-23 17:46:15 -07002462
2463 ioc_attr = ioc->attr;
Jing Huang7725ccf2009-09-23 17:46:15 -07002464
Krishna Gudipati0a4b1fc2010-03-05 19:37:57 -08002465 bfa_ioc_get_adapter_serial_num(ioc, ad_attr->serial_num);
2466 bfa_ioc_get_adapter_fw_ver(ioc, ad_attr->fw_ver);
2467 bfa_ioc_get_adapter_optrom_ver(ioc, ad_attr->optrom_ver);
2468 bfa_ioc_get_adapter_manufacturer(ioc, ad_attr->manufacturer);
Jing Huang6a18b162010-10-18 17:08:54 -07002469 memcpy(&ad_attr->vpd, &ioc_attr->vpd,
Jing Huang7725ccf2009-09-23 17:46:15 -07002470 sizeof(struct bfa_mfg_vpd_s));
2471
Krishna Gudipati0a4b1fc2010-03-05 19:37:57 -08002472 ad_attr->nports = bfa_ioc_get_nports(ioc);
2473 ad_attr->max_speed = bfa_ioc_speed_sup(ioc);
Jing Huang7725ccf2009-09-23 17:46:15 -07002474
Krishna Gudipati0a4b1fc2010-03-05 19:37:57 -08002475 bfa_ioc_get_adapter_model(ioc, ad_attr->model);
2476 /* For now, model descr uses same model string */
2477 bfa_ioc_get_adapter_model(ioc, ad_attr->model_descr);
Jing Huang7725ccf2009-09-23 17:46:15 -07002478
Jing Huanged969322010-07-08 19:45:56 -07002479 ad_attr->card_type = ioc_attr->card_type;
2480 ad_attr->is_mezz = bfa_mfg_is_mezz(ioc_attr->card_type);
2481
Jing Huang7725ccf2009-09-23 17:46:15 -07002482 if (BFI_ADAPTER_IS_SPECIAL(ioc_attr->adapter_prop))
2483 ad_attr->prototype = 1;
2484 else
2485 ad_attr->prototype = 0;
2486
Maggie Zhangf7f738122010-12-09 19:08:43 -08002487 ad_attr->pwwn = ioc->attr->pwwn;
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002488 ad_attr->mac = bfa_ioc_get_mac(ioc);
Jing Huang7725ccf2009-09-23 17:46:15 -07002489
2490 ad_attr->pcie_gen = ioc_attr->pcie_gen;
2491 ad_attr->pcie_lanes = ioc_attr->pcie_lanes;
2492 ad_attr->pcie_lanes_orig = ioc_attr->pcie_lanes_orig;
2493 ad_attr->asic_rev = ioc_attr->asic_rev;
Krishna Gudipati0a4b1fc2010-03-05 19:37:57 -08002494
2495 bfa_ioc_get_pci_chip_rev(ioc, ad_attr->hw_ver);
Jing Huang7725ccf2009-09-23 17:46:15 -07002496
Krishna Gudipati11189202011-06-13 15:50:35 -07002497 ad_attr->cna_capable = bfa_ioc_is_cna(ioc);
2498 ad_attr->trunk_capable = (ad_attr->nports > 1) &&
2499 !bfa_ioc_is_cna(ioc) && !ad_attr->is_mezz;
Jing Huang7725ccf2009-09-23 17:46:15 -07002500}
2501
Krishna Gudipati2993cc72010-03-05 19:36:47 -08002502enum bfa_ioc_type_e
2503bfa_ioc_get_type(struct bfa_ioc_s *ioc)
2504{
Krishna Gudipati11189202011-06-13 15:50:35 -07002505 if (ioc->clscode == BFI_PCIFN_CLASS_ETH)
Krishna Gudipati2993cc72010-03-05 19:36:47 -08002506 return BFA_IOC_TYPE_LL;
Krishna Gudipati11189202011-06-13 15:50:35 -07002507
2508 WARN_ON(ioc->clscode != BFI_PCIFN_CLASS_FC);
2509
Krishna Gudipati5a0adae2011-06-24 20:22:56 -07002510 return (ioc->attr->port_mode == BFI_PORT_MODE_FC)
Krishna Gudipati11189202011-06-13 15:50:35 -07002511 ? BFA_IOC_TYPE_FC : BFA_IOC_TYPE_FCoE;
Krishna Gudipati2993cc72010-03-05 19:36:47 -08002512}
2513
Jing Huang7725ccf2009-09-23 17:46:15 -07002514void
Krishna Gudipati0a4b1fc2010-03-05 19:37:57 -08002515bfa_ioc_get_adapter_serial_num(struct bfa_ioc_s *ioc, char *serial_num)
2516{
Jing Huang6a18b162010-10-18 17:08:54 -07002517 memset((void *)serial_num, 0, BFA_ADAPTER_SERIAL_NUM_LEN);
2518 memcpy((void *)serial_num,
Krishna Gudipati0a4b1fc2010-03-05 19:37:57 -08002519 (void *)ioc->attr->brcd_serialnum,
2520 BFA_ADAPTER_SERIAL_NUM_LEN);
2521}
2522
2523void
2524bfa_ioc_get_adapter_fw_ver(struct bfa_ioc_s *ioc, char *fw_ver)
2525{
Jing Huang6a18b162010-10-18 17:08:54 -07002526 memset((void *)fw_ver, 0, BFA_VERSION_LEN);
2527 memcpy(fw_ver, ioc->attr->fw_version, BFA_VERSION_LEN);
Krishna Gudipati0a4b1fc2010-03-05 19:37:57 -08002528}
2529
2530void
2531bfa_ioc_get_pci_chip_rev(struct bfa_ioc_s *ioc, char *chip_rev)
2532{
Jing Huangd4b671c2010-12-26 21:46:35 -08002533 WARN_ON(!chip_rev);
Krishna Gudipati0a4b1fc2010-03-05 19:37:57 -08002534
Jing Huang6a18b162010-10-18 17:08:54 -07002535 memset((void *)chip_rev, 0, BFA_IOC_CHIP_REV_LEN);
Krishna Gudipati0a4b1fc2010-03-05 19:37:57 -08002536
2537 chip_rev[0] = 'R';
2538 chip_rev[1] = 'e';
2539 chip_rev[2] = 'v';
2540 chip_rev[3] = '-';
2541 chip_rev[4] = ioc->attr->asic_rev;
2542 chip_rev[5] = '\0';
2543}
2544
2545void
2546bfa_ioc_get_adapter_optrom_ver(struct bfa_ioc_s *ioc, char *optrom_ver)
2547{
Jing Huang6a18b162010-10-18 17:08:54 -07002548 memset((void *)optrom_ver, 0, BFA_VERSION_LEN);
2549 memcpy(optrom_ver, ioc->attr->optrom_version,
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002550 BFA_VERSION_LEN);
Krishna Gudipati0a4b1fc2010-03-05 19:37:57 -08002551}
2552
2553void
2554bfa_ioc_get_adapter_manufacturer(struct bfa_ioc_s *ioc, char *manufacturer)
2555{
Jing Huang6a18b162010-10-18 17:08:54 -07002556 memset((void *)manufacturer, 0, BFA_ADAPTER_MFG_NAME_LEN);
2557 memcpy(manufacturer, BFA_MFG_NAME, BFA_ADAPTER_MFG_NAME_LEN);
Krishna Gudipati0a4b1fc2010-03-05 19:37:57 -08002558}
2559
2560void
2561bfa_ioc_get_adapter_model(struct bfa_ioc_s *ioc, char *model)
2562{
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002563 struct bfi_ioc_attr_s *ioc_attr;
Krishna Gudipati0a4b1fc2010-03-05 19:37:57 -08002564
Jing Huangd4b671c2010-12-26 21:46:35 -08002565 WARN_ON(!model);
Jing Huang6a18b162010-10-18 17:08:54 -07002566 memset((void *)model, 0, BFA_ADAPTER_MODEL_NAME_LEN);
Krishna Gudipati0a4b1fc2010-03-05 19:37:57 -08002567
2568 ioc_attr = ioc->attr;
2569
Krishna Gudipati10a07372011-06-24 20:23:38 -07002570 snprintf(model, BFA_ADAPTER_MODEL_NAME_LEN, "%s-%u",
Krishna Gudipati8b070b42011-06-13 15:52:40 -07002571 BFA_MFG_NAME, ioc_attr->card_type);
Krishna Gudipati0a4b1fc2010-03-05 19:37:57 -08002572}
2573
2574enum bfa_ioc_state
2575bfa_ioc_get_state(struct bfa_ioc_s *ioc)
2576{
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002577 enum bfa_iocpf_state iocpf_st;
2578 enum bfa_ioc_state ioc_st = bfa_sm_to_state(ioc_sm_table, ioc->fsm);
2579
2580 if (ioc_st == BFA_IOC_ENABLING ||
2581 ioc_st == BFA_IOC_FAIL || ioc_st == BFA_IOC_INITFAIL) {
2582
2583 iocpf_st = bfa_sm_to_state(iocpf_sm_table, ioc->iocpf.fsm);
2584
2585 switch (iocpf_st) {
2586 case BFA_IOCPF_SEMWAIT:
2587 ioc_st = BFA_IOC_SEMWAIT;
2588 break;
2589
2590 case BFA_IOCPF_HWINIT:
2591 ioc_st = BFA_IOC_HWINIT;
2592 break;
2593
2594 case BFA_IOCPF_FWMISMATCH:
2595 ioc_st = BFA_IOC_FWMISMATCH;
2596 break;
2597
2598 case BFA_IOCPF_FAIL:
2599 ioc_st = BFA_IOC_FAIL;
2600 break;
2601
2602 case BFA_IOCPF_INITFAIL:
2603 ioc_st = BFA_IOC_INITFAIL;
2604 break;
2605
2606 default:
2607 break;
2608 }
2609 }
2610
2611 return ioc_st;
Krishna Gudipati0a4b1fc2010-03-05 19:37:57 -08002612}
2613
2614void
Jing Huang7725ccf2009-09-23 17:46:15 -07002615bfa_ioc_get_attr(struct bfa_ioc_s *ioc, struct bfa_ioc_attr_s *ioc_attr)
2616{
Jing Huang6a18b162010-10-18 17:08:54 -07002617 memset((void *)ioc_attr, 0, sizeof(struct bfa_ioc_attr_s));
Jing Huang7725ccf2009-09-23 17:46:15 -07002618
Krishna Gudipati0a4b1fc2010-03-05 19:37:57 -08002619 ioc_attr->state = bfa_ioc_get_state(ioc);
Jing Huang7725ccf2009-09-23 17:46:15 -07002620 ioc_attr->port_id = ioc->port_id;
Krishna Gudipati1a4d8e12011-06-24 20:22:28 -07002621 ioc_attr->port_mode = ioc->port_mode;
2622 ioc_attr->port_mode_cfg = ioc->port_mode_cfg;
2623 ioc_attr->cap_bm = ioc->ad_cap_bm;
Jing Huang7725ccf2009-09-23 17:46:15 -07002624
Krishna Gudipati2993cc72010-03-05 19:36:47 -08002625 ioc_attr->ioc_type = bfa_ioc_get_type(ioc);
Jing Huang7725ccf2009-09-23 17:46:15 -07002626
2627 bfa_ioc_get_adapter_attr(ioc, &ioc_attr->adapter_attr);
2628
2629 ioc_attr->pci_attr.device_id = ioc->pcidev.device_id;
2630 ioc_attr->pci_attr.pcifn = ioc->pcidev.pci_func;
Krishna Gudipati0a4b1fc2010-03-05 19:37:57 -08002631 bfa_ioc_get_pci_chip_rev(ioc, ioc_attr->pci_attr.chip_rev);
Jing Huang7725ccf2009-09-23 17:46:15 -07002632}
2633
Jing Huang7725ccf2009-09-23 17:46:15 -07002634mac_t
2635bfa_ioc_get_mac(struct bfa_ioc_s *ioc)
2636{
Jing Huang15b64a82010-07-08 19:48:12 -07002637 /*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002638 * Check the IOC type and return the appropriate MAC
Jing Huang15b64a82010-07-08 19:48:12 -07002639 */
2640 if (bfa_ioc_get_type(ioc) == BFA_IOC_TYPE_FCoE)
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002641 return ioc->attr->fcoe_mac;
Jing Huang15b64a82010-07-08 19:48:12 -07002642 else
2643 return ioc->attr->mac;
2644}
2645
Jing Huang15b64a82010-07-08 19:48:12 -07002646mac_t
2647bfa_ioc_get_mfg_mac(struct bfa_ioc_s *ioc)
2648{
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002649 mac_t m;
Jing Huang7725ccf2009-09-23 17:46:15 -07002650
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002651 m = ioc->attr->mfg_mac;
2652 if (bfa_mfg_is_old_wwn_mac_model(ioc->attr->card_type))
2653 m.mac[MAC_ADDRLEN - 1] += bfa_ioc_pcifn(ioc);
2654 else
2655 bfa_mfg_increment_wwn_mac(&(m.mac[MAC_ADDRLEN-3]),
2656 bfa_ioc_pcifn(ioc));
Jing Huang7725ccf2009-09-23 17:46:15 -07002657
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002658 return m;
Jing Huang7725ccf2009-09-23 17:46:15 -07002659}
2660
Jing Huang5fbe25c2010-10-18 17:17:23 -07002661/*
Krishna Gudipati7826f302011-07-20 16:59:13 -07002662 * Send AEN notification
2663 */
2664void
2665bfa_ioc_aen_post(struct bfa_ioc_s *ioc, enum bfa_ioc_aen_event event)
2666{
2667 struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad;
2668 struct bfa_aen_entry_s *aen_entry;
2669 enum bfa_ioc_type_e ioc_type;
2670
2671 bfad_get_aen_entry(bfad, aen_entry);
2672 if (!aen_entry)
2673 return;
2674
2675 ioc_type = bfa_ioc_get_type(ioc);
2676 switch (ioc_type) {
2677 case BFA_IOC_TYPE_FC:
2678 aen_entry->aen_data.ioc.pwwn = ioc->attr->pwwn;
2679 break;
2680 case BFA_IOC_TYPE_FCoE:
2681 aen_entry->aen_data.ioc.pwwn = ioc->attr->pwwn;
2682 aen_entry->aen_data.ioc.mac = bfa_ioc_get_mac(ioc);
2683 break;
2684 case BFA_IOC_TYPE_LL:
2685 aen_entry->aen_data.ioc.mac = bfa_ioc_get_mac(ioc);
2686 break;
2687 default:
2688 WARN_ON(ioc_type != BFA_IOC_TYPE_FC);
2689 break;
2690 }
2691
2692 /* Send the AEN notification */
2693 aen_entry->aen_data.ioc.ioc_type = ioc_type;
2694 bfad_im_post_vendor_event(aen_entry, bfad, ++ioc->ioc_aen_seq,
2695 BFA_AEN_CAT_IOC, event);
2696}
2697
2698/*
Jing Huang7725ccf2009-09-23 17:46:15 -07002699 * Retrieve saved firmware trace from a prior IOC failure.
2700 */
2701bfa_status_t
2702bfa_ioc_debug_fwsave(struct bfa_ioc_s *ioc, void *trcdata, int *trclen)
2703{
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002704 int tlen;
Jing Huang7725ccf2009-09-23 17:46:15 -07002705
2706 if (ioc->dbg_fwsave_len == 0)
2707 return BFA_STATUS_ENOFSAVE;
2708
2709 tlen = *trclen;
2710 if (tlen > ioc->dbg_fwsave_len)
2711 tlen = ioc->dbg_fwsave_len;
2712
Jing Huang6a18b162010-10-18 17:08:54 -07002713 memcpy(trcdata, ioc->dbg_fwsave, tlen);
Jing Huang7725ccf2009-09-23 17:46:15 -07002714 *trclen = tlen;
2715 return BFA_STATUS_OK;
2716}
2717
Krishna Gudipati738c9e62010-03-05 19:36:19 -08002718
Jing Huang5fbe25c2010-10-18 17:17:23 -07002719/*
Jing Huang7725ccf2009-09-23 17:46:15 -07002720	 * Retrieve the current firmware trace (fwtrc) from IOC SMEM.
2721 */
2722bfa_status_t
2723bfa_ioc_debug_fwtrc(struct bfa_ioc_s *ioc, void *trcdata, int *trclen)
2724{
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002725 u32 loff = BFA_DBG_FWTRC_OFF(bfa_ioc_portid(ioc));
2726 int tlen;
2727 bfa_status_t status;
Jing Huang7725ccf2009-09-23 17:46:15 -07002728
2729 bfa_trc(ioc, *trclen);
2730
Jing Huang7725ccf2009-09-23 17:46:15 -07002731 tlen = *trclen;
2732 if (tlen > BFA_DBG_FWTRC_LEN)
2733 tlen = BFA_DBG_FWTRC_LEN;
Jing Huang7725ccf2009-09-23 17:46:15 -07002734
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002735 status = bfa_ioc_smem_read(ioc, trcdata, loff, tlen);
2736 *trclen = tlen;
2737 return status;
2738}
Jing Huang7725ccf2009-09-23 17:46:15 -07002739
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002740static void
2741bfa_ioc_send_fwsync(struct bfa_ioc_s *ioc)
2742{
2743 struct bfa_mbox_cmd_s cmd;
2744 struct bfi_ioc_ctrl_req_s *req = (struct bfi_ioc_ctrl_req_s *) cmd.msg;
Jing Huang7725ccf2009-09-23 17:46:15 -07002745
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002746 bfi_h2i_set(req->mh, BFI_MC_IOC, BFI_IOC_H2I_DBG_SYNC,
2747 bfa_ioc_portid(ioc));
Krishna Gudipatid37779f2011-06-13 15:42:10 -07002748 req->clscode = cpu_to_be16(ioc->clscode);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002749 bfa_ioc_mbox_queue(ioc, &cmd);
2750}
Krishna Gudipati0a20de42010-03-05 19:34:20 -08002751
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002752static void
2753bfa_ioc_fwsync(struct bfa_ioc_s *ioc)
2754{
2755 u32 fwsync_iter = 1000;
2756
2757 bfa_ioc_send_fwsync(ioc);
2758
Jing Huang5fbe25c2010-10-18 17:17:23 -07002759 /*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002760	 * After sending a fw sync mbox command, wait for it to
2761 * take effect. We will not wait for a response because
2762 * 1. fw_sync mbox cmd doesn't have a response.
2763 * 2. Even if we implement that, interrupts might not
2764 * be enabled when we call this function.
2765 * So, just keep checking if any mbox cmd is pending, and
2766 * after waiting for a reasonable amount of time, go ahead.
2767 * It is possible that fw has crashed and the mbox command
2768 * is never acknowledged.
Krishna Gudipati0a20de42010-03-05 19:34:20 -08002769 */
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002770 while (bfa_ioc_mbox_cmd_pending(ioc) && fwsync_iter > 0)
2771 fwsync_iter--;
2772}
Krishna Gudipati0a20de42010-03-05 19:34:20 -08002773
Jing Huang5fbe25c2010-10-18 17:17:23 -07002774/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002775 * Dump firmware smem
2776 */
2777bfa_status_t
2778bfa_ioc_debug_fwcore(struct bfa_ioc_s *ioc, void *buf,
2779 u32 *offset, int *buflen)
2780{
2781 u32 loff;
2782 int dlen;
2783 bfa_status_t status;
2784 u32 smem_len = BFA_IOC_FW_SMEM_SIZE(ioc);
Jing Huang7725ccf2009-09-23 17:46:15 -07002785
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002786 if (*offset >= smem_len) {
2787 *offset = *buflen = 0;
2788 return BFA_STATUS_EINVAL;
2789 }
2790
2791 loff = *offset;
2792 dlen = *buflen;
2793
Jing Huang5fbe25c2010-10-18 17:17:23 -07002794 /*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002795 * First smem read, sync smem before proceeding
2796 * No need to sync before reading every chunk.
2797 */
2798 if (loff == 0)
2799 bfa_ioc_fwsync(ioc);
2800
2801 if ((loff + dlen) >= smem_len)
2802 dlen = smem_len - loff;
2803
2804 status = bfa_ioc_smem_read(ioc, buf, loff, dlen);
2805
2806 if (status != BFA_STATUS_OK) {
2807 *offset = *buflen = 0;
2808 return status;
2809 }
2810
2811 *offset += dlen;
2812
2813 if (*offset >= smem_len)
2814 *offset = 0;
2815
2816 *buflen = dlen;
2817
2818 return status;
2819}
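/*
 * Illustrative sketch only (excluded from the build): how a caller might
 * walk the whole firmware smem region in chunks with bfa_ioc_debug_fwcore().
 * The chunk buffer, its size and the copy-out callback are hypothetical;
 * the offset/buflen in-out semantics follow the function above.
 */
#if 0
static bfa_status_t
example_dump_fwcore(struct bfa_ioc_s *ioc, u8 *chunk, int chunk_sz,
		    void (*copy_out)(void *arg, u8 *buf, int len), void *arg)
{
	u32 off = 0;
	int len;
	bfa_status_t status;

	do {
		len = chunk_sz;
		status = bfa_ioc_debug_fwcore(ioc, chunk, &off, &len);
		if (status != BFA_STATUS_OK)
			return status;
		copy_out(arg, chunk, len);	/* len bytes are valid */
	} while (off != 0);	/* offset wraps back to 0 at end of smem */

	return BFA_STATUS_OK;
}
#endif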
2820
Jing Huang5fbe25c2010-10-18 17:17:23 -07002821/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002822 * Firmware statistics
2823 */
2824bfa_status_t
2825bfa_ioc_fw_stats_get(struct bfa_ioc_s *ioc, void *stats)
2826{
2827 u32 loff = BFI_IOC_FWSTATS_OFF + \
2828 BFI_IOC_FWSTATS_SZ * (bfa_ioc_portid(ioc));
2829 int tlen;
2830 bfa_status_t status;
2831
2832 if (ioc->stats_busy) {
2833 bfa_trc(ioc, ioc->stats_busy);
2834 return BFA_STATUS_DEVBUSY;
2835 }
2836 ioc->stats_busy = BFA_TRUE;
2837
2838 tlen = sizeof(struct bfa_fw_stats_s);
2839 status = bfa_ioc_smem_read(ioc, stats, loff, tlen);
2840
2841 ioc->stats_busy = BFA_FALSE;
2842 return status;
2843}
2844
2845bfa_status_t
2846bfa_ioc_fw_stats_clear(struct bfa_ioc_s *ioc)
2847{
2848 u32 loff = BFI_IOC_FWSTATS_OFF + \
2849 BFI_IOC_FWSTATS_SZ * (bfa_ioc_portid(ioc));
2850 int tlen;
2851 bfa_status_t status;
2852
2853 if (ioc->stats_busy) {
2854 bfa_trc(ioc, ioc->stats_busy);
2855 return BFA_STATUS_DEVBUSY;
2856 }
2857 ioc->stats_busy = BFA_TRUE;
2858
2859 tlen = sizeof(struct bfa_fw_stats_s);
2860 status = bfa_ioc_smem_clr(ioc, loff, tlen);
2861
2862 ioc->stats_busy = BFA_FALSE;
2863 return status;
Jing Huang7725ccf2009-09-23 17:46:15 -07002864}
2865
Jing Huang5fbe25c2010-10-18 17:17:23 -07002866/*
Jing Huang7725ccf2009-09-23 17:46:15 -07002867 * Save firmware trace if configured.
2868 */
Krishna Gudipati881c1b32012-08-22 19:52:02 -07002869void
Krishna Gudipati4e78efe2010-12-13 16:16:09 -08002870bfa_ioc_debug_save_ftrc(struct bfa_ioc_s *ioc)
Jing Huang7725ccf2009-09-23 17:46:15 -07002871{
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002872 int tlen;
Jing Huang7725ccf2009-09-23 17:46:15 -07002873
Krishna Gudipati4e78efe2010-12-13 16:16:09 -08002874 if (ioc->dbg_fwsave_once) {
2875 ioc->dbg_fwsave_once = BFA_FALSE;
2876 if (ioc->dbg_fwsave_len) {
2877 tlen = ioc->dbg_fwsave_len;
2878 bfa_ioc_debug_fwtrc(ioc, ioc->dbg_fwsave, &tlen);
2879 }
Jing Huang7725ccf2009-09-23 17:46:15 -07002880 }
2881}
2882
Jing Huang5fbe25c2010-10-18 17:17:23 -07002883/*
Jing Huang7725ccf2009-09-23 17:46:15 -07002884 * Firmware failure detected. Start recovery actions.
2885 */
2886static void
2887bfa_ioc_recover(struct bfa_ioc_s *ioc)
2888{
Jing Huang7725ccf2009-09-23 17:46:15 -07002889 bfa_ioc_stats(ioc, ioc_hbfails);
Krishna Gudipati5a0adae2011-06-24 20:22:56 -07002890 ioc->stats.hb_count = ioc->hb_count;
Jing Huang7725ccf2009-09-23 17:46:15 -07002891 bfa_fsm_send_event(ioc, IOC_E_HBFAIL);
2892}
2893
Jing Huang5fbe25c2010-10-18 17:17:23 -07002894/*
Maggie Zhangdf0f1932010-12-09 19:07:46 -08002895 * BFA IOC PF private functions
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002896 */
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002897static void
2898bfa_iocpf_timeout(void *ioc_arg)
2899{
2900 struct bfa_ioc_s *ioc = (struct bfa_ioc_s *) ioc_arg;
2901
2902 bfa_trc(ioc, 0);
2903 bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_TIMEOUT);
2904}
2905
2906static void
2907bfa_iocpf_sem_timeout(void *ioc_arg)
2908{
2909 struct bfa_ioc_s *ioc = (struct bfa_ioc_s *) ioc_arg;
2910
2911 bfa_ioc_hw_sem_get(ioc);
2912}
2913
Krishna Gudipati775c7742011-06-13 15:52:12 -07002914static void
2915bfa_ioc_poll_fwinit(struct bfa_ioc_s *ioc)
2916{
2917 u32 fwstate = readl(ioc->ioc_regs.ioc_fwstate);
2918
2919 bfa_trc(ioc, fwstate);
2920
2921 if (fwstate == BFI_IOC_DISABLED) {
2922 bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_FWREADY);
2923 return;
2924 }
2925
2926 if (ioc->iocpf.poll_time >= BFA_IOC_TOV)
2927 bfa_iocpf_timeout(ioc);
2928 else {
2929 ioc->iocpf.poll_time += BFA_IOC_POLL_TOV;
2930 bfa_iocpf_poll_timer_start(ioc);
2931 }
2932}
2933
2934static void
2935bfa_iocpf_poll_timeout(void *ioc_arg)
2936{
2937 struct bfa_ioc_s *ioc = (struct bfa_ioc_s *) ioc_arg;
2938
2939 bfa_ioc_poll_fwinit(ioc);
2940}
2941
Jing Huang5fbe25c2010-10-18 17:17:23 -07002942/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002943 * bfa timer function
2944 */
2945void
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002946bfa_timer_beat(struct bfa_timer_mod_s *mod)
2947{
2948 struct list_head *qh = &mod->timer_q;
2949 struct list_head *qe, *qe_next;
2950 struct bfa_timer_s *elem;
2951 struct list_head timedout_q;
2952
2953 INIT_LIST_HEAD(&timedout_q);
2954
2955 qe = bfa_q_next(qh);
2956
2957 while (qe != qh) {
2958 qe_next = bfa_q_next(qe);
2959
2960 elem = (struct bfa_timer_s *) qe;
2961 if (elem->timeout <= BFA_TIMER_FREQ) {
2962 elem->timeout = 0;
2963 list_del(&elem->qe);
2964 list_add_tail(&elem->qe, &timedout_q);
2965 } else {
2966 elem->timeout -= BFA_TIMER_FREQ;
2967 }
2968
2969 qe = qe_next; /* go to next elem */
2970 }
2971
2972 /*
2973 * Pop all the timeout entries
2974 */
2975 while (!list_empty(&timedout_q)) {
2976 bfa_q_deq(&timedout_q, &elem);
2977 elem->timercb(elem->arg);
2978 }
2979}
2980
Jing Huang5fbe25c2010-10-18 17:17:23 -07002981/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002982 * Should be called with lock protection
2983 */
2984void
2985bfa_timer_begin(struct bfa_timer_mod_s *mod, struct bfa_timer_s *timer,
2986 void (*timercb) (void *), void *arg, unsigned int timeout)
2987{
2988
Jing Huangd4b671c2010-12-26 21:46:35 -08002989 WARN_ON(timercb == NULL);
2990 WARN_ON(bfa_q_is_on_q(&mod->timer_q, timer));
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002991
2992 timer->timeout = timeout;
2993 timer->timercb = timercb;
2994 timer->arg = arg;
2995
2996 list_add_tail(&timer->qe, &mod->timer_q);
2997}
2998
Jing Huang5fbe25c2010-10-18 17:17:23 -07002999/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07003000 * Should be called with lock protection
3001 */
3002void
3003bfa_timer_stop(struct bfa_timer_s *timer)
3004{
Jing Huangd4b671c2010-12-26 21:46:35 -08003005 WARN_ON(list_empty(&timer->qe));
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07003006
3007 list_del(&timer->qe);
3008}
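/*
 * Illustrative sketch only (excluded from the build): typical use of the
 * timer module above. The owner structure, callback and the 2000 msec
 * period are hypothetical; the contract shown (arm with bfa_timer_begin(),
 * drive bfa_timer_beat() every BFA_TIMER_FREQ msecs under the same lock,
 * cancel with bfa_timer_stop()) follows the functions above.
 */
#if 0
struct example_owner_s {
	struct bfa_timer_mod_s	*timer_mod;
	struct bfa_timer_s	timer;
};

static void
example_timeout_cb(void *arg)
{
	struct example_owner_s *owner = arg;

	/* the timer is already off the queue here; re-arm if needed */
	bfa_timer_begin(owner->timer_mod, &owner->timer, example_timeout_cb,
			owner, 2000);
}

static void
example_arm(struct example_owner_s *owner)
{
	bfa_timer_begin(owner->timer_mod, &owner->timer, example_timeout_cb,
			owner, 2000);
}

static void
example_cancel(struct example_owner_s *owner)
{
	bfa_timer_stop(&owner->timer);	/* only while still armed */
}
#endif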
Krishna Gudipati1a4d8e12011-06-24 20:22:28 -07003009
3010/*
3011 * ASIC block related
3012 */
3013static void
3014bfa_ablk_config_swap(struct bfa_ablk_cfg_s *cfg)
3015{
3016 struct bfa_ablk_cfg_inst_s *cfg_inst;
3017 int i, j;
3018 u16 be16;
3019 u32 be32;
3020
3021 for (i = 0; i < BFA_ABLK_MAX; i++) {
3022 cfg_inst = &cfg->inst[i];
3023 for (j = 0; j < BFA_ABLK_MAX_PFS; j++) {
3024 be16 = cfg_inst->pf_cfg[j].pers;
3025 cfg_inst->pf_cfg[j].pers = be16_to_cpu(be16);
3026 be16 = cfg_inst->pf_cfg[j].num_qpairs;
3027 cfg_inst->pf_cfg[j].num_qpairs = be16_to_cpu(be16);
3028 be16 = cfg_inst->pf_cfg[j].num_vectors;
3029 cfg_inst->pf_cfg[j].num_vectors = be16_to_cpu(be16);
3030 be32 = cfg_inst->pf_cfg[j].bw;
3031			cfg_inst->pf_cfg[j].bw = be32_to_cpu(be32);
3032 }
3033 }
3034}
3035
3036static void
3037bfa_ablk_isr(void *cbarg, struct bfi_mbmsg_s *msg)
3038{
3039 struct bfa_ablk_s *ablk = (struct bfa_ablk_s *)cbarg;
3040 struct bfi_ablk_i2h_rsp_s *rsp = (struct bfi_ablk_i2h_rsp_s *)msg;
3041 bfa_ablk_cbfn_t cbfn;
3042
3043 WARN_ON(msg->mh.msg_class != BFI_MC_ABLK);
3044 bfa_trc(ablk->ioc, msg->mh.msg_id);
3045
3046 switch (msg->mh.msg_id) {
3047 case BFI_ABLK_I2H_QUERY:
3048 if (rsp->status == BFA_STATUS_OK) {
3049 memcpy(ablk->cfg, ablk->dma_addr.kva,
3050 sizeof(struct bfa_ablk_cfg_s));
3051 bfa_ablk_config_swap(ablk->cfg);
3052 ablk->cfg = NULL;
3053 }
3054 break;
3055
3056 case BFI_ABLK_I2H_ADPT_CONFIG:
3057 case BFI_ABLK_I2H_PORT_CONFIG:
3058 /* update config port mode */
3059 ablk->ioc->port_mode_cfg = rsp->port_mode;
3060
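		/* fall through */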
3061 case BFI_ABLK_I2H_PF_DELETE:
3062 case BFI_ABLK_I2H_PF_UPDATE:
3063 case BFI_ABLK_I2H_OPTROM_ENABLE:
3064 case BFI_ABLK_I2H_OPTROM_DISABLE:
3065 /* No-op */
3066 break;
3067
3068 case BFI_ABLK_I2H_PF_CREATE:
3069 *(ablk->pcifn) = rsp->pcifn;
3070 ablk->pcifn = NULL;
3071 break;
3072
3073 default:
3074 WARN_ON(1);
3075 }
3076
3077 ablk->busy = BFA_FALSE;
3078 if (ablk->cbfn) {
3079 cbfn = ablk->cbfn;
3080 ablk->cbfn = NULL;
3081 cbfn(ablk->cbarg, rsp->status);
3082 }
3083}
3084
3085static void
3086bfa_ablk_notify(void *cbarg, enum bfa_ioc_event_e event)
3087{
3088 struct bfa_ablk_s *ablk = (struct bfa_ablk_s *)cbarg;
3089
3090 bfa_trc(ablk->ioc, event);
3091
3092 switch (event) {
3093 case BFA_IOC_E_ENABLED:
3094 WARN_ON(ablk->busy != BFA_FALSE);
3095 break;
3096
3097 case BFA_IOC_E_DISABLED:
3098 case BFA_IOC_E_FAILED:
3099 /* Fail any pending requests */
3100 ablk->pcifn = NULL;
3101 if (ablk->busy) {
3102 if (ablk->cbfn)
3103 ablk->cbfn(ablk->cbarg, BFA_STATUS_FAILED);
3104 ablk->cbfn = NULL;
3105 ablk->busy = BFA_FALSE;
3106 }
3107 break;
3108
3109 default:
3110 WARN_ON(1);
3111 break;
3112 }
3113}
3114
3115u32
3116bfa_ablk_meminfo(void)
3117{
3118 return BFA_ROUNDUP(sizeof(struct bfa_ablk_cfg_s), BFA_DMA_ALIGN_SZ);
3119}
3120
3121void
3122bfa_ablk_memclaim(struct bfa_ablk_s *ablk, u8 *dma_kva, u64 dma_pa)
3123{
3124 ablk->dma_addr.kva = dma_kva;
3125 ablk->dma_addr.pa = dma_pa;
3126}
3127
3128void
3129bfa_ablk_attach(struct bfa_ablk_s *ablk, struct bfa_ioc_s *ioc)
3130{
3131 ablk->ioc = ioc;
3132
3133 bfa_ioc_mbox_regisr(ablk->ioc, BFI_MC_ABLK, bfa_ablk_isr, ablk);
Krishna Gudipati3350d982011-06-24 20:28:37 -07003134 bfa_q_qe_init(&ablk->ioc_notify);
Krishna Gudipati1a4d8e12011-06-24 20:22:28 -07003135 bfa_ioc_notify_init(&ablk->ioc_notify, bfa_ablk_notify, ablk);
3136 list_add_tail(&ablk->ioc_notify.qe, &ablk->ioc->notify_q);
3137}
3138
3139bfa_status_t
3140bfa_ablk_query(struct bfa_ablk_s *ablk, struct bfa_ablk_cfg_s *ablk_cfg,
3141 bfa_ablk_cbfn_t cbfn, void *cbarg)
3142{
3143 struct bfi_ablk_h2i_query_s *m;
3144
3145 WARN_ON(!ablk_cfg);
3146
3147 if (!bfa_ioc_is_operational(ablk->ioc)) {
3148 bfa_trc(ablk->ioc, BFA_STATUS_IOC_FAILURE);
3149 return BFA_STATUS_IOC_FAILURE;
3150 }
3151
3152 if (ablk->busy) {
3153 bfa_trc(ablk->ioc, BFA_STATUS_DEVBUSY);
3154 return BFA_STATUS_DEVBUSY;
3155 }
3156
3157 ablk->cfg = ablk_cfg;
3158 ablk->cbfn = cbfn;
3159 ablk->cbarg = cbarg;
3160 ablk->busy = BFA_TRUE;
3161
3162 m = (struct bfi_ablk_h2i_query_s *)ablk->mb.msg;
3163 bfi_h2i_set(m->mh, BFI_MC_ABLK, BFI_ABLK_H2I_QUERY,
3164 bfa_ioc_portid(ablk->ioc));
3165 bfa_dma_be_addr_set(m->addr, ablk->dma_addr.pa);
3166 bfa_ioc_mbox_queue(ablk->ioc, &ablk->mb);
3167
3168 return BFA_STATUS_OK;
3169}
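/*
 * Illustrative sketch only (excluded from the build): the attach/claim/query
 * sequence for the ASIC block module. dma_kva/dma_pa are assumed to describe
 * a DMA-coherent buffer of at least bfa_ablk_meminfo() bytes allocated by the
 * caller, and done_cb is a completion callback of type bfa_ablk_cbfn_t; those
 * names are hypothetical, the bfa_ablk_* calls are the ones defined above.
 */
#if 0
static bfa_status_t
example_ablk_setup_and_query(struct bfa_ablk_s *ablk, struct bfa_ioc_s *ioc,
			     u8 *dma_kva, u64 dma_pa,
			     struct bfa_ablk_cfg_s *cfg,
			     bfa_ablk_cbfn_t done_cb, void *done_arg)
{
	/* one-time setup: register with the IOC and claim DMA memory */
	bfa_ablk_attach(ablk, ioc);
	bfa_ablk_memclaim(ablk, dma_kva, dma_pa);

	/*
	 * Fire the query; cfg is filled in (byte-swapped to host order)
	 * before done_cb is invoked from bfa_ablk_isr().
	 */
	return bfa_ablk_query(ablk, cfg, done_cb, done_arg);
}
#endif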
3170
3171bfa_status_t
3172bfa_ablk_pf_create(struct bfa_ablk_s *ablk, u16 *pcifn,
3173 u8 port, enum bfi_pcifn_class personality, int bw,
3174 bfa_ablk_cbfn_t cbfn, void *cbarg)
3175{
3176 struct bfi_ablk_h2i_pf_req_s *m;
3177
3178 if (!bfa_ioc_is_operational(ablk->ioc)) {
3179 bfa_trc(ablk->ioc, BFA_STATUS_IOC_FAILURE);
3180 return BFA_STATUS_IOC_FAILURE;
3181 }
3182
3183 if (ablk->busy) {
3184 bfa_trc(ablk->ioc, BFA_STATUS_DEVBUSY);
3185 return BFA_STATUS_DEVBUSY;
3186 }
3187
3188 ablk->pcifn = pcifn;
3189 ablk->cbfn = cbfn;
3190 ablk->cbarg = cbarg;
3191 ablk->busy = BFA_TRUE;
3192
3193 m = (struct bfi_ablk_h2i_pf_req_s *)ablk->mb.msg;
3194 bfi_h2i_set(m->mh, BFI_MC_ABLK, BFI_ABLK_H2I_PF_CREATE,
3195 bfa_ioc_portid(ablk->ioc));
3196 m->pers = cpu_to_be16((u16)personality);
3197 m->bw = cpu_to_be32(bw);
3198 m->port = port;
3199 bfa_ioc_mbox_queue(ablk->ioc, &ablk->mb);
3200
3201 return BFA_STATUS_OK;
3202}
3203
3204bfa_status_t
3205bfa_ablk_pf_delete(struct bfa_ablk_s *ablk, int pcifn,
3206 bfa_ablk_cbfn_t cbfn, void *cbarg)
3207{
3208 struct bfi_ablk_h2i_pf_req_s *m;
3209
3210 if (!bfa_ioc_is_operational(ablk->ioc)) {
3211 bfa_trc(ablk->ioc, BFA_STATUS_IOC_FAILURE);
3212 return BFA_STATUS_IOC_FAILURE;
3213 }
3214
3215 if (ablk->busy) {
3216 bfa_trc(ablk->ioc, BFA_STATUS_DEVBUSY);
3217 return BFA_STATUS_DEVBUSY;
3218 }
3219
3220 ablk->cbfn = cbfn;
3221 ablk->cbarg = cbarg;
3222 ablk->busy = BFA_TRUE;
3223
3224 m = (struct bfi_ablk_h2i_pf_req_s *)ablk->mb.msg;
3225 bfi_h2i_set(m->mh, BFI_MC_ABLK, BFI_ABLK_H2I_PF_DELETE,
3226 bfa_ioc_portid(ablk->ioc));
3227 m->pcifn = (u8)pcifn;
3228 bfa_ioc_mbox_queue(ablk->ioc, &ablk->mb);
3229
3230 return BFA_STATUS_OK;
3231}
3232
3233bfa_status_t
3234bfa_ablk_adapter_config(struct bfa_ablk_s *ablk, enum bfa_mode_s mode,
3235 int max_pf, int max_vf, bfa_ablk_cbfn_t cbfn, void *cbarg)
3236{
3237 struct bfi_ablk_h2i_cfg_req_s *m;
3238
3239 if (!bfa_ioc_is_operational(ablk->ioc)) {
3240 bfa_trc(ablk->ioc, BFA_STATUS_IOC_FAILURE);
3241 return BFA_STATUS_IOC_FAILURE;
3242 }
3243
3244 if (ablk->busy) {
3245 bfa_trc(ablk->ioc, BFA_STATUS_DEVBUSY);
3246 return BFA_STATUS_DEVBUSY;
3247 }
3248
3249 ablk->cbfn = cbfn;
3250 ablk->cbarg = cbarg;
3251 ablk->busy = BFA_TRUE;
3252
3253 m = (struct bfi_ablk_h2i_cfg_req_s *)ablk->mb.msg;
3254 bfi_h2i_set(m->mh, BFI_MC_ABLK, BFI_ABLK_H2I_ADPT_CONFIG,
3255 bfa_ioc_portid(ablk->ioc));
3256 m->mode = (u8)mode;
3257 m->max_pf = (u8)max_pf;
3258 m->max_vf = (u8)max_vf;
3259 bfa_ioc_mbox_queue(ablk->ioc, &ablk->mb);
3260
3261 return BFA_STATUS_OK;
3262}
3263
3264bfa_status_t
3265bfa_ablk_port_config(struct bfa_ablk_s *ablk, int port, enum bfa_mode_s mode,
3266 int max_pf, int max_vf, bfa_ablk_cbfn_t cbfn, void *cbarg)
3267{
3268 struct bfi_ablk_h2i_cfg_req_s *m;
3269
3270 if (!bfa_ioc_is_operational(ablk->ioc)) {
3271 bfa_trc(ablk->ioc, BFA_STATUS_IOC_FAILURE);
3272 return BFA_STATUS_IOC_FAILURE;
3273 }
3274
3275 if (ablk->busy) {
3276 bfa_trc(ablk->ioc, BFA_STATUS_DEVBUSY);
3277 return BFA_STATUS_DEVBUSY;
3278 }
3279
3280 ablk->cbfn = cbfn;
3281 ablk->cbarg = cbarg;
3282 ablk->busy = BFA_TRUE;
3283
3284 m = (struct bfi_ablk_h2i_cfg_req_s *)ablk->mb.msg;
3285 bfi_h2i_set(m->mh, BFI_MC_ABLK, BFI_ABLK_H2I_PORT_CONFIG,
3286 bfa_ioc_portid(ablk->ioc));
3287 m->port = (u8)port;
3288 m->mode = (u8)mode;
3289 m->max_pf = (u8)max_pf;
3290 m->max_vf = (u8)max_vf;
3291 bfa_ioc_mbox_queue(ablk->ioc, &ablk->mb);
3292
3293 return BFA_STATUS_OK;
3294}
3295
3296bfa_status_t
3297bfa_ablk_pf_update(struct bfa_ablk_s *ablk, int pcifn, int bw,
3298 bfa_ablk_cbfn_t cbfn, void *cbarg)
3299{
3300 struct bfi_ablk_h2i_pf_req_s *m;
3301
3302 if (!bfa_ioc_is_operational(ablk->ioc)) {
3303 bfa_trc(ablk->ioc, BFA_STATUS_IOC_FAILURE);
3304 return BFA_STATUS_IOC_FAILURE;
3305 }
3306
3307 if (ablk->busy) {
3308 bfa_trc(ablk->ioc, BFA_STATUS_DEVBUSY);
3309 return BFA_STATUS_DEVBUSY;
3310 }
3311
3312 ablk->cbfn = cbfn;
3313 ablk->cbarg = cbarg;
3314 ablk->busy = BFA_TRUE;
3315
3316 m = (struct bfi_ablk_h2i_pf_req_s *)ablk->mb.msg;
3317 bfi_h2i_set(m->mh, BFI_MC_ABLK, BFI_ABLK_H2I_PF_UPDATE,
3318 bfa_ioc_portid(ablk->ioc));
3319 m->pcifn = (u8)pcifn;
3320 m->bw = cpu_to_be32(bw);
3321 bfa_ioc_mbox_queue(ablk->ioc, &ablk->mb);
3322
3323 return BFA_STATUS_OK;
3324}
3325
3326bfa_status_t
3327bfa_ablk_optrom_en(struct bfa_ablk_s *ablk, bfa_ablk_cbfn_t cbfn, void *cbarg)
3328{
3329 struct bfi_ablk_h2i_optrom_s *m;
3330
3331 if (!bfa_ioc_is_operational(ablk->ioc)) {
3332 bfa_trc(ablk->ioc, BFA_STATUS_IOC_FAILURE);
3333 return BFA_STATUS_IOC_FAILURE;
3334 }
3335
3336 if (ablk->busy) {
3337 bfa_trc(ablk->ioc, BFA_STATUS_DEVBUSY);
3338 return BFA_STATUS_DEVBUSY;
3339 }
3340
3341 ablk->cbfn = cbfn;
3342 ablk->cbarg = cbarg;
3343 ablk->busy = BFA_TRUE;
3344
3345 m = (struct bfi_ablk_h2i_optrom_s *)ablk->mb.msg;
3346 bfi_h2i_set(m->mh, BFI_MC_ABLK, BFI_ABLK_H2I_OPTROM_ENABLE,
3347 bfa_ioc_portid(ablk->ioc));
3348 bfa_ioc_mbox_queue(ablk->ioc, &ablk->mb);
3349
3350 return BFA_STATUS_OK;
3351}
3352
3353bfa_status_t
3354bfa_ablk_optrom_dis(struct bfa_ablk_s *ablk, bfa_ablk_cbfn_t cbfn, void *cbarg)
3355{
3356 struct bfi_ablk_h2i_optrom_s *m;
3357
3358 if (!bfa_ioc_is_operational(ablk->ioc)) {
3359 bfa_trc(ablk->ioc, BFA_STATUS_IOC_FAILURE);
3360 return BFA_STATUS_IOC_FAILURE;
3361 }
3362
3363 if (ablk->busy) {
3364 bfa_trc(ablk->ioc, BFA_STATUS_DEVBUSY);
3365 return BFA_STATUS_DEVBUSY;
3366 }
3367
3368 ablk->cbfn = cbfn;
3369 ablk->cbarg = cbarg;
3370 ablk->busy = BFA_TRUE;
3371
3372 m = (struct bfi_ablk_h2i_optrom_s *)ablk->mb.msg;
3373 bfi_h2i_set(m->mh, BFI_MC_ABLK, BFI_ABLK_H2I_OPTROM_DISABLE,
3374 bfa_ioc_portid(ablk->ioc));
3375 bfa_ioc_mbox_queue(ablk->ioc, &ablk->mb);
3376
3377 return BFA_STATUS_OK;
3378}
Krishna Gudipati51e569a2011-06-24 20:26:25 -07003379
3380/*
3381 * SFP module specific
3382 */
3383
3384/* forward declarations */
3385static void bfa_sfp_getdata_send(struct bfa_sfp_s *sfp);
3386static void bfa_sfp_media_get(struct bfa_sfp_s *sfp);
3387static bfa_status_t bfa_sfp_speed_valid(struct bfa_sfp_s *sfp,
3388 enum bfa_port_speed portspeed);
3389
3390static void
3391bfa_cb_sfp_show(struct bfa_sfp_s *sfp)
3392{
3393 bfa_trc(sfp, sfp->lock);
3394 if (sfp->cbfn)
3395 sfp->cbfn(sfp->cbarg, sfp->status);
3396 sfp->lock = 0;
3397 sfp->cbfn = NULL;
3398}
3399
3400static void
3401bfa_cb_sfp_state_query(struct bfa_sfp_s *sfp)
3402{
3403 bfa_trc(sfp, sfp->portspeed);
3404 if (sfp->media) {
3405 bfa_sfp_media_get(sfp);
3406 if (sfp->state_query_cbfn)
3407 sfp->state_query_cbfn(sfp->state_query_cbarg,
3408 sfp->status);
3409 sfp->media = NULL;
3410 }
3411
3412 if (sfp->portspeed) {
3413 sfp->status = bfa_sfp_speed_valid(sfp, sfp->portspeed);
3414 if (sfp->state_query_cbfn)
3415 sfp->state_query_cbfn(sfp->state_query_cbarg,
3416 sfp->status);
3417 sfp->portspeed = BFA_PORT_SPEED_UNKNOWN;
3418 }
3419
3420 sfp->state_query_lock = 0;
3421 sfp->state_query_cbfn = NULL;
3422}
3423
3424/*
3425 * IOC event handler.
3426 */
3427static void
3428bfa_sfp_notify(void *sfp_arg, enum bfa_ioc_event_e event)
3429{
3430 struct bfa_sfp_s *sfp = sfp_arg;
3431
3432 bfa_trc(sfp, event);
3433 bfa_trc(sfp, sfp->lock);
3434 bfa_trc(sfp, sfp->state_query_lock);
3435
3436 switch (event) {
3437 case BFA_IOC_E_DISABLED:
3438 case BFA_IOC_E_FAILED:
3439 if (sfp->lock) {
3440 sfp->status = BFA_STATUS_IOC_FAILURE;
3441 bfa_cb_sfp_show(sfp);
3442 }
3443
3444 if (sfp->state_query_lock) {
3445 sfp->status = BFA_STATUS_IOC_FAILURE;
3446 bfa_cb_sfp_state_query(sfp);
3447 }
3448 break;
3449
3450 default:
3451 break;
3452 }
3453}
3454
3455/*
Krishna Gudipati7826f302011-07-20 16:59:13 -07003456 * Post SFP state change notification (SCN) events to AEN
3457 */
3458static void
3459bfa_sfp_scn_aen_post(struct bfa_sfp_s *sfp, struct bfi_sfp_scn_s *rsp)
3460{
3461 struct bfad_s *bfad = (struct bfad_s *)sfp->ioc->bfa->bfad;
3462 struct bfa_aen_entry_s *aen_entry;
3463 enum bfa_port_aen_event aen_evt = 0;
3464
3465 bfa_trc(sfp, (((u64)rsp->pomlvl) << 16) | (((u64)rsp->sfpid) << 8) |
3466 ((u64)rsp->event));
3467
3468 bfad_get_aen_entry(bfad, aen_entry);
3469 if (!aen_entry)
3470 return;
3471
3472 aen_entry->aen_data.port.ioc_type = bfa_ioc_get_type(sfp->ioc);
3473 aen_entry->aen_data.port.pwwn = sfp->ioc->attr->pwwn;
3474 aen_entry->aen_data.port.mac = bfa_ioc_get_mac(sfp->ioc);
3475
3476 switch (rsp->event) {
3477 case BFA_SFP_SCN_INSERTED:
3478 aen_evt = BFA_PORT_AEN_SFP_INSERT;
3479 break;
3480 case BFA_SFP_SCN_REMOVED:
3481 aen_evt = BFA_PORT_AEN_SFP_REMOVE;
3482 break;
3483 case BFA_SFP_SCN_FAILED:
3484 aen_evt = BFA_PORT_AEN_SFP_ACCESS_ERROR;
3485 break;
3486 case BFA_SFP_SCN_UNSUPPORT:
3487 aen_evt = BFA_PORT_AEN_SFP_UNSUPPORT;
3488 break;
3489 case BFA_SFP_SCN_POM:
3490 aen_evt = BFA_PORT_AEN_SFP_POM;
3491 aen_entry->aen_data.port.level = rsp->pomlvl;
3492 break;
3493 default:
3494 bfa_trc(sfp, rsp->event);
3495 WARN_ON(1);
3496 }
3497
3498 /* Send the AEN notification */
3499 bfad_im_post_vendor_event(aen_entry, bfad, ++sfp->ioc->ioc_aen_seq,
3500 BFA_AEN_CAT_PORT, aen_evt);
3501}
3502
3503/*
Krishna Gudipati51e569a2011-06-24 20:26:25 -07003504 * SFP get data send
3505 */
3506static void
3507bfa_sfp_getdata_send(struct bfa_sfp_s *sfp)
3508{
3509 struct bfi_sfp_req_s *req = (struct bfi_sfp_req_s *)sfp->mbcmd.msg;
3510
3511 bfa_trc(sfp, req->memtype);
3512
3513 /* build host command */
3514 bfi_h2i_set(req->mh, BFI_MC_SFP, BFI_SFP_H2I_SHOW,
3515 bfa_ioc_portid(sfp->ioc));
3516
3517 /* send mbox cmd */
3518 bfa_ioc_mbox_queue(sfp->ioc, &sfp->mbcmd);
3519}
3520
3521/*
3522 * SFP is valid, read sfp data
3523 */
3524static void
3525bfa_sfp_getdata(struct bfa_sfp_s *sfp, enum bfi_sfp_mem_e memtype)
3526{
3527 struct bfi_sfp_req_s *req = (struct bfi_sfp_req_s *)sfp->mbcmd.msg;
3528
3529 WARN_ON(sfp->lock != 0);
3530 bfa_trc(sfp, sfp->state);
3531
3532 sfp->lock = 1;
3533 sfp->memtype = memtype;
3534 req->memtype = memtype;
3535
3536 /* Setup SG list */
3537 bfa_alen_set(&req->alen, sizeof(struct sfp_mem_s), sfp->dbuf_pa);
3538
3539 bfa_sfp_getdata_send(sfp);
3540}
3541
3542/*
Krishna Gudipati7826f302011-07-20 16:59:13 -07003543 * SFP scn handler
3544 */
3545static void
3546bfa_sfp_scn(struct bfa_sfp_s *sfp, struct bfi_mbmsg_s *msg)
3547{
3548 struct bfi_sfp_scn_s *rsp = (struct bfi_sfp_scn_s *) msg;
3549
3550 switch (rsp->event) {
3551 case BFA_SFP_SCN_INSERTED:
3552 sfp->state = BFA_SFP_STATE_INSERTED;
3553 sfp->data_valid = 0;
3554 bfa_sfp_scn_aen_post(sfp, rsp);
3555 break;
3556 case BFA_SFP_SCN_REMOVED:
3557 sfp->state = BFA_SFP_STATE_REMOVED;
3558 sfp->data_valid = 0;
3559 bfa_sfp_scn_aen_post(sfp, rsp);
3560 break;
3561 case BFA_SFP_SCN_FAILED:
3562 sfp->state = BFA_SFP_STATE_FAILED;
3563 sfp->data_valid = 0;
3564 bfa_sfp_scn_aen_post(sfp, rsp);
3565 break;
3566 case BFA_SFP_SCN_UNSUPPORT:
3567 sfp->state = BFA_SFP_STATE_UNSUPPORT;
3568 bfa_sfp_scn_aen_post(sfp, rsp);
3569 if (!sfp->lock)
3570 bfa_sfp_getdata(sfp, BFI_SFP_MEM_ALL);
3571 break;
3572 case BFA_SFP_SCN_POM:
3573 bfa_sfp_scn_aen_post(sfp, rsp);
3574 break;
3575 case BFA_SFP_SCN_VALID:
3576 sfp->state = BFA_SFP_STATE_VALID;
3577 if (!sfp->lock)
3578 bfa_sfp_getdata(sfp, BFI_SFP_MEM_ALL);
3579 break;
3580 default:
3581 bfa_trc(sfp, rsp->event);
3582 WARN_ON(1);
3583 }
3584}
3585
3586/*
Krishna Gudipati51e569a2011-06-24 20:26:25 -07003587 * SFP show complete
3588 */
3589static void
3590bfa_sfp_show_comp(struct bfa_sfp_s *sfp, struct bfi_mbmsg_s *msg)
3591{
3592 struct bfi_sfp_rsp_s *rsp = (struct bfi_sfp_rsp_s *) msg;
3593
3594 if (!sfp->lock) {
3595 /*
3596 * receiving response after ioc failure
3597 */
3598 bfa_trc(sfp, sfp->lock);
3599 return;
3600 }
3601
3602 bfa_trc(sfp, rsp->status);
3603 if (rsp->status == BFA_STATUS_OK) {
3604 sfp->data_valid = 1;
3605 if (sfp->state == BFA_SFP_STATE_VALID)
3606 sfp->status = BFA_STATUS_OK;
3607 else if (sfp->state == BFA_SFP_STATE_UNSUPPORT)
3608 sfp->status = BFA_STATUS_SFP_UNSUPP;
3609 else
3610 bfa_trc(sfp, sfp->state);
3611 } else {
3612 sfp->data_valid = 0;
3613 sfp->status = rsp->status;
3614 /* sfpshow shouldn't change sfp state */
3615 }
3616
3617 bfa_trc(sfp, sfp->memtype);
3618 if (sfp->memtype == BFI_SFP_MEM_DIAGEXT) {
3619 bfa_trc(sfp, sfp->data_valid);
3620 if (sfp->data_valid) {
3621 u32 size = sizeof(struct sfp_mem_s);
3622 u8 *des = (u8 *) &(sfp->sfpmem->srlid_base);
3623 memcpy(des, sfp->dbuf_kva, size);
3624 }
3625 /*
3626 * Queue completion callback.
3627 */
3628 bfa_cb_sfp_show(sfp);
3629 } else
3630 sfp->lock = 0;
3631
3632 bfa_trc(sfp, sfp->state_query_lock);
3633 if (sfp->state_query_lock) {
3634 sfp->state = rsp->state;
3635 /* Complete callback */
3636 bfa_cb_sfp_state_query(sfp);
3637 }
3638}
3639
3640/*
3641 * SFP query fw sfp state
3642 */
3643static void
3644bfa_sfp_state_query(struct bfa_sfp_s *sfp)
3645{
3646 struct bfi_sfp_req_s *req = (struct bfi_sfp_req_s *)sfp->mbcmd.msg;
3647
3648 /* Should not be doing query if not in _INIT state */
3649 WARN_ON(sfp->state != BFA_SFP_STATE_INIT);
3650 WARN_ON(sfp->state_query_lock != 0);
3651 bfa_trc(sfp, sfp->state);
3652
3653 sfp->state_query_lock = 1;
3654 req->memtype = 0;
3655
3656 if (!sfp->lock)
3657 bfa_sfp_getdata(sfp, BFI_SFP_MEM_ALL);
3658}
3659
3660static void
3661bfa_sfp_media_get(struct bfa_sfp_s *sfp)
3662{
3663 enum bfa_defs_sfp_media_e *media = sfp->media;
3664
3665 *media = BFA_SFP_MEDIA_UNKNOWN;
3666
3667 if (sfp->state == BFA_SFP_STATE_UNSUPPORT)
3668 *media = BFA_SFP_MEDIA_UNSUPPORT;
3669 else if (sfp->state == BFA_SFP_STATE_VALID) {
3670 union sfp_xcvr_e10g_code_u e10g;
3671 struct sfp_mem_s *sfpmem = (struct sfp_mem_s *)sfp->dbuf_kva;
3672 u16 xmtr_tech = (sfpmem->srlid_base.xcvr[4] & 0x3) << 7 |
3673 (sfpmem->srlid_base.xcvr[5] >> 1);
3674
3675 e10g.b = sfpmem->srlid_base.xcvr[0];
3676 bfa_trc(sfp, e10g.b);
3677 bfa_trc(sfp, xmtr_tech);
3678 /* check fc transmitter tech */
3679 if ((xmtr_tech & SFP_XMTR_TECH_CU) ||
3680 (xmtr_tech & SFP_XMTR_TECH_CP) ||
3681 (xmtr_tech & SFP_XMTR_TECH_CA))
3682 *media = BFA_SFP_MEDIA_CU;
3683 else if ((xmtr_tech & SFP_XMTR_TECH_EL_INTRA) ||
3684 (xmtr_tech & SFP_XMTR_TECH_EL_INTER))
3685 *media = BFA_SFP_MEDIA_EL;
3686 else if ((xmtr_tech & SFP_XMTR_TECH_LL) ||
3687 (xmtr_tech & SFP_XMTR_TECH_LC))
3688 *media = BFA_SFP_MEDIA_LW;
3689 else if ((xmtr_tech & SFP_XMTR_TECH_SL) ||
3690 (xmtr_tech & SFP_XMTR_TECH_SN) ||
3691 (xmtr_tech & SFP_XMTR_TECH_SA))
3692 *media = BFA_SFP_MEDIA_SW;
3693		/* Check 10G Ethernet Compliance code */
Jing Huang98cdfb42011-11-16 12:29:26 -08003694 else if (e10g.r.e10g_sr)
Krishna Gudipati51e569a2011-06-24 20:26:25 -07003695 *media = BFA_SFP_MEDIA_SW;
Jing Huang98cdfb42011-11-16 12:29:26 -08003696 else if (e10g.r.e10g_lrm && e10g.r.e10g_lr)
Krishna Gudipati51e569a2011-06-24 20:26:25 -07003697 *media = BFA_SFP_MEDIA_LW;
Jing Huang98cdfb42011-11-16 12:29:26 -08003698 else if (e10g.r.e10g_unall)
Krishna Gudipati51e569a2011-06-24 20:26:25 -07003699 *media = BFA_SFP_MEDIA_UNKNOWN;
3700 else
3701 bfa_trc(sfp, 0);
3702 } else
3703 bfa_trc(sfp, sfp->state);
3704}
3705
3706static bfa_status_t
3707bfa_sfp_speed_valid(struct bfa_sfp_s *sfp, enum bfa_port_speed portspeed)
3708{
3709 struct sfp_mem_s *sfpmem = (struct sfp_mem_s *)sfp->dbuf_kva;
3710 struct sfp_xcvr_s *xcvr = (struct sfp_xcvr_s *) sfpmem->srlid_base.xcvr;
3711 union sfp_xcvr_fc3_code_u fc3 = xcvr->fc3;
3712 union sfp_xcvr_e10g_code_u e10g = xcvr->e10g;
3713
3714 if (portspeed == BFA_PORT_SPEED_10GBPS) {
3715 if (e10g.r.e10g_sr || e10g.r.e10g_lr)
3716 return BFA_STATUS_OK;
3717 else {
3718 bfa_trc(sfp, e10g.b);
3719 return BFA_STATUS_UNSUPP_SPEED;
3720 }
3721 }
3722 if (((portspeed & BFA_PORT_SPEED_16GBPS) && fc3.r.mb1600) ||
3723 ((portspeed & BFA_PORT_SPEED_8GBPS) && fc3.r.mb800) ||
3724 ((portspeed & BFA_PORT_SPEED_4GBPS) && fc3.r.mb400) ||
3725 ((portspeed & BFA_PORT_SPEED_2GBPS) && fc3.r.mb200) ||
3726 ((portspeed & BFA_PORT_SPEED_1GBPS) && fc3.r.mb100))
3727 return BFA_STATUS_OK;
3728 else {
3729 bfa_trc(sfp, portspeed);
3730 bfa_trc(sfp, fc3.b);
3731 bfa_trc(sfp, e10g.b);
3732 return BFA_STATUS_UNSUPP_SPEED;
3733 }
3734}
3735
3736/*
3737 * SFP hmbox handler
3738 */
3739void
3740bfa_sfp_intr(void *sfparg, struct bfi_mbmsg_s *msg)
3741{
3742 struct bfa_sfp_s *sfp = sfparg;
3743
3744 switch (msg->mh.msg_id) {
3745 case BFI_SFP_I2H_SHOW:
3746 bfa_sfp_show_comp(sfp, msg);
3747 break;
3748
3749 case BFI_SFP_I2H_SCN:
Krishna Gudipati7826f302011-07-20 16:59:13 -07003750 bfa_sfp_scn(sfp, msg);
Krishna Gudipati51e569a2011-06-24 20:26:25 -07003751 break;
3752
3753 default:
3754 bfa_trc(sfp, msg->mh.msg_id);
3755 WARN_ON(1);
3756 }
3757}
3758
3759/*
3760 * Return DMA memory needed by sfp module.
3761 */
3762u32
3763bfa_sfp_meminfo(void)
3764{
3765 return BFA_ROUNDUP(sizeof(struct sfp_mem_s), BFA_DMA_ALIGN_SZ);
3766}
3767
3768/*
3769 * Attach virtual and physical memory for SFP.
3770 */
3771void
3772bfa_sfp_attach(struct bfa_sfp_s *sfp, struct bfa_ioc_s *ioc, void *dev,
3773 struct bfa_trc_mod_s *trcmod)
3774{
3775 sfp->dev = dev;
3776 sfp->ioc = ioc;
3777 sfp->trcmod = trcmod;
3778
3779 sfp->cbfn = NULL;
3780 sfp->cbarg = NULL;
3781 sfp->sfpmem = NULL;
3782 sfp->lock = 0;
3783 sfp->data_valid = 0;
3784 sfp->state = BFA_SFP_STATE_INIT;
3785 sfp->state_query_lock = 0;
3786 sfp->state_query_cbfn = NULL;
3787 sfp->state_query_cbarg = NULL;
3788 sfp->media = NULL;
3789 sfp->portspeed = BFA_PORT_SPEED_UNKNOWN;
3790 sfp->is_elb = BFA_FALSE;
3791
3792 bfa_ioc_mbox_regisr(sfp->ioc, BFI_MC_SFP, bfa_sfp_intr, sfp);
3793 bfa_q_qe_init(&sfp->ioc_notify);
3794 bfa_ioc_notify_init(&sfp->ioc_notify, bfa_sfp_notify, sfp);
3795 list_add_tail(&sfp->ioc_notify.qe, &sfp->ioc->notify_q);
3796}
3797
3798/*
3799 * Claim Memory for SFP
3800 */
3801void
3802bfa_sfp_memclaim(struct bfa_sfp_s *sfp, u8 *dm_kva, u64 dm_pa)
3803{
3804 sfp->dbuf_kva = dm_kva;
3805 sfp->dbuf_pa = dm_pa;
3806 memset(sfp->dbuf_kva, 0, sizeof(struct sfp_mem_s));
3807
3808 dm_kva += BFA_ROUNDUP(sizeof(struct sfp_mem_s), BFA_DMA_ALIGN_SZ);
3809 dm_pa += BFA_ROUNDUP(sizeof(struct sfp_mem_s), BFA_DMA_ALIGN_SZ);
3810}
3811
3812/*
3813 * Show SFP eeprom content
3814 *
3815 * @param[in] sfp - bfa sfp module
3816 *
3817 * @param[out] sfpmem - sfp eeprom data
3818 *
3819 */
3820bfa_status_t
3821bfa_sfp_show(struct bfa_sfp_s *sfp, struct sfp_mem_s *sfpmem,
3822 bfa_cb_sfp_t cbfn, void *cbarg)
3823{
3824
3825 if (!bfa_ioc_is_operational(sfp->ioc)) {
3826 bfa_trc(sfp, 0);
3827 return BFA_STATUS_IOC_NON_OP;
3828 }
3829
3830 if (sfp->lock) {
3831 bfa_trc(sfp, 0);
3832 return BFA_STATUS_DEVBUSY;
3833 }
3834
3835 sfp->cbfn = cbfn;
3836 sfp->cbarg = cbarg;
3837 sfp->sfpmem = sfpmem;
3838
3839 bfa_sfp_getdata(sfp, BFI_SFP_MEM_DIAGEXT);
3840 return BFA_STATUS_OK;
3841}
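/*
 * Illustrative sketch only (excluded from the build): requesting an SFP
 * EEPROM dump with bfa_sfp_show(). The completion callback type bfa_cb_sfp_t
 * is taken from the prototypes above; the wrapper and its arguments are
 * hypothetical.
 */
#if 0
static bfa_status_t
example_sfp_dump(struct bfa_sfp_s *sfp, struct sfp_mem_s *sfpmem,
		 bfa_cb_sfp_t done_cb, void *done_arg)
{
	bfa_status_t status;

	status = bfa_sfp_show(sfp, sfpmem, done_cb, done_arg);
	if (status != BFA_STATUS_OK)
		return status;	/* IOC not operational or SFP busy */

	/*
	 * BFA_STATUS_OK only means the request was queued; sfpmem is
	 * valid once done_cb reports the final status.
	 */
	return BFA_STATUS_OK;
}
#endif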
3842
3843/*
3844 * Return SFP Media type
3845 *
3846 * @param[in] sfp - bfa sfp module
3847 *
3848 * @param[out]	media - pointer to return the SFP media type
3849 *
3850 */
3851bfa_status_t
3852bfa_sfp_media(struct bfa_sfp_s *sfp, enum bfa_defs_sfp_media_e *media,
3853 bfa_cb_sfp_t cbfn, void *cbarg)
3854{
3855 if (!bfa_ioc_is_operational(sfp->ioc)) {
3856 bfa_trc(sfp, 0);
3857 return BFA_STATUS_IOC_NON_OP;
3858 }
3859
3860 sfp->media = media;
3861 if (sfp->state == BFA_SFP_STATE_INIT) {
3862 if (sfp->state_query_lock) {
3863 bfa_trc(sfp, 0);
3864 return BFA_STATUS_DEVBUSY;
3865 } else {
3866 sfp->state_query_cbfn = cbfn;
3867 sfp->state_query_cbarg = cbarg;
3868 bfa_sfp_state_query(sfp);
3869 return BFA_STATUS_SFP_NOT_READY;
3870 }
3871 }
3872
3873 bfa_sfp_media_get(sfp);
3874 return BFA_STATUS_OK;
3875}
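/*
 * Illustrative sketch only (excluded from the build): handling the two
 * completion paths of bfa_sfp_media(). BFA_STATUS_OK means *media was filled
 * in synchronously; BFA_STATUS_SFP_NOT_READY means a FW state query was
 * started and *media is filled in just before done_cb runs. The helper name
 * is hypothetical.
 */
#if 0
static bfa_status_t
example_sfp_media_query(struct bfa_sfp_s *sfp,
			enum bfa_defs_sfp_media_e *media,
			bfa_cb_sfp_t done_cb, void *done_arg)
{
	bfa_status_t status;

	status = bfa_sfp_media(sfp, media, done_cb, done_arg);
	if (status == BFA_STATUS_SFP_NOT_READY)
		return status;	/* wait for done_cb; *media is set by then */

	return status;		/* BFA_STATUS_OK: *media is already valid */
}
#endif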
3876
3877/*
3878 * Check if user set port speed is allowed by the SFP
3879 *
3880 * @param[in] sfp - bfa sfp module
3881 * @param[in] portspeed - port speed from user
3882 *
3883 */
3884bfa_status_t
3885bfa_sfp_speed(struct bfa_sfp_s *sfp, enum bfa_port_speed portspeed,
3886 bfa_cb_sfp_t cbfn, void *cbarg)
3887{
3888 WARN_ON(portspeed == BFA_PORT_SPEED_UNKNOWN);
3889
3890 if (!bfa_ioc_is_operational(sfp->ioc))
3891 return BFA_STATUS_IOC_NON_OP;
3892
3893	/* For Mezz cards, all speeds are allowed */
3894 if (bfa_mfg_is_mezz(sfp->ioc->attr->card_type))
3895 return BFA_STATUS_OK;
3896
3897 /* Check SFP state */
3898 sfp->portspeed = portspeed;
3899 if (sfp->state == BFA_SFP_STATE_INIT) {
3900 if (sfp->state_query_lock) {
3901 bfa_trc(sfp, 0);
3902 return BFA_STATUS_DEVBUSY;
3903 } else {
3904 sfp->state_query_cbfn = cbfn;
3905 sfp->state_query_cbarg = cbarg;
3906 bfa_sfp_state_query(sfp);
3907 return BFA_STATUS_SFP_NOT_READY;
3908 }
3909 }
3910
3911 if (sfp->state == BFA_SFP_STATE_REMOVED ||
3912 sfp->state == BFA_SFP_STATE_FAILED) {
3913 bfa_trc(sfp, sfp->state);
3914 return BFA_STATUS_NO_SFP_DEV;
3915 }
3916
3917 if (sfp->state == BFA_SFP_STATE_INSERTED) {
3918 bfa_trc(sfp, sfp->state);
3919 return BFA_STATUS_DEVBUSY; /* sfp is reading data */
3920 }
3921
3922	/* For eloopback, all speeds are allowed */
3923 if (sfp->is_elb)
3924 return BFA_STATUS_OK;
3925
3926 return bfa_sfp_speed_valid(sfp, portspeed);
3927}
Krishna Gudipati5a54b1d2011-06-24 20:27:13 -07003928
3929/*
3930 * Flash module specific
3931 */
3932
3933/*
3934 * The flash DMA buffer should be big enough to hold both the MFG block and
3935 * the ASIC block (64KB) at the same time, and should also be 2KB aligned to
3936 * prevent a write segment from crossing a sector boundary.
3937 */
3938#define BFA_FLASH_SEG_SZ 2048
3939#define BFA_FLASH_DMA_BUF_SZ \
3940 BFA_ROUNDUP(0x010000 + sizeof(struct bfa_mfg_block_s), BFA_FLASH_SEG_SZ)
3941
3942static void
Krishna Gudipati7826f302011-07-20 16:59:13 -07003943bfa_flash_aen_audit_post(struct bfa_ioc_s *ioc, enum bfa_audit_aen_event event,
3944 int inst, int type)
3945{
3946 struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad;
3947 struct bfa_aen_entry_s *aen_entry;
3948
3949 bfad_get_aen_entry(bfad, aen_entry);
3950 if (!aen_entry)
3951 return;
3952
3953 aen_entry->aen_data.audit.pwwn = ioc->attr->pwwn;
3954 aen_entry->aen_data.audit.partition_inst = inst;
3955 aen_entry->aen_data.audit.partition_type = type;
3956
3957 /* Send the AEN notification */
3958 bfad_im_post_vendor_event(aen_entry, bfad, ++ioc->ioc_aen_seq,
3959 BFA_AEN_CAT_AUDIT, event);
3960}
3961
3962static void
Krishna Gudipati5a54b1d2011-06-24 20:27:13 -07003963bfa_flash_cb(struct bfa_flash_s *flash)
3964{
3965 flash->op_busy = 0;
3966 if (flash->cbfn)
3967 flash->cbfn(flash->cbarg, flash->status);
3968}
3969
3970static void
3971bfa_flash_notify(void *cbarg, enum bfa_ioc_event_e event)
3972{
3973 struct bfa_flash_s *flash = cbarg;
3974
3975 bfa_trc(flash, event);
3976 switch (event) {
3977 case BFA_IOC_E_DISABLED:
3978 case BFA_IOC_E_FAILED:
3979 if (flash->op_busy) {
3980 flash->status = BFA_STATUS_IOC_FAILURE;
3981 flash->cbfn(flash->cbarg, flash->status);
3982 flash->op_busy = 0;
3983 }
3984 break;
3985
3986 default:
3987 break;
3988 }
3989}
3990
3991/*
3992 * Send flash attribute query request.
3993 *
3994 * @param[in] cbarg - callback argument
3995 */
3996static void
3997bfa_flash_query_send(void *cbarg)
3998{
3999 struct bfa_flash_s *flash = cbarg;
4000 struct bfi_flash_query_req_s *msg =
4001 (struct bfi_flash_query_req_s *) flash->mb.msg;
4002
4003 bfi_h2i_set(msg->mh, BFI_MC_FLASH, BFI_FLASH_H2I_QUERY_REQ,
4004 bfa_ioc_portid(flash->ioc));
4005 bfa_alen_set(&msg->alen, sizeof(struct bfa_flash_attr_s),
4006 flash->dbuf_pa);
4007 bfa_ioc_mbox_queue(flash->ioc, &flash->mb);
4008}
4009
4010/*
4011 * Send flash write request.
4012 *
4013 * @param[in] cbarg - callback argument
4014 */
4015static void
4016bfa_flash_write_send(struct bfa_flash_s *flash)
4017{
4018 struct bfi_flash_write_req_s *msg =
4019 (struct bfi_flash_write_req_s *) flash->mb.msg;
4020 u32 len;
4021
4022 msg->type = be32_to_cpu(flash->type);
4023 msg->instance = flash->instance;
4024 msg->offset = be32_to_cpu(flash->addr_off + flash->offset);
4025 len = (flash->residue < BFA_FLASH_DMA_BUF_SZ) ?
4026 flash->residue : BFA_FLASH_DMA_BUF_SZ;
4027 msg->length = be32_to_cpu(len);
4028
4029 /* indicate if it's the last msg of the whole write operation */
4030 msg->last = (len == flash->residue) ? 1 : 0;
4031
4032 bfi_h2i_set(msg->mh, BFI_MC_FLASH, BFI_FLASH_H2I_WRITE_REQ,
4033 bfa_ioc_portid(flash->ioc));
4034 bfa_alen_set(&msg->alen, len, flash->dbuf_pa);
4035 memcpy(flash->dbuf_kva, flash->ubuf + flash->offset, len);
4036 bfa_ioc_mbox_queue(flash->ioc, &flash->mb);
4037
4038 flash->residue -= len;
4039 flash->offset += len;
4040}
4041
4042/*
4043 * Send flash read request.
4044 *
4045 * @param[in] cbarg - callback argument
4046 */
4047static void
4048bfa_flash_read_send(void *cbarg)
4049{
4050 struct bfa_flash_s *flash = cbarg;
4051 struct bfi_flash_read_req_s *msg =
4052 (struct bfi_flash_read_req_s *) flash->mb.msg;
4053 u32 len;
4054
4055 msg->type = be32_to_cpu(flash->type);
4056 msg->instance = flash->instance;
4057 msg->offset = be32_to_cpu(flash->addr_off + flash->offset);
4058 len = (flash->residue < BFA_FLASH_DMA_BUF_SZ) ?
4059 flash->residue : BFA_FLASH_DMA_BUF_SZ;
4060 msg->length = be32_to_cpu(len);
4061 bfi_h2i_set(msg->mh, BFI_MC_FLASH, BFI_FLASH_H2I_READ_REQ,
4062 bfa_ioc_portid(flash->ioc));
4063 bfa_alen_set(&msg->alen, len, flash->dbuf_pa);
4064 bfa_ioc_mbox_queue(flash->ioc, &flash->mb);
4065}
4066
4067/*
4068 * Send flash erase request.
4069 *
4070 * @param[in] cbarg - callback argument
4071 */
4072static void
4073bfa_flash_erase_send(void *cbarg)
4074{
4075 struct bfa_flash_s *flash = cbarg;
4076 struct bfi_flash_erase_req_s *msg =
4077 (struct bfi_flash_erase_req_s *) flash->mb.msg;
4078
4079 msg->type = be32_to_cpu(flash->type);
4080 msg->instance = flash->instance;
4081 bfi_h2i_set(msg->mh, BFI_MC_FLASH, BFI_FLASH_H2I_ERASE_REQ,
4082 bfa_ioc_portid(flash->ioc));
4083 bfa_ioc_mbox_queue(flash->ioc, &flash->mb);
4084}
4085
4086/*
4087 * Process flash response messages upon receiving interrupts.
4088 *
4089 * @param[in] flasharg - flash structure
4090 * @param[in] msg - message structure
4091 */
4092static void
4093bfa_flash_intr(void *flasharg, struct bfi_mbmsg_s *msg)
4094{
4095 struct bfa_flash_s *flash = flasharg;
4096 u32 status;
4097
4098 union {
4099 struct bfi_flash_query_rsp_s *query;
4100 struct bfi_flash_erase_rsp_s *erase;
4101 struct bfi_flash_write_rsp_s *write;
4102 struct bfi_flash_read_rsp_s *read;
Krishna Gudipati7826f302011-07-20 16:59:13 -07004103 struct bfi_flash_event_s *event;
Krishna Gudipati5a54b1d2011-06-24 20:27:13 -07004104 struct bfi_mbmsg_s *msg;
4105 } m;
4106
4107 m.msg = msg;
4108 bfa_trc(flash, msg->mh.msg_id);
4109
4110 if (!flash->op_busy && msg->mh.msg_id != BFI_FLASH_I2H_EVENT) {
4111 /* receiving response after ioc failure */
4112 bfa_trc(flash, 0x9999);
4113 return;
4114 }
4115
4116 switch (msg->mh.msg_id) {
4117 case BFI_FLASH_I2H_QUERY_RSP:
4118 status = be32_to_cpu(m.query->status);
4119 bfa_trc(flash, status);
4120 if (status == BFA_STATUS_OK) {
4121 u32 i;
4122 struct bfa_flash_attr_s *attr, *f;
4123
4124 attr = (struct bfa_flash_attr_s *) flash->ubuf;
4125 f = (struct bfa_flash_attr_s *) flash->dbuf_kva;
4126 attr->status = be32_to_cpu(f->status);
4127 attr->npart = be32_to_cpu(f->npart);
4128 bfa_trc(flash, attr->status);
4129 bfa_trc(flash, attr->npart);
4130 for (i = 0; i < attr->npart; i++) {
4131 attr->part[i].part_type =
4132 be32_to_cpu(f->part[i].part_type);
4133 attr->part[i].part_instance =
4134 be32_to_cpu(f->part[i].part_instance);
4135 attr->part[i].part_off =
4136 be32_to_cpu(f->part[i].part_off);
4137 attr->part[i].part_size =
4138 be32_to_cpu(f->part[i].part_size);
4139 attr->part[i].part_len =
4140 be32_to_cpu(f->part[i].part_len);
4141 attr->part[i].part_status =
4142 be32_to_cpu(f->part[i].part_status);
4143 }
4144 }
4145 flash->status = status;
4146 bfa_flash_cb(flash);
4147 break;
4148 case BFI_FLASH_I2H_ERASE_RSP:
4149 status = be32_to_cpu(m.erase->status);
4150 bfa_trc(flash, status);
4151 flash->status = status;
4152 bfa_flash_cb(flash);
4153 break;
4154 case BFI_FLASH_I2H_WRITE_RSP:
4155 status = be32_to_cpu(m.write->status);
4156 bfa_trc(flash, status);
4157 if (status != BFA_STATUS_OK || flash->residue == 0) {
4158 flash->status = status;
4159 bfa_flash_cb(flash);
4160 } else {
4161 bfa_trc(flash, flash->offset);
4162 bfa_flash_write_send(flash);
4163 }
4164 break;
4165 case BFI_FLASH_I2H_READ_RSP:
4166 status = be32_to_cpu(m.read->status);
4167 bfa_trc(flash, status);
4168 if (status != BFA_STATUS_OK) {
4169 flash->status = status;
4170 bfa_flash_cb(flash);
4171 } else {
4172 u32 len = be32_to_cpu(m.read->length);
4173 bfa_trc(flash, flash->offset);
4174 bfa_trc(flash, len);
4175 memcpy(flash->ubuf + flash->offset,
4176 flash->dbuf_kva, len);
4177 flash->residue -= len;
4178 flash->offset += len;
4179 if (flash->residue == 0) {
4180 flash->status = status;
4181 bfa_flash_cb(flash);
4182 } else
4183 bfa_flash_read_send(flash);
4184 }
4185 break;
4186 case BFI_FLASH_I2H_BOOT_VER_RSP:
Krishna Gudipati7826f302011-07-20 16:59:13 -07004187 break;
Krishna Gudipati5a54b1d2011-06-24 20:27:13 -07004188 case BFI_FLASH_I2H_EVENT:
Krishna Gudipati7826f302011-07-20 16:59:13 -07004189 status = be32_to_cpu(m.event->status);
4190 bfa_trc(flash, status);
4191 if (status == BFA_STATUS_BAD_FWCFG)
4192 bfa_ioc_aen_post(flash->ioc, BFA_IOC_AEN_FWCFG_ERROR);
4193 else if (status == BFA_STATUS_INVALID_VENDOR) {
4194 u32 param;
4195 param = be32_to_cpu(m.event->param);
4196 bfa_trc(flash, param);
4197 bfa_ioc_aen_post(flash->ioc,
4198 BFA_IOC_AEN_INVALID_VENDOR);
4199 }
Krishna Gudipati5a54b1d2011-06-24 20:27:13 -07004200 break;
4201
4202 default:
4203 WARN_ON(1);
4204 }
4205}
4206
4207/*
4208 * Flash memory info API.
4209 *
4210 * @param[in] mincfg - minimal cfg variable
4211 */
4212u32
4213bfa_flash_meminfo(bfa_boolean_t mincfg)
4214{
4215 /* min driver doesn't need flash */
4216 if (mincfg)
4217 return 0;
4218 return BFA_ROUNDUP(BFA_FLASH_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
4219}
4220
4221/*
4222 * Flash attach API.
4223 *
4224 * @param[in] flash - flash structure
4225 * @param[in] ioc - ioc structure
4226 * @param[in] dev - device structure
4227 * @param[in] trcmod - trace module
4228 * @param[in] logmod - log module
4229 */
4230void
4231bfa_flash_attach(struct bfa_flash_s *flash, struct bfa_ioc_s *ioc, void *dev,
4232 struct bfa_trc_mod_s *trcmod, bfa_boolean_t mincfg)
4233{
4234 flash->ioc = ioc;
4235 flash->trcmod = trcmod;
4236 flash->cbfn = NULL;
4237 flash->cbarg = NULL;
4238 flash->op_busy = 0;
4239
4240 bfa_ioc_mbox_regisr(flash->ioc, BFI_MC_FLASH, bfa_flash_intr, flash);
4241 bfa_q_qe_init(&flash->ioc_notify);
4242 bfa_ioc_notify_init(&flash->ioc_notify, bfa_flash_notify, flash);
4243 list_add_tail(&flash->ioc_notify.qe, &flash->ioc->notify_q);
4244
4245 /* min driver doesn't need flash */
4246 if (mincfg) {
4247 flash->dbuf_kva = NULL;
4248 flash->dbuf_pa = 0;
4249 }
4250}
4251
4252/*
4253 * Claim memory for flash
4254 *
4255 * @param[in] flash - flash structure
4256 * @param[in] dm_kva - pointer to virtual memory address
4257 * @param[in] dm_pa - physical memory address
4258 * @param[in] mincfg - minimal cfg variable
4259 */
4260void
4261bfa_flash_memclaim(struct bfa_flash_s *flash, u8 *dm_kva, u64 dm_pa,
4262 bfa_boolean_t mincfg)
4263{
4264 if (mincfg)
4265 return;
4266
4267 flash->dbuf_kva = dm_kva;
4268 flash->dbuf_pa = dm_pa;
4269 memset(flash->dbuf_kva, 0, BFA_FLASH_DMA_BUF_SZ);
4270 dm_kva += BFA_ROUNDUP(BFA_FLASH_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
4271 dm_pa += BFA_ROUNDUP(BFA_FLASH_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
4272}
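/*
 * Illustrative sketch only (excluded from the build): the usual flash module
 * bring-up order - size the DMA area with bfa_flash_meminfo(), attach, then
 * claim the memory. dma_kva/dma_pa are assumed to describe a DMA-coherent
 * buffer of at least bfa_flash_meminfo(mincfg) bytes allocated by the caller;
 * everything else is the API defined above.
 */
#if 0
static void
example_flash_setup(struct bfa_flash_s *flash, struct bfa_ioc_s *ioc,
		    void *dev, struct bfa_trc_mod_s *trcmod,
		    u8 *dma_kva, u64 dma_pa, bfa_boolean_t mincfg)
{
	u32 needed = bfa_flash_meminfo(mincfg);	/* 0 for min-cfg drivers */

	bfa_flash_attach(flash, ioc, dev, trcmod, mincfg);
	if (needed)
		bfa_flash_memclaim(flash, dma_kva, dma_pa, mincfg);
}
#endif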
4273
4274/*
4275 * Get flash attribute.
4276 *
4277 * @param[in] flash - flash structure
4278 * @param[in] attr - flash attribute structure
4279 * @param[in] cbfn - callback function
4280 * @param[in] cbarg - callback argument
4281 *
4282 * Return status.
4283 */
4284bfa_status_t
4285bfa_flash_get_attr(struct bfa_flash_s *flash, struct bfa_flash_attr_s *attr,
4286 bfa_cb_flash_t cbfn, void *cbarg)
4287{
4288 bfa_trc(flash, BFI_FLASH_H2I_QUERY_REQ);
4289
4290 if (!bfa_ioc_is_operational(flash->ioc))
4291 return BFA_STATUS_IOC_NON_OP;
4292
4293 if (flash->op_busy) {
4294 bfa_trc(flash, flash->op_busy);
4295 return BFA_STATUS_DEVBUSY;
4296 }
4297
4298 flash->op_busy = 1;
4299 flash->cbfn = cbfn;
4300 flash->cbarg = cbarg;
4301 flash->ubuf = (u8 *) attr;
4302 bfa_flash_query_send(flash);
4303
4304 return BFA_STATUS_OK;
4305}
4306
4307/*
4308 * Erase flash partition.
4309 *
4310 * @param[in] flash - flash structure
4311 * @param[in] type - flash partition type
4312 * @param[in] instance - flash partition instance
4313 * @param[in] cbfn - callback function
4314 * @param[in] cbarg - callback argument
4315 *
4316 * Return status.
4317 */
4318bfa_status_t
4319bfa_flash_erase_part(struct bfa_flash_s *flash, enum bfa_flash_part_type type,
4320 u8 instance, bfa_cb_flash_t cbfn, void *cbarg)
4321{
4322 bfa_trc(flash, BFI_FLASH_H2I_ERASE_REQ);
4323 bfa_trc(flash, type);
4324 bfa_trc(flash, instance);
4325
4326 if (!bfa_ioc_is_operational(flash->ioc))
4327 return BFA_STATUS_IOC_NON_OP;
4328
4329 if (flash->op_busy) {
4330 bfa_trc(flash, flash->op_busy);
4331 return BFA_STATUS_DEVBUSY;
4332 }
4333
4334 flash->op_busy = 1;
4335 flash->cbfn = cbfn;
4336 flash->cbarg = cbarg;
4337 flash->type = type;
4338 flash->instance = instance;
4339
4340 bfa_flash_erase_send(flash);
Krishna Gudipati7826f302011-07-20 16:59:13 -07004341 bfa_flash_aen_audit_post(flash->ioc, BFA_AUDIT_AEN_FLASH_ERASE,
4342 instance, type);
Krishna Gudipati5a54b1d2011-06-24 20:27:13 -07004343 return BFA_STATUS_OK;
4344}
4345
4346/*
4347 * Update flash partition.
4348 *
4349 * @param[in] flash - flash structure
4350 * @param[in] type - flash partition type
4351 * @param[in] instance - flash partition instance
4352 * @param[in] buf - update data buffer
4353 * @param[in] len - data buffer length
4354 * @param[in] offset - offset relative to the partition starting address
4355 * @param[in] cbfn - callback function
4356 * @param[in] cbarg - callback argument
4357 *
4358 * Return status.
4359 */
4360bfa_status_t
4361bfa_flash_update_part(struct bfa_flash_s *flash, enum bfa_flash_part_type type,
4362 u8 instance, void *buf, u32 len, u32 offset,
4363 bfa_cb_flash_t cbfn, void *cbarg)
4364{
4365 bfa_trc(flash, BFI_FLASH_H2I_WRITE_REQ);
4366 bfa_trc(flash, type);
4367 bfa_trc(flash, instance);
4368 bfa_trc(flash, len);
4369 bfa_trc(flash, offset);
4370
4371 if (!bfa_ioc_is_operational(flash->ioc))
4372 return BFA_STATUS_IOC_NON_OP;
4373
4374 /*
4375	 * 'len' must be on a word (4-byte) boundary
4376	 * 'offset' must be on a sector (16KB) boundary
4377 */
4378 if (!len || (len & 0x03) || (offset & 0x00003FFF))
4379 return BFA_STATUS_FLASH_BAD_LEN;
4380
4381 if (type == BFA_FLASH_PART_MFG)
4382 return BFA_STATUS_EINVAL;
4383
4384 if (flash->op_busy) {
4385 bfa_trc(flash, flash->op_busy);
4386 return BFA_STATUS_DEVBUSY;
4387 }
4388
4389 flash->op_busy = 1;
4390 flash->cbfn = cbfn;
4391 flash->cbarg = cbarg;
4392 flash->type = type;
4393 flash->instance = instance;
4394 flash->residue = len;
4395 flash->offset = 0;
4396 flash->addr_off = offset;
4397 flash->ubuf = buf;
4398
4399 bfa_flash_write_send(flash);
4400 return BFA_STATUS_OK;
4401}
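/*
 * Illustrative sketch only (excluded from the build): validating the
 * alignment rules before calling bfa_flash_update_part(). The wrapper,
 * buffer and partition choice are hypothetical; the constraints mirror the
 * checks inside bfa_flash_update_part() itself (len on a 4-byte boundary,
 * offset on a 16KB sector boundary, MFG partition rejected).
 */
#if 0
static bfa_status_t
example_flash_write(struct bfa_flash_s *flash, enum bfa_flash_part_type type,
		    u8 instance, void *buf, u32 len, u32 offset,
		    bfa_cb_flash_t done_cb, void *done_arg)
{
	if (!len || (len & 0x03) || (offset & 0x00003FFF))
		return BFA_STATUS_FLASH_BAD_LEN;	/* rejected anyway */

	/*
	 * The write is chunked internally (BFA_FLASH_DMA_BUF_SZ per mbox
	 * request); done_cb fires once the last chunk completes.
	 */
	return bfa_flash_update_part(flash, type, instance, buf, len, offset,
				     done_cb, done_arg);
}
#endif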
4402
4403/*
4404 * Read flash partition.
4405 *
4406 * @param[in] flash - flash structure
4407 * @param[in] type - flash partition type
4408 * @param[in] instance - flash partition instance
4409 * @param[in] buf - read data buffer
4410 * @param[in] len - data buffer length
4411 * @param[in] offset - offset relative to the partition starting address
4412 * @param[in] cbfn - callback function
4413 * @param[in] cbarg - callback argument
4414 *
4415 * Return status.
4416 */
4417bfa_status_t
4418bfa_flash_read_part(struct bfa_flash_s *flash, enum bfa_flash_part_type type,
4419 u8 instance, void *buf, u32 len, u32 offset,
4420 bfa_cb_flash_t cbfn, void *cbarg)
4421{
4422 bfa_trc(flash, BFI_FLASH_H2I_READ_REQ);
4423 bfa_trc(flash, type);
4424 bfa_trc(flash, instance);
4425 bfa_trc(flash, len);
4426 bfa_trc(flash, offset);
4427
4428 if (!bfa_ioc_is_operational(flash->ioc))
4429 return BFA_STATUS_IOC_NON_OP;
4430
4431 /*
4432	 * 'len' must be on a word (4-byte) boundary
4433	 * 'offset' must be on a sector (16KB) boundary
4434 */
4435 if (!len || (len & 0x03) || (offset & 0x00003FFF))
4436 return BFA_STATUS_FLASH_BAD_LEN;
4437
4438 if (flash->op_busy) {
4439 bfa_trc(flash, flash->op_busy);
4440 return BFA_STATUS_DEVBUSY;
4441 }
4442
4443 flash->op_busy = 1;
4444 flash->cbfn = cbfn;
4445 flash->cbarg = cbarg;
4446 flash->type = type;
4447 flash->instance = instance;
4448 flash->residue = len;
4449 flash->offset = 0;
4450 flash->addr_off = offset;
4451 flash->ubuf = buf;
4452 bfa_flash_read_send(flash);
4453
4454 return BFA_STATUS_OK;
4455}
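/*
 * Illustrative sketch only (excluded from the build): a chunked partition
 * read with bfa_flash_read_part(). The wrapper and its arguments are
 * hypothetical; as with writes, the transfer is split into
 * BFA_FLASH_DMA_BUF_SZ pieces and done_cb runs when the last piece has been
 * copied into buf.
 */
#if 0
static bfa_status_t
example_flash_read(struct bfa_flash_s *flash, enum bfa_flash_part_type type,
		   u8 instance, void *buf, u32 len, u32 offset,
		   bfa_cb_flash_t done_cb, void *done_arg)
{
	if (!len || (len & 0x03) || (offset & 0x00003FFF))
		return BFA_STATUS_FLASH_BAD_LEN;	/* same rule as writes */

	return bfa_flash_read_part(flash, type, instance, buf, len, offset,
				   done_cb, done_arg);
}
#endif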
Krishna Gudipati3d7fc662011-06-24 20:28:17 -07004456
4457/*
4458 * DIAG module specific
4459 */
4460
4461#define BFA_DIAG_MEMTEST_TOV 50000 /* memtest timeout in msec */
Krishna Gudipatibd5a0262012-03-13 17:41:02 -07004462#define CT2_BFA_DIAG_MEMTEST_TOV (9*30*1000) /* 4.5 min */
Krishna Gudipati3d7fc662011-06-24 20:28:17 -07004463
4464/* IOC event handler */
4465static void
4466bfa_diag_notify(void *diag_arg, enum bfa_ioc_event_e event)
4467{
4468 struct bfa_diag_s *diag = diag_arg;
4469
4470 bfa_trc(diag, event);
4471 bfa_trc(diag, diag->block);
4472 bfa_trc(diag, diag->fwping.lock);
4473 bfa_trc(diag, diag->tsensor.lock);
4474
4475 switch (event) {
4476 case BFA_IOC_E_DISABLED:
4477 case BFA_IOC_E_FAILED:
4478 if (diag->fwping.lock) {
4479 diag->fwping.status = BFA_STATUS_IOC_FAILURE;
4480 diag->fwping.cbfn(diag->fwping.cbarg,
4481 diag->fwping.status);
4482 diag->fwping.lock = 0;
4483 }
4484
4485 if (diag->tsensor.lock) {
4486 diag->tsensor.status = BFA_STATUS_IOC_FAILURE;
4487 diag->tsensor.cbfn(diag->tsensor.cbarg,
4488 diag->tsensor.status);
4489 diag->tsensor.lock = 0;
4490 }
4491
4492 if (diag->block) {
4493 if (diag->timer_active) {
4494 bfa_timer_stop(&diag->timer);
4495 diag->timer_active = 0;
4496 }
4497
4498 diag->status = BFA_STATUS_IOC_FAILURE;
4499 diag->cbfn(diag->cbarg, diag->status);
4500 diag->block = 0;
4501 }
4502 break;
4503
4504 default:
4505 break;
4506 }
4507}
4508
4509static void
4510bfa_diag_memtest_done(void *cbarg)
4511{
4512 struct bfa_diag_s *diag = cbarg;
4513 struct bfa_ioc_s *ioc = diag->ioc;
4514 struct bfa_diag_memtest_result *res = diag->result;
4515 u32 loff = BFI_BOOT_MEMTEST_RES_ADDR;
4516 u32 pgnum, pgoff, i;
4517
4518 pgnum = PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, loff);
4519 pgoff = PSS_SMEM_PGOFF(loff);
4520
4521 writel(pgnum, ioc->ioc_regs.host_page_num_fn);
4522
4523 for (i = 0; i < (sizeof(struct bfa_diag_memtest_result) /
4524 sizeof(u32)); i++) {
4525 /* read test result from smem */
4526 *((u32 *) res + i) =
4527 bfa_mem_read(ioc->ioc_regs.smem_page_start, loff);
4528 loff += sizeof(u32);
4529 }
4530
4531 /* Reset IOC fwstates to BFI_IOC_UNINIT */
4532 bfa_ioc_reset_fwstate(ioc);
4533
4534 res->status = swab32(res->status);
4535 bfa_trc(diag, res->status);
4536
4537 if (res->status == BFI_BOOT_MEMTEST_RES_SIG)
4538 diag->status = BFA_STATUS_OK;
4539 else {
4540 diag->status = BFA_STATUS_MEMTEST_FAILED;
4541 res->addr = swab32(res->addr);
4542 res->exp = swab32(res->exp);
4543 res->act = swab32(res->act);
4544 res->err_status = swab32(res->err_status);
4545 res->err_status1 = swab32(res->err_status1);
4546 res->err_addr = swab32(res->err_addr);
4547 bfa_trc(diag, res->addr);
4548 bfa_trc(diag, res->exp);
4549 bfa_trc(diag, res->act);
4550 bfa_trc(diag, res->err_status);
4551 bfa_trc(diag, res->err_status1);
4552 bfa_trc(diag, res->err_addr);
4553 }
4554 diag->timer_active = 0;
4555 diag->cbfn(diag->cbarg, diag->status);
4556 diag->block = 0;
4557}
4558
4559/*
4560 * Firmware ping
4561 */
4562
4563/*
4564 * Perform DMA test directly
4565 */
4566static void
4567diag_fwping_send(struct bfa_diag_s *diag)
4568{
4569 struct bfi_diag_fwping_req_s *fwping_req;
4570 u32 i;
4571
4572 bfa_trc(diag, diag->fwping.dbuf_pa);
4573
4574 /* fill DMA area with pattern */
4575 for (i = 0; i < (BFI_DIAG_DMA_BUF_SZ >> 2); i++)
4576 *((u32 *)diag->fwping.dbuf_kva + i) = diag->fwping.data;
4577
4578 /* Fill mbox msg */
4579 fwping_req = (struct bfi_diag_fwping_req_s *)diag->fwping.mbcmd.msg;
4580
4581 /* Setup SG list */
4582 bfa_alen_set(&fwping_req->alen, BFI_DIAG_DMA_BUF_SZ,
4583 diag->fwping.dbuf_pa);
4584 /* Set up dma count */
4585 fwping_req->count = cpu_to_be32(diag->fwping.count);
4586 /* Set up data pattern */
4587 fwping_req->data = diag->fwping.data;
4588
4589 /* build host command */
4590 bfi_h2i_set(fwping_req->mh, BFI_MC_DIAG, BFI_DIAG_H2I_FWPING,
4591 bfa_ioc_portid(diag->ioc));
4592
4593 /* send mbox cmd */
4594 bfa_ioc_mbox_queue(diag->ioc, &diag->fwping.mbcmd);
4595}
4596
4597static void
4598diag_fwping_comp(struct bfa_diag_s *diag,
4599 struct bfi_diag_fwping_rsp_s *diag_rsp)
4600{
4601 u32 rsp_data = diag_rsp->data;
4602 u8 rsp_dma_status = diag_rsp->dma_status;
4603
4604 bfa_trc(diag, rsp_data);
4605 bfa_trc(diag, rsp_dma_status);
4606
4607 if (rsp_dma_status == BFA_STATUS_OK) {
4608 u32 i, pat;
4609 pat = (diag->fwping.count & 0x1) ? ~(diag->fwping.data) :
4610 diag->fwping.data;
4611 /* Check mbox data */
4612 if (diag->fwping.data != rsp_data) {
4613 bfa_trc(diag, rsp_data);
4614 diag->fwping.result->dmastatus =
4615 BFA_STATUS_DATACORRUPTED;
4616 diag->fwping.status = BFA_STATUS_DATACORRUPTED;
4617 diag->fwping.cbfn(diag->fwping.cbarg,
4618 diag->fwping.status);
4619 diag->fwping.lock = 0;
4620 return;
4621 }
4622 /* Check dma pattern */
4623 for (i = 0; i < (BFI_DIAG_DMA_BUF_SZ >> 2); i++) {
4624 if (*((u32 *)diag->fwping.dbuf_kva + i) != pat) {
4625 bfa_trc(diag, i);
4626 bfa_trc(diag, pat);
4627 bfa_trc(diag,
4628 *((u32 *)diag->fwping.dbuf_kva + i));
4629 diag->fwping.result->dmastatus =
4630 BFA_STATUS_DATACORRUPTED;
4631 diag->fwping.status = BFA_STATUS_DATACORRUPTED;
4632 diag->fwping.cbfn(diag->fwping.cbarg,
4633 diag->fwping.status);
4634 diag->fwping.lock = 0;
4635 return;
4636 }
4637 }
4638 diag->fwping.result->dmastatus = BFA_STATUS_OK;
4639 diag->fwping.status = BFA_STATUS_OK;
4640 diag->fwping.cbfn(diag->fwping.cbarg, diag->fwping.status);
4641 diag->fwping.lock = 0;
4642 } else {
4643 diag->fwping.status = BFA_STATUS_HDMA_FAILED;
4644 diag->fwping.cbfn(diag->fwping.cbarg, diag->fwping.status);
4645 diag->fwping.lock = 0;
4646 }
4647}
4648
4649/*
4650 * Temperature Sensor
4651 */
4652
4653static void
4654diag_tempsensor_send(struct bfa_diag_s *diag)
4655{
4656 struct bfi_diag_ts_req_s *msg;
4657
4658 msg = (struct bfi_diag_ts_req_s *)diag->tsensor.mbcmd.msg;
4659 bfa_trc(diag, msg->temp);
4660 /* build host command */
4661 bfi_h2i_set(msg->mh, BFI_MC_DIAG, BFI_DIAG_H2I_TEMPSENSOR,
4662 bfa_ioc_portid(diag->ioc));
4663 /* send mbox cmd */
4664 bfa_ioc_mbox_queue(diag->ioc, &diag->tsensor.mbcmd);
4665}
4666
4667static void
4668diag_tempsensor_comp(struct bfa_diag_s *diag, bfi_diag_ts_rsp_t *rsp)
4669{
4670 if (!diag->tsensor.lock) {
4671 /* receiving response after ioc failure */
4672 bfa_trc(diag, diag->tsensor.lock);
4673 return;
4674 }
4675
4676 /*
4677	 * The ASIC junction tempsensor is a register read operation,
4678	 * so it always returns OK.
4679 */
4680 diag->tsensor.temp->temp = be16_to_cpu(rsp->temp);
4681 diag->tsensor.temp->ts_junc = rsp->ts_junc;
4682 diag->tsensor.temp->ts_brd = rsp->ts_brd;
4683 diag->tsensor.temp->status = BFA_STATUS_OK;
4684
4685 if (rsp->ts_brd) {
4686 if (rsp->status == BFA_STATUS_OK) {
4687 diag->tsensor.temp->brd_temp =
4688 be16_to_cpu(rsp->brd_temp);
4689 } else {
4690 bfa_trc(diag, rsp->status);
4691 diag->tsensor.temp->brd_temp = 0;
4692 diag->tsensor.temp->status = BFA_STATUS_DEVBUSY;
4693 }
4694 }
4695 bfa_trc(diag, rsp->ts_junc);
4696 bfa_trc(diag, rsp->temp);
4697 bfa_trc(diag, rsp->ts_brd);
4698 bfa_trc(diag, rsp->brd_temp);
4699 diag->tsensor.cbfn(diag->tsensor.cbarg, diag->tsensor.status);
4700 diag->tsensor.lock = 0;
4701}
4702
4703/*
4704 * LED Test command
4705 */
4706static void
4707diag_ledtest_send(struct bfa_diag_s *diag, struct bfa_diag_ledtest_s *ledtest)
4708{
4709 struct bfi_diag_ledtest_req_s *msg;
4710
4711 msg = (struct bfi_diag_ledtest_req_s *)diag->ledtest.mbcmd.msg;
4712 /* build host command */
4713 bfi_h2i_set(msg->mh, BFI_MC_DIAG, BFI_DIAG_H2I_LEDTEST,
4714 bfa_ioc_portid(diag->ioc));
4715
4716 /*
4717	 * convert the freq from N blinks per 10 sec to the
4718	 * crossbow ontime value. We do it here because division is needed.
4719 */
4720 if (ledtest->freq)
4721 ledtest->freq = 500 / ledtest->freq;
4722
4723 if (ledtest->freq == 0)
4724 ledtest->freq = 1;
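	/*
	 * Illustrative arithmetic (not taken from the original source): a
	 * request of 10 blinks per 10 sec maps to 500 / 10 = 50 ontime
	 * units; 0 or anything above 500 clamps to the minimum value of 1.
	 */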
4725
4726 bfa_trc(diag, ledtest->freq);
4727 /* mcpy(&ledtest_req->req, ledtest, sizeof(bfa_diag_ledtest_t)); */
4728 msg->cmd = (u8) ledtest->cmd;
4729 msg->color = (u8) ledtest->color;
4730 msg->portid = bfa_ioc_portid(diag->ioc);
4731 msg->led = ledtest->led;
4732 msg->freq = cpu_to_be16(ledtest->freq);
4733
4734 /* send mbox cmd */
4735 bfa_ioc_mbox_queue(diag->ioc, &diag->ledtest.mbcmd);
4736}
4737
4738static void
4739diag_ledtest_comp(struct bfa_diag_s *diag, struct bfi_diag_ledtest_rsp_s *msg)
4740{
4741 bfa_trc(diag, diag->ledtest.lock);
4742 diag->ledtest.lock = BFA_FALSE;
4743 /* no bfa_cb_queue is needed because driver is not waiting */
4744}
4745
4746/*
4747 * Port beaconing
4748 */
4749static void
4750diag_portbeacon_send(struct bfa_diag_s *diag, bfa_boolean_t beacon, u32 sec)
4751{
4752 struct bfi_diag_portbeacon_req_s *msg;
4753
4754 msg = (struct bfi_diag_portbeacon_req_s *)diag->beacon.mbcmd.msg;
4755 /* build host command */
4756 bfi_h2i_set(msg->mh, BFI_MC_DIAG, BFI_DIAG_H2I_PORTBEACON,
4757 bfa_ioc_portid(diag->ioc));
4758 msg->beacon = beacon;
4759 msg->period = cpu_to_be32(sec);
4760 /* send mbox cmd */
4761 bfa_ioc_mbox_queue(diag->ioc, &diag->beacon.mbcmd);
4762}
4763
4764static void
4765diag_portbeacon_comp(struct bfa_diag_s *diag)
4766{
4767 bfa_trc(diag, diag->beacon.state);
4768 diag->beacon.state = BFA_FALSE;
4769 if (diag->cbfn_beacon)
4770 diag->cbfn_beacon(diag->dev, BFA_FALSE, diag->beacon.link_e2e);
4771}
4772
4773/*
4774 * Diag hmbox handler
4775 */
4776void
4777bfa_diag_intr(void *diagarg, struct bfi_mbmsg_s *msg)
4778{
4779 struct bfa_diag_s *diag = diagarg;
4780
4781 switch (msg->mh.msg_id) {
4782 case BFI_DIAG_I2H_PORTBEACON:
4783 diag_portbeacon_comp(diag);
4784 break;
4785 case BFI_DIAG_I2H_FWPING:
4786 diag_fwping_comp(diag, (struct bfi_diag_fwping_rsp_s *) msg);
4787 break;
4788 case BFI_DIAG_I2H_TEMPSENSOR:
4789 diag_tempsensor_comp(diag, (bfi_diag_ts_rsp_t *) msg);
4790 break;
4791 case BFI_DIAG_I2H_LEDTEST:
4792 diag_ledtest_comp(diag, (struct bfi_diag_ledtest_rsp_s *) msg);
4793 break;
4794 default:
4795 bfa_trc(diag, msg->mh.msg_id);
4796 WARN_ON(1);
4797 }
4798}
4799
4800/*
4801 * Gen RAM Test
4802 *
4803 * @param[in] *diag - diag data struct
4804 * @param[in] *memtest - mem test params input from upper layer,
4805 * @param[in] pattern - mem test pattern
4806 * @param[in] *result - mem test result
4807 * @param[in]	cbfn - mem test callback function
4808 * @param[in]	cbarg - callback function arg
4809 *
4810 * @param[out]
4811 */
4812bfa_status_t
4813bfa_diag_memtest(struct bfa_diag_s *diag, struct bfa_diag_memtest_s *memtest,
4814 u32 pattern, struct bfa_diag_memtest_result *result,
4815 bfa_cb_diag_t cbfn, void *cbarg)
4816{
4817	u32 memtest_tov;
4818
4819	bfa_trc(diag, pattern);
4820
4821 if (!bfa_ioc_adapter_is_disabled(diag->ioc))
4822 return BFA_STATUS_ADAPTER_ENABLED;
4823
4824 /* check to see if there is another destructive diag cmd running */
4825 if (diag->block) {
4826 bfa_trc(diag, diag->block);
4827 return BFA_STATUS_DEVBUSY;
4828 } else
4829 diag->block = 1;
4830
4831 diag->result = result;
4832 diag->cbfn = cbfn;
4833 diag->cbarg = cbarg;
4834
4835 /* download memtest code and take LPU0 out of reset */
4836 bfa_ioc_boot(diag->ioc, BFI_FWBOOT_TYPE_MEMTEST, BFI_FWBOOT_ENV_OS);
4837
4838	memtest_tov = (bfa_ioc_asic_gen(diag->ioc) == BFI_ASIC_GEN_CT2) ?
4839		       CT2_BFA_DIAG_MEMTEST_TOV : BFA_DIAG_MEMTEST_TOV;
4840	bfa_timer_begin(diag->ioc->timer_mod, &diag->timer,
4841			bfa_diag_memtest_done, diag, memtest_tov);
4842	diag->timer_active = 1;
4843 return BFA_STATUS_OK;
4844}
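/*
 * The memtest path is destructive: it requires the adapter to be disabled,
 * takes the shared diag->block flag, boots the dedicated
 * BFI_FWBOOT_TYPE_MEMTEST image and completes from the timer handler
 * (bfa_diag_memtest_done) after the CT2-specific or default timeout.
 */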
4845
4846/*
4847 * DIAG firmware ping command
4848 *
4849 * @param[in] *diag - diag data struct
4850 * @param[in] cnt - dma loop count for testing PCIE
4851 * @param[in] data - data pattern to pass in fw
4852 * @param[in]	*result - pointer to bfa_diag_results_fwping data struct
4853 * @param[in]	cbfn - callback function
4854 * @param[in]	*cbarg - callback function arg
4855 *
4856 * @param[out]
4857 */
4858bfa_status_t
4859bfa_diag_fwping(struct bfa_diag_s *diag, u32 cnt, u32 data,
4860 struct bfa_diag_results_fwping *result, bfa_cb_diag_t cbfn,
4861 void *cbarg)
4862{
4863 bfa_trc(diag, cnt);
4864 bfa_trc(diag, data);
4865
4866 if (!bfa_ioc_is_operational(diag->ioc))
4867 return BFA_STATUS_IOC_NON_OP;
4868
4869 if (bfa_asic_id_ct2(bfa_ioc_devid((diag->ioc))) &&
4870 ((diag->ioc)->clscode == BFI_PCIFN_CLASS_ETH))
4871 return BFA_STATUS_CMD_NOTSUPP;
4872
4873 /* check to see if there is another destructive diag cmd running */
4874 if (diag->block || diag->fwping.lock) {
4875 bfa_trc(diag, diag->block);
4876 bfa_trc(diag, diag->fwping.lock);
4877 return BFA_STATUS_DEVBUSY;
4878 }
4879
4880 /* Initialization */
4881 diag->fwping.lock = 1;
4882 diag->fwping.cbfn = cbfn;
4883 diag->fwping.cbarg = cbarg;
4884 diag->fwping.result = result;
4885 diag->fwping.data = data;
4886 diag->fwping.count = cnt;
4887
4888 /* Init test results */
4889 diag->fwping.result->data = 0;
4890 diag->fwping.result->status = BFA_STATUS_OK;
4891
4892 /* kick off the first ping */
4893 diag_fwping_send(diag);
4894 return BFA_STATUS_OK;
4895}
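/*
 * Usage sketch (illustrative only; the callback and variable names below are
 * assumptions, not part of this driver):
 *
 *	static void drv_fwping_done(void *cbarg, bfa_status_t status)
 *	{
 *		// status and the result struct are filled by diag_fwping_comp()
 *	}
 *
 *	bfa_diag_fwping(diag, 10, 0xdeadbeef, &fwping_result,
 *			drv_fwping_done, drv);
 */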
4896
4897/*
4898 * Read Temperature Sensor
4899 *
4900 * @param[in] *diag - diag data struct
4901 * @param[in]	*result - pointer to bfa_diag_results_tempsensor_s data struct
4902 * @param[in]	cbfn - callback function
4903 * @param[in]	*cbarg - callback function arg
4904 *
4905 * @param[out]
4906 */
4907bfa_status_t
4908bfa_diag_tsensor_query(struct bfa_diag_s *diag,
4909 struct bfa_diag_results_tempsensor_s *result,
4910 bfa_cb_diag_t cbfn, void *cbarg)
4911{
4912 /* check to see if there is a destructive diag cmd running */
4913 if (diag->block || diag->tsensor.lock) {
4914 bfa_trc(diag, diag->block);
4915 bfa_trc(diag, diag->tsensor.lock);
4916 return BFA_STATUS_DEVBUSY;
4917 }
4918
4919 if (!bfa_ioc_is_operational(diag->ioc))
4920 return BFA_STATUS_IOC_NON_OP;
4921
4922 /* Init diag mod params */
4923 diag->tsensor.lock = 1;
4924 diag->tsensor.temp = result;
4925 diag->tsensor.cbfn = cbfn;
4926 diag->tsensor.cbarg = cbarg;
4927
4928 /* Send msg to fw */
4929 diag_tempsensor_send(diag);
4930
4931 return BFA_STATUS_OK;
4932}
4933
4934/*
4935 * LED Test command
4936 *
4937 * @param[in] *diag - diag data struct
4938 * @param[in] *ledtest - pt to ledtest data structure
4939 *
4940 * @param[out]
4941 */
4942bfa_status_t
4943bfa_diag_ledtest(struct bfa_diag_s *diag, struct bfa_diag_ledtest_s *ledtest)
4944{
4945 bfa_trc(diag, ledtest->cmd);
4946
4947 if (!bfa_ioc_is_operational(diag->ioc))
4948 return BFA_STATUS_IOC_NON_OP;
4949
4950 if (diag->beacon.state)
4951 return BFA_STATUS_BEACON_ON;
4952
4953 if (diag->ledtest.lock)
4954 return BFA_STATUS_LEDTEST_OP;
4955
4956 /* Send msg to fw */
4957 diag->ledtest.lock = BFA_TRUE;
4958 diag_ledtest_send(diag, ledtest);
4959
4960 return BFA_STATUS_OK;
4961}
4962
4963/*
4964 * Port beaconing command
4965 *
4966 * @param[in] *diag - diag data struct
4967 * @param[in] beacon - port beaconing 1:ON 0:OFF
4968 * @param[in] link_e2e_beacon - link beaconing 1:ON 0:OFF
4969 * @param[in] sec - beaconing duration in seconds
4970 *
4971 * @param[out]
4972 */
4973bfa_status_t
4974bfa_diag_beacon_port(struct bfa_diag_s *diag, bfa_boolean_t beacon,
4975 bfa_boolean_t link_e2e_beacon, uint32_t sec)
4976{
4977 bfa_trc(diag, beacon);
4978 bfa_trc(diag, link_e2e_beacon);
4979 bfa_trc(diag, sec);
4980
4981 if (!bfa_ioc_is_operational(diag->ioc))
4982 return BFA_STATUS_IOC_NON_OP;
4983
4984 if (diag->ledtest.lock)
4985 return BFA_STATUS_LEDTEST_OP;
4986
4987	if (diag->beacon.state && beacon)	/* beacon already on */
4988 return BFA_STATUS_BEACON_ON;
4989
4990 diag->beacon.state = beacon;
4991 diag->beacon.link_e2e = link_e2e_beacon;
4992 if (diag->cbfn_beacon)
4993 diag->cbfn_beacon(diag->dev, beacon, link_e2e_beacon);
4994
4995 /* Send msg to fw */
4996 diag_portbeacon_send(diag, beacon, sec);
4997
4998 return BFA_STATUS_OK;
4999}
5000
5001/*
5002 * Return DMA memory needed by diag module.
5003 */
5004u32
5005bfa_diag_meminfo(void)
5006{
5007 return BFA_ROUNDUP(BFI_DIAG_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
5008}
5009
5010/*
5011 * Attach virtual and physical memory for Diag.
5012 */
5013void
5014bfa_diag_attach(struct bfa_diag_s *diag, struct bfa_ioc_s *ioc, void *dev,
5015 bfa_cb_diag_beacon_t cbfn_beacon, struct bfa_trc_mod_s *trcmod)
5016{
5017 diag->dev = dev;
5018 diag->ioc = ioc;
5019 diag->trcmod = trcmod;
5020
5021 diag->block = 0;
5022 diag->cbfn = NULL;
5023 diag->cbarg = NULL;
5024 diag->result = NULL;
5025 diag->cbfn_beacon = cbfn_beacon;
5026
5027 bfa_ioc_mbox_regisr(diag->ioc, BFI_MC_DIAG, bfa_diag_intr, diag);
5028 bfa_q_qe_init(&diag->ioc_notify);
5029 bfa_ioc_notify_init(&diag->ioc_notify, bfa_diag_notify, diag);
5030 list_add_tail(&diag->ioc_notify.qe, &diag->ioc->notify_q);
5031}
5032
5033void
5034bfa_diag_memclaim(struct bfa_diag_s *diag, u8 *dm_kva, u64 dm_pa)
5035{
5036 diag->fwping.dbuf_kva = dm_kva;
5037 diag->fwping.dbuf_pa = dm_pa;
5038 memset(diag->fwping.dbuf_kva, 0, BFI_DIAG_DMA_BUF_SZ);
5039}
5040
5041/*
5042 * PHY module specific
5043 */
5044#define BFA_PHY_DMA_BUF_SZ 0x02000 /* 8k dma buffer */
5045#define BFA_PHY_LOCK_STATUS 0x018878 /* phy semaphore status reg */
5046
5047static void
5048bfa_phy_ntoh32(u32 *obuf, u32 *ibuf, int sz)
5049{
5050 int i, m = sz >> 2;
5051
5052 for (i = 0; i < m; i++)
5053 obuf[i] = be32_to_cpu(ibuf[i]);
5054}
5055
5056static bfa_boolean_t
5057bfa_phy_present(struct bfa_phy_s *phy)
5058{
5059 return (phy->ioc->attr->card_type == BFA_MFG_TYPE_LIGHTNING);
5060}
5061
5062static void
5063bfa_phy_notify(void *cbarg, enum bfa_ioc_event_e event)
5064{
5065 struct bfa_phy_s *phy = cbarg;
5066
5067 bfa_trc(phy, event);
5068
5069 switch (event) {
5070 case BFA_IOC_E_DISABLED:
5071 case BFA_IOC_E_FAILED:
5072 if (phy->op_busy) {
5073 phy->status = BFA_STATUS_IOC_FAILURE;
5074 phy->cbfn(phy->cbarg, phy->status);
5075 phy->op_busy = 0;
5076 }
5077 break;
5078
5079 default:
5080 break;
5081 }
5082}
5083
5084/*
5085 * Send phy attribute query request.
5086 *
5087 * @param[in] cbarg - callback argument
5088 */
5089static void
5090bfa_phy_query_send(void *cbarg)
5091{
5092 struct bfa_phy_s *phy = cbarg;
5093 struct bfi_phy_query_req_s *msg =
5094 (struct bfi_phy_query_req_s *) phy->mb.msg;
5095
5096 msg->instance = phy->instance;
5097 bfi_h2i_set(msg->mh, BFI_MC_PHY, BFI_PHY_H2I_QUERY_REQ,
5098 bfa_ioc_portid(phy->ioc));
5099 bfa_alen_set(&msg->alen, sizeof(struct bfa_phy_attr_s), phy->dbuf_pa);
5100 bfa_ioc_mbox_queue(phy->ioc, &phy->mb);
5101}
5102
5103/*
5104 * Send phy write request.
5105 *
5106 * @param[in] cbarg - callback argument
5107 */
5108static void
5109bfa_phy_write_send(void *cbarg)
5110{
5111 struct bfa_phy_s *phy = cbarg;
5112 struct bfi_phy_write_req_s *msg =
5113 (struct bfi_phy_write_req_s *) phy->mb.msg;
5114 u32 len;
5115 u16 *buf, *dbuf;
5116 int i, sz;
5117
5118 msg->instance = phy->instance;
5119 msg->offset = cpu_to_be32(phy->addr_off + phy->offset);
5120 len = (phy->residue < BFA_PHY_DMA_BUF_SZ) ?
5121 phy->residue : BFA_PHY_DMA_BUF_SZ;
5122 msg->length = cpu_to_be32(len);
5123
5124 /* indicate if it's the last msg of the whole write operation */
5125 msg->last = (len == phy->residue) ? 1 : 0;
5126
5127 bfi_h2i_set(msg->mh, BFI_MC_PHY, BFI_PHY_H2I_WRITE_REQ,
5128 bfa_ioc_portid(phy->ioc));
5129 bfa_alen_set(&msg->alen, len, phy->dbuf_pa);
5130
5131 buf = (u16 *) (phy->ubuf + phy->offset);
5132 dbuf = (u16 *)phy->dbuf_kva;
5133 sz = len >> 1;
5134 for (i = 0; i < sz; i++)
5135		dbuf[i] = cpu_to_be16(buf[i]);	/* stage user data for DMA */
5136
5137 bfa_ioc_mbox_queue(phy->ioc, &phy->mb);
5138
5139 phy->residue -= len;
5140 phy->offset += len;
5141}
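/*
 * The write is chunked: each request carries at most BFA_PHY_DMA_BUF_SZ
 * bytes, 'last' marks the final chunk, and the next chunk is issued from
 * bfa_phy_intr() when BFI_PHY_I2H_WRITE_RSP reports success with residue
 * remaining.
 */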
5142
5143/*
5144 * Send phy read request.
5145 *
5146 * @param[in] cbarg - callback argument
5147 */
5148static void
5149bfa_phy_read_send(void *cbarg)
5150{
5151 struct bfa_phy_s *phy = cbarg;
5152 struct bfi_phy_read_req_s *msg =
5153 (struct bfi_phy_read_req_s *) phy->mb.msg;
5154 u32 len;
5155
5156 msg->instance = phy->instance;
5157 msg->offset = cpu_to_be32(phy->addr_off + phy->offset);
5158 len = (phy->residue < BFA_PHY_DMA_BUF_SZ) ?
5159 phy->residue : BFA_PHY_DMA_BUF_SZ;
5160 msg->length = cpu_to_be32(len);
5161 bfi_h2i_set(msg->mh, BFI_MC_PHY, BFI_PHY_H2I_READ_REQ,
5162 bfa_ioc_portid(phy->ioc));
5163 bfa_alen_set(&msg->alen, len, phy->dbuf_pa);
5164 bfa_ioc_mbox_queue(phy->ioc, &phy->mb);
5165}
5166
5167/*
5168 * Send phy stats request.
5169 *
5170 * @param[in] cbarg - callback argument
5171 */
5172static void
5173bfa_phy_stats_send(void *cbarg)
5174{
5175 struct bfa_phy_s *phy = cbarg;
5176 struct bfi_phy_stats_req_s *msg =
5177 (struct bfi_phy_stats_req_s *) phy->mb.msg;
5178
5179 msg->instance = phy->instance;
5180 bfi_h2i_set(msg->mh, BFI_MC_PHY, BFI_PHY_H2I_STATS_REQ,
5181 bfa_ioc_portid(phy->ioc));
5182 bfa_alen_set(&msg->alen, sizeof(struct bfa_phy_stats_s), phy->dbuf_pa);
5183 bfa_ioc_mbox_queue(phy->ioc, &phy->mb);
5184}
5185
5186/*
5187 * Phy memory info API.
5188 *
5189 * @param[in] mincfg - minimal cfg variable
5190 */
5191u32
5192bfa_phy_meminfo(bfa_boolean_t mincfg)
5193{
5194 /* min driver doesn't need phy */
5195 if (mincfg)
5196 return 0;
5197
5198 return BFA_ROUNDUP(BFA_PHY_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
5199}
5200
5201/*
5202 * Phy attach API.
5203 *
5204 * @param[in] phy - phy structure
5205 * @param[in] ioc - ioc structure
5206 * @param[in] dev - device structure
5207 * @param[in] trcmod - trace module
5208 * @param[in]	mincfg - minimal cfg variable
5209 */
5210void
5211bfa_phy_attach(struct bfa_phy_s *phy, struct bfa_ioc_s *ioc, void *dev,
5212 struct bfa_trc_mod_s *trcmod, bfa_boolean_t mincfg)
5213{
5214 phy->ioc = ioc;
5215 phy->trcmod = trcmod;
5216 phy->cbfn = NULL;
5217 phy->cbarg = NULL;
5218 phy->op_busy = 0;
5219
5220 bfa_ioc_mbox_regisr(phy->ioc, BFI_MC_PHY, bfa_phy_intr, phy);
5221 bfa_q_qe_init(&phy->ioc_notify);
5222 bfa_ioc_notify_init(&phy->ioc_notify, bfa_phy_notify, phy);
5223 list_add_tail(&phy->ioc_notify.qe, &phy->ioc->notify_q);
5224
5225 /* min driver doesn't need phy */
5226 if (mincfg) {
5227 phy->dbuf_kva = NULL;
5228 phy->dbuf_pa = 0;
5229 }
5230}
5231
5232/*
5233 * Claim memory for phy
5234 *
5235 * @param[in] phy - phy structure
5236 * @param[in] dm_kva - pointer to virtual memory address
5237 * @param[in] dm_pa - physical memory address
5238 * @param[in] mincfg - minimal cfg variable
5239 */
5240void
5241bfa_phy_memclaim(struct bfa_phy_s *phy, u8 *dm_kva, u64 dm_pa,
5242 bfa_boolean_t mincfg)
5243{
5244 if (mincfg)
5245 return;
5246
5247 phy->dbuf_kva = dm_kva;
5248 phy->dbuf_pa = dm_pa;
5249 memset(phy->dbuf_kva, 0, BFA_PHY_DMA_BUF_SZ);
5250 dm_kva += BFA_ROUNDUP(BFA_PHY_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
5251 dm_pa += BFA_ROUNDUP(BFA_PHY_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
5252}
5253
5254bfa_boolean_t
5255bfa_phy_busy(struct bfa_ioc_s *ioc)
5256{
5257 void __iomem *rb;
5258
5259 rb = bfa_ioc_bar0(ioc);
5260 return readl(rb + BFA_PHY_LOCK_STATUS);
5261}
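/*
 * A non-zero value in the phy semaphore status register means another
 * entity currently holds the phy lock, so the phy is reported busy.
 */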
5262
5263/*
5264 * Get phy attribute.
5265 *
5266 * @param[in] phy - phy structure
5267 * @param[in] attr - phy attribute structure
5268 * @param[in] cbfn - callback function
5269 * @param[in] cbarg - callback argument
5270 *
5271 * Return status.
5272 */
5273bfa_status_t
5274bfa_phy_get_attr(struct bfa_phy_s *phy, u8 instance,
5275 struct bfa_phy_attr_s *attr, bfa_cb_phy_t cbfn, void *cbarg)
5276{
5277 bfa_trc(phy, BFI_PHY_H2I_QUERY_REQ);
5278 bfa_trc(phy, instance);
5279
5280 if (!bfa_phy_present(phy))
5281 return BFA_STATUS_PHY_NOT_PRESENT;
5282
5283 if (!bfa_ioc_is_operational(phy->ioc))
5284 return BFA_STATUS_IOC_NON_OP;
5285
5286 if (phy->op_busy || bfa_phy_busy(phy->ioc)) {
5287 bfa_trc(phy, phy->op_busy);
5288 return BFA_STATUS_DEVBUSY;
5289 }
5290
5291 phy->op_busy = 1;
5292 phy->cbfn = cbfn;
5293 phy->cbarg = cbarg;
5294 phy->instance = instance;
5295 phy->ubuf = (uint8_t *) attr;
5296 bfa_phy_query_send(phy);
5297
5298 return BFA_STATUS_OK;
5299}
5300
5301/*
5302 * Get phy stats.
5303 *
5304 * @param[in] phy - phy structure
5305 * @param[in] instance - phy image instance
5306 * @param[in] stats - pointer to phy stats
5307 * @param[in] cbfn - callback function
5308 * @param[in] cbarg - callback argument
5309 *
5310 * Return status.
5311 */
5312bfa_status_t
5313bfa_phy_get_stats(struct bfa_phy_s *phy, u8 instance,
5314 struct bfa_phy_stats_s *stats,
5315 bfa_cb_phy_t cbfn, void *cbarg)
5316{
5317 bfa_trc(phy, BFI_PHY_H2I_STATS_REQ);
5318 bfa_trc(phy, instance);
5319
5320 if (!bfa_phy_present(phy))
5321 return BFA_STATUS_PHY_NOT_PRESENT;
5322
5323 if (!bfa_ioc_is_operational(phy->ioc))
5324 return BFA_STATUS_IOC_NON_OP;
5325
5326 if (phy->op_busy || bfa_phy_busy(phy->ioc)) {
5327 bfa_trc(phy, phy->op_busy);
5328 return BFA_STATUS_DEVBUSY;
5329 }
5330
5331 phy->op_busy = 1;
5332 phy->cbfn = cbfn;
5333 phy->cbarg = cbarg;
5334 phy->instance = instance;
5335 phy->ubuf = (u8 *) stats;
5336 bfa_phy_stats_send(phy);
5337
5338 return BFA_STATUS_OK;
5339}
5340
5341/*
5342 * Update phy image.
5343 *
5344 * @param[in] phy - phy structure
5345 * @param[in] instance - phy image instance
5346 * @param[in] buf - update data buffer
5347 * @param[in] len - data buffer length
5348 * @param[in] offset - offset relative to starting address
5349 * @param[in] cbfn - callback function
5350 * @param[in] cbarg - callback argument
5351 *
5352 * Return status.
5353 */
5354bfa_status_t
5355bfa_phy_update(struct bfa_phy_s *phy, u8 instance,
5356 void *buf, u32 len, u32 offset,
5357 bfa_cb_phy_t cbfn, void *cbarg)
5358{
5359 bfa_trc(phy, BFI_PHY_H2I_WRITE_REQ);
5360 bfa_trc(phy, instance);
5361 bfa_trc(phy, len);
5362 bfa_trc(phy, offset);
5363
5364 if (!bfa_phy_present(phy))
5365 return BFA_STATUS_PHY_NOT_PRESENT;
5366
5367 if (!bfa_ioc_is_operational(phy->ioc))
5368 return BFA_STATUS_IOC_NON_OP;
5369
5370	/* 'len' must be a multiple of 4 (word aligned) */
5371 if (!len || (len & 0x03))
5372 return BFA_STATUS_FAILED;
5373
5374 if (phy->op_busy || bfa_phy_busy(phy->ioc)) {
5375 bfa_trc(phy, phy->op_busy);
5376 return BFA_STATUS_DEVBUSY;
5377 }
5378
5379 phy->op_busy = 1;
5380 phy->cbfn = cbfn;
5381 phy->cbarg = cbarg;
5382 phy->instance = instance;
5383 phy->residue = len;
5384 phy->offset = 0;
5385 phy->addr_off = offset;
5386 phy->ubuf = buf;
5387
5388 bfa_phy_write_send(phy);
5389 return BFA_STATUS_OK;
5390}
5391
5392/*
5393 * Read phy image.
5394 *
5395 * @param[in] phy - phy structure
5396 * @param[in] instance - phy image instance
5397 * @param[in] buf - read data buffer
5398 * @param[in] len - data buffer length
5399 * @param[in] offset - offset relative to starting address
5400 * @param[in] cbfn - callback function
5401 * @param[in] cbarg - callback argument
5402 *
5403 * Return status.
5404 */
5405bfa_status_t
5406bfa_phy_read(struct bfa_phy_s *phy, u8 instance,
5407 void *buf, u32 len, u32 offset,
5408 bfa_cb_phy_t cbfn, void *cbarg)
5409{
5410 bfa_trc(phy, BFI_PHY_H2I_READ_REQ);
5411 bfa_trc(phy, instance);
5412 bfa_trc(phy, len);
5413 bfa_trc(phy, offset);
5414
5415 if (!bfa_phy_present(phy))
5416 return BFA_STATUS_PHY_NOT_PRESENT;
5417
5418 if (!bfa_ioc_is_operational(phy->ioc))
5419 return BFA_STATUS_IOC_NON_OP;
5420
5421 /* 'len' must be in word (4-byte) boundary */
5422	/* 'len' must be a multiple of 4 (word aligned) */
5423 return BFA_STATUS_FAILED;
5424
5425 if (phy->op_busy || bfa_phy_busy(phy->ioc)) {
5426 bfa_trc(phy, phy->op_busy);
5427 return BFA_STATUS_DEVBUSY;
5428 }
5429
5430 phy->op_busy = 1;
5431 phy->cbfn = cbfn;
5432 phy->cbarg = cbarg;
5433 phy->instance = instance;
5434 phy->residue = len;
5435 phy->offset = 0;
5436 phy->addr_off = offset;
5437 phy->ubuf = buf;
5438 bfa_phy_read_send(phy);
5439
5440 return BFA_STATUS_OK;
5441}
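/*
 * Usage sketch (illustrative only; the buffer, instance and callback names
 * are assumptions, not part of this driver):
 *
 *	static void drv_phy_read_done(void *cbarg, bfa_status_t status)
 *	{
 *		// on BFA_STATUS_OK, 'phy_buf' holds the requested bytes
 *	}
 *
 *	// 'len' must be a multiple of 4, per the check above
 *	bfa_phy_read(phy, 0, phy_buf, 256, 0, drv_phy_read_done, drv);
 */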
5442
5443/*
5444 * Process phy response messages upon receiving interrupts.
5445 *
5446 * @param[in] phyarg - phy structure
5447 * @param[in] msg - message structure
5448 */
5449void
5450bfa_phy_intr(void *phyarg, struct bfi_mbmsg_s *msg)
5451{
5452 struct bfa_phy_s *phy = phyarg;
5453 u32 status;
5454
5455 union {
5456 struct bfi_phy_query_rsp_s *query;
5457 struct bfi_phy_stats_rsp_s *stats;
5458 struct bfi_phy_write_rsp_s *write;
5459 struct bfi_phy_read_rsp_s *read;
5460 struct bfi_mbmsg_s *msg;
5461 } m;
5462
5463 m.msg = msg;
5464 bfa_trc(phy, msg->mh.msg_id);
5465
5466 if (!phy->op_busy) {
5467 /* receiving response after ioc failure */
5468 bfa_trc(phy, 0x9999);
5469 return;
5470 }
5471
5472 switch (msg->mh.msg_id) {
5473 case BFI_PHY_I2H_QUERY_RSP:
5474 status = be32_to_cpu(m.query->status);
5475 bfa_trc(phy, status);
5476
5477 if (status == BFA_STATUS_OK) {
5478 struct bfa_phy_attr_s *attr =
5479 (struct bfa_phy_attr_s *) phy->ubuf;
5480 bfa_phy_ntoh32((u32 *)attr, (u32 *)phy->dbuf_kva,
5481 sizeof(struct bfa_phy_attr_s));
5482 bfa_trc(phy, attr->status);
5483 bfa_trc(phy, attr->length);
5484 }
5485
5486 phy->status = status;
5487 phy->op_busy = 0;
5488 if (phy->cbfn)
5489 phy->cbfn(phy->cbarg, phy->status);
5490 break;
5491 case BFI_PHY_I2H_STATS_RSP:
5492 status = be32_to_cpu(m.stats->status);
5493 bfa_trc(phy, status);
5494
5495 if (status == BFA_STATUS_OK) {
5496 struct bfa_phy_stats_s *stats =
5497 (struct bfa_phy_stats_s *) phy->ubuf;
5498 bfa_phy_ntoh32((u32 *)stats, (u32 *)phy->dbuf_kva,
5499 sizeof(struct bfa_phy_stats_s));
5500 bfa_trc(phy, stats->status);
5501 }
5502
5503 phy->status = status;
5504 phy->op_busy = 0;
5505 if (phy->cbfn)
5506 phy->cbfn(phy->cbarg, phy->status);
5507 break;
5508 case BFI_PHY_I2H_WRITE_RSP:
5509 status = be32_to_cpu(m.write->status);
5510 bfa_trc(phy, status);
5511
5512 if (status != BFA_STATUS_OK || phy->residue == 0) {
5513 phy->status = status;
5514 phy->op_busy = 0;
5515 if (phy->cbfn)
5516 phy->cbfn(phy->cbarg, phy->status);
5517 } else {
5518 bfa_trc(phy, phy->offset);
5519 bfa_phy_write_send(phy);
5520 }
5521 break;
5522 case BFI_PHY_I2H_READ_RSP:
5523 status = be32_to_cpu(m.read->status);
5524 bfa_trc(phy, status);
5525
5526 if (status != BFA_STATUS_OK) {
5527 phy->status = status;
5528 phy->op_busy = 0;
5529 if (phy->cbfn)
5530 phy->cbfn(phy->cbarg, phy->status);
5531 } else {
5532 u32 len = be32_to_cpu(m.read->length);
5533 u16 *buf = (u16 *)(phy->ubuf + phy->offset);
5534 u16 *dbuf = (u16 *)phy->dbuf_kva;
5535 int i, sz = len >> 1;
5536
5537 bfa_trc(phy, phy->offset);
5538 bfa_trc(phy, len);
5539
5540 for (i = 0; i < sz; i++)
5541 buf[i] = be16_to_cpu(dbuf[i]);
5542
5543 phy->residue -= len;
5544 phy->offset += len;
5545
5546 if (phy->residue == 0) {
5547 phy->status = status;
5548 phy->op_busy = 0;
5549 if (phy->cbfn)
5550 phy->cbfn(phy->cbarg, phy->status);
5551 } else
5552 bfa_phy_read_send(phy);
5553 }
5554 break;
5555 default:
5556 WARN_ON(1);
5557 }
5558}
5559
5560/*
5561 * DCONF module specific
5562 */
5563
5564BFA_MODULE(dconf);
5565
5566/*
5567 * DCONF state machine events
5568 */
5569enum bfa_dconf_event {
5570 BFA_DCONF_SM_INIT = 1, /* dconf Init */
5571 BFA_DCONF_SM_FLASH_COMP = 2, /* read/write to flash */
5572 BFA_DCONF_SM_WR = 3, /* binding change, map */
5573 BFA_DCONF_SM_TIMEOUT = 4, /* Start timer */
5574 BFA_DCONF_SM_EXIT = 5, /* exit dconf module */
5575 BFA_DCONF_SM_IOCDISABLE = 6, /* IOC disable event */
5576};
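/*
 * Summary of the handlers below (derived from the code, for readability):
 * INIT moves the module from uninit to flash_read, where the saved config
 * is read back; FLASH_COMP lands it in ready; a WR request arms the
 * BFA_DCONF_UPDATE_TOV timer and enters dirty; the TIMEOUT then drives the
 * flash write through sync (or final_sync on EXIT) and back to ready or
 * uninit; IOCDISABLE parks a dirty config in iocdown_dirty until the next
 * INIT.
 */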
5577
5578/* forward declaration of DCONF state machine */
5579static void bfa_dconf_sm_uninit(struct bfa_dconf_mod_s *dconf,
5580 enum bfa_dconf_event event);
5581static void bfa_dconf_sm_flash_read(struct bfa_dconf_mod_s *dconf,
5582 enum bfa_dconf_event event);
5583static void bfa_dconf_sm_ready(struct bfa_dconf_mod_s *dconf,
5584 enum bfa_dconf_event event);
5585static void bfa_dconf_sm_dirty(struct bfa_dconf_mod_s *dconf,
5586 enum bfa_dconf_event event);
5587static void bfa_dconf_sm_sync(struct bfa_dconf_mod_s *dconf,
5588 enum bfa_dconf_event event);
5589static void bfa_dconf_sm_final_sync(struct bfa_dconf_mod_s *dconf,
5590 enum bfa_dconf_event event);
5591static void bfa_dconf_sm_iocdown_dirty(struct bfa_dconf_mod_s *dconf,
5592 enum bfa_dconf_event event);
5593
5594static void bfa_dconf_cbfn(void *dconf, bfa_status_t status);
5595static void bfa_dconf_timer(void *cbarg);
5596static bfa_status_t bfa_dconf_flash_write(struct bfa_dconf_mod_s *dconf);
5597static void bfa_dconf_init_cb(void *arg, bfa_status_t status);
5598
5599/*
5600 * Beginning state of the dconf module. Waiting for an event to start.
5601 */
5602static void
5603bfa_dconf_sm_uninit(struct bfa_dconf_mod_s *dconf, enum bfa_dconf_event event)
5604{
5605 bfa_status_t bfa_status;
5606 bfa_trc(dconf->bfa, event);
5607
5608 switch (event) {
5609 case BFA_DCONF_SM_INIT:
5610 if (dconf->min_cfg) {
5611 bfa_trc(dconf->bfa, dconf->min_cfg);
5612			bfa_fsm_send_event(&dconf->bfa->iocfc,
5613					IOCFC_E_DCONF_DONE);
5614			return;
5615 }
5616 bfa_sm_set_state(dconf, bfa_dconf_sm_flash_read);
5617		bfa_timer_start(dconf->bfa, &dconf->timer,
5618			bfa_dconf_timer, dconf, BFA_DCONF_UPDATE_TOV);
5619		bfa_status = bfa_flash_read_part(BFA_FLASH(dconf->bfa),
5620 BFA_FLASH_PART_DRV, dconf->instance,
5621 dconf->dconf,
5622 sizeof(struct bfa_dconf_s), 0,
5623 bfa_dconf_init_cb, dconf->bfa);
5624 if (bfa_status != BFA_STATUS_OK) {
5625			bfa_timer_stop(&dconf->timer);
5626			bfa_dconf_init_cb(dconf->bfa, BFA_STATUS_FAILED);
5627 bfa_sm_set_state(dconf, bfa_dconf_sm_uninit);
5628 return;
5629 }
5630 break;
5631 case BFA_DCONF_SM_EXIT:
5632		bfa_fsm_send_event(&dconf->bfa->iocfc, IOCFC_E_DCONF_DONE);
5633	case BFA_DCONF_SM_IOCDISABLE:
5634 case BFA_DCONF_SM_WR:
5635 case BFA_DCONF_SM_FLASH_COMP:
5636 break;
5637 default:
5638 bfa_sm_fault(dconf->bfa, event);
5639 }
5640}
5641
5642/*
5643 * Read flash for dconf entries and make a call back to the driver once done.
5644 */
5645static void
5646bfa_dconf_sm_flash_read(struct bfa_dconf_mod_s *dconf,
5647 enum bfa_dconf_event event)
5648{
5649 bfa_trc(dconf->bfa, event);
5650
5651 switch (event) {
5652 case BFA_DCONF_SM_FLASH_COMP:
5653		bfa_timer_stop(&dconf->timer);
5654		bfa_sm_set_state(dconf, bfa_dconf_sm_ready);
5655 break;
5656 case BFA_DCONF_SM_TIMEOUT:
5657 bfa_sm_set_state(dconf, bfa_dconf_sm_ready);
5658		bfa_fsm_send_event(&dconf->bfa->iocfc, IOCFC_E_IOC_FAILED);
5659		break;
5660 case BFA_DCONF_SM_EXIT:
5661		bfa_timer_stop(&dconf->timer);
5662 bfa_sm_set_state(dconf, bfa_dconf_sm_uninit);
5663 bfa_fsm_send_event(&dconf->bfa->iocfc, IOCFC_E_DCONF_DONE);
5664 break;
5665	case BFA_DCONF_SM_IOCDISABLE:
5666		bfa_timer_stop(&dconf->timer);
5667		bfa_sm_set_state(dconf, bfa_dconf_sm_uninit);
5668 break;
5669 default:
5670 bfa_sm_fault(dconf->bfa, event);
5671 }
5672}
5673
5674/*
5675 * DCONF Module is in ready state. Has completed the initialization.
5676 */
5677static void
5678bfa_dconf_sm_ready(struct bfa_dconf_mod_s *dconf, enum bfa_dconf_event event)
5679{
5680 bfa_trc(dconf->bfa, event);
5681
5682 switch (event) {
5683 case BFA_DCONF_SM_WR:
5684 bfa_timer_start(dconf->bfa, &dconf->timer,
5685 bfa_dconf_timer, dconf, BFA_DCONF_UPDATE_TOV);
5686 bfa_sm_set_state(dconf, bfa_dconf_sm_dirty);
5687 break;
5688 case BFA_DCONF_SM_EXIT:
5689		bfa_sm_set_state(dconf, bfa_dconf_sm_uninit);
5690		bfa_fsm_send_event(&dconf->bfa->iocfc, IOCFC_E_DCONF_DONE);
5691		break;
5692 case BFA_DCONF_SM_INIT:
5693 case BFA_DCONF_SM_IOCDISABLE:
5694 break;
5695 default:
5696 bfa_sm_fault(dconf->bfa, event);
5697 }
5698}
5699
5700/*
5701 * Entries are dirty; write them back to the flash.
5702 */
5703
5704static void
5705bfa_dconf_sm_dirty(struct bfa_dconf_mod_s *dconf, enum bfa_dconf_event event)
5706{
5707 bfa_trc(dconf->bfa, event);
5708
5709 switch (event) {
5710 case BFA_DCONF_SM_TIMEOUT:
5711 bfa_sm_set_state(dconf, bfa_dconf_sm_sync);
5712 bfa_dconf_flash_write(dconf);
5713 break;
5714 case BFA_DCONF_SM_WR:
5715 bfa_timer_stop(&dconf->timer);
5716 bfa_timer_start(dconf->bfa, &dconf->timer,
5717 bfa_dconf_timer, dconf, BFA_DCONF_UPDATE_TOV);
5718 break;
5719 case BFA_DCONF_SM_EXIT:
5720 bfa_timer_stop(&dconf->timer);
5721 bfa_timer_start(dconf->bfa, &dconf->timer,
5722 bfa_dconf_timer, dconf, BFA_DCONF_UPDATE_TOV);
5723 bfa_sm_set_state(dconf, bfa_dconf_sm_final_sync);
5724 bfa_dconf_flash_write(dconf);
5725 break;
5726 case BFA_DCONF_SM_FLASH_COMP:
5727 break;
5728 case BFA_DCONF_SM_IOCDISABLE:
5729 bfa_timer_stop(&dconf->timer);
5730 bfa_sm_set_state(dconf, bfa_dconf_sm_iocdown_dirty);
5731 break;
5732 default:
5733 bfa_sm_fault(dconf->bfa, event);
5734 }
5735}
5736
5737/*
5738 * Sync the dconf entries to the flash.
5739 */
5740static void
5741bfa_dconf_sm_final_sync(struct bfa_dconf_mod_s *dconf,
5742 enum bfa_dconf_event event)
5743{
5744 bfa_trc(dconf->bfa, event);
5745
5746 switch (event) {
5747 case BFA_DCONF_SM_IOCDISABLE:
5748 case BFA_DCONF_SM_FLASH_COMP:
5749 bfa_timer_stop(&dconf->timer);
5750 case BFA_DCONF_SM_TIMEOUT:
5751 bfa_sm_set_state(dconf, bfa_dconf_sm_uninit);
5752		bfa_fsm_send_event(&dconf->bfa->iocfc, IOCFC_E_DCONF_DONE);
5753		break;
5754 default:
5755 bfa_sm_fault(dconf->bfa, event);
5756 }
5757}
5758
5759static void
5760bfa_dconf_sm_sync(struct bfa_dconf_mod_s *dconf, enum bfa_dconf_event event)
5761{
5762 bfa_trc(dconf->bfa, event);
5763
5764 switch (event) {
5765 case BFA_DCONF_SM_FLASH_COMP:
5766 bfa_sm_set_state(dconf, bfa_dconf_sm_ready);
5767 break;
5768 case BFA_DCONF_SM_WR:
5769 bfa_timer_start(dconf->bfa, &dconf->timer,
5770 bfa_dconf_timer, dconf, BFA_DCONF_UPDATE_TOV);
5771 bfa_sm_set_state(dconf, bfa_dconf_sm_dirty);
5772 break;
5773 case BFA_DCONF_SM_EXIT:
5774 bfa_timer_start(dconf->bfa, &dconf->timer,
5775 bfa_dconf_timer, dconf, BFA_DCONF_UPDATE_TOV);
5776 bfa_sm_set_state(dconf, bfa_dconf_sm_final_sync);
5777 break;
5778 case BFA_DCONF_SM_IOCDISABLE:
5779 bfa_sm_set_state(dconf, bfa_dconf_sm_iocdown_dirty);
5780 break;
5781 default:
5782 bfa_sm_fault(dconf->bfa, event);
5783 }
5784}
5785
5786static void
5787bfa_dconf_sm_iocdown_dirty(struct bfa_dconf_mod_s *dconf,
5788 enum bfa_dconf_event event)
5789{
5790 bfa_trc(dconf->bfa, event);
5791
5792 switch (event) {
5793 case BFA_DCONF_SM_INIT:
5794 bfa_timer_start(dconf->bfa, &dconf->timer,
5795 bfa_dconf_timer, dconf, BFA_DCONF_UPDATE_TOV);
5796 bfa_sm_set_state(dconf, bfa_dconf_sm_dirty);
5797 break;
5798 case BFA_DCONF_SM_EXIT:
5799		bfa_sm_set_state(dconf, bfa_dconf_sm_uninit);
5800		bfa_fsm_send_event(&dconf->bfa->iocfc, IOCFC_E_DCONF_DONE);
5801		break;
5802 case BFA_DCONF_SM_IOCDISABLE:
5803 break;
5804 default:
5805 bfa_sm_fault(dconf->bfa, event);
5806 }
5807}
5808
5809/*
5810 * Compute and return memory needed by DRV_CFG module.
5811 */
5812static void
5813bfa_dconf_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *meminfo,
5814 struct bfa_s *bfa)
5815{
5816 struct bfa_mem_kva_s *dconf_kva = BFA_MEM_DCONF_KVA(bfa);
5817
5818 if (cfg->drvcfg.min_cfg)
5819 bfa_mem_kva_setup(meminfo, dconf_kva,
5820 sizeof(struct bfa_dconf_hdr_s));
5821 else
5822 bfa_mem_kva_setup(meminfo, dconf_kva,
5823 sizeof(struct bfa_dconf_s));
5824}
5825
5826static void
5827bfa_dconf_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
5828 struct bfa_pcidev_s *pcidev)
5829{
5830 struct bfa_dconf_mod_s *dconf = BFA_DCONF_MOD(bfa);
5831
5832 dconf->bfad = bfad;
5833 dconf->bfa = bfa;
5834 dconf->instance = bfa->ioc.port_id;
5835 bfa_trc(bfa, dconf->instance);
5836
5837 dconf->dconf = (struct bfa_dconf_s *) bfa_mem_kva_curp(dconf);
5838 if (cfg->drvcfg.min_cfg) {
5839 bfa_mem_kva_curp(dconf) += sizeof(struct bfa_dconf_hdr_s);
5840 dconf->min_cfg = BFA_TRUE;
5841	} else {
5842 dconf->min_cfg = BFA_FALSE;
5843 bfa_mem_kva_curp(dconf) += sizeof(struct bfa_dconf_s);
5844 }
5845
5846 bfa_dconf_read_data_valid(bfa) = BFA_FALSE;
5847 bfa_sm_set_state(dconf, bfa_dconf_sm_uninit);
5848}
5849
5850static void
5851bfa_dconf_init_cb(void *arg, bfa_status_t status)
5852{
5853 struct bfa_s *bfa = arg;
5854 struct bfa_dconf_mod_s *dconf = BFA_DCONF_MOD(bfa);
5855
5856	bfa_sm_send_event(dconf, BFA_DCONF_SM_FLASH_COMP);
5857	if (status == BFA_STATUS_OK) {
5858 bfa_dconf_read_data_valid(bfa) = BFA_TRUE;
5859 if (dconf->dconf->hdr.signature != BFI_DCONF_SIGNATURE)
5860 dconf->dconf->hdr.signature = BFI_DCONF_SIGNATURE;
5861 if (dconf->dconf->hdr.version != BFI_DCONF_VERSION)
5862 dconf->dconf->hdr.version = BFI_DCONF_VERSION;
5863 }
5864	bfa_fsm_send_event(&bfa->iocfc, IOCFC_E_DCONF_DONE);
5865}
5866
5867void
5868bfa_dconf_modinit(struct bfa_s *bfa)
5869{
5870 struct bfa_dconf_mod_s *dconf = BFA_DCONF_MOD(bfa);
5871 bfa_sm_send_event(dconf, BFA_DCONF_SM_INIT);
5872}
5873static void
5874bfa_dconf_start(struct bfa_s *bfa)
5875{
5876}
5877
5878static void
5879bfa_dconf_stop(struct bfa_s *bfa)
5880{
5881}
5882
5883static void bfa_dconf_timer(void *cbarg)
5884{
5885 struct bfa_dconf_mod_s *dconf = cbarg;
5886 bfa_sm_send_event(dconf, BFA_DCONF_SM_TIMEOUT);
5887}
5888static void
5889bfa_dconf_iocdisable(struct bfa_s *bfa)
5890{
5891 struct bfa_dconf_mod_s *dconf = BFA_DCONF_MOD(bfa);
5892 bfa_sm_send_event(dconf, BFA_DCONF_SM_IOCDISABLE);
5893}
5894
5895static void
5896bfa_dconf_detach(struct bfa_s *bfa)
5897{
5898}
5899
5900static bfa_status_t
5901bfa_dconf_flash_write(struct bfa_dconf_mod_s *dconf)
5902{
5903 bfa_status_t bfa_status;
5904 bfa_trc(dconf->bfa, 0);
5905
5906 bfa_status = bfa_flash_update_part(BFA_FLASH(dconf->bfa),
5907 BFA_FLASH_PART_DRV, dconf->instance,
5908 dconf->dconf, sizeof(struct bfa_dconf_s), 0,
5909 bfa_dconf_cbfn, dconf);
5910 if (bfa_status != BFA_STATUS_OK)
5911 WARN_ON(bfa_status);
5912 bfa_trc(dconf->bfa, bfa_status);
5913
5914 return bfa_status;
5915}
5916
5917bfa_status_t
5918bfa_dconf_update(struct bfa_s *bfa)
5919{
5920 struct bfa_dconf_mod_s *dconf = BFA_DCONF_MOD(bfa);
5921 bfa_trc(dconf->bfa, 0);
5922 if (bfa_sm_cmp_state(dconf, bfa_dconf_sm_iocdown_dirty))
5923 return BFA_STATUS_FAILED;
5924
5925 if (dconf->min_cfg) {
5926 bfa_trc(dconf->bfa, dconf->min_cfg);
5927 return BFA_STATUS_FAILED;
5928 }
5929
5930 bfa_sm_send_event(dconf, BFA_DCONF_SM_WR);
5931 return BFA_STATUS_OK;
5932}
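/*
 * Note that bfa_dconf_update() only marks the config dirty (BFA_DCONF_SM_WR);
 * the actual flash write is deferred and coalesced by the
 * BFA_DCONF_UPDATE_TOV timer in the dirty state, or forced at module exit.
 */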
5933
5934static void
5935bfa_dconf_cbfn(void *arg, bfa_status_t status)
5936{
5937 struct bfa_dconf_mod_s *dconf = arg;
5938 WARN_ON(status);
5939 bfa_sm_send_event(dconf, BFA_DCONF_SM_FLASH_COMP);
5940}
5941
5942void
5943bfa_dconf_modexit(struct bfa_s *bfa)
5944{
5945 struct bfa_dconf_mod_s *dconf = BFA_DCONF_MOD(bfa);
5946	bfa_sm_send_event(dconf, BFA_DCONF_SM_EXIT);
5947}