/*
 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
 * All rights reserved
 * www.brocade.com
 *
 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License (GPL) Version 2 as
 * published by the Free Software Foundation
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */

#include "bfad_drv.h"
#include "bfad_im.h"
#include "bfa_ioc.h"
#include "bfi_reg.h"
#include "bfa_defs.h"
#include "bfa_defs_svc.h"

BFA_TRC_FILE(CNA, IOC);

/*
 * IOC local definitions
 */
#define BFA_IOC_TOV		3000	/* msecs */
#define BFA_IOC_HWSEM_TOV	500	/* msecs */
#define BFA_IOC_HB_TOV		500	/* msecs */
#define BFA_IOC_TOV_RECOVER	BFA_IOC_HB_TOV
#define BFA_IOC_POLL_TOV	BFA_TIMER_FREQ

#define bfa_ioc_timer_start(__ioc)					\
	bfa_timer_begin((__ioc)->timer_mod, &(__ioc)->ioc_timer,	\
			bfa_ioc_timeout, (__ioc), BFA_IOC_TOV)
#define bfa_ioc_timer_stop(__ioc)	bfa_timer_stop(&(__ioc)->ioc_timer)

#define bfa_hb_timer_start(__ioc)					\
	bfa_timer_begin((__ioc)->timer_mod, &(__ioc)->hb_timer,	\
			bfa_ioc_hb_check, (__ioc), BFA_IOC_HB_TOV)
#define bfa_hb_timer_stop(__ioc)	bfa_timer_stop(&(__ioc)->hb_timer)

#define BFA_DBG_FWTRC_OFF(_fn)	(BFI_IOC_TRC_OFF + BFA_DBG_FWTRC_LEN * (_fn))

/*
 * Asic specific macros : see bfa_hw_cb.c and bfa_hw_ct.c for details.
 */

#define bfa_ioc_firmware_lock(__ioc)			\
			((__ioc)->ioc_hwif->ioc_firmware_lock(__ioc))
#define bfa_ioc_firmware_unlock(__ioc)			\
			((__ioc)->ioc_hwif->ioc_firmware_unlock(__ioc))
#define bfa_ioc_reg_init(__ioc) ((__ioc)->ioc_hwif->ioc_reg_init(__ioc))
#define bfa_ioc_map_port(__ioc) ((__ioc)->ioc_hwif->ioc_map_port(__ioc))
#define bfa_ioc_notify_fail(__ioc)		\
			((__ioc)->ioc_hwif->ioc_notify_fail(__ioc))
#define bfa_ioc_sync_start(__ioc)		\
			((__ioc)->ioc_hwif->ioc_sync_start(__ioc))
#define bfa_ioc_sync_join(__ioc)		\
			((__ioc)->ioc_hwif->ioc_sync_join(__ioc))
#define bfa_ioc_sync_leave(__ioc)		\
			((__ioc)->ioc_hwif->ioc_sync_leave(__ioc))
#define bfa_ioc_sync_ack(__ioc)			\
			((__ioc)->ioc_hwif->ioc_sync_ack(__ioc))
#define bfa_ioc_sync_complete(__ioc)		\
			((__ioc)->ioc_hwif->ioc_sync_complete(__ioc))

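/*
 * A mailbox command is pending if the s/w command queue is not empty or
 * the last command is still outstanding in the h/w mailbox register.
 */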
#define bfa_ioc_mbox_cmd_pending(__ioc)		\
	(!list_empty(&((__ioc)->mbox_mod.cmd_q)) || \
	readl((__ioc)->ioc_regs.hfn_mbox_cmd))

bfa_boolean_t bfa_auto_recover = BFA_TRUE;

/*
 * forward declarations
 */
static void bfa_ioc_hw_sem_get(struct bfa_ioc_s *ioc);
static void bfa_ioc_hwinit(struct bfa_ioc_s *ioc, bfa_boolean_t force);
static void bfa_ioc_timeout(void *ioc);
static void bfa_ioc_poll_fwinit(struct bfa_ioc_s *ioc);
static void bfa_ioc_send_enable(struct bfa_ioc_s *ioc);
static void bfa_ioc_send_disable(struct bfa_ioc_s *ioc);
static void bfa_ioc_send_getattr(struct bfa_ioc_s *ioc);
static void bfa_ioc_hb_monitor(struct bfa_ioc_s *ioc);
static void bfa_ioc_mbox_poll(struct bfa_ioc_s *ioc);
static void bfa_ioc_mbox_flush(struct bfa_ioc_s *ioc);
static void bfa_ioc_recover(struct bfa_ioc_s *ioc);
static void bfa_ioc_event_notify(struct bfa_ioc_s *ioc,
				enum bfa_ioc_event_e event);
static void bfa_ioc_disable_comp(struct bfa_ioc_s *ioc);
static void bfa_ioc_lpu_stop(struct bfa_ioc_s *ioc);
static void bfa_ioc_fail_notify(struct bfa_ioc_s *ioc);
static void bfa_ioc_pf_fwmismatch(struct bfa_ioc_s *ioc);

/*
 * IOC state machine definitions/declarations
 */
enum ioc_event {
	IOC_E_RESET		= 1,	/* IOC reset request		*/
	IOC_E_ENABLE		= 2,	/* IOC enable request		*/
	IOC_E_DISABLE		= 3,	/* IOC disable request		*/
	IOC_E_DETACH		= 4,	/* driver detach cleanup	*/
	IOC_E_ENABLED		= 5,	/* f/w enabled			*/
	IOC_E_FWRSP_GETATTR	= 6,	/* IOC get attribute response	*/
	IOC_E_DISABLED		= 7,	/* f/w disabled			*/
	IOC_E_PFFAILED		= 8,	/* failure notice by iocpf sm	*/
	IOC_E_HBFAIL		= 9,	/* heartbeat failure		*/
	IOC_E_HWERROR		= 10,	/* hardware error interrupt	*/
	IOC_E_TIMEOUT		= 11,	/* timeout			*/
	IOC_E_HWFAILED		= 12,	/* PCI mapping failure notice	*/
};

bfa_fsm_state_decl(bfa_ioc, uninit, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, reset, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, enabling, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, getattr, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, op, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, fail_retry, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, fail, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, disabling, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, disabled, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, hwfail, struct bfa_ioc_s, enum ioc_event);

static struct bfa_sm_table_s ioc_sm_table[] = {
	{BFA_SM(bfa_ioc_sm_uninit), BFA_IOC_UNINIT},
	{BFA_SM(bfa_ioc_sm_reset), BFA_IOC_RESET},
	{BFA_SM(bfa_ioc_sm_enabling), BFA_IOC_ENABLING},
	{BFA_SM(bfa_ioc_sm_getattr), BFA_IOC_GETATTR},
	{BFA_SM(bfa_ioc_sm_op), BFA_IOC_OPERATIONAL},
	{BFA_SM(bfa_ioc_sm_fail_retry), BFA_IOC_INITFAIL},
	{BFA_SM(bfa_ioc_sm_fail), BFA_IOC_FAIL},
	{BFA_SM(bfa_ioc_sm_disabling), BFA_IOC_DISABLING},
	{BFA_SM(bfa_ioc_sm_disabled), BFA_IOC_DISABLED},
	{BFA_SM(bfa_ioc_sm_hwfail), BFA_IOC_HWFAIL},
};

/*
 * IOCPF state machine definitions/declarations
 */

#define bfa_iocpf_timer_start(__ioc)					\
	bfa_timer_begin((__ioc)->timer_mod, &(__ioc)->ioc_timer,	\
			bfa_iocpf_timeout, (__ioc), BFA_IOC_TOV)
#define bfa_iocpf_timer_stop(__ioc)	bfa_timer_stop(&(__ioc)->ioc_timer)

#define bfa_iocpf_poll_timer_start(__ioc)				\
	bfa_timer_begin((__ioc)->timer_mod, &(__ioc)->ioc_timer,	\
			bfa_iocpf_poll_timeout, (__ioc), BFA_IOC_POLL_TOV)

#define bfa_sem_timer_start(__ioc)					\
	bfa_timer_begin((__ioc)->timer_mod, &(__ioc)->sem_timer,	\
			bfa_iocpf_sem_timeout, (__ioc), BFA_IOC_HWSEM_TOV)
#define bfa_sem_timer_stop(__ioc)	bfa_timer_stop(&(__ioc)->sem_timer)

/*
 * Forward declarations for iocpf state machine
 */
static void bfa_iocpf_timeout(void *ioc_arg);
static void bfa_iocpf_sem_timeout(void *ioc_arg);
static void bfa_iocpf_poll_timeout(void *ioc_arg);

/*
 * IOCPF state machine events
 */
enum iocpf_event {
	IOCPF_E_ENABLE		= 1,	/* IOCPF enable request		*/
	IOCPF_E_DISABLE		= 2,	/* IOCPF disable request	*/
	IOCPF_E_STOP		= 3,	/* stop on driver detach	*/
	IOCPF_E_FWREADY		= 4,	/* f/w initialization done	*/
	IOCPF_E_FWRSP_ENABLE	= 5,	/* enable f/w response		*/
	IOCPF_E_FWRSP_DISABLE	= 6,	/* disable f/w response		*/
	IOCPF_E_FAIL		= 7,	/* failure notice by ioc sm	*/
	IOCPF_E_INITFAIL	= 8,	/* init fail notice by ioc sm	*/
	IOCPF_E_GETATTRFAIL	= 9,	/* init fail notice by ioc sm	*/
	IOCPF_E_SEMLOCKED	= 10,	/* h/w semaphore is locked	*/
	IOCPF_E_TIMEOUT		= 11,	/* f/w response timeout		*/
	IOCPF_E_SEM_ERROR	= 12,	/* h/w sem mapping error	*/
};

/*
 * IOCPF states
 */
enum bfa_iocpf_state {
	BFA_IOCPF_RESET		= 1,	/* IOC is in reset state */
	BFA_IOCPF_SEMWAIT	= 2,	/* Waiting for IOC h/w semaphore */
	BFA_IOCPF_HWINIT	= 3,	/* IOC h/w is being initialized */
	BFA_IOCPF_READY		= 4,	/* IOCPF is initialized */
	BFA_IOCPF_INITFAIL	= 5,	/* IOCPF failed */
	BFA_IOCPF_FAIL		= 6,	/* IOCPF failed */
	BFA_IOCPF_DISABLING	= 7,	/* IOCPF is being disabled */
	BFA_IOCPF_DISABLED	= 8,	/* IOCPF is disabled */
	BFA_IOCPF_FWMISMATCH	= 9,	/* IOC f/w different from drivers */
};

bfa_fsm_state_decl(bfa_iocpf, reset, struct bfa_iocpf_s, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, fwcheck, struct bfa_iocpf_s, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, mismatch, struct bfa_iocpf_s, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, semwait, struct bfa_iocpf_s, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, hwinit, struct bfa_iocpf_s, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, enabling, struct bfa_iocpf_s, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, ready, struct bfa_iocpf_s, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, initfail_sync, struct bfa_iocpf_s,
						enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, initfail, struct bfa_iocpf_s, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, fail_sync, struct bfa_iocpf_s, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, fail, struct bfa_iocpf_s, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, disabling, struct bfa_iocpf_s, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, disabling_sync, struct bfa_iocpf_s,
						enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, disabled, struct bfa_iocpf_s, enum iocpf_event);

static struct bfa_sm_table_s iocpf_sm_table[] = {
	{BFA_SM(bfa_iocpf_sm_reset), BFA_IOCPF_RESET},
	{BFA_SM(bfa_iocpf_sm_fwcheck), BFA_IOCPF_FWMISMATCH},
	{BFA_SM(bfa_iocpf_sm_mismatch), BFA_IOCPF_FWMISMATCH},
	{BFA_SM(bfa_iocpf_sm_semwait), BFA_IOCPF_SEMWAIT},
	{BFA_SM(bfa_iocpf_sm_hwinit), BFA_IOCPF_HWINIT},
	{BFA_SM(bfa_iocpf_sm_enabling), BFA_IOCPF_HWINIT},
	{BFA_SM(bfa_iocpf_sm_ready), BFA_IOCPF_READY},
	{BFA_SM(bfa_iocpf_sm_initfail_sync), BFA_IOCPF_INITFAIL},
	{BFA_SM(bfa_iocpf_sm_initfail), BFA_IOCPF_INITFAIL},
	{BFA_SM(bfa_iocpf_sm_fail_sync), BFA_IOCPF_FAIL},
	{BFA_SM(bfa_iocpf_sm_fail), BFA_IOCPF_FAIL},
	{BFA_SM(bfa_iocpf_sm_disabling), BFA_IOCPF_DISABLING},
	{BFA_SM(bfa_iocpf_sm_disabling_sync), BFA_IOCPF_DISABLING},
	{BFA_SM(bfa_iocpf_sm_disabled), BFA_IOCPF_DISABLED},
};

/*
 * IOC State Machine
 */

/*
 * Beginning state. IOC uninit state.
 */

static void
bfa_ioc_sm_uninit_entry(struct bfa_ioc_s *ioc)
{
}

/*
 * IOC is in uninit state.
 */
static void
bfa_ioc_sm_uninit(struct bfa_ioc_s *ioc, enum ioc_event event)
{
	bfa_trc(ioc, event);

	switch (event) {
	case IOC_E_RESET:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_reset);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}
/*
 * Reset entry actions -- initialize state machine
 */
static void
bfa_ioc_sm_reset_entry(struct bfa_ioc_s *ioc)
{
	bfa_fsm_set_state(&ioc->iocpf, bfa_iocpf_sm_reset);
}

/*
 * IOC is in reset state.
 */
static void
bfa_ioc_sm_reset(struct bfa_ioc_s *ioc, enum ioc_event event)
{
	bfa_trc(ioc, event);

	switch (event) {
	case IOC_E_ENABLE:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_enabling);
		break;

	case IOC_E_DISABLE:
		bfa_ioc_disable_comp(ioc);
		break;

	case IOC_E_DETACH:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}


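/*
 * Forward the enable request to the IOCPF state machine.
 */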
static void
bfa_ioc_sm_enabling_entry(struct bfa_ioc_s *ioc)
{
	bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_ENABLE);
}

/*
 * Host IOC function is being enabled, awaiting response from firmware.
 * Semaphore is acquired.
 */
static void
bfa_ioc_sm_enabling(struct bfa_ioc_s *ioc, enum ioc_event event)
{
	bfa_trc(ioc, event);

	switch (event) {
	case IOC_E_ENABLED:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_getattr);
		break;

	case IOC_E_PFFAILED:
		/* !!! fall through !!! */
	case IOC_E_HWERROR:
		ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_fail);
		if (event != IOC_E_PFFAILED)
			bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_INITFAIL);
		break;

	case IOC_E_HWFAILED:
		ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_hwfail);
		break;

	case IOC_E_DISABLE:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
		break;

	case IOC_E_DETACH:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
		bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_STOP);
		break;

	case IOC_E_ENABLE:
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}


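/*
 * Send a get-attributes request to firmware and start the IOC timer.
 */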
static void
bfa_ioc_sm_getattr_entry(struct bfa_ioc_s *ioc)
{
	bfa_ioc_timer_start(ioc);
	bfa_ioc_send_getattr(ioc);
}

/*
 * IOC configuration in progress. Timer is active.
 */
static void
bfa_ioc_sm_getattr(struct bfa_ioc_s *ioc, enum ioc_event event)
{
	bfa_trc(ioc, event);

	switch (event) {
	case IOC_E_FWRSP_GETATTR:
		bfa_ioc_timer_stop(ioc);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_op);
		break;

	case IOC_E_PFFAILED:
	case IOC_E_HWERROR:
		bfa_ioc_timer_stop(ioc);
		/* !!! fall through !!! */
	case IOC_E_TIMEOUT:
		ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_fail);
		if (event != IOC_E_PFFAILED)
			bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_GETATTRFAIL);
		break;

	case IOC_E_DISABLE:
		bfa_ioc_timer_stop(ioc);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
		break;

	case IOC_E_ENABLE:
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}

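/*
 * IOC is operational: notify completion, inform registered modules and
 * start heartbeat monitoring.
 */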
static void
bfa_ioc_sm_op_entry(struct bfa_ioc_s *ioc)
{
	struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad;

	ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_OK);
	bfa_ioc_event_notify(ioc, BFA_IOC_E_ENABLED);
	bfa_ioc_hb_monitor(ioc);
	BFA_LOG(KERN_INFO, bfad, bfa_log_level, "IOC enabled\n");
	bfa_ioc_aen_post(ioc, BFA_IOC_AEN_ENABLE);
}

static void
bfa_ioc_sm_op(struct bfa_ioc_s *ioc, enum ioc_event event)
{
	bfa_trc(ioc, event);

	switch (event) {
	case IOC_E_ENABLE:
		break;

	case IOC_E_DISABLE:
		bfa_hb_timer_stop(ioc);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
		break;

	case IOC_E_PFFAILED:
	case IOC_E_HWERROR:
		bfa_hb_timer_stop(ioc);
		/* !!! fall through !!! */
	case IOC_E_HBFAIL:
		if (ioc->iocpf.auto_recover)
			bfa_fsm_set_state(ioc, bfa_ioc_sm_fail_retry);
		else
			bfa_fsm_set_state(ioc, bfa_ioc_sm_fail);

		bfa_ioc_fail_notify(ioc);

		if (event != IOC_E_PFFAILED)
			bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_FAIL);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}


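/*
 * Forward the disable request to the IOCPF state machine and log the event.
 */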
static void
bfa_ioc_sm_disabling_entry(struct bfa_ioc_s *ioc)
{
	struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad;
	bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_DISABLE);
	BFA_LOG(KERN_INFO, bfad, bfa_log_level, "IOC disabled\n");
	bfa_ioc_aen_post(ioc, BFA_IOC_AEN_DISABLE);
}

/*
 * IOC is being disabled
 */
static void
bfa_ioc_sm_disabling(struct bfa_ioc_s *ioc, enum ioc_event event)
{
	bfa_trc(ioc, event);

	switch (event) {
	case IOC_E_DISABLED:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled);
		break;

	case IOC_E_HWERROR:
		/*
		 * No state change. Will move to disabled state
		 * after iocpf sm completes failure processing and
		 * moves to disabled state.
		 */
		bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_FAIL);
		break;

	case IOC_E_HWFAILED:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_hwfail);
		bfa_ioc_disable_comp(ioc);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}

/*
 * IOC disable completion entry.
 */
static void
bfa_ioc_sm_disabled_entry(struct bfa_ioc_s *ioc)
{
	bfa_ioc_disable_comp(ioc);
}

static void
bfa_ioc_sm_disabled(struct bfa_ioc_s *ioc, enum ioc_event event)
{
	bfa_trc(ioc, event);

	switch (event) {
	case IOC_E_ENABLE:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_enabling);
		break;

	case IOC_E_DISABLE:
		ioc->cbfn->disable_cbfn(ioc->bfa);
		break;

	case IOC_E_DETACH:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
		bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_STOP);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}


static void
bfa_ioc_sm_fail_retry_entry(struct bfa_ioc_s *ioc)
{
	bfa_trc(ioc, 0);
}

/*
 * Hardware initialization retry.
 */
static void
bfa_ioc_sm_fail_retry(struct bfa_ioc_s *ioc, enum ioc_event event)
{
	bfa_trc(ioc, event);

	switch (event) {
	case IOC_E_ENABLED:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_getattr);
		break;

	case IOC_E_PFFAILED:
	case IOC_E_HWERROR:
		/*
		 * Initialization retry failed.
		 */
		ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_fail);
		if (event != IOC_E_PFFAILED)
			bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_INITFAIL);
		break;

	case IOC_E_HWFAILED:
		ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_hwfail);
		break;

	case IOC_E_ENABLE:
		break;

	case IOC_E_DISABLE:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
		break;

	case IOC_E_DETACH:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
		bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_STOP);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}


static void
bfa_ioc_sm_fail_entry(struct bfa_ioc_s *ioc)
{
	bfa_trc(ioc, 0);
}

/*
 * IOC failure.
 */
static void
bfa_ioc_sm_fail(struct bfa_ioc_s *ioc, enum ioc_event event)
{
	bfa_trc(ioc, event);

	switch (event) {

	case IOC_E_ENABLE:
		ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
		break;

	case IOC_E_DISABLE:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
		break;

	case IOC_E_DETACH:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
		bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_STOP);
		break;

	case IOC_E_HWERROR:
	case IOC_E_HWFAILED:
		/*
		 * HB failure / HW error notification, ignore.
		 */
		break;
	default:
		bfa_sm_fault(ioc, event);
	}
}

static void
bfa_ioc_sm_hwfail_entry(struct bfa_ioc_s *ioc)
{
	bfa_trc(ioc, 0);
}

static void
bfa_ioc_sm_hwfail(struct bfa_ioc_s *ioc, enum ioc_event event)
{
	bfa_trc(ioc, event);

	switch (event) {
	case IOC_E_ENABLE:
		ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
		break;

	case IOC_E_DISABLE:
		ioc->cbfn->disable_cbfn(ioc->bfa);
		break;

	case IOC_E_DETACH:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
		break;

	case IOC_E_HWERROR:
		/* Ignore - already in hwfail state */
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}

/*
 * IOCPF State Machine
 */

/*
 * Reset entry actions -- initialize state machine
 */
static void
bfa_iocpf_sm_reset_entry(struct bfa_iocpf_s *iocpf)
{
	iocpf->fw_mismatch_notified = BFA_FALSE;
	iocpf->auto_recover = bfa_auto_recover;
}

/*
 * Beginning state. IOC is in reset state.
 */
static void
bfa_iocpf_sm_reset(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
	struct bfa_ioc_s *ioc = iocpf->ioc;

	bfa_trc(ioc, event);

	switch (event) {
	case IOCPF_E_ENABLE:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fwcheck);
		break;

	case IOCPF_E_STOP:
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}

/*
 * Semaphore should be acquired for version check.
 */
static void
bfa_iocpf_sm_fwcheck_entry(struct bfa_iocpf_s *iocpf)
{
	struct bfi_ioc_image_hdr_s	fwhdr;
	u32	r32, fwstate, pgnum, pgoff, loff = 0;
	int	i;

	/*
	 * Spin on init semaphore to serialize.
	 */
	r32 = readl(iocpf->ioc->ioc_regs.ioc_init_sem_reg);
	while (r32 & 0x1) {
		udelay(20);
		r32 = readl(iocpf->ioc->ioc_regs.ioc_init_sem_reg);
	}

	/* h/w sem init */
	fwstate = readl(iocpf->ioc->ioc_regs.ioc_fwstate);
	if (fwstate == BFI_IOC_UNINIT) {
		writel(1, iocpf->ioc->ioc_regs.ioc_init_sem_reg);
		goto sem_get;
	}

	bfa_ioc_fwver_get(iocpf->ioc, &fwhdr);

	if (swab32(fwhdr.exec) == BFI_FWBOOT_TYPE_NORMAL) {
		writel(1, iocpf->ioc->ioc_regs.ioc_init_sem_reg);
		goto sem_get;
	}

	/*
	 * Clear fwver hdr
	 */
	pgnum = PSS_SMEM_PGNUM(iocpf->ioc->ioc_regs.smem_pg0, loff);
	pgoff = PSS_SMEM_PGOFF(loff);
	writel(pgnum, iocpf->ioc->ioc_regs.host_page_num_fn);

	for (i = 0; i < sizeof(struct bfi_ioc_image_hdr_s) / sizeof(u32); i++) {
		bfa_mem_write(iocpf->ioc->ioc_regs.smem_page_start, loff, 0);
		loff += sizeof(u32);
	}

	bfa_trc(iocpf->ioc, fwstate);
	bfa_trc(iocpf->ioc, swab32(fwhdr.exec));
	writel(BFI_IOC_UNINIT, iocpf->ioc->ioc_regs.ioc_fwstate);
	writel(BFI_IOC_UNINIT, iocpf->ioc->ioc_regs.alt_ioc_fwstate);

	/*
	 * Unlock the hw semaphore. Should be here only once per boot.
	 */
	bfa_ioc_ownership_reset(iocpf->ioc);

	/*
	 * unlock init semaphore.
	 */
	writel(1, iocpf->ioc->ioc_regs.ioc_init_sem_reg);

sem_get:
	bfa_ioc_hw_sem_get(iocpf->ioc);
}

/*
 * Awaiting h/w semaphore to continue with version check.
 */
static void
bfa_iocpf_sm_fwcheck(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
	struct bfa_ioc_s *ioc = iocpf->ioc;

	bfa_trc(ioc, event);

	switch (event) {
	case IOCPF_E_SEMLOCKED:
		if (bfa_ioc_firmware_lock(ioc)) {
			if (bfa_ioc_sync_start(ioc)) {
				bfa_ioc_sync_join(ioc);
				bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit);
			} else {
				bfa_ioc_firmware_unlock(ioc);
				writel(1, ioc->ioc_regs.ioc_sem_reg);
				bfa_sem_timer_start(ioc);
			}
		} else {
			writel(1, ioc->ioc_regs.ioc_sem_reg);
			bfa_fsm_set_state(iocpf, bfa_iocpf_sm_mismatch);
		}
		break;

	case IOCPF_E_SEM_ERROR:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
		bfa_fsm_send_event(ioc, IOC_E_HWFAILED);
		break;

	case IOCPF_E_DISABLE:
		bfa_sem_timer_stop(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
		bfa_fsm_send_event(ioc, IOC_E_DISABLED);
		break;

	case IOCPF_E_STOP:
		bfa_sem_timer_stop(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}

/*
 * Notify enable completion callback.
 */
static void
bfa_iocpf_sm_mismatch_entry(struct bfa_iocpf_s *iocpf)
{
	/*
	 * Call only the first time sm enters fwmismatch state.
	 */
	if (iocpf->fw_mismatch_notified == BFA_FALSE)
		bfa_ioc_pf_fwmismatch(iocpf->ioc);

	iocpf->fw_mismatch_notified = BFA_TRUE;
	bfa_iocpf_timer_start(iocpf->ioc);
}

/*
 * Awaiting firmware version match.
 */
static void
bfa_iocpf_sm_mismatch(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
	struct bfa_ioc_s *ioc = iocpf->ioc;

	bfa_trc(ioc, event);

	switch (event) {
	case IOCPF_E_TIMEOUT:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fwcheck);
		break;

	case IOCPF_E_DISABLE:
		bfa_iocpf_timer_stop(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
		bfa_fsm_send_event(ioc, IOC_E_DISABLED);
		break;

	case IOCPF_E_STOP:
		bfa_iocpf_timer_stop(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}

/*
 * Request for semaphore.
 */
static void
bfa_iocpf_sm_semwait_entry(struct bfa_iocpf_s *iocpf)
{
	bfa_ioc_hw_sem_get(iocpf->ioc);
}

/*
 * Awaiting semaphore for h/w initialization.
 */
static void
bfa_iocpf_sm_semwait(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
	struct bfa_ioc_s *ioc = iocpf->ioc;

	bfa_trc(ioc, event);

	switch (event) {
	case IOCPF_E_SEMLOCKED:
		if (bfa_ioc_sync_complete(ioc)) {
			bfa_ioc_sync_join(ioc);
			bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit);
		} else {
			writel(1, ioc->ioc_regs.ioc_sem_reg);
			bfa_sem_timer_start(ioc);
		}
		break;

	case IOCPF_E_SEM_ERROR:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
		bfa_fsm_send_event(ioc, IOC_E_HWFAILED);
		break;

	case IOCPF_E_DISABLE:
		bfa_sem_timer_stop(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}

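/*
 * Reset the fwinit poll count and start h/w initialization.
 */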
static void
bfa_iocpf_sm_hwinit_entry(struct bfa_iocpf_s *iocpf)
{
	iocpf->poll_time = 0;
	bfa_ioc_hwinit(iocpf->ioc, BFA_FALSE);
}

/*
 * Hardware is being initialized. Interrupts are enabled.
 * Holding hardware semaphore lock.
 */
static void
bfa_iocpf_sm_hwinit(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
	struct bfa_ioc_s *ioc = iocpf->ioc;

	bfa_trc(ioc, event);

	switch (event) {
	case IOCPF_E_FWREADY:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_enabling);
		break;

	case IOCPF_E_TIMEOUT:
		writel(1, ioc->ioc_regs.ioc_sem_reg);
		bfa_fsm_send_event(ioc, IOC_E_PFFAILED);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail_sync);
		break;

	case IOCPF_E_DISABLE:
		bfa_iocpf_timer_stop(ioc);
		bfa_ioc_sync_leave(ioc);
		writel(1, ioc->ioc_regs.ioc_sem_reg);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}

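/*
 * Start the IOCPF timer, enable interrupts and send an enable request
 * to firmware.
 */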
static void
bfa_iocpf_sm_enabling_entry(struct bfa_iocpf_s *iocpf)
{
	bfa_iocpf_timer_start(iocpf->ioc);
	/*
	 * Enable Interrupts before sending fw IOC ENABLE cmd.
	 */
	iocpf->ioc->cbfn->reset_cbfn(iocpf->ioc->bfa);
	bfa_ioc_send_enable(iocpf->ioc);
}

/*
 * Host IOC function is being enabled, awaiting response from firmware.
 * Semaphore is acquired.
 */
static void
bfa_iocpf_sm_enabling(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
	struct bfa_ioc_s *ioc = iocpf->ioc;

	bfa_trc(ioc, event);

	switch (event) {
	case IOCPF_E_FWRSP_ENABLE:
		bfa_iocpf_timer_stop(ioc);
		writel(1, ioc->ioc_regs.ioc_sem_reg);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_ready);
		break;

	case IOCPF_E_INITFAIL:
		bfa_iocpf_timer_stop(ioc);
		/*
		 * !!! fall through !!!
		 */

	case IOCPF_E_TIMEOUT:
		writel(1, ioc->ioc_regs.ioc_sem_reg);
		if (event == IOCPF_E_TIMEOUT)
			bfa_fsm_send_event(ioc, IOC_E_PFFAILED);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail_sync);
		break;

	case IOCPF_E_DISABLE:
		bfa_iocpf_timer_stop(ioc);
		writel(1, ioc->ioc_regs.ioc_sem_reg);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}

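/*
 * IOCPF is ready: notify the IOC state machine.
 */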
static void
bfa_iocpf_sm_ready_entry(struct bfa_iocpf_s *iocpf)
{
	bfa_fsm_send_event(iocpf->ioc, IOC_E_ENABLED);
}

static void
bfa_iocpf_sm_ready(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
	struct bfa_ioc_s *ioc = iocpf->ioc;

	bfa_trc(ioc, event);

	switch (event) {
	case IOCPF_E_DISABLE:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling);
		break;

	case IOCPF_E_GETATTRFAIL:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail_sync);
		break;

	case IOCPF_E_FAIL:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail_sync);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}

static void
bfa_iocpf_sm_disabling_entry(struct bfa_iocpf_s *iocpf)
{
	bfa_iocpf_timer_start(iocpf->ioc);
	bfa_ioc_send_disable(iocpf->ioc);
}

/*
 * IOC is being disabled
 */
static void
bfa_iocpf_sm_disabling(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
	struct bfa_ioc_s *ioc = iocpf->ioc;

	bfa_trc(ioc, event);

	switch (event) {
	case IOCPF_E_FWRSP_DISABLE:
		bfa_iocpf_timer_stop(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
		break;

	case IOCPF_E_FAIL:
		bfa_iocpf_timer_stop(ioc);
		/*
		 * !!! fall through !!!
		 */

	case IOCPF_E_TIMEOUT:
		writel(BFI_IOC_FAIL, ioc->ioc_regs.ioc_fwstate);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
		break;

	case IOCPF_E_FWRSP_ENABLE:
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}

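/*
 * Acquire the h/w semaphore before completing the disable sync.
 */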
static void
bfa_iocpf_sm_disabling_sync_entry(struct bfa_iocpf_s *iocpf)
{
	bfa_ioc_hw_sem_get(iocpf->ioc);
}

/*
 * IOC hb ack request is being removed.
 */
static void
bfa_iocpf_sm_disabling_sync(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
	struct bfa_ioc_s *ioc = iocpf->ioc;

	bfa_trc(ioc, event);

	switch (event) {
	case IOCPF_E_SEMLOCKED:
		bfa_ioc_sync_leave(ioc);
		writel(1, ioc->ioc_regs.ioc_sem_reg);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
		break;

	case IOCPF_E_SEM_ERROR:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
		bfa_fsm_send_event(ioc, IOC_E_HWFAILED);
		break;

	case IOCPF_E_FAIL:
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}

/*
 * IOC disable completion entry.
 */
static void
bfa_iocpf_sm_disabled_entry(struct bfa_iocpf_s *iocpf)
{
	bfa_ioc_mbox_flush(iocpf->ioc);
	bfa_fsm_send_event(iocpf->ioc, IOC_E_DISABLED);
}

static void
bfa_iocpf_sm_disabled(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
	struct bfa_ioc_s *ioc = iocpf->ioc;

	bfa_trc(ioc, event);

	switch (event) {
	case IOCPF_E_ENABLE:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_semwait);
		break;

	case IOCPF_E_STOP:
		bfa_ioc_firmware_unlock(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}

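/*
 * Save the firmware trace for debugging and acquire the h/w semaphore.
 */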
static void
bfa_iocpf_sm_initfail_sync_entry(struct bfa_iocpf_s *iocpf)
{
	bfa_ioc_debug_save_ftrc(iocpf->ioc);
	bfa_ioc_hw_sem_get(iocpf->ioc);
}

/*
 * Hardware initialization failed.
 */
static void
bfa_iocpf_sm_initfail_sync(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
	struct bfa_ioc_s *ioc = iocpf->ioc;

	bfa_trc(ioc, event);

	switch (event) {
	case IOCPF_E_SEMLOCKED:
		bfa_ioc_notify_fail(ioc);
		bfa_ioc_sync_leave(ioc);
		writel(BFI_IOC_FAIL, ioc->ioc_regs.ioc_fwstate);
		writel(1, ioc->ioc_regs.ioc_sem_reg);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail);
		break;

	case IOCPF_E_SEM_ERROR:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
		bfa_fsm_send_event(ioc, IOC_E_HWFAILED);
		break;

	case IOCPF_E_DISABLE:
		bfa_sem_timer_stop(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
		break;

	case IOCPF_E_STOP:
		bfa_sem_timer_stop(ioc);
		bfa_ioc_firmware_unlock(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
		break;

	case IOCPF_E_FAIL:
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}

static void
bfa_iocpf_sm_initfail_entry(struct bfa_iocpf_s *iocpf)
{
	bfa_trc(iocpf->ioc, 0);
}

/*
 * Hardware initialization failed.
 */
static void
bfa_iocpf_sm_initfail(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
	struct bfa_ioc_s *ioc = iocpf->ioc;

	bfa_trc(ioc, event);

	switch (event) {
	case IOCPF_E_DISABLE:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
		break;

	case IOCPF_E_STOP:
		bfa_ioc_firmware_unlock(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}

static void
bfa_iocpf_sm_fail_sync_entry(struct bfa_iocpf_s *iocpf)
{
	/*
	 * Mark IOC as failed in hardware and stop firmware.
	 */
	bfa_ioc_lpu_stop(iocpf->ioc);

	/*
	 * Flush any queued up mailbox requests.
	 */
	bfa_ioc_mbox_flush(iocpf->ioc);

	bfa_ioc_hw_sem_get(iocpf->ioc);
}

static void
bfa_iocpf_sm_fail_sync(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
	struct bfa_ioc_s *ioc = iocpf->ioc;

	bfa_trc(ioc, event);

	switch (event) {
	case IOCPF_E_SEMLOCKED:
		bfa_ioc_sync_ack(ioc);
		bfa_ioc_notify_fail(ioc);
		if (!iocpf->auto_recover) {
			bfa_ioc_sync_leave(ioc);
			writel(BFI_IOC_FAIL, ioc->ioc_regs.ioc_fwstate);
			writel(1, ioc->ioc_regs.ioc_sem_reg);
			bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
		} else {
			if (bfa_ioc_sync_complete(ioc))
				bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit);
			else {
				writel(1, ioc->ioc_regs.ioc_sem_reg);
				bfa_fsm_set_state(iocpf, bfa_iocpf_sm_semwait);
			}
		}
		break;

	case IOCPF_E_SEM_ERROR:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
		bfa_fsm_send_event(ioc, IOC_E_HWFAILED);
		break;

	case IOCPF_E_DISABLE:
		bfa_sem_timer_stop(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
		break;

	case IOCPF_E_FAIL:
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}

static void
bfa_iocpf_sm_fail_entry(struct bfa_iocpf_s *iocpf)
{
	bfa_trc(iocpf->ioc, 0);
}

/*
 * IOC is in failed state.
 */
static void
bfa_iocpf_sm_fail(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
	struct bfa_ioc_s *ioc = iocpf->ioc;

	bfa_trc(ioc, event);

	switch (event) {
	case IOCPF_E_DISABLE:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}

/*
 * BFA IOC private functions
 */

/*
 * Notify common modules registered for notification.
 */
static void
bfa_ioc_event_notify(struct bfa_ioc_s *ioc, enum bfa_ioc_event_e event)
{
	struct bfa_ioc_notify_s	*notify;
	struct list_head	*qe;

	list_for_each(qe, &ioc->notify_q) {
		notify = (struct bfa_ioc_notify_s *)qe;
		notify->cbfn(notify->cbarg, event);
	}
}

static void
bfa_ioc_disable_comp(struct bfa_ioc_s *ioc)
{
	ioc->cbfn->disable_cbfn(ioc->bfa);
	bfa_ioc_event_notify(ioc, BFA_IOC_E_DISABLED);
}

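/*
 * Attempt to acquire the given h/w semaphore, spinning up to BFA_SEM_SPINCNT
 * times. Returns BFA_TRUE if the semaphore was acquired.
 */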
Krishna Gudipati0a20de42010-03-05 19:34:20 -08001313bfa_boolean_t
Jing Huang53440262010-10-18 17:12:29 -07001314bfa_ioc_sem_get(void __iomem *sem_reg)
Jing Huang7725ccf2009-09-23 17:46:15 -07001315{
Krishna Gudipati0a20de42010-03-05 19:34:20 -08001316 u32 r32;
1317 int cnt = 0;
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001318#define BFA_SEM_SPINCNT 3000
Jing Huang7725ccf2009-09-23 17:46:15 -07001319
Jing Huang53440262010-10-18 17:12:29 -07001320 r32 = readl(sem_reg);
Krishna Gudipati0a20de42010-03-05 19:34:20 -08001321
Krishna Gudipati11189202011-06-13 15:50:35 -07001322 while ((r32 & 1) && (cnt < BFA_SEM_SPINCNT)) {
Jing Huang7725ccf2009-09-23 17:46:15 -07001323 cnt++;
Jing Huang6a18b162010-10-18 17:08:54 -07001324 udelay(2);
Jing Huang53440262010-10-18 17:12:29 -07001325 r32 = readl(sem_reg);
Krishna Gudipati0a20de42010-03-05 19:34:20 -08001326 }
1327
Krishna Gudipati11189202011-06-13 15:50:35 -07001328 if (!(r32 & 1))
Krishna Gudipati0a20de42010-03-05 19:34:20 -08001329 return BFA_TRUE;
1330
Krishna Gudipati0a20de42010-03-05 19:34:20 -08001331 return BFA_FALSE;
Jing Huang7725ccf2009-09-23 17:46:15 -07001332}
1333
Jing Huang7725ccf2009-09-23 17:46:15 -07001334static void
1335bfa_ioc_hw_sem_get(struct bfa_ioc_s *ioc)
1336{
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001337 u32 r32;
Jing Huang7725ccf2009-09-23 17:46:15 -07001338
Jing Huang5fbe25c2010-10-18 17:17:23 -07001339 /*
Jing Huang7725ccf2009-09-23 17:46:15 -07001340 * First read to the semaphore register will return 0, subsequent reads
Krishna Gudipati0a20de42010-03-05 19:34:20 -08001341 * will return 1. Semaphore is released by writing 1 to the register
Jing Huang7725ccf2009-09-23 17:46:15 -07001342 */
Jing Huang53440262010-10-18 17:12:29 -07001343 r32 = readl(ioc->ioc_regs.ioc_sem_reg);
Krishna Gudipati5a0adae2011-06-24 20:22:56 -07001344 if (r32 == ~0) {
1345 WARN_ON(r32 == ~0);
1346 bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_SEM_ERROR);
1347 return;
1348 }
Krishna Gudipati11189202011-06-13 15:50:35 -07001349 if (!(r32 & 1)) {
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001350 bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_SEMLOCKED);
Jing Huang7725ccf2009-09-23 17:46:15 -07001351 return;
1352 }
1353
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001354 bfa_sem_timer_start(ioc);
Jing Huang7725ccf2009-09-23 17:46:15 -07001355}
1356
Jing Huang5fbe25c2010-10-18 17:17:23 -07001357/*
Jing Huang7725ccf2009-09-23 17:46:15 -07001358 * Initialize LPU local memory (aka secondary memory / SRAM)
1359 */
1360static void
1361bfa_ioc_lmem_init(struct bfa_ioc_s *ioc)
1362{
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001363 u32 pss_ctl;
1364 int i;
Jing Huang7725ccf2009-09-23 17:46:15 -07001365#define PSS_LMEM_INIT_TIME 10000
1366
Jing Huang53440262010-10-18 17:12:29 -07001367 pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg);
Jing Huang7725ccf2009-09-23 17:46:15 -07001368 pss_ctl &= ~__PSS_LMEM_RESET;
1369 pss_ctl |= __PSS_LMEM_INIT_EN;
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001370
1371 /*
1372 * i2c workaround 12.5khz clock
1373 */
1374 pss_ctl |= __PSS_I2C_CLK_DIV(3UL);
Jing Huang53440262010-10-18 17:12:29 -07001375 writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);
Jing Huang7725ccf2009-09-23 17:46:15 -07001376
Jing Huang5fbe25c2010-10-18 17:17:23 -07001377 /*
Jing Huang7725ccf2009-09-23 17:46:15 -07001378 * wait for memory initialization to be complete
1379 */
1380 i = 0;
1381 do {
Jing Huang53440262010-10-18 17:12:29 -07001382 pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg);
Jing Huang7725ccf2009-09-23 17:46:15 -07001383 i++;
1384 } while (!(pss_ctl & __PSS_LMEM_INIT_DONE) && (i < PSS_LMEM_INIT_TIME));
1385
Jing Huang5fbe25c2010-10-18 17:17:23 -07001386 /*
Jing Huang7725ccf2009-09-23 17:46:15 -07001387 * If memory initialization is not successful, IOC timeout will catch
1388 * such failures.
1389 */
Jing Huangd4b671c2010-12-26 21:46:35 -08001390 WARN_ON(!(pss_ctl & __PSS_LMEM_INIT_DONE));
Jing Huang7725ccf2009-09-23 17:46:15 -07001391 bfa_trc(ioc, pss_ctl);
1392
1393 pss_ctl &= ~(__PSS_LMEM_INIT_DONE | __PSS_LMEM_INIT_EN);
Jing Huang53440262010-10-18 17:12:29 -07001394 writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);
Jing Huang7725ccf2009-09-23 17:46:15 -07001395}
1396
1397static void
1398bfa_ioc_lpu_start(struct bfa_ioc_s *ioc)
1399{
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001400 u32 pss_ctl;
Jing Huang7725ccf2009-09-23 17:46:15 -07001401
Jing Huang5fbe25c2010-10-18 17:17:23 -07001402 /*
Jing Huang7725ccf2009-09-23 17:46:15 -07001403 * Take processor out of reset.
1404 */
Jing Huang53440262010-10-18 17:12:29 -07001405 pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg);
Jing Huang7725ccf2009-09-23 17:46:15 -07001406 pss_ctl &= ~__PSS_LPU0_RESET;
1407
Jing Huang53440262010-10-18 17:12:29 -07001408 writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);
Jing Huang7725ccf2009-09-23 17:46:15 -07001409}
1410
1411static void
1412bfa_ioc_lpu_stop(struct bfa_ioc_s *ioc)
1413{
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001414 u32 pss_ctl;
Jing Huang7725ccf2009-09-23 17:46:15 -07001415
Jing Huang5fbe25c2010-10-18 17:17:23 -07001416 /*
Jing Huang7725ccf2009-09-23 17:46:15 -07001417 * Put processors in reset.
1418 */
Jing Huang53440262010-10-18 17:12:29 -07001419 pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg);
Jing Huang7725ccf2009-09-23 17:46:15 -07001420 pss_ctl |= (__PSS_LPU0_RESET | __PSS_LPU1_RESET);
1421
Jing Huang53440262010-10-18 17:12:29 -07001422 writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);
Jing Huang7725ccf2009-09-23 17:46:15 -07001423}
1424
Jing Huang5fbe25c2010-10-18 17:17:23 -07001425/*
Jing Huang7725ccf2009-09-23 17:46:15 -07001426 * Get driver and firmware versions.
1427 */
Krishna Gudipati0a20de42010-03-05 19:34:20 -08001428void
Jing Huang7725ccf2009-09-23 17:46:15 -07001429bfa_ioc_fwver_get(struct bfa_ioc_s *ioc, struct bfi_ioc_image_hdr_s *fwhdr)
1430{
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001431 u32 pgnum, pgoff;
1432 u32 loff = 0;
1433 int i;
1434 u32 *fwsig = (u32 *) fwhdr;
Jing Huang7725ccf2009-09-23 17:46:15 -07001435
Maggie Zhangf7f738122010-12-09 19:08:43 -08001436 pgnum = PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, loff);
1437 pgoff = PSS_SMEM_PGOFF(loff);
Jing Huang53440262010-10-18 17:12:29 -07001438 writel(pgnum, ioc->ioc_regs.host_page_num_fn);
Jing Huang7725ccf2009-09-23 17:46:15 -07001439
1440 for (i = 0; i < (sizeof(struct bfi_ioc_image_hdr_s) / sizeof(u32));
1441 i++) {
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001442 fwsig[i] =
1443 bfa_mem_read(ioc->ioc_regs.smem_page_start, loff);
Jing Huang7725ccf2009-09-23 17:46:15 -07001444 loff += sizeof(u32);
1445 }
1446}
1447
Jing Huang5fbe25c2010-10-18 17:17:23 -07001448/*
Jing Huang7725ccf2009-09-23 17:46:15 -07001449 * Returns TRUE if same.
1450 */
Krishna Gudipati0a20de42010-03-05 19:34:20 -08001451bfa_boolean_t
Jing Huang7725ccf2009-09-23 17:46:15 -07001452bfa_ioc_fwver_cmp(struct bfa_ioc_s *ioc, struct bfi_ioc_image_hdr_s *fwhdr)
1453{
1454 struct bfi_ioc_image_hdr_s *drv_fwhdr;
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001455 int i;
Jing Huang7725ccf2009-09-23 17:46:15 -07001456
Jing Huang293f82d2010-07-08 19:45:20 -07001457 drv_fwhdr = (struct bfi_ioc_image_hdr_s *)
Krishna Gudipati11189202011-06-13 15:50:35 -07001458 bfa_cb_image_get_chunk(bfa_ioc_asic_gen(ioc), 0);
Jing Huang7725ccf2009-09-23 17:46:15 -07001459
1460 for (i = 0; i < BFI_IOC_MD5SUM_SZ; i++) {
Krishna Gudipati881c1b32012-08-22 19:52:02 -07001461 if (fwhdr->md5sum[i] != cpu_to_le32(drv_fwhdr->md5sum[i])) {
Jing Huang7725ccf2009-09-23 17:46:15 -07001462 bfa_trc(ioc, i);
1463 bfa_trc(ioc, fwhdr->md5sum[i]);
1464 bfa_trc(ioc, drv_fwhdr->md5sum[i]);
1465 return BFA_FALSE;
1466 }
1467 }
1468
1469 bfa_trc(ioc, fwhdr->md5sum[0]);
1470 return BFA_TRUE;
1471}
1472
Jing Huang5fbe25c2010-10-18 17:17:23 -07001473/*
Jing Huang7725ccf2009-09-23 17:46:15 -07001474 * Return true if the currently running firmware version is valid. The
	1475 * firmware signature and execution context (driver/bios) must match.
1476 */
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001477static bfa_boolean_t
1478bfa_ioc_fwver_valid(struct bfa_ioc_s *ioc, u32 boot_env)
Jing Huang7725ccf2009-09-23 17:46:15 -07001479{
1480 struct bfi_ioc_image_hdr_s fwhdr, *drv_fwhdr;
1481
Jing Huang7725ccf2009-09-23 17:46:15 -07001482 bfa_ioc_fwver_get(ioc, &fwhdr);
Jing Huang293f82d2010-07-08 19:45:20 -07001483 drv_fwhdr = (struct bfi_ioc_image_hdr_s *)
Krishna Gudipati11189202011-06-13 15:50:35 -07001484 bfa_cb_image_get_chunk(bfa_ioc_asic_gen(ioc), 0);
Jing Huang7725ccf2009-09-23 17:46:15 -07001485
Krishna Gudipati881c1b32012-08-22 19:52:02 -07001486 if (fwhdr.signature != cpu_to_le32(drv_fwhdr->signature)) {
Jing Huang7725ccf2009-09-23 17:46:15 -07001487 bfa_trc(ioc, fwhdr.signature);
1488 bfa_trc(ioc, drv_fwhdr->signature);
1489 return BFA_FALSE;
1490 }
1491
Krishna Gudipati11189202011-06-13 15:50:35 -07001492 if (swab32(fwhdr.bootenv) != boot_env) {
1493 bfa_trc(ioc, fwhdr.bootenv);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001494 bfa_trc(ioc, boot_env);
Jing Huang7725ccf2009-09-23 17:46:15 -07001495 return BFA_FALSE;
1496 }
1497
1498 return bfa_ioc_fwver_cmp(ioc, &fwhdr);
1499}
1500
Jing Huang5fbe25c2010-10-18 17:17:23 -07001501/*
Jing Huang7725ccf2009-09-23 17:46:15 -07001502 * Conditionally flush any pending message from firmware at start.
1503 */
1504static void
1505bfa_ioc_msgflush(struct bfa_ioc_s *ioc)
1506{
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001507 u32 r32;
Jing Huang7725ccf2009-09-23 17:46:15 -07001508
Jing Huang53440262010-10-18 17:12:29 -07001509 r32 = readl(ioc->ioc_regs.lpu_mbox_cmd);
Jing Huang7725ccf2009-09-23 17:46:15 -07001510 if (r32)
Jing Huang53440262010-10-18 17:12:29 -07001511 writel(1, ioc->ioc_regs.lpu_mbox_cmd);
Jing Huang7725ccf2009-09-23 17:46:15 -07001512}
1513
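/*
 * Bring the IOC firmware up: look at the current firmware state,
 * validate any firmware already running, and then either reuse it
 * (re-enable or wait for the other function's init), or download a
 * fresh image, boot it and poll for initialization to complete.
 */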
Jing Huang7725ccf2009-09-23 17:46:15 -07001514static void
1515bfa_ioc_hwinit(struct bfa_ioc_s *ioc, bfa_boolean_t force)
1516{
1517 enum bfi_ioc_state ioc_fwstate;
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001518 bfa_boolean_t fwvalid;
1519 u32 boot_type;
1520 u32 boot_env;
Jing Huang7725ccf2009-09-23 17:46:15 -07001521
Jing Huang53440262010-10-18 17:12:29 -07001522 ioc_fwstate = readl(ioc->ioc_regs.ioc_fwstate);
Jing Huang7725ccf2009-09-23 17:46:15 -07001523
1524 if (force)
1525 ioc_fwstate = BFI_IOC_UNINIT;
1526
1527 bfa_trc(ioc, ioc_fwstate);
1528
Krishna Gudipati11189202011-06-13 15:50:35 -07001529 boot_type = BFI_FWBOOT_TYPE_NORMAL;
1530 boot_env = BFI_FWBOOT_ENV_OS;
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001531
Jing Huang5fbe25c2010-10-18 17:17:23 -07001532 /*
Jing Huang7725ccf2009-09-23 17:46:15 -07001533 * check if firmware is valid
1534 */
1535 fwvalid = (ioc_fwstate == BFI_IOC_UNINIT) ?
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001536 BFA_FALSE : bfa_ioc_fwver_valid(ioc, boot_env);
Jing Huang7725ccf2009-09-23 17:46:15 -07001537
1538 if (!fwvalid) {
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001539 bfa_ioc_boot(ioc, boot_type, boot_env);
Krishna Gudipati8b070b42011-06-13 15:52:40 -07001540 bfa_ioc_poll_fwinit(ioc);
Jing Huang7725ccf2009-09-23 17:46:15 -07001541 return;
1542 }
1543
Jing Huang5fbe25c2010-10-18 17:17:23 -07001544 /*
Jing Huang7725ccf2009-09-23 17:46:15 -07001545 * If hardware initialization is in progress (initiated by the other IOC
	1546 * function), just poll for the initialization to complete.
1547 */
1548 if (ioc_fwstate == BFI_IOC_INITING) {
Krishna Gudipati775c7742011-06-13 15:52:12 -07001549 bfa_ioc_poll_fwinit(ioc);
Jing Huang7725ccf2009-09-23 17:46:15 -07001550 return;
1551 }
1552
Jing Huang5fbe25c2010-10-18 17:17:23 -07001553 /*
Jing Huang7725ccf2009-09-23 17:46:15 -07001554 * If IOC function is disabled and firmware version is same,
1555 * just re-enable IOC.
Jing Huang07b28382010-07-08 19:59:24 -07001556 *
	1557 * With an option ROM, the IOC must not be in operational state. With
	1558 * convergence, the IOC will be in operational state when the 2nd driver
	1559 * is loaded.
Jing Huang7725ccf2009-09-23 17:46:15 -07001560 */
Jing Huang8f4bfad2010-12-26 21:50:10 -08001561 if (ioc_fwstate == BFI_IOC_DISABLED || ioc_fwstate == BFI_IOC_OP) {
Jing Huang7725ccf2009-09-23 17:46:15 -07001562
Jing Huang5fbe25c2010-10-18 17:17:23 -07001563 /*
Jing Huang7725ccf2009-09-23 17:46:15 -07001564 * When using MSI-X any pending firmware ready event should
1565 * be flushed. Otherwise MSI-X interrupts are not delivered.
1566 */
1567 bfa_ioc_msgflush(ioc);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001568 bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_FWREADY);
Jing Huang7725ccf2009-09-23 17:46:15 -07001569 return;
1570 }
1571
Jing Huang5fbe25c2010-10-18 17:17:23 -07001572 /*
Jing Huang7725ccf2009-09-23 17:46:15 -07001573 * Initialize the h/w for any other states.
1574 */
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001575 bfa_ioc_boot(ioc, boot_type, boot_env);
Krishna Gudipati8b070b42011-06-13 15:52:40 -07001576 bfa_ioc_poll_fwinit(ioc);
Jing Huang7725ccf2009-09-23 17:46:15 -07001577}
1578
1579static void
1580bfa_ioc_timeout(void *ioc_arg)
1581{
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001582 struct bfa_ioc_s *ioc = (struct bfa_ioc_s *) ioc_arg;
Jing Huang7725ccf2009-09-23 17:46:15 -07001583
1584 bfa_trc(ioc, 0);
1585 bfa_fsm_send_event(ioc, IOC_E_TIMEOUT);
1586}
1587
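/*
 * Copy a message into the host-to-firmware mailbox registers, zero-fill
 * the remaining words and ring the mailbox doorbell. The caller must
 * ensure the mailbox is free (see bfa_ioc_mbox_queue()).
 */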
1588void
1589bfa_ioc_mbox_send(struct bfa_ioc_s *ioc, void *ioc_msg, int len)
1590{
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001591 u32 *msgp = (u32 *) ioc_msg;
1592 u32 i;
Jing Huang7725ccf2009-09-23 17:46:15 -07001593
1594 bfa_trc(ioc, msgp[0]);
1595 bfa_trc(ioc, len);
1596
Jing Huangd4b671c2010-12-26 21:46:35 -08001597 WARN_ON(len > BFI_IOC_MSGLEN_MAX);
Jing Huang7725ccf2009-09-23 17:46:15 -07001598
1599 /*
1600 * first write msg to mailbox registers
1601 */
1602 for (i = 0; i < len / sizeof(u32); i++)
Jing Huang53440262010-10-18 17:12:29 -07001603 writel(cpu_to_le32(msgp[i]),
1604 ioc->ioc_regs.hfn_mbox + i * sizeof(u32));
Jing Huang7725ccf2009-09-23 17:46:15 -07001605
1606 for (; i < BFI_IOC_MSGLEN_MAX / sizeof(u32); i++)
Jing Huang53440262010-10-18 17:12:29 -07001607 writel(0, ioc->ioc_regs.hfn_mbox + i * sizeof(u32));
Jing Huang7725ccf2009-09-23 17:46:15 -07001608
1609 /*
1610 * write 1 to mailbox CMD to trigger LPU event
1611 */
Jing Huang53440262010-10-18 17:12:29 -07001612 writel(1, ioc->ioc_regs.hfn_mbox_cmd);
1613 (void) readl(ioc->ioc_regs.hfn_mbox_cmd);
Jing Huang7725ccf2009-09-23 17:46:15 -07001614}
1615
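/*
 * Post an IOC enable request to the firmware, carrying the PCI class
 * code and the current time of day.
 */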
1616static void
1617bfa_ioc_send_enable(struct bfa_ioc_s *ioc)
1618{
1619 struct bfi_ioc_ctrl_req_s enable_req;
Maggie Zhangf16a1752010-12-09 19:12:32 -08001620 struct timeval tv;
Jing Huang7725ccf2009-09-23 17:46:15 -07001621
1622 bfi_h2i_set(enable_req.mh, BFI_MC_IOC, BFI_IOC_H2I_ENABLE_REQ,
1623 bfa_ioc_portid(ioc));
Krishna Gudipatid37779f2011-06-13 15:42:10 -07001624 enable_req.clscode = cpu_to_be16(ioc->clscode);
Maggie Zhangf16a1752010-12-09 19:12:32 -08001625 do_gettimeofday(&tv);
Jing Huangba816ea2010-10-18 17:10:50 -07001626 enable_req.tv_sec = be32_to_cpu(tv.tv_sec);
Jing Huang7725ccf2009-09-23 17:46:15 -07001627 bfa_ioc_mbox_send(ioc, &enable_req, sizeof(struct bfi_ioc_ctrl_req_s));
1628}
1629
1630static void
1631bfa_ioc_send_disable(struct bfa_ioc_s *ioc)
1632{
1633 struct bfi_ioc_ctrl_req_s disable_req;
1634
1635 bfi_h2i_set(disable_req.mh, BFI_MC_IOC, BFI_IOC_H2I_DISABLE_REQ,
1636 bfa_ioc_portid(ioc));
1637 bfa_ioc_mbox_send(ioc, &disable_req, sizeof(struct bfi_ioc_ctrl_req_s));
1638}
1639
1640static void
1641bfa_ioc_send_getattr(struct bfa_ioc_s *ioc)
1642{
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001643 struct bfi_ioc_getattr_req_s attr_req;
Jing Huang7725ccf2009-09-23 17:46:15 -07001644
1645 bfi_h2i_set(attr_req.mh, BFI_MC_IOC, BFI_IOC_H2I_GETATTR_REQ,
1646 bfa_ioc_portid(ioc));
1647 bfa_dma_be_addr_set(attr_req.attr_addr, ioc->attr_dma.pa);
1648 bfa_ioc_mbox_send(ioc, &attr_req, sizeof(attr_req));
1649}
1650
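/*
 * Heartbeat timer callback: if the firmware heartbeat counter has not
 * advanced since the last check, start IOC recovery; otherwise poll the
 * mailbox for pending commands and re-arm the heartbeat timer.
 */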
1651static void
1652bfa_ioc_hb_check(void *cbarg)
1653{
Krishna Gudipati0a20de42010-03-05 19:34:20 -08001654 struct bfa_ioc_s *ioc = cbarg;
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001655 u32 hb_count;
Jing Huang7725ccf2009-09-23 17:46:15 -07001656
Jing Huang53440262010-10-18 17:12:29 -07001657 hb_count = readl(ioc->ioc_regs.heartbeat);
Jing Huang7725ccf2009-09-23 17:46:15 -07001658 if (ioc->hb_count == hb_count) {
Jing Huang7725ccf2009-09-23 17:46:15 -07001659 bfa_ioc_recover(ioc);
1660 return;
Krishna Gudipati0a20de42010-03-05 19:34:20 -08001661 } else {
1662 ioc->hb_count = hb_count;
Jing Huang7725ccf2009-09-23 17:46:15 -07001663 }
1664
1665 bfa_ioc_mbox_poll(ioc);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001666 bfa_hb_timer_start(ioc);
Jing Huang7725ccf2009-09-23 17:46:15 -07001667}
1668
1669static void
1670bfa_ioc_hb_monitor(struct bfa_ioc_s *ioc)
1671{
Jing Huang53440262010-10-18 17:12:29 -07001672 ioc->hb_count = readl(ioc->ioc_regs.heartbeat);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001673 bfa_hb_timer_start(ioc);
Jing Huang7725ccf2009-09-23 17:46:15 -07001674}
1675
Jing Huang5fbe25c2010-10-18 17:17:23 -07001676/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001677 * Initiate a full firmware download.
Jing Huang7725ccf2009-09-23 17:46:15 -07001678 */
1679static void
1680bfa_ioc_download_fw(struct bfa_ioc_s *ioc, u32 boot_type,
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001681 u32 boot_env)
Jing Huang7725ccf2009-09-23 17:46:15 -07001682{
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001683 u32 *fwimg;
1684 u32 pgnum, pgoff;
1685 u32 loff = 0;
1686 u32 chunkno = 0;
1687 u32 i;
Krishna Gudipati11189202011-06-13 15:50:35 -07001688 u32 asicmode;
Jing Huang7725ccf2009-09-23 17:46:15 -07001689
Krishna Gudipati11189202011-06-13 15:50:35 -07001690 bfa_trc(ioc, bfa_cb_image_get_size(bfa_ioc_asic_gen(ioc)));
1691 fwimg = bfa_cb_image_get_chunk(bfa_ioc_asic_gen(ioc), chunkno);
Jing Huang7725ccf2009-09-23 17:46:15 -07001692
Maggie Zhangf7f738122010-12-09 19:08:43 -08001693 pgnum = PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, loff);
1694 pgoff = PSS_SMEM_PGOFF(loff);
Jing Huang7725ccf2009-09-23 17:46:15 -07001695
Jing Huang53440262010-10-18 17:12:29 -07001696 writel(pgnum, ioc->ioc_regs.host_page_num_fn);
Jing Huang7725ccf2009-09-23 17:46:15 -07001697
Krishna Gudipati11189202011-06-13 15:50:35 -07001698 for (i = 0; i < bfa_cb_image_get_size(bfa_ioc_asic_gen(ioc)); i++) {
Jing Huang7725ccf2009-09-23 17:46:15 -07001699
Krishna Gudipati0a20de42010-03-05 19:34:20 -08001700 if (BFA_IOC_FLASH_CHUNK_NO(i) != chunkno) {
1701 chunkno = BFA_IOC_FLASH_CHUNK_NO(i);
Krishna Gudipati11189202011-06-13 15:50:35 -07001702 fwimg = bfa_cb_image_get_chunk(bfa_ioc_asic_gen(ioc),
Krishna Gudipati0a20de42010-03-05 19:34:20 -08001703 BFA_IOC_FLASH_CHUNK_ADDR(chunkno));
Jing Huang7725ccf2009-09-23 17:46:15 -07001704 }
1705
Jing Huang5fbe25c2010-10-18 17:17:23 -07001706 /*
Jing Huang7725ccf2009-09-23 17:46:15 -07001707 * write smem
1708 */
1709 bfa_mem_write(ioc->ioc_regs.smem_page_start, loff,
Krishna Gudipati881c1b32012-08-22 19:52:02 -07001710 cpu_to_le32(fwimg[BFA_IOC_FLASH_OFFSET_IN_CHUNK(i)]));
Jing Huang7725ccf2009-09-23 17:46:15 -07001711
1712 loff += sizeof(u32);
1713
Jing Huang5fbe25c2010-10-18 17:17:23 -07001714 /*
Jing Huang7725ccf2009-09-23 17:46:15 -07001715 * handle page offset wrap around
1716 */
1717 loff = PSS_SMEM_PGOFF(loff);
1718 if (loff == 0) {
1719 pgnum++;
Jing Huang53440262010-10-18 17:12:29 -07001720 writel(pgnum, ioc->ioc_regs.host_page_num_fn);
Jing Huang7725ccf2009-09-23 17:46:15 -07001721 }
1722 }
1723
Maggie Zhangf7f738122010-12-09 19:08:43 -08001724 writel(PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, 0),
1725 ioc->ioc_regs.host_page_num_fn);
Krishna Gudipati13cc20c2010-03-05 19:37:29 -08001726
1727 /*
Krishna Gudipati11189202011-06-13 15:50:35 -07001728 * Set boot type and device mode at the end.
1729 */
1730 asicmode = BFI_FWBOOT_DEVMODE(ioc->asic_gen, ioc->asic_mode,
1731 ioc->port0_mode, ioc->port1_mode);
1732 bfa_mem_write(ioc->ioc_regs.smem_page_start, BFI_FWBOOT_DEVMODE_OFF,
1733 swab32(asicmode));
1734 bfa_mem_write(ioc->ioc_regs.smem_page_start, BFI_FWBOOT_TYPE_OFF,
Jing Huang53440262010-10-18 17:12:29 -07001735 swab32(boot_type));
Krishna Gudipati11189202011-06-13 15:50:35 -07001736 bfa_mem_write(ioc->ioc_regs.smem_page_start, BFI_FWBOOT_ENV_OFF,
Jing Huang53440262010-10-18 17:12:29 -07001737 swab32(boot_env));
Jing Huang7725ccf2009-09-23 17:46:15 -07001738}
1739
Jing Huang7725ccf2009-09-23 17:46:15 -07001740
Jing Huang5fbe25c2010-10-18 17:17:23 -07001741/*
Jing Huang7725ccf2009-09-23 17:46:15 -07001742 * Update BFA configuration from firmware configuration.
1743 */
1744static void
1745bfa_ioc_getattr_reply(struct bfa_ioc_s *ioc)
1746{
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001747 struct bfi_ioc_attr_s *attr = ioc->attr;
Jing Huang7725ccf2009-09-23 17:46:15 -07001748
Jing Huangba816ea2010-10-18 17:10:50 -07001749 attr->adapter_prop = be32_to_cpu(attr->adapter_prop);
1750 attr->card_type = be32_to_cpu(attr->card_type);
1751 attr->maxfrsize = be16_to_cpu(attr->maxfrsize);
Krishna Gudipati5a0adae2011-06-24 20:22:56 -07001752 ioc->fcmode = (attr->port_mode == BFI_PORT_MODE_FC);
Krishna Gudipatiea5d7c92012-09-21 17:25:02 -07001753 attr->mfg_year = be16_to_cpu(attr->mfg_year);
Jing Huang7725ccf2009-09-23 17:46:15 -07001754
1755 bfa_fsm_send_event(ioc, IOC_E_FWRSP_GETATTR);
1756}
1757
Jing Huang5fbe25c2010-10-18 17:17:23 -07001758/*
Jing Huang7725ccf2009-09-23 17:46:15 -07001759 * Attach time initialization of mbox logic.
1760 */
1761static void
1762bfa_ioc_mbox_attach(struct bfa_ioc_s *ioc)
1763{
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001764 struct bfa_ioc_mbox_mod_s *mod = &ioc->mbox_mod;
1765 int mc;
Jing Huang7725ccf2009-09-23 17:46:15 -07001766
1767 INIT_LIST_HEAD(&mod->cmd_q);
1768 for (mc = 0; mc < BFI_MC_MAX; mc++) {
1769 mod->mbhdlr[mc].cbfn = NULL;
1770 mod->mbhdlr[mc].cbarg = ioc->bfa;
1771 }
1772}
1773
Jing Huang5fbe25c2010-10-18 17:17:23 -07001774/*
Jing Huang7725ccf2009-09-23 17:46:15 -07001775 * Mbox poll timer -- restarts any pending mailbox requests.
1776 */
1777static void
1778bfa_ioc_mbox_poll(struct bfa_ioc_s *ioc)
1779{
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001780 struct bfa_ioc_mbox_mod_s *mod = &ioc->mbox_mod;
1781 struct bfa_mbox_cmd_s *cmd;
1782 u32 stat;
Jing Huang7725ccf2009-09-23 17:46:15 -07001783
Jing Huang5fbe25c2010-10-18 17:17:23 -07001784 /*
Jing Huang7725ccf2009-09-23 17:46:15 -07001785 * If no command pending, do nothing
1786 */
1787 if (list_empty(&mod->cmd_q))
1788 return;
1789
Jing Huang5fbe25c2010-10-18 17:17:23 -07001790 /*
Jing Huang7725ccf2009-09-23 17:46:15 -07001791 * If previous command is not yet fetched by firmware, do nothing
1792 */
Jing Huang53440262010-10-18 17:12:29 -07001793 stat = readl(ioc->ioc_regs.hfn_mbox_cmd);
Jing Huang7725ccf2009-09-23 17:46:15 -07001794 if (stat)
1795 return;
1796
Jing Huang5fbe25c2010-10-18 17:17:23 -07001797 /*
Jing Huang7725ccf2009-09-23 17:46:15 -07001798 * Enqueue command to firmware.
1799 */
1800 bfa_q_deq(&mod->cmd_q, &cmd);
1801 bfa_ioc_mbox_send(ioc, cmd->msg, sizeof(cmd->msg));
1802}
1803
Jing Huang5fbe25c2010-10-18 17:17:23 -07001804/*
Jing Huang7725ccf2009-09-23 17:46:15 -07001805 * Cleanup any pending requests.
1806 */
1807static void
Krishna Gudipati8b070b42011-06-13 15:52:40 -07001808bfa_ioc_mbox_flush(struct bfa_ioc_s *ioc)
Jing Huang7725ccf2009-09-23 17:46:15 -07001809{
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001810 struct bfa_ioc_mbox_mod_s *mod = &ioc->mbox_mod;
1811 struct bfa_mbox_cmd_s *cmd;
Jing Huang7725ccf2009-09-23 17:46:15 -07001812
1813 while (!list_empty(&mod->cmd_q))
1814 bfa_q_deq(&mod->cmd_q, &cmd);
1815}
1816
Jing Huang5fbe25c2010-10-18 17:17:23 -07001817/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001818 * Read data from SMEM to host through PCI memmap
1819 *
1820 * @param[in] ioc memory for IOC
1821 * @param[in] tbuf app memory to store data from smem
1822 * @param[in] soff smem offset
1823 * @param[in] sz size of smem in bytes
Jing Huang7725ccf2009-09-23 17:46:15 -07001824 */
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001825static bfa_status_t
1826bfa_ioc_smem_read(struct bfa_ioc_s *ioc, void *tbuf, u32 soff, u32 sz)
1827{
Maggie50444a32010-11-29 18:26:32 -08001828 u32 pgnum, loff;
1829 __be32 r32;
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001830 int i, len;
1831 u32 *buf = tbuf;
1832
Maggie Zhangf7f738122010-12-09 19:08:43 -08001833 pgnum = PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, soff);
1834 loff = PSS_SMEM_PGOFF(soff);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001835 bfa_trc(ioc, pgnum);
1836 bfa_trc(ioc, loff);
1837 bfa_trc(ioc, sz);
1838
1839 /*
1840 * Hold semaphore to serialize pll init and fwtrc.
1841 */
1842 if (BFA_FALSE == bfa_ioc_sem_get(ioc->ioc_regs.ioc_init_sem_reg)) {
1843 bfa_trc(ioc, 0);
1844 return BFA_STATUS_FAILED;
1845 }
1846
Jing Huang53440262010-10-18 17:12:29 -07001847 writel(pgnum, ioc->ioc_regs.host_page_num_fn);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001848
1849 len = sz/sizeof(u32);
1850 bfa_trc(ioc, len);
1851 for (i = 0; i < len; i++) {
1852 r32 = bfa_mem_read(ioc->ioc_regs.smem_page_start, loff);
Jing Huangba816ea2010-10-18 17:10:50 -07001853 buf[i] = be32_to_cpu(r32);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001854 loff += sizeof(u32);
1855
Jing Huang5fbe25c2010-10-18 17:17:23 -07001856 /*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001857 * handle page offset wrap around
1858 */
1859 loff = PSS_SMEM_PGOFF(loff);
1860 if (loff == 0) {
1861 pgnum++;
Jing Huang53440262010-10-18 17:12:29 -07001862 writel(pgnum, ioc->ioc_regs.host_page_num_fn);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001863 }
1864 }
Maggie Zhangf7f738122010-12-09 19:08:43 -08001865 writel(PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, 0),
1866 ioc->ioc_regs.host_page_num_fn);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001867 /*
1868 * release semaphore.
1869 */
Krishna Gudipati5a0adae2011-06-24 20:22:56 -07001870 readl(ioc->ioc_regs.ioc_init_sem_reg);
Maggie Zhangf7f738122010-12-09 19:08:43 -08001871 writel(1, ioc->ioc_regs.ioc_init_sem_reg);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001872
1873 bfa_trc(ioc, pgnum);
1874 return BFA_STATUS_OK;
1875}
1876
Jing Huang5fbe25c2010-10-18 17:17:23 -07001877/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001878 * Clear SMEM data from host through PCI memmap
1879 *
1880 * @param[in] ioc memory for IOC
1881 * @param[in] soff smem offset
1882 * @param[in] sz size of smem in bytes
1883 */
1884static bfa_status_t
1885bfa_ioc_smem_clr(struct bfa_ioc_s *ioc, u32 soff, u32 sz)
1886{
1887 int i, len;
1888 u32 pgnum, loff;
1889
Maggie Zhangf7f738122010-12-09 19:08:43 -08001890 pgnum = PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, soff);
1891 loff = PSS_SMEM_PGOFF(soff);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001892 bfa_trc(ioc, pgnum);
1893 bfa_trc(ioc, loff);
1894 bfa_trc(ioc, sz);
1895
1896 /*
1897 * Hold semaphore to serialize pll init and fwtrc.
1898 */
1899 if (BFA_FALSE == bfa_ioc_sem_get(ioc->ioc_regs.ioc_init_sem_reg)) {
1900 bfa_trc(ioc, 0);
1901 return BFA_STATUS_FAILED;
1902 }
1903
Jing Huang53440262010-10-18 17:12:29 -07001904 writel(pgnum, ioc->ioc_regs.host_page_num_fn);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001905
1906 len = sz/sizeof(u32); /* len in words */
1907 bfa_trc(ioc, len);
1908 for (i = 0; i < len; i++) {
1909 bfa_mem_write(ioc->ioc_regs.smem_page_start, loff, 0);
1910 loff += sizeof(u32);
1911
Jing Huang5fbe25c2010-10-18 17:17:23 -07001912 /*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001913 * handle page offset wrap around
1914 */
1915 loff = PSS_SMEM_PGOFF(loff);
1916 if (loff == 0) {
1917 pgnum++;
Jing Huang53440262010-10-18 17:12:29 -07001918 writel(pgnum, ioc->ioc_regs.host_page_num_fn);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001919 }
1920 }
Maggie Zhangf7f738122010-12-09 19:08:43 -08001921 writel(PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, 0),
1922 ioc->ioc_regs.host_page_num_fn);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001923
1924 /*
1925 * release semaphore.
1926 */
Krishna Gudipati5a0adae2011-06-24 20:22:56 -07001927 readl(ioc->ioc_regs.ioc_init_sem_reg);
Maggie Zhangf7f738122010-12-09 19:08:43 -08001928 writel(1, ioc->ioc_regs.ioc_init_sem_reg);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001929 bfa_trc(ioc, pgnum);
1930 return BFA_STATUS_OK;
1931}
1932
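/*
 * IOC failure notification: inform the driver and registered modules,
 * save the firmware trace and log/post the heartbeat failure event.
 */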
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001933static void
Krishna Gudipati4e78efe2010-12-13 16:16:09 -08001934bfa_ioc_fail_notify(struct bfa_ioc_s *ioc)
1935{
Krishna Gudipati4e78efe2010-12-13 16:16:09 -08001936 struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad;
1937
Jing Huang8f4bfad2010-12-26 21:50:10 -08001938 /*
Krishna Gudipati4e78efe2010-12-13 16:16:09 -08001939 * Notify driver and common modules registered for notification.
1940 */
1941 ioc->cbfn->hbfail_cbfn(ioc->bfa);
Krishna Gudipatid37779f2011-06-13 15:42:10 -07001942 bfa_ioc_event_notify(ioc, BFA_IOC_E_FAILED);
Krishna Gudipati4e78efe2010-12-13 16:16:09 -08001943
1944 bfa_ioc_debug_save_ftrc(ioc);
1945
1946 BFA_LOG(KERN_CRIT, bfad, bfa_log_level,
1947 "Heart Beat of IOC has failed\n");
Krishna Gudipati7826f302011-07-20 16:59:13 -07001948 bfa_ioc_aen_post(ioc, BFA_IOC_AEN_HBFAIL);
Krishna Gudipati4e78efe2010-12-13 16:16:09 -08001949
1950}
1951
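/*
 * Firmware/driver version mismatch: complete the pending enable request
 * with BFA_STATUS_IOC_FAILURE, log a warning and post an AEN event.
 */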
1952static void
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001953bfa_ioc_pf_fwmismatch(struct bfa_ioc_s *ioc)
1954{
1955 struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad;
Jing Huang5fbe25c2010-10-18 17:17:23 -07001956 /*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001957 * Provide enable completion callback.
1958 */
1959 ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
Jing Huang88166242010-12-09 17:11:53 -08001960 BFA_LOG(KERN_WARNING, bfad, bfa_log_level,
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001961 "Running firmware version is incompatible "
1962 "with the driver version\n");
Krishna Gudipati7826f302011-07-20 16:59:13 -07001963 bfa_ioc_aen_post(ioc, BFA_IOC_AEN_FWMISMATCH);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001964}
1965
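/*
 * Initialize the ASIC PLLs and LPU local memory. The init semaphore is
 * held across the sequence so the other function cannot access the chip
 * while it is being brought up.
 */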
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001966bfa_status_t
1967bfa_ioc_pll_init(struct bfa_ioc_s *ioc)
1968{
1969
1970 /*
1971 * Hold semaphore so that nobody can access the chip during init.
1972 */
1973 bfa_ioc_sem_get(ioc->ioc_regs.ioc_init_sem_reg);
1974
1975 bfa_ioc_pll_init_asic(ioc);
1976
1977 ioc->pllinit = BFA_TRUE;
Krishna Gudipati89196782012-03-13 17:38:56 -07001978
1979 /*
1980 * Initialize LMEM
1981 */
1982 bfa_ioc_lmem_init(ioc);
1983
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001984 /*
1985 * release semaphore.
1986 */
Krishna Gudipati5a0adae2011-06-24 20:22:56 -07001987 readl(ioc->ioc_regs.ioc_init_sem_reg);
Maggie Zhangf7f738122010-12-09 19:08:43 -08001988 writel(1, ioc->ioc_regs.ioc_init_sem_reg);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001989
1990 return BFA_STATUS_OK;
1991}
Jing Huang7725ccf2009-09-23 17:46:15 -07001992
Jing Huang5fbe25c2010-10-18 17:17:23 -07001993/*
Jing Huang7725ccf2009-09-23 17:46:15 -07001994 * Interface used by diag module to do firmware boot with memory test
1995 * as the entry vector.
1996 */
1997void
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001998bfa_ioc_boot(struct bfa_ioc_s *ioc, u32 boot_type, u32 boot_env)
Jing Huang7725ccf2009-09-23 17:46:15 -07001999{
Jing Huang7725ccf2009-09-23 17:46:15 -07002000 bfa_ioc_stats(ioc, ioc_boots);
2001
2002 if (bfa_ioc_pll_init(ioc) != BFA_STATUS_OK)
2003 return;
2004
Jing Huang5fbe25c2010-10-18 17:17:23 -07002005 /*
Jing Huang7725ccf2009-09-23 17:46:15 -07002006 * Initialize IOC state of all functions on a chip reset.
2007 */
Krishna Gudipati11189202011-06-13 15:50:35 -07002008 if (boot_type == BFI_FWBOOT_TYPE_MEMTEST) {
2009 writel(BFI_IOC_MEMTEST, ioc->ioc_regs.ioc_fwstate);
2010 writel(BFI_IOC_MEMTEST, ioc->ioc_regs.alt_ioc_fwstate);
Jing Huang7725ccf2009-09-23 17:46:15 -07002011 } else {
Krishna Gudipati11189202011-06-13 15:50:35 -07002012 writel(BFI_IOC_INITING, ioc->ioc_regs.ioc_fwstate);
2013 writel(BFI_IOC_INITING, ioc->ioc_regs.alt_ioc_fwstate);
Jing Huang7725ccf2009-09-23 17:46:15 -07002014 }
2015
Jing Huang07b28382010-07-08 19:59:24 -07002016 bfa_ioc_msgflush(ioc);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002017 bfa_ioc_download_fw(ioc, boot_type, boot_env);
Jing Huang7725ccf2009-09-23 17:46:15 -07002018 bfa_ioc_lpu_start(ioc);
2019}
2020
Jing Huang5fbe25c2010-10-18 17:17:23 -07002021/*
Jing Huang7725ccf2009-09-23 17:46:15 -07002022 * Enable/disable IOC failure auto recovery.
2023 */
2024void
2025bfa_ioc_auto_recover(bfa_boolean_t auto_recover)
2026{
Krishna Gudipati2f9b8852010-03-03 17:42:51 -08002027 bfa_auto_recover = auto_recover;
Jing Huang7725ccf2009-09-23 17:46:15 -07002028}
2029
2030
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002031
Jing Huang7725ccf2009-09-23 17:46:15 -07002032bfa_boolean_t
2033bfa_ioc_is_operational(struct bfa_ioc_s *ioc)
2034{
2035 return bfa_fsm_cmp_state(ioc, bfa_ioc_sm_op);
2036}
2037
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002038bfa_boolean_t
2039bfa_ioc_is_initialized(struct bfa_ioc_s *ioc)
2040{
Jing Huang53440262010-10-18 17:12:29 -07002041 u32 r32 = readl(ioc->ioc_regs.ioc_fwstate);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002042
2043 return ((r32 != BFI_IOC_UNINIT) &&
2044 (r32 != BFI_IOC_INITING) &&
2045 (r32 != BFI_IOC_MEMTEST));
2046}
2047
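/*
 * Fetch one incoming mailbox message word by word into the caller's
 * buffer and clear the mailbox interrupt. Returns BFA_FALSE when no
 * message is pending.
 */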
Krishna Gudipati11189202011-06-13 15:50:35 -07002048bfa_boolean_t
Jing Huang7725ccf2009-09-23 17:46:15 -07002049bfa_ioc_msgget(struct bfa_ioc_s *ioc, void *mbmsg)
2050{
Maggie50444a32010-11-29 18:26:32 -08002051 __be32 *msgp = mbmsg;
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002052 u32 r32;
2053 int i;
Jing Huang7725ccf2009-09-23 17:46:15 -07002054
Krishna Gudipati11189202011-06-13 15:50:35 -07002055 r32 = readl(ioc->ioc_regs.lpu_mbox_cmd);
2056 if ((r32 & 1) == 0)
2057 return BFA_FALSE;
2058
Jing Huang5fbe25c2010-10-18 17:17:23 -07002059 /*
Jing Huang7725ccf2009-09-23 17:46:15 -07002060 * read the MBOX msg
2061 */
2062 for (i = 0; i < (sizeof(union bfi_ioc_i2h_msg_u) / sizeof(u32));
2063 i++) {
Jing Huang53440262010-10-18 17:12:29 -07002064 r32 = readl(ioc->ioc_regs.lpu_mbox +
Jing Huang7725ccf2009-09-23 17:46:15 -07002065 i * sizeof(u32));
Jing Huangba816ea2010-10-18 17:10:50 -07002066 msgp[i] = cpu_to_be32(r32);
Jing Huang7725ccf2009-09-23 17:46:15 -07002067 }
2068
Jing Huang5fbe25c2010-10-18 17:17:23 -07002069 /*
Jing Huang7725ccf2009-09-23 17:46:15 -07002070 * turn off mailbox interrupt by clearing mailbox status
2071 */
Jing Huang53440262010-10-18 17:12:29 -07002072 writel(1, ioc->ioc_regs.lpu_mbox_cmd);
2073 readl(ioc->ioc_regs.lpu_mbox_cmd);
Krishna Gudipati11189202011-06-13 15:50:35 -07002074
2075 return BFA_TRUE;
Jing Huang7725ccf2009-09-23 17:46:15 -07002076}
2077
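/*
 * Handle BFI_MC_IOC class mailbox messages: heartbeats need no action
 * here, enable/disable replies are forwarded to the IOCPF state machine
 * and attribute replies refresh the cached IOC attributes.
 */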
2078void
2079bfa_ioc_isr(struct bfa_ioc_s *ioc, struct bfi_mbmsg_s *m)
2080{
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002081 union bfi_ioc_i2h_msg_u *msg;
2082 struct bfa_iocpf_s *iocpf = &ioc->iocpf;
Jing Huang7725ccf2009-09-23 17:46:15 -07002083
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002084 msg = (union bfi_ioc_i2h_msg_u *) m;
Jing Huang7725ccf2009-09-23 17:46:15 -07002085
2086 bfa_ioc_stats(ioc, ioc_isrs);
2087
2088 switch (msg->mh.msg_id) {
2089 case BFI_IOC_I2H_HBEAT:
2090 break;
2091
Jing Huang7725ccf2009-09-23 17:46:15 -07002092 case BFI_IOC_I2H_ENABLE_REPLY:
Krishna Gudipati1a4d8e12011-06-24 20:22:28 -07002093 ioc->port_mode = ioc->port_mode_cfg =
2094 (enum bfa_mode_s)msg->fw_event.port_mode;
2095 ioc->ad_cap_bm = msg->fw_event.cap_bm;
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002096 bfa_fsm_send_event(iocpf, IOCPF_E_FWRSP_ENABLE);
Jing Huang7725ccf2009-09-23 17:46:15 -07002097 break;
2098
2099 case BFI_IOC_I2H_DISABLE_REPLY:
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002100 bfa_fsm_send_event(iocpf, IOCPF_E_FWRSP_DISABLE);
Jing Huang7725ccf2009-09-23 17:46:15 -07002101 break;
2102
2103 case BFI_IOC_I2H_GETATTR_REPLY:
2104 bfa_ioc_getattr_reply(ioc);
2105 break;
2106
2107 default:
2108 bfa_trc(ioc, msg->mh.msg_id);
Jing Huangd4b671c2010-12-26 21:46:35 -08002109 WARN_ON(1);
Jing Huang7725ccf2009-09-23 17:46:15 -07002110 }
2111}
2112
Jing Huang5fbe25c2010-10-18 17:17:23 -07002113/*
Jing Huang7725ccf2009-09-23 17:46:15 -07002114 * IOC attach time initialization and setup.
2115 *
2116 * @param[in] ioc memory for IOC
2117 * @param[in] bfa driver instance structure
Jing Huang7725ccf2009-09-23 17:46:15 -07002118 */
2119void
2120bfa_ioc_attach(struct bfa_ioc_s *ioc, void *bfa, struct bfa_ioc_cbfn_s *cbfn,
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002121 struct bfa_timer_mod_s *timer_mod)
Jing Huang7725ccf2009-09-23 17:46:15 -07002122{
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002123 ioc->bfa = bfa;
2124 ioc->cbfn = cbfn;
2125 ioc->timer_mod = timer_mod;
2126 ioc->fcmode = BFA_FALSE;
2127 ioc->pllinit = BFA_FALSE;
Jing Huang7725ccf2009-09-23 17:46:15 -07002128 ioc->dbg_fwsave_once = BFA_TRUE;
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002129 ioc->iocpf.ioc = ioc;
Jing Huang7725ccf2009-09-23 17:46:15 -07002130
2131 bfa_ioc_mbox_attach(ioc);
Krishna Gudipatid37779f2011-06-13 15:42:10 -07002132 INIT_LIST_HEAD(&ioc->notify_q);
Jing Huang7725ccf2009-09-23 17:46:15 -07002133
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002134 bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
2135 bfa_fsm_send_event(ioc, IOC_E_RESET);
Jing Huang7725ccf2009-09-23 17:46:15 -07002136}
2137
Jing Huang5fbe25c2010-10-18 17:17:23 -07002138/*
Jing Huang7725ccf2009-09-23 17:46:15 -07002139 * Driver detach time IOC cleanup.
2140 */
2141void
2142bfa_ioc_detach(struct bfa_ioc_s *ioc)
2143{
2144 bfa_fsm_send_event(ioc, IOC_E_DETACH);
Krishna Gudipati3350d982011-06-24 20:28:37 -07002145 INIT_LIST_HEAD(&ioc->notify_q);
Jing Huang7725ccf2009-09-23 17:46:15 -07002146}
2147
Jing Huang5fbe25c2010-10-18 17:17:23 -07002148/*
Jing Huang7725ccf2009-09-23 17:46:15 -07002149 * Setup IOC PCI properties.
2150 *
2151 * @param[in] pcidev PCI device information for this IOC
2152 */
2153void
2154bfa_ioc_pci_init(struct bfa_ioc_s *ioc, struct bfa_pcidev_s *pcidev,
Krishna Gudipatid37779f2011-06-13 15:42:10 -07002155 enum bfi_pcifn_class clscode)
Jing Huang7725ccf2009-09-23 17:46:15 -07002156{
Krishna Gudipatid37779f2011-06-13 15:42:10 -07002157 ioc->clscode = clscode;
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002158 ioc->pcidev = *pcidev;
Krishna Gudipati11189202011-06-13 15:50:35 -07002159
2160 /*
2161 * Initialize IOC and device personality
2162 */
2163 ioc->port0_mode = ioc->port1_mode = BFI_PORT_MODE_FC;
2164 ioc->asic_mode = BFI_ASIC_MODE_FC;
2165
2166 switch (pcidev->device_id) {
2167 case BFA_PCI_DEVICE_ID_FC_8G1P:
2168 case BFA_PCI_DEVICE_ID_FC_8G2P:
2169 ioc->asic_gen = BFI_ASIC_GEN_CB;
Krishna Gudipati1a4d8e12011-06-24 20:22:28 -07002170 ioc->fcmode = BFA_TRUE;
2171 ioc->port_mode = ioc->port_mode_cfg = BFA_MODE_HBA;
2172 ioc->ad_cap_bm = BFA_CM_HBA;
Krishna Gudipati11189202011-06-13 15:50:35 -07002173 break;
2174
2175 case BFA_PCI_DEVICE_ID_CT:
2176 ioc->asic_gen = BFI_ASIC_GEN_CT;
2177 ioc->port0_mode = ioc->port1_mode = BFI_PORT_MODE_ETH;
2178 ioc->asic_mode = BFI_ASIC_MODE_ETH;
Krishna Gudipati1a4d8e12011-06-24 20:22:28 -07002179 ioc->port_mode = ioc->port_mode_cfg = BFA_MODE_CNA;
2180 ioc->ad_cap_bm = BFA_CM_CNA;
Krishna Gudipati11189202011-06-13 15:50:35 -07002181 break;
2182
2183 case BFA_PCI_DEVICE_ID_CT_FC:
2184 ioc->asic_gen = BFI_ASIC_GEN_CT;
Krishna Gudipati1a4d8e12011-06-24 20:22:28 -07002185 ioc->fcmode = BFA_TRUE;
2186 ioc->port_mode = ioc->port_mode_cfg = BFA_MODE_HBA;
2187 ioc->ad_cap_bm = BFA_CM_HBA;
Krishna Gudipati11189202011-06-13 15:50:35 -07002188 break;
2189
2190 case BFA_PCI_DEVICE_ID_CT2:
2191 ioc->asic_gen = BFI_ASIC_GEN_CT2;
Krishna Gudipati1a4d8e12011-06-24 20:22:28 -07002192 if (clscode == BFI_PCIFN_CLASS_FC &&
2193 pcidev->ssid == BFA_PCI_CT2_SSID_FC) {
Krishna Gudipati11189202011-06-13 15:50:35 -07002194 ioc->asic_mode = BFI_ASIC_MODE_FC16;
Krishna Gudipati1a4d8e12011-06-24 20:22:28 -07002195 ioc->fcmode = BFA_TRUE;
2196 ioc->port_mode = ioc->port_mode_cfg = BFA_MODE_HBA;
2197 ioc->ad_cap_bm = BFA_CM_HBA;
2198 } else {
Krishna Gudipati11189202011-06-13 15:50:35 -07002199 ioc->port0_mode = ioc->port1_mode = BFI_PORT_MODE_ETH;
Krishna Gudipati1a4d8e12011-06-24 20:22:28 -07002200 ioc->asic_mode = BFI_ASIC_MODE_ETH;
2201 if (pcidev->ssid == BFA_PCI_CT2_SSID_FCoE) {
2202 ioc->port_mode =
2203 ioc->port_mode_cfg = BFA_MODE_CNA;
2204 ioc->ad_cap_bm = BFA_CM_CNA;
2205 } else {
2206 ioc->port_mode =
2207 ioc->port_mode_cfg = BFA_MODE_NIC;
2208 ioc->ad_cap_bm = BFA_CM_NIC;
2209 }
Krishna Gudipati11189202011-06-13 15:50:35 -07002210 }
2211 break;
2212
2213 default:
2214 WARN_ON(1);
2215 }
Jing Huang7725ccf2009-09-23 17:46:15 -07002216
Jing Huang5fbe25c2010-10-18 17:17:23 -07002217 /*
Krishna Gudipati0a20de42010-03-05 19:34:20 -08002218 * Set asic specific interfaces. See bfa_ioc_cb.c and bfa_ioc_ct.c
2219 */
Krishna Gudipati11189202011-06-13 15:50:35 -07002220 if (ioc->asic_gen == BFI_ASIC_GEN_CB)
Krishna Gudipati0a20de42010-03-05 19:34:20 -08002221 bfa_ioc_set_cb_hwif(ioc);
Krishna Gudipati11189202011-06-13 15:50:35 -07002222 else if (ioc->asic_gen == BFI_ASIC_GEN_CT)
2223 bfa_ioc_set_ct_hwif(ioc);
2224 else {
2225 WARN_ON(ioc->asic_gen != BFI_ASIC_GEN_CT2);
2226 bfa_ioc_set_ct2_hwif(ioc);
2227 bfa_ioc_ct2_poweron(ioc);
2228 }
Krishna Gudipati0a20de42010-03-05 19:34:20 -08002229
Jing Huang7725ccf2009-09-23 17:46:15 -07002230 bfa_ioc_map_port(ioc);
2231 bfa_ioc_reg_init(ioc);
2232}
2233
Jing Huang5fbe25c2010-10-18 17:17:23 -07002234/*
Jing Huang7725ccf2009-09-23 17:46:15 -07002235 * Initialize IOC dma memory
2236 *
2237 * @param[in] dm_kva kernel virtual address of IOC dma memory
2238 * @param[in] dm_pa physical address of IOC dma memory
2239 */
2240void
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002241bfa_ioc_mem_claim(struct bfa_ioc_s *ioc, u8 *dm_kva, u64 dm_pa)
Jing Huang7725ccf2009-09-23 17:46:15 -07002242{
Jing Huang5fbe25c2010-10-18 17:17:23 -07002243 /*
Jing Huang7725ccf2009-09-23 17:46:15 -07002244 * dma memory for firmware attribute
2245 */
2246 ioc->attr_dma.kva = dm_kva;
2247 ioc->attr_dma.pa = dm_pa;
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002248 ioc->attr = (struct bfi_ioc_attr_s *) dm_kva;
Jing Huang7725ccf2009-09-23 17:46:15 -07002249}
2250
Jing Huang7725ccf2009-09-23 17:46:15 -07002251void
2252bfa_ioc_enable(struct bfa_ioc_s *ioc)
2253{
2254 bfa_ioc_stats(ioc, ioc_enables);
2255 ioc->dbg_fwsave_once = BFA_TRUE;
2256
2257 bfa_fsm_send_event(ioc, IOC_E_ENABLE);
2258}
2259
2260void
2261bfa_ioc_disable(struct bfa_ioc_s *ioc)
2262{
2263 bfa_ioc_stats(ioc, ioc_disables);
2264 bfa_fsm_send_event(ioc, IOC_E_DISABLE);
2265}
2266
Krishna Gudipati881c1b32012-08-22 19:52:02 -07002267void
2268bfa_ioc_suspend(struct bfa_ioc_s *ioc)
2269{
2270 ioc->dbg_fwsave_once = BFA_TRUE;
2271 bfa_fsm_send_event(ioc, IOC_E_HWERROR);
2272}
Jing Huang7725ccf2009-09-23 17:46:15 -07002273
Jing Huang5fbe25c2010-10-18 17:17:23 -07002274/*
Jing Huang7725ccf2009-09-23 17:46:15 -07002275 * Initialize memory for saving firmware trace. Driver must initialize
	2276 * trace memory before calling bfa_ioc_enable().
2277 */
2278void
2279bfa_ioc_debug_memclaim(struct bfa_ioc_s *ioc, void *dbg_fwsave)
2280{
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002281 ioc->dbg_fwsave = dbg_fwsave;
Krishna Gudipati881c1b32012-08-22 19:52:02 -07002282 ioc->dbg_fwsave_len = BFA_DBG_FWTRC_LEN;
Jing Huang7725ccf2009-09-23 17:46:15 -07002283}
2284
Jing Huang5fbe25c2010-10-18 17:17:23 -07002285/*
Jing Huang7725ccf2009-09-23 17:46:15 -07002286 * Register mailbox message handler functions
2287 *
2288 * @param[in] ioc IOC instance
2289 * @param[in] mcfuncs message class handler functions
2290 */
2291void
2292bfa_ioc_mbox_register(struct bfa_ioc_s *ioc, bfa_ioc_mbox_mcfunc_t *mcfuncs)
2293{
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002294 struct bfa_ioc_mbox_mod_s *mod = &ioc->mbox_mod;
2295 int mc;
Jing Huang7725ccf2009-09-23 17:46:15 -07002296
2297 for (mc = 0; mc < BFI_MC_MAX; mc++)
2298 mod->mbhdlr[mc].cbfn = mcfuncs[mc];
2299}
2300
Jing Huang5fbe25c2010-10-18 17:17:23 -07002301/*
Jing Huang7725ccf2009-09-23 17:46:15 -07002302 * Register mailbox message handler function, to be called by common modules
2303 */
2304void
2305bfa_ioc_mbox_regisr(struct bfa_ioc_s *ioc, enum bfi_mclass mc,
2306 bfa_ioc_mbox_mcfunc_t cbfn, void *cbarg)
2307{
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002308 struct bfa_ioc_mbox_mod_s *mod = &ioc->mbox_mod;
Jing Huang7725ccf2009-09-23 17:46:15 -07002309
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002310 mod->mbhdlr[mc].cbfn = cbfn;
2311 mod->mbhdlr[mc].cbarg = cbarg;
Jing Huang7725ccf2009-09-23 17:46:15 -07002312}
2313
Jing Huang5fbe25c2010-10-18 17:17:23 -07002314/*
Jing Huang7725ccf2009-09-23 17:46:15 -07002315 * Queue a mailbox command request to firmware. If the mailbox is busy,
	2316 * the request is queued and sent later; the caller must serialize requests.
2317 *
2318 * @param[in] ioc IOC instance
	2319 * @param[in]	cmd	Mailbox command
2320 */
2321void
2322bfa_ioc_mbox_queue(struct bfa_ioc_s *ioc, struct bfa_mbox_cmd_s *cmd)
2323{
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002324 struct bfa_ioc_mbox_mod_s *mod = &ioc->mbox_mod;
2325 u32 stat;
Jing Huang7725ccf2009-09-23 17:46:15 -07002326
Jing Huang5fbe25c2010-10-18 17:17:23 -07002327 /*
Jing Huang7725ccf2009-09-23 17:46:15 -07002328 * If a previous command is pending, queue new command
2329 */
2330 if (!list_empty(&mod->cmd_q)) {
2331 list_add_tail(&cmd->qe, &mod->cmd_q);
2332 return;
2333 }
2334
Jing Huang5fbe25c2010-10-18 17:17:23 -07002335 /*
Jing Huang7725ccf2009-09-23 17:46:15 -07002336 * If mailbox is busy, queue command for poll timer
2337 */
Jing Huang53440262010-10-18 17:12:29 -07002338 stat = readl(ioc->ioc_regs.hfn_mbox_cmd);
Jing Huang7725ccf2009-09-23 17:46:15 -07002339 if (stat) {
2340 list_add_tail(&cmd->qe, &mod->cmd_q);
2341 return;
2342 }
2343
Jing Huang5fbe25c2010-10-18 17:17:23 -07002344 /*
Jing Huang7725ccf2009-09-23 17:46:15 -07002345 * mailbox is free -- queue command to firmware
2346 */
2347 bfa_ioc_mbox_send(ioc, cmd->msg, sizeof(cmd->msg));
2348}
2349
Jing Huang5fbe25c2010-10-18 17:17:23 -07002350/*
Jing Huang7725ccf2009-09-23 17:46:15 -07002351 * Handle mailbox interrupts
2352 */
2353void
2354bfa_ioc_mbox_isr(struct bfa_ioc_s *ioc)
2355{
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002356 struct bfa_ioc_mbox_mod_s *mod = &ioc->mbox_mod;
2357 struct bfi_mbmsg_s m;
2358 int mc;
Jing Huang7725ccf2009-09-23 17:46:15 -07002359
Krishna Gudipati8b070b42011-06-13 15:52:40 -07002360 if (bfa_ioc_msgget(ioc, &m)) {
2361 /*
2362 * Treat IOC message class as special.
2363 */
2364 mc = m.mh.msg_class;
2365 if (mc == BFI_MC_IOC) {
2366 bfa_ioc_isr(ioc, &m);
2367 return;
2368 }
Jing Huang7725ccf2009-09-23 17:46:15 -07002369
Dan Carpenterfffa6922012-06-27 11:59:36 +03002370 if ((mc >= BFI_MC_MAX) || (mod->mbhdlr[mc].cbfn == NULL))
Krishna Gudipati8b070b42011-06-13 15:52:40 -07002371 return;
2372
2373 mod->mbhdlr[mc].cbfn(mod->mbhdlr[mc].cbarg, &m);
Jing Huang7725ccf2009-09-23 17:46:15 -07002374 }
2375
Krishna Gudipati8b070b42011-06-13 15:52:40 -07002376 bfa_ioc_lpu_read_stat(ioc);
Jing Huang7725ccf2009-09-23 17:46:15 -07002377
Krishna Gudipati8b070b42011-06-13 15:52:40 -07002378 /*
2379 * Try to send pending mailbox commands
2380 */
2381 bfa_ioc_mbox_poll(ioc);
Jing Huang7725ccf2009-09-23 17:46:15 -07002382}
2383
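/*
 * Hardware error interrupt: record heartbeat statistics and drive the
 * IOC state machine into error handling.
 */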
2384void
2385bfa_ioc_error_isr(struct bfa_ioc_s *ioc)
2386{
Krishna Gudipati5a0adae2011-06-24 20:22:56 -07002387 bfa_ioc_stats(ioc, ioc_hbfails);
2388 ioc->stats.hb_count = ioc->hb_count;
Jing Huang7725ccf2009-09-23 17:46:15 -07002389 bfa_fsm_send_event(ioc, IOC_E_HWERROR);
2390}
2391
Jing Huang5fbe25c2010-10-18 17:17:23 -07002392/*
Jing Huang7725ccf2009-09-23 17:46:15 -07002393 * return true if IOC is disabled
2394 */
2395bfa_boolean_t
2396bfa_ioc_is_disabled(struct bfa_ioc_s *ioc)
2397{
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002398 return bfa_fsm_cmp_state(ioc, bfa_ioc_sm_disabling) ||
2399 bfa_fsm_cmp_state(ioc, bfa_ioc_sm_disabled);
Jing Huang7725ccf2009-09-23 17:46:15 -07002400}
2401
Jing Huang5fbe25c2010-10-18 17:17:23 -07002402/*
Jing Huang7725ccf2009-09-23 17:46:15 -07002403 * return true if IOC firmware is different.
2404 */
2405bfa_boolean_t
2406bfa_ioc_fw_mismatch(struct bfa_ioc_s *ioc)
2407{
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002408 return bfa_fsm_cmp_state(ioc, bfa_ioc_sm_reset) ||
2409 bfa_fsm_cmp_state(&ioc->iocpf, bfa_iocpf_sm_fwcheck) ||
2410 bfa_fsm_cmp_state(&ioc->iocpf, bfa_iocpf_sm_mismatch);
Jing Huang7725ccf2009-09-23 17:46:15 -07002411}
2412
2413#define bfa_ioc_state_disabled(__sm) \
2414 (((__sm) == BFI_IOC_UNINIT) || \
2415 ((__sm) == BFI_IOC_INITING) || \
2416 ((__sm) == BFI_IOC_HWINIT) || \
2417 ((__sm) == BFI_IOC_DISABLED) || \
Krishna Gudipati0a20de42010-03-05 19:34:20 -08002418 ((__sm) == BFI_IOC_FAIL) || \
Jing Huang7725ccf2009-09-23 17:46:15 -07002419 ((__sm) == BFI_IOC_CFG_DISABLED))
2420
Jing Huang5fbe25c2010-10-18 17:17:23 -07002421/*
Jing Huang7725ccf2009-09-23 17:46:15 -07002422 * Check if adapter is disabled -- both IOCs should be in a disabled
2423 * state.
2424 */
2425bfa_boolean_t
2426bfa_ioc_adapter_is_disabled(struct bfa_ioc_s *ioc)
2427{
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002428 u32 ioc_state;
Jing Huang7725ccf2009-09-23 17:46:15 -07002429
2430 if (!bfa_fsm_cmp_state(ioc, bfa_ioc_sm_disabled))
2431 return BFA_FALSE;
2432
Krishna Gudipati11189202011-06-13 15:50:35 -07002433 ioc_state = readl(ioc->ioc_regs.ioc_fwstate);
Jing Huang7725ccf2009-09-23 17:46:15 -07002434 if (!bfa_ioc_state_disabled(ioc_state))
2435 return BFA_FALSE;
2436
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002437 if (ioc->pcidev.device_id != BFA_PCI_DEVICE_ID_FC_8G1P) {
Krishna Gudipati11189202011-06-13 15:50:35 -07002438 ioc_state = readl(ioc->ioc_regs.alt_ioc_fwstate);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002439 if (!bfa_ioc_state_disabled(ioc_state))
2440 return BFA_FALSE;
2441 }
Jing Huang7725ccf2009-09-23 17:46:15 -07002442
2443 return BFA_TRUE;
2444}
2445
Jing Huang8f4bfad2010-12-26 21:50:10 -08002446/*
Krishna Gudipatif1d584d2010-12-13 16:17:11 -08002447 * Reset IOC fwstate registers.
2448 */
2449void
2450bfa_ioc_reset_fwstate(struct bfa_ioc_s *ioc)
2451{
2452 writel(BFI_IOC_UNINIT, ioc->ioc_regs.ioc_fwstate);
2453 writel(BFI_IOC_UNINIT, ioc->ioc_regs.alt_ioc_fwstate);
2454}
2455
Jing Huang7725ccf2009-09-23 17:46:15 -07002456#define BFA_MFG_NAME "Brocade"
2457void
2458bfa_ioc_get_adapter_attr(struct bfa_ioc_s *ioc,
2459 struct bfa_adapter_attr_s *ad_attr)
2460{
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002461 struct bfi_ioc_attr_s *ioc_attr;
Jing Huang7725ccf2009-09-23 17:46:15 -07002462
2463 ioc_attr = ioc->attr;
Jing Huang7725ccf2009-09-23 17:46:15 -07002464
Krishna Gudipati0a4b1fc2010-03-05 19:37:57 -08002465 bfa_ioc_get_adapter_serial_num(ioc, ad_attr->serial_num);
2466 bfa_ioc_get_adapter_fw_ver(ioc, ad_attr->fw_ver);
2467 bfa_ioc_get_adapter_optrom_ver(ioc, ad_attr->optrom_ver);
2468 bfa_ioc_get_adapter_manufacturer(ioc, ad_attr->manufacturer);
Jing Huang6a18b162010-10-18 17:08:54 -07002469 memcpy(&ad_attr->vpd, &ioc_attr->vpd,
Jing Huang7725ccf2009-09-23 17:46:15 -07002470 sizeof(struct bfa_mfg_vpd_s));
2471
Krishna Gudipati0a4b1fc2010-03-05 19:37:57 -08002472 ad_attr->nports = bfa_ioc_get_nports(ioc);
2473 ad_attr->max_speed = bfa_ioc_speed_sup(ioc);
Jing Huang7725ccf2009-09-23 17:46:15 -07002474
Krishna Gudipati0a4b1fc2010-03-05 19:37:57 -08002475 bfa_ioc_get_adapter_model(ioc, ad_attr->model);
2476 /* For now, model descr uses same model string */
2477 bfa_ioc_get_adapter_model(ioc, ad_attr->model_descr);
Jing Huang7725ccf2009-09-23 17:46:15 -07002478
Jing Huanged969322010-07-08 19:45:56 -07002479 ad_attr->card_type = ioc_attr->card_type;
2480 ad_attr->is_mezz = bfa_mfg_is_mezz(ioc_attr->card_type);
2481
Jing Huang7725ccf2009-09-23 17:46:15 -07002482 if (BFI_ADAPTER_IS_SPECIAL(ioc_attr->adapter_prop))
2483 ad_attr->prototype = 1;
2484 else
2485 ad_attr->prototype = 0;
2486
Maggie Zhangf7f738122010-12-09 19:08:43 -08002487 ad_attr->pwwn = ioc->attr->pwwn;
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002488 ad_attr->mac = bfa_ioc_get_mac(ioc);
Jing Huang7725ccf2009-09-23 17:46:15 -07002489
2490 ad_attr->pcie_gen = ioc_attr->pcie_gen;
2491 ad_attr->pcie_lanes = ioc_attr->pcie_lanes;
2492 ad_attr->pcie_lanes_orig = ioc_attr->pcie_lanes_orig;
2493 ad_attr->asic_rev = ioc_attr->asic_rev;
Krishna Gudipati0a4b1fc2010-03-05 19:37:57 -08002494
2495 bfa_ioc_get_pci_chip_rev(ioc, ad_attr->hw_ver);
Jing Huang7725ccf2009-09-23 17:46:15 -07002496
Krishna Gudipati11189202011-06-13 15:50:35 -07002497 ad_attr->cna_capable = bfa_ioc_is_cna(ioc);
2498 ad_attr->trunk_capable = (ad_attr->nports > 1) &&
2499 !bfa_ioc_is_cna(ioc) && !ad_attr->is_mezz;
Krishna Gudipatiea5d7c92012-09-21 17:25:02 -07002500 ad_attr->mfg_day = ioc_attr->mfg_day;
2501 ad_attr->mfg_month = ioc_attr->mfg_month;
2502 ad_attr->mfg_year = ioc_attr->mfg_year;
Jing Huang7725ccf2009-09-23 17:46:15 -07002503}
2504
Krishna Gudipati2993cc72010-03-05 19:36:47 -08002505enum bfa_ioc_type_e
2506bfa_ioc_get_type(struct bfa_ioc_s *ioc)
2507{
Krishna Gudipati11189202011-06-13 15:50:35 -07002508 if (ioc->clscode == BFI_PCIFN_CLASS_ETH)
Krishna Gudipati2993cc72010-03-05 19:36:47 -08002509 return BFA_IOC_TYPE_LL;
Krishna Gudipati11189202011-06-13 15:50:35 -07002510
2511 WARN_ON(ioc->clscode != BFI_PCIFN_CLASS_FC);
2512
Krishna Gudipati5a0adae2011-06-24 20:22:56 -07002513 return (ioc->attr->port_mode == BFI_PORT_MODE_FC)
Krishna Gudipati11189202011-06-13 15:50:35 -07002514 ? BFA_IOC_TYPE_FC : BFA_IOC_TYPE_FCoE;
Krishna Gudipati2993cc72010-03-05 19:36:47 -08002515}
2516
Jing Huang7725ccf2009-09-23 17:46:15 -07002517void
Krishna Gudipati0a4b1fc2010-03-05 19:37:57 -08002518bfa_ioc_get_adapter_serial_num(struct bfa_ioc_s *ioc, char *serial_num)
2519{
Jing Huang6a18b162010-10-18 17:08:54 -07002520 memset((void *)serial_num, 0, BFA_ADAPTER_SERIAL_NUM_LEN);
2521 memcpy((void *)serial_num,
Krishna Gudipati0a4b1fc2010-03-05 19:37:57 -08002522 (void *)ioc->attr->brcd_serialnum,
2523 BFA_ADAPTER_SERIAL_NUM_LEN);
2524}
2525
2526void
2527bfa_ioc_get_adapter_fw_ver(struct bfa_ioc_s *ioc, char *fw_ver)
2528{
Jing Huang6a18b162010-10-18 17:08:54 -07002529 memset((void *)fw_ver, 0, BFA_VERSION_LEN);
2530 memcpy(fw_ver, ioc->attr->fw_version, BFA_VERSION_LEN);
Krishna Gudipati0a4b1fc2010-03-05 19:37:57 -08002531}
2532
2533void
2534bfa_ioc_get_pci_chip_rev(struct bfa_ioc_s *ioc, char *chip_rev)
2535{
Jing Huangd4b671c2010-12-26 21:46:35 -08002536 WARN_ON(!chip_rev);
Krishna Gudipati0a4b1fc2010-03-05 19:37:57 -08002537
Jing Huang6a18b162010-10-18 17:08:54 -07002538 memset((void *)chip_rev, 0, BFA_IOC_CHIP_REV_LEN);
Krishna Gudipati0a4b1fc2010-03-05 19:37:57 -08002539
2540 chip_rev[0] = 'R';
2541 chip_rev[1] = 'e';
2542 chip_rev[2] = 'v';
2543 chip_rev[3] = '-';
2544 chip_rev[4] = ioc->attr->asic_rev;
2545 chip_rev[5] = '\0';
2546}
2547
2548void
2549bfa_ioc_get_adapter_optrom_ver(struct bfa_ioc_s *ioc, char *optrom_ver)
2550{
Jing Huang6a18b162010-10-18 17:08:54 -07002551 memset((void *)optrom_ver, 0, BFA_VERSION_LEN);
2552 memcpy(optrom_ver, ioc->attr->optrom_version,
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002553 BFA_VERSION_LEN);
Krishna Gudipati0a4b1fc2010-03-05 19:37:57 -08002554}
2555
2556void
2557bfa_ioc_get_adapter_manufacturer(struct bfa_ioc_s *ioc, char *manufacturer)
2558{
Jing Huang6a18b162010-10-18 17:08:54 -07002559 memset((void *)manufacturer, 0, BFA_ADAPTER_MFG_NAME_LEN);
2560 memcpy(manufacturer, BFA_MFG_NAME, BFA_ADAPTER_MFG_NAME_LEN);
Krishna Gudipati0a4b1fc2010-03-05 19:37:57 -08002561}
2562
2563void
2564bfa_ioc_get_adapter_model(struct bfa_ioc_s *ioc, char *model)
2565{
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002566 struct bfi_ioc_attr_s *ioc_attr;
Krishna Gudipati0a4b1fc2010-03-05 19:37:57 -08002567
Jing Huangd4b671c2010-12-26 21:46:35 -08002568 WARN_ON(!model);
Jing Huang6a18b162010-10-18 17:08:54 -07002569 memset((void *)model, 0, BFA_ADAPTER_MODEL_NAME_LEN);
Krishna Gudipati0a4b1fc2010-03-05 19:37:57 -08002570
2571 ioc_attr = ioc->attr;
2572
Krishna Gudipati10a07372011-06-24 20:23:38 -07002573 snprintf(model, BFA_ADAPTER_MODEL_NAME_LEN, "%s-%u",
Krishna Gudipati8b070b42011-06-13 15:52:40 -07002574 BFA_MFG_NAME, ioc_attr->card_type);
Krishna Gudipati0a4b1fc2010-03-05 19:37:57 -08002575}
2576
2577enum bfa_ioc_state
2578bfa_ioc_get_state(struct bfa_ioc_s *ioc)
2579{
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002580 enum bfa_iocpf_state iocpf_st;
2581 enum bfa_ioc_state ioc_st = bfa_sm_to_state(ioc_sm_table, ioc->fsm);
2582
2583 if (ioc_st == BFA_IOC_ENABLING ||
2584 ioc_st == BFA_IOC_FAIL || ioc_st == BFA_IOC_INITFAIL) {
2585
2586 iocpf_st = bfa_sm_to_state(iocpf_sm_table, ioc->iocpf.fsm);
2587
2588 switch (iocpf_st) {
2589 case BFA_IOCPF_SEMWAIT:
2590 ioc_st = BFA_IOC_SEMWAIT;
2591 break;
2592
2593 case BFA_IOCPF_HWINIT:
2594 ioc_st = BFA_IOC_HWINIT;
2595 break;
2596
2597 case BFA_IOCPF_FWMISMATCH:
2598 ioc_st = BFA_IOC_FWMISMATCH;
2599 break;
2600
2601 case BFA_IOCPF_FAIL:
2602 ioc_st = BFA_IOC_FAIL;
2603 break;
2604
2605 case BFA_IOCPF_INITFAIL:
2606 ioc_st = BFA_IOC_INITFAIL;
2607 break;
2608
2609 default:
2610 break;
2611 }
2612 }
2613
2614 return ioc_st;
Krishna Gudipati0a4b1fc2010-03-05 19:37:57 -08002615}
2616
2617void
Jing Huang7725ccf2009-09-23 17:46:15 -07002618bfa_ioc_get_attr(struct bfa_ioc_s *ioc, struct bfa_ioc_attr_s *ioc_attr)
2619{
Jing Huang6a18b162010-10-18 17:08:54 -07002620 memset((void *)ioc_attr, 0, sizeof(struct bfa_ioc_attr_s));
Jing Huang7725ccf2009-09-23 17:46:15 -07002621
Krishna Gudipati0a4b1fc2010-03-05 19:37:57 -08002622 ioc_attr->state = bfa_ioc_get_state(ioc);
Jing Huang7725ccf2009-09-23 17:46:15 -07002623 ioc_attr->port_id = ioc->port_id;
Krishna Gudipati1a4d8e12011-06-24 20:22:28 -07002624 ioc_attr->port_mode = ioc->port_mode;
2625 ioc_attr->port_mode_cfg = ioc->port_mode_cfg;
2626 ioc_attr->cap_bm = ioc->ad_cap_bm;
Jing Huang7725ccf2009-09-23 17:46:15 -07002627
Krishna Gudipati2993cc72010-03-05 19:36:47 -08002628 ioc_attr->ioc_type = bfa_ioc_get_type(ioc);
Jing Huang7725ccf2009-09-23 17:46:15 -07002629
2630 bfa_ioc_get_adapter_attr(ioc, &ioc_attr->adapter_attr);
2631
2632 ioc_attr->pci_attr.device_id = ioc->pcidev.device_id;
2633 ioc_attr->pci_attr.pcifn = ioc->pcidev.pci_func;
Krishna Gudipati0a4b1fc2010-03-05 19:37:57 -08002634 bfa_ioc_get_pci_chip_rev(ioc, ioc_attr->pci_attr.chip_rev);
Jing Huang7725ccf2009-09-23 17:46:15 -07002635}
2636
Jing Huang7725ccf2009-09-23 17:46:15 -07002637mac_t
2638bfa_ioc_get_mac(struct bfa_ioc_s *ioc)
2639{
Jing Huang15b64a82010-07-08 19:48:12 -07002640 /*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002641 * Check the IOC type and return the appropriate MAC
Jing Huang15b64a82010-07-08 19:48:12 -07002642 */
2643 if (bfa_ioc_get_type(ioc) == BFA_IOC_TYPE_FCoE)
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002644 return ioc->attr->fcoe_mac;
Jing Huang15b64a82010-07-08 19:48:12 -07002645 else
2646 return ioc->attr->mac;
2647}
2648
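/*
 * Derive the manufacturing (factory) MAC for this PCI function from the
 * base MAC in the manufacturing block.
 */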
Jing Huang15b64a82010-07-08 19:48:12 -07002649mac_t
2650bfa_ioc_get_mfg_mac(struct bfa_ioc_s *ioc)
2651{
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002652 mac_t m;
Jing Huang7725ccf2009-09-23 17:46:15 -07002653
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002654 m = ioc->attr->mfg_mac;
2655 if (bfa_mfg_is_old_wwn_mac_model(ioc->attr->card_type))
2656 m.mac[MAC_ADDRLEN - 1] += bfa_ioc_pcifn(ioc);
2657 else
2658 bfa_mfg_increment_wwn_mac(&(m.mac[MAC_ADDRLEN-3]),
2659 bfa_ioc_pcifn(ioc));
Jing Huang7725ccf2009-09-23 17:46:15 -07002660
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002661 return m;
Jing Huang7725ccf2009-09-23 17:46:15 -07002662}
2663
Jing Huang5fbe25c2010-10-18 17:17:23 -07002664/*
Krishna Gudipati7826f302011-07-20 16:59:13 -07002665 * Send AEN notification
2666 */
2667void
2668bfa_ioc_aen_post(struct bfa_ioc_s *ioc, enum bfa_ioc_aen_event event)
2669{
2670 struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad;
2671 struct bfa_aen_entry_s *aen_entry;
2672 enum bfa_ioc_type_e ioc_type;
2673
2674 bfad_get_aen_entry(bfad, aen_entry);
2675 if (!aen_entry)
2676 return;
2677
2678 ioc_type = bfa_ioc_get_type(ioc);
2679 switch (ioc_type) {
2680 case BFA_IOC_TYPE_FC:
2681 aen_entry->aen_data.ioc.pwwn = ioc->attr->pwwn;
2682 break;
2683 case BFA_IOC_TYPE_FCoE:
2684 aen_entry->aen_data.ioc.pwwn = ioc->attr->pwwn;
2685 aen_entry->aen_data.ioc.mac = bfa_ioc_get_mac(ioc);
2686 break;
2687 case BFA_IOC_TYPE_LL:
2688 aen_entry->aen_data.ioc.mac = bfa_ioc_get_mac(ioc);
2689 break;
2690 default:
2691 WARN_ON(ioc_type != BFA_IOC_TYPE_FC);
2692 break;
2693 }
2694
2695 /* Send the AEN notification */
2696 aen_entry->aen_data.ioc.ioc_type = ioc_type;
2697 bfad_im_post_vendor_event(aen_entry, bfad, ++ioc->ioc_aen_seq,
2698 BFA_AEN_CAT_IOC, event);
2699}
2700
2701/*
Jing Huang7725ccf2009-09-23 17:46:15 -07002702 * Retrieve saved firmware trace from a prior IOC failure.
2703 */
2704bfa_status_t
2705bfa_ioc_debug_fwsave(struct bfa_ioc_s *ioc, void *trcdata, int *trclen)
2706{
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002707 int tlen;
Jing Huang7725ccf2009-09-23 17:46:15 -07002708
2709 if (ioc->dbg_fwsave_len == 0)
2710 return BFA_STATUS_ENOFSAVE;
2711
2712 tlen = *trclen;
2713 if (tlen > ioc->dbg_fwsave_len)
2714 tlen = ioc->dbg_fwsave_len;
2715
Jing Huang6a18b162010-10-18 17:08:54 -07002716 memcpy(trcdata, ioc->dbg_fwsave, tlen);
Jing Huang7725ccf2009-09-23 17:46:15 -07002717 *trclen = tlen;
2718 return BFA_STATUS_OK;
2719}
2720
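/*
 * Illustrative sketch (not part of the driver build): one way a caller
 * might retrieve the saved trace through bfa_ioc_debug_fwsave(). The
 * wrapper name and buffer handling are assumptions for the example only.
 */
#if 0
static void
example_read_fwsave(struct bfa_ioc_s *ioc, void *buf, int bufsz)
{
    int len = bufsz;    /* in: buffer size, out: bytes copied */

    if (bfa_ioc_debug_fwsave(ioc, buf, &len) == BFA_STATUS_OK) {
        /* 'len' now holds the number of trace bytes copied */
    }
}
#endif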
Krishna Gudipati738c9e62010-03-05 19:36:19 -08002721
Jing Huang5fbe25c2010-10-18 17:17:23 -07002722/*
Jing Huang7725ccf2009-09-23 17:46:15 -07002723 * Retrieve the firmware trace from IOC shared memory (smem).
2724 */
2725bfa_status_t
2726bfa_ioc_debug_fwtrc(struct bfa_ioc_s *ioc, void *trcdata, int *trclen)
2727{
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002728 u32 loff = BFA_DBG_FWTRC_OFF(bfa_ioc_portid(ioc));
2729 int tlen;
2730 bfa_status_t status;
Jing Huang7725ccf2009-09-23 17:46:15 -07002731
2732 bfa_trc(ioc, *trclen);
2733
Jing Huang7725ccf2009-09-23 17:46:15 -07002734 tlen = *trclen;
2735 if (tlen > BFA_DBG_FWTRC_LEN)
2736 tlen = BFA_DBG_FWTRC_LEN;
Jing Huang7725ccf2009-09-23 17:46:15 -07002737
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002738 status = bfa_ioc_smem_read(ioc, trcdata, loff, tlen);
2739 *trclen = tlen;
2740 return status;
2741}
Jing Huang7725ccf2009-09-23 17:46:15 -07002742
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002743static void
2744bfa_ioc_send_fwsync(struct bfa_ioc_s *ioc)
2745{
2746 struct bfa_mbox_cmd_s cmd;
2747 struct bfi_ioc_ctrl_req_s *req = (struct bfi_ioc_ctrl_req_s *) cmd.msg;
Jing Huang7725ccf2009-09-23 17:46:15 -07002748
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002749 bfi_h2i_set(req->mh, BFI_MC_IOC, BFI_IOC_H2I_DBG_SYNC,
2750 bfa_ioc_portid(ioc));
Krishna Gudipatid37779f2011-06-13 15:42:10 -07002751 req->clscode = cpu_to_be16(ioc->clscode);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002752 bfa_ioc_mbox_queue(ioc, &cmd);
2753}
Krishna Gudipati0a20de42010-03-05 19:34:20 -08002754
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002755static void
2756bfa_ioc_fwsync(struct bfa_ioc_s *ioc)
2757{
2758 u32 fwsync_iter = 1000;
2759
2760 bfa_ioc_send_fwsync(ioc);
2761
Jing Huang5fbe25c2010-10-18 17:17:23 -07002762 /*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002763 * After sending a fw sync mbox command wait for it to
2764 * take effect. We will not wait for a response because
2765 * 1. fw_sync mbox cmd doesn't have a response.
2766 * 2. Even if we implement that, interrupts might not
2767 * be enabled when we call this function.
2768 * So, just keep checking if any mbox cmd is pending, and
2769 * after waiting for a reasonable amount of time, go ahead.
2770 * It is possible that fw has crashed and the mbox command
2771 * is never acknowledged.
Krishna Gudipati0a20de42010-03-05 19:34:20 -08002772 */
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002773 while (bfa_ioc_mbox_cmd_pending(ioc) && fwsync_iter > 0)
2774 fwsync_iter--;
2775}
Krishna Gudipati0a20de42010-03-05 19:34:20 -08002776
Jing Huang5fbe25c2010-10-18 17:17:23 -07002777/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002778 * Dump firmware smem
2779 */
2780bfa_status_t
2781bfa_ioc_debug_fwcore(struct bfa_ioc_s *ioc, void *buf,
2782 u32 *offset, int *buflen)
2783{
2784 u32 loff;
2785 int dlen;
2786 bfa_status_t status;
2787 u32 smem_len = BFA_IOC_FW_SMEM_SIZE(ioc);
Jing Huang7725ccf2009-09-23 17:46:15 -07002788
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002789 if (*offset >= smem_len) {
2790 *offset = *buflen = 0;
2791 return BFA_STATUS_EINVAL;
2792 }
2793
2794 loff = *offset;
2795 dlen = *buflen;
2796
Jing Huang5fbe25c2010-10-18 17:17:23 -07002797 /*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002798 * On the first smem read, sync smem before proceeding;
2799 * there is no need to sync before reading every chunk.
2800 */
2801 if (loff == 0)
2802 bfa_ioc_fwsync(ioc);
2803
2804 if ((loff + dlen) >= smem_len)
2805 dlen = smem_len - loff;
2806
2807 status = bfa_ioc_smem_read(ioc, buf, loff, dlen);
2808
2809 if (status != BFA_STATUS_OK) {
2810 *offset = *buflen = 0;
2811 return status;
2812 }
2813
2814 *offset += dlen;
2815
2816 if (*offset >= smem_len)
2817 *offset = 0;
2818
2819 *buflen = dlen;
2820
2821 return status;
2822}
2823
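/*
 * Illustrative sketch (not part of the driver build): draining the firmware
 * smem dump in chunks with bfa_ioc_debug_fwcore(). It relies on the offset
 * being wrapped back to 0 once the end of smem is reached. The wrapper name
 * and chunk handling are assumptions for the example only.
 */
#if 0
static bfa_status_t
example_dump_fwcore(struct bfa_ioc_s *ioc, void *buf, int bufsz)
{
    u32 off = 0;
    int len;
    bfa_status_t status;

    do {
        len = bufsz;
        status = bfa_ioc_debug_fwcore(ioc, buf, &off, &len);
        if (status != BFA_STATUS_OK)
            return status;
        /* consume 'len' bytes from 'buf' here */
    } while (off != 0);    /* offset wraps to 0 at the end of smem */

    return BFA_STATUS_OK;
}
#endif
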
Jing Huang5fbe25c2010-10-18 17:17:23 -07002824/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002825 * Firmware statistics
2826 */
2827bfa_status_t
2828bfa_ioc_fw_stats_get(struct bfa_ioc_s *ioc, void *stats)
2829{
2830 u32 loff = BFI_IOC_FWSTATS_OFF + \
2831 BFI_IOC_FWSTATS_SZ * (bfa_ioc_portid(ioc));
2832 int tlen;
2833 bfa_status_t status;
2834
2835 if (ioc->stats_busy) {
2836 bfa_trc(ioc, ioc->stats_busy);
2837 return BFA_STATUS_DEVBUSY;
2838 }
2839 ioc->stats_busy = BFA_TRUE;
2840
2841 tlen = sizeof(struct bfa_fw_stats_s);
2842 status = bfa_ioc_smem_read(ioc, stats, loff, tlen);
2843
2844 ioc->stats_busy = BFA_FALSE;
2845 return status;
2846}
2847
2848bfa_status_t
2849bfa_ioc_fw_stats_clear(struct bfa_ioc_s *ioc)
2850{
2851 u32 loff = BFI_IOC_FWSTATS_OFF + \
2852 BFI_IOC_FWSTATS_SZ * (bfa_ioc_portid(ioc));
2853 int tlen;
2854 bfa_status_t status;
2855
2856 if (ioc->stats_busy) {
2857 bfa_trc(ioc, ioc->stats_busy);
2858 return BFA_STATUS_DEVBUSY;
2859 }
2860 ioc->stats_busy = BFA_TRUE;
2861
2862 tlen = sizeof(struct bfa_fw_stats_s);
2863 status = bfa_ioc_smem_clr(ioc, loff, tlen);
2864
2865 ioc->stats_busy = BFA_FALSE;
2866 return status;
Jing Huang7725ccf2009-09-23 17:46:15 -07002867}
2868
Jing Huang5fbe25c2010-10-18 17:17:23 -07002869/*
Jing Huang7725ccf2009-09-23 17:46:15 -07002870 * Save firmware trace if configured.
2871 */
Krishna Gudipati881c1b32012-08-22 19:52:02 -07002872void
Krishna Gudipati4e78efe2010-12-13 16:16:09 -08002873bfa_ioc_debug_save_ftrc(struct bfa_ioc_s *ioc)
Jing Huang7725ccf2009-09-23 17:46:15 -07002874{
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002875 int tlen;
Jing Huang7725ccf2009-09-23 17:46:15 -07002876
Krishna Gudipati4e78efe2010-12-13 16:16:09 -08002877 if (ioc->dbg_fwsave_once) {
2878 ioc->dbg_fwsave_once = BFA_FALSE;
2879 if (ioc->dbg_fwsave_len) {
2880 tlen = ioc->dbg_fwsave_len;
2881 bfa_ioc_debug_fwtrc(ioc, ioc->dbg_fwsave, &tlen);
2882 }
Jing Huang7725ccf2009-09-23 17:46:15 -07002883 }
2884}
2885
Jing Huang5fbe25c2010-10-18 17:17:23 -07002886/*
Jing Huang7725ccf2009-09-23 17:46:15 -07002887 * Firmware failure detected. Start recovery actions.
2888 */
2889static void
2890bfa_ioc_recover(struct bfa_ioc_s *ioc)
2891{
Jing Huang7725ccf2009-09-23 17:46:15 -07002892 bfa_ioc_stats(ioc, ioc_hbfails);
Krishna Gudipati5a0adae2011-06-24 20:22:56 -07002893 ioc->stats.hb_count = ioc->hb_count;
Jing Huang7725ccf2009-09-23 17:46:15 -07002894 bfa_fsm_send_event(ioc, IOC_E_HBFAIL);
2895}
2896
Jing Huang5fbe25c2010-10-18 17:17:23 -07002897/*
Maggie Zhangdf0f1932010-12-09 19:07:46 -08002898 * BFA IOC PF private functions
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002899 */
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002900static void
2901bfa_iocpf_timeout(void *ioc_arg)
2902{
2903 struct bfa_ioc_s *ioc = (struct bfa_ioc_s *) ioc_arg;
2904
2905 bfa_trc(ioc, 0);
2906 bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_TIMEOUT);
2907}
2908
2909static void
2910bfa_iocpf_sem_timeout(void *ioc_arg)
2911{
2912 struct bfa_ioc_s *ioc = (struct bfa_ioc_s *) ioc_arg;
2913
2914 bfa_ioc_hw_sem_get(ioc);
2915}
2916
Krishna Gudipati775c7742011-06-13 15:52:12 -07002917static void
2918bfa_ioc_poll_fwinit(struct bfa_ioc_s *ioc)
2919{
2920 u32 fwstate = readl(ioc->ioc_regs.ioc_fwstate);
2921
2922 bfa_trc(ioc, fwstate);
2923
2924 if (fwstate == BFI_IOC_DISABLED) {
2925 bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_FWREADY);
2926 return;
2927 }
2928
Krishna Gudipati7ac83b12012-09-21 17:24:21 -07002929 if (ioc->iocpf.poll_time >= (3 * BFA_IOC_TOV)) {
Krishna Gudipati775c7742011-06-13 15:52:12 -07002930 bfa_iocpf_timeout(ioc);
2931 } else {
2932 ioc->iocpf.poll_time += BFA_IOC_POLL_TOV;
2933 bfa_iocpf_poll_timer_start(ioc);
2934 }
2935}
2936
2937static void
2938bfa_iocpf_poll_timeout(void *ioc_arg)
2939{
2940 struct bfa_ioc_s *ioc = (struct bfa_ioc_s *) ioc_arg;
2941
2942 bfa_ioc_poll_fwinit(ioc);
2943}
2944
Jing Huang5fbe25c2010-10-18 17:17:23 -07002945/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002946 * bfa timer function
2947 */
2948void
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002949bfa_timer_beat(struct bfa_timer_mod_s *mod)
2950{
2951 struct list_head *qh = &mod->timer_q;
2952 struct list_head *qe, *qe_next;
2953 struct bfa_timer_s *elem;
2954 struct list_head timedout_q;
2955
2956 INIT_LIST_HEAD(&timedout_q);
2957
2958 qe = bfa_q_next(qh);
2959
2960 while (qe != qh) {
2961 qe_next = bfa_q_next(qe);
2962
2963 elem = (struct bfa_timer_s *) qe;
2964 if (elem->timeout <= BFA_TIMER_FREQ) {
2965 elem->timeout = 0;
2966 list_del(&elem->qe);
2967 list_add_tail(&elem->qe, &timedout_q);
2968 } else {
2969 elem->timeout -= BFA_TIMER_FREQ;
2970 }
2971
2972 qe = qe_next; /* go to next elem */
2973 }
2974
2975 /*
2976 * Pop all the timeout entries
2977 */
2978 while (!list_empty(&timedout_q)) {
2979 bfa_q_deq(&timedout_q, &elem);
2980 elem->timercb(elem->arg);
2981 }
2982}
2983
Jing Huang5fbe25c2010-10-18 17:17:23 -07002984/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002985 * Should be called with lock protection
2986 */
2987void
2988bfa_timer_begin(struct bfa_timer_mod_s *mod, struct bfa_timer_s *timer,
2989 void (*timercb) (void *), void *arg, unsigned int timeout)
2990{
2991
Jing Huangd4b671c2010-12-26 21:46:35 -08002992 WARN_ON(timercb == NULL);
2993 WARN_ON(bfa_q_is_on_q(&mod->timer_q, timer));
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002994
2995 timer->timeout = timeout;
2996 timer->timercb = timercb;
2997 timer->arg = arg;
2998
2999 list_add_tail(&timer->qe, &mod->timer_q);
3000}
3001
Jing Huang5fbe25c2010-10-18 17:17:23 -07003002/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07003003 * Should be called with lock protection
3004 */
3005void
3006bfa_timer_stop(struct bfa_timer_s *timer)
3007{
Jing Huangd4b671c2010-12-26 21:46:35 -08003008 WARN_ON(list_empty(&timer->qe));
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07003009
3010 list_del(&timer->qe);
3011}
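
/*
 * Illustrative sketch (not part of the driver build): arming and cancelling
 * a timer with the bfa timer API. bfa_timer_beat() is assumed to be driven
 * every BFA_TIMER_FREQ msecs under the same lock that protects begin/stop;
 * the callback, argument and 2000 msec timeout are example placeholders.
 */
#if 0
static void
example_timeout_cb(void *arg)
{
    /* called from bfa_timer_beat() once the timeout elapses */
}

static void
example_timer_usage(struct bfa_timer_mod_s *mod, struct bfa_timer_s *timer,
            void *arg)
{
    /* arm a one-shot timer of roughly 2 seconds */
    bfa_timer_begin(mod, timer, example_timeout_cb, arg, 2000);

    /* ... if the awaited event completes before expiry ... */
    bfa_timer_stop(timer);
}
#endif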
Krishna Gudipati1a4d8e12011-06-24 20:22:28 -07003012
3013/*
3014 * ASIC block related
3015 */
3016static void
3017bfa_ablk_config_swap(struct bfa_ablk_cfg_s *cfg)
3018{
3019 struct bfa_ablk_cfg_inst_s *cfg_inst;
3020 int i, j;
3021 u16 be16;
3022 u32 be32;
3023
3024 for (i = 0; i < BFA_ABLK_MAX; i++) {
3025 cfg_inst = &cfg->inst[i];
3026 for (j = 0; j < BFA_ABLK_MAX_PFS; j++) {
3027 be16 = cfg_inst->pf_cfg[j].pers;
3028 cfg_inst->pf_cfg[j].pers = be16_to_cpu(be16);
3029 be16 = cfg_inst->pf_cfg[j].num_qpairs;
3030 cfg_inst->pf_cfg[j].num_qpairs = be16_to_cpu(be16);
3031 be16 = cfg_inst->pf_cfg[j].num_vectors;
3032 cfg_inst->pf_cfg[j].num_vectors = be16_to_cpu(be16);
3033 be32 = cfg_inst->pf_cfg[j].bw;
3034 cfg_inst->pf_cfg[j].bw = be32_to_cpu(be32);
3035 }
3036 }
3037}
3038
3039static void
3040bfa_ablk_isr(void *cbarg, struct bfi_mbmsg_s *msg)
3041{
3042 struct bfa_ablk_s *ablk = (struct bfa_ablk_s *)cbarg;
3043 struct bfi_ablk_i2h_rsp_s *rsp = (struct bfi_ablk_i2h_rsp_s *)msg;
3044 bfa_ablk_cbfn_t cbfn;
3045
3046 WARN_ON(msg->mh.msg_class != BFI_MC_ABLK);
3047 bfa_trc(ablk->ioc, msg->mh.msg_id);
3048
3049 switch (msg->mh.msg_id) {
3050 case BFI_ABLK_I2H_QUERY:
3051 if (rsp->status == BFA_STATUS_OK) {
3052 memcpy(ablk->cfg, ablk->dma_addr.kva,
3053 sizeof(struct bfa_ablk_cfg_s));
3054 bfa_ablk_config_swap(ablk->cfg);
3055 ablk->cfg = NULL;
3056 }
3057 break;
3058
3059 case BFI_ABLK_I2H_ADPT_CONFIG:
3060 case BFI_ABLK_I2H_PORT_CONFIG:
3061 /* update config port mode */
3062 ablk->ioc->port_mode_cfg = rsp->port_mode;
3063 /* fall through */
3064 case BFI_ABLK_I2H_PF_DELETE:
3065 case BFI_ABLK_I2H_PF_UPDATE:
3066 case BFI_ABLK_I2H_OPTROM_ENABLE:
3067 case BFI_ABLK_I2H_OPTROM_DISABLE:
3068 /* No-op */
3069 break;
3070
3071 case BFI_ABLK_I2H_PF_CREATE:
3072 *(ablk->pcifn) = rsp->pcifn;
3073 ablk->pcifn = NULL;
3074 break;
3075
3076 default:
3077 WARN_ON(1);
3078 }
3079
3080 ablk->busy = BFA_FALSE;
3081 if (ablk->cbfn) {
3082 cbfn = ablk->cbfn;
3083 ablk->cbfn = NULL;
3084 cbfn(ablk->cbarg, rsp->status);
3085 }
3086}
3087
3088static void
3089bfa_ablk_notify(void *cbarg, enum bfa_ioc_event_e event)
3090{
3091 struct bfa_ablk_s *ablk = (struct bfa_ablk_s *)cbarg;
3092
3093 bfa_trc(ablk->ioc, event);
3094
3095 switch (event) {
3096 case BFA_IOC_E_ENABLED:
3097 WARN_ON(ablk->busy != BFA_FALSE);
3098 break;
3099
3100 case BFA_IOC_E_DISABLED:
3101 case BFA_IOC_E_FAILED:
3102 /* Fail any pending requests */
3103 ablk->pcifn = NULL;
3104 if (ablk->busy) {
3105 if (ablk->cbfn)
3106 ablk->cbfn(ablk->cbarg, BFA_STATUS_FAILED);
3107 ablk->cbfn = NULL;
3108 ablk->busy = BFA_FALSE;
3109 }
3110 break;
3111
3112 default:
3113 WARN_ON(1);
3114 break;
3115 }
3116}
3117
3118u32
3119bfa_ablk_meminfo(void)
3120{
3121 return BFA_ROUNDUP(sizeof(struct bfa_ablk_cfg_s), BFA_DMA_ALIGN_SZ);
3122}
3123
3124void
3125bfa_ablk_memclaim(struct bfa_ablk_s *ablk, u8 *dma_kva, u64 dma_pa)
3126{
3127 ablk->dma_addr.kva = dma_kva;
3128 ablk->dma_addr.pa = dma_pa;
3129}
3130
3131void
3132bfa_ablk_attach(struct bfa_ablk_s *ablk, struct bfa_ioc_s *ioc)
3133{
3134 ablk->ioc = ioc;
3135
3136 bfa_ioc_mbox_regisr(ablk->ioc, BFI_MC_ABLK, bfa_ablk_isr, ablk);
Krishna Gudipati3350d982011-06-24 20:28:37 -07003137 bfa_q_qe_init(&ablk->ioc_notify);
Krishna Gudipati1a4d8e12011-06-24 20:22:28 -07003138 bfa_ioc_notify_init(&ablk->ioc_notify, bfa_ablk_notify, ablk);
3139 list_add_tail(&ablk->ioc_notify.qe, &ablk->ioc->notify_q);
3140}
3141
3142bfa_status_t
3143bfa_ablk_query(struct bfa_ablk_s *ablk, struct bfa_ablk_cfg_s *ablk_cfg,
3144 bfa_ablk_cbfn_t cbfn, void *cbarg)
3145{
3146 struct bfi_ablk_h2i_query_s *m;
3147
3148 WARN_ON(!ablk_cfg);
3149
3150 if (!bfa_ioc_is_operational(ablk->ioc)) {
3151 bfa_trc(ablk->ioc, BFA_STATUS_IOC_FAILURE);
3152 return BFA_STATUS_IOC_FAILURE;
3153 }
3154
3155 if (ablk->busy) {
3156 bfa_trc(ablk->ioc, BFA_STATUS_DEVBUSY);
3157 return BFA_STATUS_DEVBUSY;
3158 }
3159
3160 ablk->cfg = ablk_cfg;
3161 ablk->cbfn = cbfn;
3162 ablk->cbarg = cbarg;
3163 ablk->busy = BFA_TRUE;
3164
3165 m = (struct bfi_ablk_h2i_query_s *)ablk->mb.msg;
3166 bfi_h2i_set(m->mh, BFI_MC_ABLK, BFI_ABLK_H2I_QUERY,
3167 bfa_ioc_portid(ablk->ioc));
3168 bfa_dma_be_addr_set(m->addr, ablk->dma_addr.pa);
3169 bfa_ioc_mbox_queue(ablk->ioc, &ablk->mb);
3170
3171 return BFA_STATUS_OK;
3172}
3173
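/*
 * Illustrative sketch (not part of the driver build): the ablk requests are
 * asynchronous and single-outstanding; completion is reported through the
 * supplied callback. The callback signature is assumed to match
 * bfa_ablk_cbfn_t, and the names used here are placeholders for the example.
 */
#if 0
static void
example_ablk_query_done(void *cbarg, bfa_status_t status)
{
    /* on BFA_STATUS_OK the caller's bfa_ablk_cfg_s has been filled in */
}

static bfa_status_t
example_ablk_query(struct bfa_ablk_s *ablk, struct bfa_ablk_cfg_s *cfg)
{
    /* returns BFA_STATUS_DEVBUSY if a previous request is still pending */
    return bfa_ablk_query(ablk, cfg, example_ablk_query_done, NULL);
}
#endif
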
3174bfa_status_t
3175bfa_ablk_pf_create(struct bfa_ablk_s *ablk, u16 *pcifn,
3176 u8 port, enum bfi_pcifn_class personality, int bw,
3177 bfa_ablk_cbfn_t cbfn, void *cbarg)
3178{
3179 struct bfi_ablk_h2i_pf_req_s *m;
3180
3181 if (!bfa_ioc_is_operational(ablk->ioc)) {
3182 bfa_trc(ablk->ioc, BFA_STATUS_IOC_FAILURE);
3183 return BFA_STATUS_IOC_FAILURE;
3184 }
3185
3186 if (ablk->busy) {
3187 bfa_trc(ablk->ioc, BFA_STATUS_DEVBUSY);
3188 return BFA_STATUS_DEVBUSY;
3189 }
3190
3191 ablk->pcifn = pcifn;
3192 ablk->cbfn = cbfn;
3193 ablk->cbarg = cbarg;
3194 ablk->busy = BFA_TRUE;
3195
3196 m = (struct bfi_ablk_h2i_pf_req_s *)ablk->mb.msg;
3197 bfi_h2i_set(m->mh, BFI_MC_ABLK, BFI_ABLK_H2I_PF_CREATE,
3198 bfa_ioc_portid(ablk->ioc));
3199 m->pers = cpu_to_be16((u16)personality);
3200 m->bw = cpu_to_be32(bw);
3201 m->port = port;
3202 bfa_ioc_mbox_queue(ablk->ioc, &ablk->mb);
3203
3204 return BFA_STATUS_OK;
3205}
3206
3207bfa_status_t
3208bfa_ablk_pf_delete(struct bfa_ablk_s *ablk, int pcifn,
3209 bfa_ablk_cbfn_t cbfn, void *cbarg)
3210{
3211 struct bfi_ablk_h2i_pf_req_s *m;
3212
3213 if (!bfa_ioc_is_operational(ablk->ioc)) {
3214 bfa_trc(ablk->ioc, BFA_STATUS_IOC_FAILURE);
3215 return BFA_STATUS_IOC_FAILURE;
3216 }
3217
3218 if (ablk->busy) {
3219 bfa_trc(ablk->ioc, BFA_STATUS_DEVBUSY);
3220 return BFA_STATUS_DEVBUSY;
3221 }
3222
3223 ablk->cbfn = cbfn;
3224 ablk->cbarg = cbarg;
3225 ablk->busy = BFA_TRUE;
3226
3227 m = (struct bfi_ablk_h2i_pf_req_s *)ablk->mb.msg;
3228 bfi_h2i_set(m->mh, BFI_MC_ABLK, BFI_ABLK_H2I_PF_DELETE,
3229 bfa_ioc_portid(ablk->ioc));
3230 m->pcifn = (u8)pcifn;
3231 bfa_ioc_mbox_queue(ablk->ioc, &ablk->mb);
3232
3233 return BFA_STATUS_OK;
3234}
3235
3236bfa_status_t
3237bfa_ablk_adapter_config(struct bfa_ablk_s *ablk, enum bfa_mode_s mode,
3238 int max_pf, int max_vf, bfa_ablk_cbfn_t cbfn, void *cbarg)
3239{
3240 struct bfi_ablk_h2i_cfg_req_s *m;
3241
3242 if (!bfa_ioc_is_operational(ablk->ioc)) {
3243 bfa_trc(ablk->ioc, BFA_STATUS_IOC_FAILURE);
3244 return BFA_STATUS_IOC_FAILURE;
3245 }
3246
3247 if (ablk->busy) {
3248 bfa_trc(ablk->ioc, BFA_STATUS_DEVBUSY);
3249 return BFA_STATUS_DEVBUSY;
3250 }
3251
3252 ablk->cbfn = cbfn;
3253 ablk->cbarg = cbarg;
3254 ablk->busy = BFA_TRUE;
3255
3256 m = (struct bfi_ablk_h2i_cfg_req_s *)ablk->mb.msg;
3257 bfi_h2i_set(m->mh, BFI_MC_ABLK, BFI_ABLK_H2I_ADPT_CONFIG,
3258 bfa_ioc_portid(ablk->ioc));
3259 m->mode = (u8)mode;
3260 m->max_pf = (u8)max_pf;
3261 m->max_vf = (u8)max_vf;
3262 bfa_ioc_mbox_queue(ablk->ioc, &ablk->mb);
3263
3264 return BFA_STATUS_OK;
3265}
3266
3267bfa_status_t
3268bfa_ablk_port_config(struct bfa_ablk_s *ablk, int port, enum bfa_mode_s mode,
3269 int max_pf, int max_vf, bfa_ablk_cbfn_t cbfn, void *cbarg)
3270{
3271 struct bfi_ablk_h2i_cfg_req_s *m;
3272
3273 if (!bfa_ioc_is_operational(ablk->ioc)) {
3274 bfa_trc(ablk->ioc, BFA_STATUS_IOC_FAILURE);
3275 return BFA_STATUS_IOC_FAILURE;
3276 }
3277
3278 if (ablk->busy) {
3279 bfa_trc(ablk->ioc, BFA_STATUS_DEVBUSY);
3280 return BFA_STATUS_DEVBUSY;
3281 }
3282
3283 ablk->cbfn = cbfn;
3284 ablk->cbarg = cbarg;
3285 ablk->busy = BFA_TRUE;
3286
3287 m = (struct bfi_ablk_h2i_cfg_req_s *)ablk->mb.msg;
3288 bfi_h2i_set(m->mh, BFI_MC_ABLK, BFI_ABLK_H2I_PORT_CONFIG,
3289 bfa_ioc_portid(ablk->ioc));
3290 m->port = (u8)port;
3291 m->mode = (u8)mode;
3292 m->max_pf = (u8)max_pf;
3293 m->max_vf = (u8)max_vf;
3294 bfa_ioc_mbox_queue(ablk->ioc, &ablk->mb);
3295
3296 return BFA_STATUS_OK;
3297}
3298
3299bfa_status_t
3300bfa_ablk_pf_update(struct bfa_ablk_s *ablk, int pcifn, int bw,
3301 bfa_ablk_cbfn_t cbfn, void *cbarg)
3302{
3303 struct bfi_ablk_h2i_pf_req_s *m;
3304
3305 if (!bfa_ioc_is_operational(ablk->ioc)) {
3306 bfa_trc(ablk->ioc, BFA_STATUS_IOC_FAILURE);
3307 return BFA_STATUS_IOC_FAILURE;
3308 }
3309
3310 if (ablk->busy) {
3311 bfa_trc(ablk->ioc, BFA_STATUS_DEVBUSY);
3312 return BFA_STATUS_DEVBUSY;
3313 }
3314
3315 ablk->cbfn = cbfn;
3316 ablk->cbarg = cbarg;
3317 ablk->busy = BFA_TRUE;
3318
3319 m = (struct bfi_ablk_h2i_pf_req_s *)ablk->mb.msg;
3320 bfi_h2i_set(m->mh, BFI_MC_ABLK, BFI_ABLK_H2I_PF_UPDATE,
3321 bfa_ioc_portid(ablk->ioc));
3322 m->pcifn = (u8)pcifn;
3323 m->bw = cpu_to_be32(bw);
3324 bfa_ioc_mbox_queue(ablk->ioc, &ablk->mb);
3325
3326 return BFA_STATUS_OK;
3327}
3328
3329bfa_status_t
3330bfa_ablk_optrom_en(struct bfa_ablk_s *ablk, bfa_ablk_cbfn_t cbfn, void *cbarg)
3331{
3332 struct bfi_ablk_h2i_optrom_s *m;
3333
3334 if (!bfa_ioc_is_operational(ablk->ioc)) {
3335 bfa_trc(ablk->ioc, BFA_STATUS_IOC_FAILURE);
3336 return BFA_STATUS_IOC_FAILURE;
3337 }
3338
3339 if (ablk->busy) {
3340 bfa_trc(ablk->ioc, BFA_STATUS_DEVBUSY);
3341 return BFA_STATUS_DEVBUSY;
3342 }
3343
3344 ablk->cbfn = cbfn;
3345 ablk->cbarg = cbarg;
3346 ablk->busy = BFA_TRUE;
3347
3348 m = (struct bfi_ablk_h2i_optrom_s *)ablk->mb.msg;
3349 bfi_h2i_set(m->mh, BFI_MC_ABLK, BFI_ABLK_H2I_OPTROM_ENABLE,
3350 bfa_ioc_portid(ablk->ioc));
3351 bfa_ioc_mbox_queue(ablk->ioc, &ablk->mb);
3352
3353 return BFA_STATUS_OK;
3354}
3355
3356bfa_status_t
3357bfa_ablk_optrom_dis(struct bfa_ablk_s *ablk, bfa_ablk_cbfn_t cbfn, void *cbarg)
3358{
3359 struct bfi_ablk_h2i_optrom_s *m;
3360
3361 if (!bfa_ioc_is_operational(ablk->ioc)) {
3362 bfa_trc(ablk->ioc, BFA_STATUS_IOC_FAILURE);
3363 return BFA_STATUS_IOC_FAILURE;
3364 }
3365
3366 if (ablk->busy) {
3367 bfa_trc(ablk->ioc, BFA_STATUS_DEVBUSY);
3368 return BFA_STATUS_DEVBUSY;
3369 }
3370
3371 ablk->cbfn = cbfn;
3372 ablk->cbarg = cbarg;
3373 ablk->busy = BFA_TRUE;
3374
3375 m = (struct bfi_ablk_h2i_optrom_s *)ablk->mb.msg;
3376 bfi_h2i_set(m->mh, BFI_MC_ABLK, BFI_ABLK_H2I_OPTROM_DISABLE,
3377 bfa_ioc_portid(ablk->ioc));
3378 bfa_ioc_mbox_queue(ablk->ioc, &ablk->mb);
3379
3380 return BFA_STATUS_OK;
3381}
Krishna Gudipati51e569a2011-06-24 20:26:25 -07003382
3383/*
3384 * SFP module specific
3385 */
3386
3387/* forward declarations */
3388static void bfa_sfp_getdata_send(struct bfa_sfp_s *sfp);
3389static void bfa_sfp_media_get(struct bfa_sfp_s *sfp);
3390static bfa_status_t bfa_sfp_speed_valid(struct bfa_sfp_s *sfp,
3391 enum bfa_port_speed portspeed);
3392
3393static void
3394bfa_cb_sfp_show(struct bfa_sfp_s *sfp)
3395{
3396 bfa_trc(sfp, sfp->lock);
3397 if (sfp->cbfn)
3398 sfp->cbfn(sfp->cbarg, sfp->status);
3399 sfp->lock = 0;
3400 sfp->cbfn = NULL;
3401}
3402
3403static void
3404bfa_cb_sfp_state_query(struct bfa_sfp_s *sfp)
3405{
3406 bfa_trc(sfp, sfp->portspeed);
3407 if (sfp->media) {
3408 bfa_sfp_media_get(sfp);
3409 if (sfp->state_query_cbfn)
3410 sfp->state_query_cbfn(sfp->state_query_cbarg,
3411 sfp->status);
3412 sfp->media = NULL;
3413 }
3414
3415 if (sfp->portspeed) {
3416 sfp->status = bfa_sfp_speed_valid(sfp, sfp->portspeed);
3417 if (sfp->state_query_cbfn)
3418 sfp->state_query_cbfn(sfp->state_query_cbarg,
3419 sfp->status);
3420 sfp->portspeed = BFA_PORT_SPEED_UNKNOWN;
3421 }
3422
3423 sfp->state_query_lock = 0;
3424 sfp->state_query_cbfn = NULL;
3425}
3426
3427/*
3428 * IOC event handler.
3429 */
3430static void
3431bfa_sfp_notify(void *sfp_arg, enum bfa_ioc_event_e event)
3432{
3433 struct bfa_sfp_s *sfp = sfp_arg;
3434
3435 bfa_trc(sfp, event);
3436 bfa_trc(sfp, sfp->lock);
3437 bfa_trc(sfp, sfp->state_query_lock);
3438
3439 switch (event) {
3440 case BFA_IOC_E_DISABLED:
3441 case BFA_IOC_E_FAILED:
3442 if (sfp->lock) {
3443 sfp->status = BFA_STATUS_IOC_FAILURE;
3444 bfa_cb_sfp_show(sfp);
3445 }
3446
3447 if (sfp->state_query_lock) {
3448 sfp->status = BFA_STATUS_IOC_FAILURE;
3449 bfa_cb_sfp_state_query(sfp);
3450 }
3451 break;
3452
3453 default:
3454 break;
3455 }
3456}
3457
3458/*
Krishna Gudipati7826f302011-07-20 16:59:13 -07003459 * Post SFP State Change Notification (SCN) events to the AEN
3460 */
3461static void
3462bfa_sfp_scn_aen_post(struct bfa_sfp_s *sfp, struct bfi_sfp_scn_s *rsp)
3463{
3464 struct bfad_s *bfad = (struct bfad_s *)sfp->ioc->bfa->bfad;
3465 struct bfa_aen_entry_s *aen_entry;
3466 enum bfa_port_aen_event aen_evt = 0;
3467
3468 bfa_trc(sfp, (((u64)rsp->pomlvl) << 16) | (((u64)rsp->sfpid) << 8) |
3469 ((u64)rsp->event));
3470
3471 bfad_get_aen_entry(bfad, aen_entry);
3472 if (!aen_entry)
3473 return;
3474
3475 aen_entry->aen_data.port.ioc_type = bfa_ioc_get_type(sfp->ioc);
3476 aen_entry->aen_data.port.pwwn = sfp->ioc->attr->pwwn;
3477 aen_entry->aen_data.port.mac = bfa_ioc_get_mac(sfp->ioc);
3478
3479 switch (rsp->event) {
3480 case BFA_SFP_SCN_INSERTED:
3481 aen_evt = BFA_PORT_AEN_SFP_INSERT;
3482 break;
3483 case BFA_SFP_SCN_REMOVED:
3484 aen_evt = BFA_PORT_AEN_SFP_REMOVE;
3485 break;
3486 case BFA_SFP_SCN_FAILED:
3487 aen_evt = BFA_PORT_AEN_SFP_ACCESS_ERROR;
3488 break;
3489 case BFA_SFP_SCN_UNSUPPORT:
3490 aen_evt = BFA_PORT_AEN_SFP_UNSUPPORT;
3491 break;
3492 case BFA_SFP_SCN_POM:
3493 aen_evt = BFA_PORT_AEN_SFP_POM;
3494 aen_entry->aen_data.port.level = rsp->pomlvl;
3495 break;
3496 default:
3497 bfa_trc(sfp, rsp->event);
3498 WARN_ON(1);
3499 }
3500
3501 /* Send the AEN notification */
3502 bfad_im_post_vendor_event(aen_entry, bfad, ++sfp->ioc->ioc_aen_seq,
3503 BFA_AEN_CAT_PORT, aen_evt);
3504}
3505
3506/*
Krishna Gudipati51e569a2011-06-24 20:26:25 -07003507 * SFP get data send
3508 */
3509static void
3510bfa_sfp_getdata_send(struct bfa_sfp_s *sfp)
3511{
3512 struct bfi_sfp_req_s *req = (struct bfi_sfp_req_s *)sfp->mbcmd.msg;
3513
3514 bfa_trc(sfp, req->memtype);
3515
3516 /* build host command */
3517 bfi_h2i_set(req->mh, BFI_MC_SFP, BFI_SFP_H2I_SHOW,
3518 bfa_ioc_portid(sfp->ioc));
3519
3520 /* send mbox cmd */
3521 bfa_ioc_mbox_queue(sfp->ioc, &sfp->mbcmd);
3522}
3523
3524/*
3525 * SFP is valid, read sfp data
3526 */
3527static void
3528bfa_sfp_getdata(struct bfa_sfp_s *sfp, enum bfi_sfp_mem_e memtype)
3529{
3530 struct bfi_sfp_req_s *req = (struct bfi_sfp_req_s *)sfp->mbcmd.msg;
3531
3532 WARN_ON(sfp->lock != 0);
3533 bfa_trc(sfp, sfp->state);
3534
3535 sfp->lock = 1;
3536 sfp->memtype = memtype;
3537 req->memtype = memtype;
3538
3539 /* Setup SG list */
3540 bfa_alen_set(&req->alen, sizeof(struct sfp_mem_s), sfp->dbuf_pa);
3541
3542 bfa_sfp_getdata_send(sfp);
3543}
3544
3545/*
Krishna Gudipati7826f302011-07-20 16:59:13 -07003546 * SFP scn handler
3547 */
3548static void
3549bfa_sfp_scn(struct bfa_sfp_s *sfp, struct bfi_mbmsg_s *msg)
3550{
3551 struct bfi_sfp_scn_s *rsp = (struct bfi_sfp_scn_s *) msg;
3552
3553 switch (rsp->event) {
3554 case BFA_SFP_SCN_INSERTED:
3555 sfp->state = BFA_SFP_STATE_INSERTED;
3556 sfp->data_valid = 0;
3557 bfa_sfp_scn_aen_post(sfp, rsp);
3558 break;
3559 case BFA_SFP_SCN_REMOVED:
3560 sfp->state = BFA_SFP_STATE_REMOVED;
3561 sfp->data_valid = 0;
3562 bfa_sfp_scn_aen_post(sfp, rsp);
3563 break;
3564 case BFA_SFP_SCN_FAILED:
3565 sfp->state = BFA_SFP_STATE_FAILED;
3566 sfp->data_valid = 0;
3567 bfa_sfp_scn_aen_post(sfp, rsp);
3568 break;
3569 case BFA_SFP_SCN_UNSUPPORT:
3570 sfp->state = BFA_SFP_STATE_UNSUPPORT;
3571 bfa_sfp_scn_aen_post(sfp, rsp);
3572 if (!sfp->lock)
3573 bfa_sfp_getdata(sfp, BFI_SFP_MEM_ALL);
3574 break;
3575 case BFA_SFP_SCN_POM:
3576 bfa_sfp_scn_aen_post(sfp, rsp);
3577 break;
3578 case BFA_SFP_SCN_VALID:
3579 sfp->state = BFA_SFP_STATE_VALID;
3580 if (!sfp->lock)
3581 bfa_sfp_getdata(sfp, BFI_SFP_MEM_ALL);
3582 break;
3583 default:
3584 bfa_trc(sfp, rsp->event);
3585 WARN_ON(1);
3586 }
3587}
3588
3589/*
Krishna Gudipati51e569a2011-06-24 20:26:25 -07003590 * SFP show complete
3591 */
3592static void
3593bfa_sfp_show_comp(struct bfa_sfp_s *sfp, struct bfi_mbmsg_s *msg)
3594{
3595 struct bfi_sfp_rsp_s *rsp = (struct bfi_sfp_rsp_s *) msg;
3596
3597 if (!sfp->lock) {
3598 /*
3599 * receiving response after ioc failure
3600 */
3601 bfa_trc(sfp, sfp->lock);
3602 return;
3603 }
3604
3605 bfa_trc(sfp, rsp->status);
3606 if (rsp->status == BFA_STATUS_OK) {
3607 sfp->data_valid = 1;
3608 if (sfp->state == BFA_SFP_STATE_VALID)
3609 sfp->status = BFA_STATUS_OK;
3610 else if (sfp->state == BFA_SFP_STATE_UNSUPPORT)
3611 sfp->status = BFA_STATUS_SFP_UNSUPP;
3612 else
3613 bfa_trc(sfp, sfp->state);
3614 } else {
3615 sfp->data_valid = 0;
3616 sfp->status = rsp->status;
3617 /* sfpshow shouldn't change sfp state */
3618 }
3619
3620 bfa_trc(sfp, sfp->memtype);
3621 if (sfp->memtype == BFI_SFP_MEM_DIAGEXT) {
3622 bfa_trc(sfp, sfp->data_valid);
3623 if (sfp->data_valid) {
3624 u32 size = sizeof(struct sfp_mem_s);
3625 u8 *des = (u8 *) &(sfp->sfpmem->srlid_base);
3626 memcpy(des, sfp->dbuf_kva, size);
3627 }
3628 /*
3629 * Queue completion callback.
3630 */
3631 bfa_cb_sfp_show(sfp);
3632 } else
3633 sfp->lock = 0;
3634
3635 bfa_trc(sfp, sfp->state_query_lock);
3636 if (sfp->state_query_lock) {
3637 sfp->state = rsp->state;
3638 /* Complete callback */
3639 bfa_cb_sfp_state_query(sfp);
3640 }
3641}
3642
3643/*
3644 * SFP query fw sfp state
3645 */
3646static void
3647bfa_sfp_state_query(struct bfa_sfp_s *sfp)
3648{
3649 struct bfi_sfp_req_s *req = (struct bfi_sfp_req_s *)sfp->mbcmd.msg;
3650
3651 /* Should not be doing query if not in _INIT state */
3652 WARN_ON(sfp->state != BFA_SFP_STATE_INIT);
3653 WARN_ON(sfp->state_query_lock != 0);
3654 bfa_trc(sfp, sfp->state);
3655
3656 sfp->state_query_lock = 1;
3657 req->memtype = 0;
3658
3659 if (!sfp->lock)
3660 bfa_sfp_getdata(sfp, BFI_SFP_MEM_ALL);
3661}
3662
3663static void
3664bfa_sfp_media_get(struct bfa_sfp_s *sfp)
3665{
3666 enum bfa_defs_sfp_media_e *media = sfp->media;
3667
3668 *media = BFA_SFP_MEDIA_UNKNOWN;
3669
3670 if (sfp->state == BFA_SFP_STATE_UNSUPPORT)
3671 *media = BFA_SFP_MEDIA_UNSUPPORT;
3672 else if (sfp->state == BFA_SFP_STATE_VALID) {
3673 union sfp_xcvr_e10g_code_u e10g;
3674 struct sfp_mem_s *sfpmem = (struct sfp_mem_s *)sfp->dbuf_kva;
3675 u16 xmtr_tech = (sfpmem->srlid_base.xcvr[4] & 0x3) << 7 |
3676 (sfpmem->srlid_base.xcvr[5] >> 1);
3677
3678 e10g.b = sfpmem->srlid_base.xcvr[0];
3679 bfa_trc(sfp, e10g.b);
3680 bfa_trc(sfp, xmtr_tech);
3681 /* check fc transmitter tech */
3682 if ((xmtr_tech & SFP_XMTR_TECH_CU) ||
3683 (xmtr_tech & SFP_XMTR_TECH_CP) ||
3684 (xmtr_tech & SFP_XMTR_TECH_CA))
3685 *media = BFA_SFP_MEDIA_CU;
3686 else if ((xmtr_tech & SFP_XMTR_TECH_EL_INTRA) ||
3687 (xmtr_tech & SFP_XMTR_TECH_EL_INTER))
3688 *media = BFA_SFP_MEDIA_EL;
3689 else if ((xmtr_tech & SFP_XMTR_TECH_LL) ||
3690 (xmtr_tech & SFP_XMTR_TECH_LC))
3691 *media = BFA_SFP_MEDIA_LW;
3692 else if ((xmtr_tech & SFP_XMTR_TECH_SL) ||
3693 (xmtr_tech & SFP_XMTR_TECH_SN) ||
3694 (xmtr_tech & SFP_XMTR_TECH_SA))
3695 *media = BFA_SFP_MEDIA_SW;
3696 /* Check 10G Ethernet Compliance code */
Jing Huang98cdfb42011-11-16 12:29:26 -08003697 else if (e10g.r.e10g_sr)
Krishna Gudipati51e569a2011-06-24 20:26:25 -07003698 *media = BFA_SFP_MEDIA_SW;
Jing Huang98cdfb42011-11-16 12:29:26 -08003699 else if (e10g.r.e10g_lrm && e10g.r.e10g_lr)
Krishna Gudipati51e569a2011-06-24 20:26:25 -07003700 *media = BFA_SFP_MEDIA_LW;
Jing Huang98cdfb42011-11-16 12:29:26 -08003701 else if (e10g.r.e10g_unall)
Krishna Gudipati51e569a2011-06-24 20:26:25 -07003702 *media = BFA_SFP_MEDIA_UNKNOWN;
3703 else
3704 bfa_trc(sfp, 0);
3705 } else
3706 bfa_trc(sfp, sfp->state);
3707}
3708
3709static bfa_status_t
3710bfa_sfp_speed_valid(struct bfa_sfp_s *sfp, enum bfa_port_speed portspeed)
3711{
3712 struct sfp_mem_s *sfpmem = (struct sfp_mem_s *)sfp->dbuf_kva;
3713 struct sfp_xcvr_s *xcvr = (struct sfp_xcvr_s *) sfpmem->srlid_base.xcvr;
3714 union sfp_xcvr_fc3_code_u fc3 = xcvr->fc3;
3715 union sfp_xcvr_e10g_code_u e10g = xcvr->e10g;
3716
3717 if (portspeed == BFA_PORT_SPEED_10GBPS) {
3718 if (e10g.r.e10g_sr || e10g.r.e10g_lr)
3719 return BFA_STATUS_OK;
3720 else {
3721 bfa_trc(sfp, e10g.b);
3722 return BFA_STATUS_UNSUPP_SPEED;
3723 }
3724 }
3725 if (((portspeed & BFA_PORT_SPEED_16GBPS) && fc3.r.mb1600) ||
3726 ((portspeed & BFA_PORT_SPEED_8GBPS) && fc3.r.mb800) ||
3727 ((portspeed & BFA_PORT_SPEED_4GBPS) && fc3.r.mb400) ||
3728 ((portspeed & BFA_PORT_SPEED_2GBPS) && fc3.r.mb200) ||
3729 ((portspeed & BFA_PORT_SPEED_1GBPS) && fc3.r.mb100))
3730 return BFA_STATUS_OK;
3731 else {
3732 bfa_trc(sfp, portspeed);
3733 bfa_trc(sfp, fc3.b);
3734 bfa_trc(sfp, e10g.b);
3735 return BFA_STATUS_UNSUPP_SPEED;
3736 }
3737}
3738
3739/*
3740 * SFP hmbox handler
3741 */
3742void
3743bfa_sfp_intr(void *sfparg, struct bfi_mbmsg_s *msg)
3744{
3745 struct bfa_sfp_s *sfp = sfparg;
3746
3747 switch (msg->mh.msg_id) {
3748 case BFI_SFP_I2H_SHOW:
3749 bfa_sfp_show_comp(sfp, msg);
3750 break;
3751
3752 case BFI_SFP_I2H_SCN:
Krishna Gudipati7826f302011-07-20 16:59:13 -07003753 bfa_sfp_scn(sfp, msg);
Krishna Gudipati51e569a2011-06-24 20:26:25 -07003754 break;
3755
3756 default:
3757 bfa_trc(sfp, msg->mh.msg_id);
3758 WARN_ON(1);
3759 }
3760}
3761
3762/*
3763 * Return DMA memory needed by sfp module.
3764 */
3765u32
3766bfa_sfp_meminfo(void)
3767{
3768 return BFA_ROUNDUP(sizeof(struct sfp_mem_s), BFA_DMA_ALIGN_SZ);
3769}
3770
3771/*
3772 * Attach virtual and physical memory for SFP.
3773 */
3774void
3775bfa_sfp_attach(struct bfa_sfp_s *sfp, struct bfa_ioc_s *ioc, void *dev,
3776 struct bfa_trc_mod_s *trcmod)
3777{
3778 sfp->dev = dev;
3779 sfp->ioc = ioc;
3780 sfp->trcmod = trcmod;
3781
3782 sfp->cbfn = NULL;
3783 sfp->cbarg = NULL;
3784 sfp->sfpmem = NULL;
3785 sfp->lock = 0;
3786 sfp->data_valid = 0;
3787 sfp->state = BFA_SFP_STATE_INIT;
3788 sfp->state_query_lock = 0;
3789 sfp->state_query_cbfn = NULL;
3790 sfp->state_query_cbarg = NULL;
3791 sfp->media = NULL;
3792 sfp->portspeed = BFA_PORT_SPEED_UNKNOWN;
3793 sfp->is_elb = BFA_FALSE;
3794
3795 bfa_ioc_mbox_regisr(sfp->ioc, BFI_MC_SFP, bfa_sfp_intr, sfp);
3796 bfa_q_qe_init(&sfp->ioc_notify);
3797 bfa_ioc_notify_init(&sfp->ioc_notify, bfa_sfp_notify, sfp);
3798 list_add_tail(&sfp->ioc_notify.qe, &sfp->ioc->notify_q);
3799}
3800
3801/*
3802 * Claim Memory for SFP
3803 */
3804void
3805bfa_sfp_memclaim(struct bfa_sfp_s *sfp, u8 *dm_kva, u64 dm_pa)
3806{
3807 sfp->dbuf_kva = dm_kva;
3808 sfp->dbuf_pa = dm_pa;
3809 memset(sfp->dbuf_kva, 0, sizeof(struct sfp_mem_s));
3810
3811 dm_kva += BFA_ROUNDUP(sizeof(struct sfp_mem_s), BFA_DMA_ALIGN_SZ);
3812 dm_pa += BFA_ROUNDUP(sizeof(struct sfp_mem_s), BFA_DMA_ALIGN_SZ);
3813}
3814
3815/*
3816 * Show SFP eeprom content
3817 *
3818 * @param[in] sfp - bfa sfp module
3819 *
3820 * @param[out] sfpmem - sfp eeprom data
3821 *
3822 */
3823bfa_status_t
3824bfa_sfp_show(struct bfa_sfp_s *sfp, struct sfp_mem_s *sfpmem,
3825 bfa_cb_sfp_t cbfn, void *cbarg)
3826{
3827
3828 if (!bfa_ioc_is_operational(sfp->ioc)) {
3829 bfa_trc(sfp, 0);
3830 return BFA_STATUS_IOC_NON_OP;
3831 }
3832
3833 if (sfp->lock) {
3834 bfa_trc(sfp, 0);
3835 return BFA_STATUS_DEVBUSY;
3836 }
3837
3838 sfp->cbfn = cbfn;
3839 sfp->cbarg = cbarg;
3840 sfp->sfpmem = sfpmem;
3841
3842 bfa_sfp_getdata(sfp, BFI_SFP_MEM_DIAGEXT);
3843 return BFA_STATUS_OK;
3844}
3845
3846/*
3847 * Return SFP Media type
3848 *
3849 * @param[in] sfp - bfa sfp module
3850 *
3851 * @param[out] media - port speed from user
3852 * @param[out] media - media type detected by the SFP module
3853 */
3854bfa_status_t
3855bfa_sfp_media(struct bfa_sfp_s *sfp, enum bfa_defs_sfp_media_e *media,
3856 bfa_cb_sfp_t cbfn, void *cbarg)
3857{
3858 if (!bfa_ioc_is_operational(sfp->ioc)) {
3859 bfa_trc(sfp, 0);
3860 return BFA_STATUS_IOC_NON_OP;
3861 }
3862
3863 sfp->media = media;
3864 if (sfp->state == BFA_SFP_STATE_INIT) {
3865 if (sfp->state_query_lock) {
3866 bfa_trc(sfp, 0);
3867 return BFA_STATUS_DEVBUSY;
3868 } else {
3869 sfp->state_query_cbfn = cbfn;
3870 sfp->state_query_cbarg = cbarg;
3871 bfa_sfp_state_query(sfp);
3872 return BFA_STATUS_SFP_NOT_READY;
3873 }
3874 }
3875
3876 bfa_sfp_media_get(sfp);
3877 return BFA_STATUS_OK;
3878}
3879
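/*
 * Illustrative sketch (not part of the driver build): querying the SFP media
 * type. When the module is still in the INIT state the call returns
 * BFA_STATUS_SFP_NOT_READY and the result is delivered later through the
 * callback. The callback signature is assumed to match bfa_cb_sfp_t; the
 * names used here are placeholders for the example.
 */
#if 0
static void
example_sfp_media_done(void *cbarg, bfa_status_t status)
{
    /* the media pointer passed to bfa_sfp_media() is now valid */
}

static void
example_sfp_media_query(struct bfa_sfp_s *sfp,
            enum bfa_defs_sfp_media_e *media)
{
    bfa_status_t status;

    status = bfa_sfp_media(sfp, media, example_sfp_media_done, NULL);
    if (status == BFA_STATUS_OK) {
        /* *media was filled in synchronously */
    }
}
#endif
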
3880/*
3881 * Check if user set port speed is allowed by the SFP
3882 *
3883 * @param[in] sfp - bfa sfp module
3884 * @param[in] portspeed - port speed from user
3885 *
3886 */
3887bfa_status_t
3888bfa_sfp_speed(struct bfa_sfp_s *sfp, enum bfa_port_speed portspeed,
3889 bfa_cb_sfp_t cbfn, void *cbarg)
3890{
3891 WARN_ON(portspeed == BFA_PORT_SPEED_UNKNOWN);
3892
3893 if (!bfa_ioc_is_operational(sfp->ioc))
3894 return BFA_STATUS_IOC_NON_OP;
3895
3896 /* For Mezz card, all speed is allowed */
3897 if (bfa_mfg_is_mezz(sfp->ioc->attr->card_type))
3898 return BFA_STATUS_OK;
3899
3900 /* Check SFP state */
3901 sfp->portspeed = portspeed;
3902 if (sfp->state == BFA_SFP_STATE_INIT) {
3903 if (sfp->state_query_lock) {
3904 bfa_trc(sfp, 0);
3905 return BFA_STATUS_DEVBUSY;
3906 } else {
3907 sfp->state_query_cbfn = cbfn;
3908 sfp->state_query_cbarg = cbarg;
3909 bfa_sfp_state_query(sfp);
3910 return BFA_STATUS_SFP_NOT_READY;
3911 }
3912 }
3913
3914 if (sfp->state == BFA_SFP_STATE_REMOVED ||
3915 sfp->state == BFA_SFP_STATE_FAILED) {
3916 bfa_trc(sfp, sfp->state);
3917 return BFA_STATUS_NO_SFP_DEV;
3918 }
3919
3920 if (sfp->state == BFA_SFP_STATE_INSERTED) {
3921 bfa_trc(sfp, sfp->state);
3922 return BFA_STATUS_DEVBUSY; /* sfp is reading data */
3923 }
3924
3925 /* For eloopback, all speed is allowed */
3926 if (sfp->is_elb)
3927 return BFA_STATUS_OK;
3928
3929 return bfa_sfp_speed_valid(sfp, portspeed);
3930}
Krishna Gudipati5a54b1d2011-06-24 20:27:13 -07003931
3932/*
3933 * Flash module specific
3934 */
3935
3936/*
3937 * The FLASH DMA buffer should be big enough to hold both the MFG block
3938 * and the ASIC block (64KB) at the same time, and should be 2KB aligned
3939 * so that a write segment does not cross a sector boundary.
3940 */
3941#define BFA_FLASH_SEG_SZ 2048
3942#define BFA_FLASH_DMA_BUF_SZ \
3943 BFA_ROUNDUP(0x010000 + sizeof(struct bfa_mfg_block_s), BFA_FLASH_SEG_SZ)
3944
3945static void
Krishna Gudipati7826f302011-07-20 16:59:13 -07003946bfa_flash_aen_audit_post(struct bfa_ioc_s *ioc, enum bfa_audit_aen_event event,
3947 int inst, int type)
3948{
3949 struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad;
3950 struct bfa_aen_entry_s *aen_entry;
3951
3952 bfad_get_aen_entry(bfad, aen_entry);
3953 if (!aen_entry)
3954 return;
3955
3956 aen_entry->aen_data.audit.pwwn = ioc->attr->pwwn;
3957 aen_entry->aen_data.audit.partition_inst = inst;
3958 aen_entry->aen_data.audit.partition_type = type;
3959
3960 /* Send the AEN notification */
3961 bfad_im_post_vendor_event(aen_entry, bfad, ++ioc->ioc_aen_seq,
3962 BFA_AEN_CAT_AUDIT, event);
3963}
3964
3965static void
Krishna Gudipati5a54b1d2011-06-24 20:27:13 -07003966bfa_flash_cb(struct bfa_flash_s *flash)
3967{
3968 flash->op_busy = 0;
3969 if (flash->cbfn)
3970 flash->cbfn(flash->cbarg, flash->status);
3971}
3972
3973static void
3974bfa_flash_notify(void *cbarg, enum bfa_ioc_event_e event)
3975{
3976 struct bfa_flash_s *flash = cbarg;
3977
3978 bfa_trc(flash, event);
3979 switch (event) {
3980 case BFA_IOC_E_DISABLED:
3981 case BFA_IOC_E_FAILED:
3982 if (flash->op_busy) {
3983 flash->status = BFA_STATUS_IOC_FAILURE;
3984 flash->cbfn(flash->cbarg, flash->status);
3985 flash->op_busy = 0;
3986 }
3987 break;
3988
3989 default:
3990 break;
3991 }
3992}
3993
3994/*
3995 * Send flash attribute query request.
3996 *
3997 * @param[in] cbarg - callback argument
3998 */
3999static void
4000bfa_flash_query_send(void *cbarg)
4001{
4002 struct bfa_flash_s *flash = cbarg;
4003 struct bfi_flash_query_req_s *msg =
4004 (struct bfi_flash_query_req_s *) flash->mb.msg;
4005
4006 bfi_h2i_set(msg->mh, BFI_MC_FLASH, BFI_FLASH_H2I_QUERY_REQ,
4007 bfa_ioc_portid(flash->ioc));
4008 bfa_alen_set(&msg->alen, sizeof(struct bfa_flash_attr_s),
4009 flash->dbuf_pa);
4010 bfa_ioc_mbox_queue(flash->ioc, &flash->mb);
4011}
4012
4013/*
4014 * Send flash write request.
4015 *
4016 * @param[in] flash - flash structure
4017 */
4018static void
4019bfa_flash_write_send(struct bfa_flash_s *flash)
4020{
4021 struct bfi_flash_write_req_s *msg =
4022 (struct bfi_flash_write_req_s *) flash->mb.msg;
4023 u32 len;
4024
4025 msg->type = be32_to_cpu(flash->type);
4026 msg->instance = flash->instance;
4027 msg->offset = be32_to_cpu(flash->addr_off + flash->offset);
4028 len = (flash->residue < BFA_FLASH_DMA_BUF_SZ) ?
4029 flash->residue : BFA_FLASH_DMA_BUF_SZ;
4030 msg->length = be32_to_cpu(len);
4031
4032 /* indicate if it's the last msg of the whole write operation */
4033 msg->last = (len == flash->residue) ? 1 : 0;
4034
4035 bfi_h2i_set(msg->mh, BFI_MC_FLASH, BFI_FLASH_H2I_WRITE_REQ,
4036 bfa_ioc_portid(flash->ioc));
4037 bfa_alen_set(&msg->alen, len, flash->dbuf_pa);
4038 memcpy(flash->dbuf_kva, flash->ubuf + flash->offset, len);
4039 bfa_ioc_mbox_queue(flash->ioc, &flash->mb);
4040
4041 flash->residue -= len;
4042 flash->offset += len;
4043}
4044
4045/*
4046 * Send flash read request.
4047 *
4048 * @param[in] cbarg - callback argument
4049 */
4050static void
4051bfa_flash_read_send(void *cbarg)
4052{
4053 struct bfa_flash_s *flash = cbarg;
4054 struct bfi_flash_read_req_s *msg =
4055 (struct bfi_flash_read_req_s *) flash->mb.msg;
4056 u32 len;
4057
4058 msg->type = be32_to_cpu(flash->type);
4059 msg->instance = flash->instance;
4060 msg->offset = be32_to_cpu(flash->addr_off + flash->offset);
4061 len = (flash->residue < BFA_FLASH_DMA_BUF_SZ) ?
4062 flash->residue : BFA_FLASH_DMA_BUF_SZ;
4063 msg->length = be32_to_cpu(len);
4064 bfi_h2i_set(msg->mh, BFI_MC_FLASH, BFI_FLASH_H2I_READ_REQ,
4065 bfa_ioc_portid(flash->ioc));
4066 bfa_alen_set(&msg->alen, len, flash->dbuf_pa);
4067 bfa_ioc_mbox_queue(flash->ioc, &flash->mb);
4068}
4069
4070/*
4071 * Send flash erase request.
4072 *
4073 * @param[in] cbarg - callback argument
4074 */
4075static void
4076bfa_flash_erase_send(void *cbarg)
4077{
4078 struct bfa_flash_s *flash = cbarg;
4079 struct bfi_flash_erase_req_s *msg =
4080 (struct bfi_flash_erase_req_s *) flash->mb.msg;
4081
4082 msg->type = be32_to_cpu(flash->type);
4083 msg->instance = flash->instance;
4084 bfi_h2i_set(msg->mh, BFI_MC_FLASH, BFI_FLASH_H2I_ERASE_REQ,
4085 bfa_ioc_portid(flash->ioc));
4086 bfa_ioc_mbox_queue(flash->ioc, &flash->mb);
4087}
4088
4089/*
4090 * Process flash response messages upon receiving interrupts.
4091 *
4092 * @param[in] flasharg - flash structure
4093 * @param[in] msg - message structure
4094 */
4095static void
4096bfa_flash_intr(void *flasharg, struct bfi_mbmsg_s *msg)
4097{
4098 struct bfa_flash_s *flash = flasharg;
4099 u32 status;
4100
4101 union {
4102 struct bfi_flash_query_rsp_s *query;
4103 struct bfi_flash_erase_rsp_s *erase;
4104 struct bfi_flash_write_rsp_s *write;
4105 struct bfi_flash_read_rsp_s *read;
Krishna Gudipati7826f302011-07-20 16:59:13 -07004106 struct bfi_flash_event_s *event;
Krishna Gudipati5a54b1d2011-06-24 20:27:13 -07004107 struct bfi_mbmsg_s *msg;
4108 } m;
4109
4110 m.msg = msg;
4111 bfa_trc(flash, msg->mh.msg_id);
4112
4113 if (!flash->op_busy && msg->mh.msg_id != BFI_FLASH_I2H_EVENT) {
4114 /* receiving response after ioc failure */
4115 bfa_trc(flash, 0x9999);
4116 return;
4117 }
4118
4119 switch (msg->mh.msg_id) {
4120 case BFI_FLASH_I2H_QUERY_RSP:
4121 status = be32_to_cpu(m.query->status);
4122 bfa_trc(flash, status);
4123 if (status == BFA_STATUS_OK) {
4124 u32 i;
4125 struct bfa_flash_attr_s *attr, *f;
4126
4127 attr = (struct bfa_flash_attr_s *) flash->ubuf;
4128 f = (struct bfa_flash_attr_s *) flash->dbuf_kva;
4129 attr->status = be32_to_cpu(f->status);
4130 attr->npart = be32_to_cpu(f->npart);
4131 bfa_trc(flash, attr->status);
4132 bfa_trc(flash, attr->npart);
4133 for (i = 0; i < attr->npart; i++) {
4134 attr->part[i].part_type =
4135 be32_to_cpu(f->part[i].part_type);
4136 attr->part[i].part_instance =
4137 be32_to_cpu(f->part[i].part_instance);
4138 attr->part[i].part_off =
4139 be32_to_cpu(f->part[i].part_off);
4140 attr->part[i].part_size =
4141 be32_to_cpu(f->part[i].part_size);
4142 attr->part[i].part_len =
4143 be32_to_cpu(f->part[i].part_len);
4144 attr->part[i].part_status =
4145 be32_to_cpu(f->part[i].part_status);
4146 }
4147 }
4148 flash->status = status;
4149 bfa_flash_cb(flash);
4150 break;
4151 case BFI_FLASH_I2H_ERASE_RSP:
4152 status = be32_to_cpu(m.erase->status);
4153 bfa_trc(flash, status);
4154 flash->status = status;
4155 bfa_flash_cb(flash);
4156 break;
4157 case BFI_FLASH_I2H_WRITE_RSP:
4158 status = be32_to_cpu(m.write->status);
4159 bfa_trc(flash, status);
4160 if (status != BFA_STATUS_OK || flash->residue == 0) {
4161 flash->status = status;
4162 bfa_flash_cb(flash);
4163 } else {
4164 bfa_trc(flash, flash->offset);
4165 bfa_flash_write_send(flash);
4166 }
4167 break;
4168 case BFI_FLASH_I2H_READ_RSP:
4169 status = be32_to_cpu(m.read->status);
4170 bfa_trc(flash, status);
4171 if (status != BFA_STATUS_OK) {
4172 flash->status = status;
4173 bfa_flash_cb(flash);
4174 } else {
4175 u32 len = be32_to_cpu(m.read->length);
4176 bfa_trc(flash, flash->offset);
4177 bfa_trc(flash, len);
4178 memcpy(flash->ubuf + flash->offset,
4179 flash->dbuf_kva, len);
4180 flash->residue -= len;
4181 flash->offset += len;
4182 if (flash->residue == 0) {
4183 flash->status = status;
4184 bfa_flash_cb(flash);
4185 } else
4186 bfa_flash_read_send(flash);
4187 }
4188 break;
4189 case BFI_FLASH_I2H_BOOT_VER_RSP:
Krishna Gudipati7826f302011-07-20 16:59:13 -07004190 break;
Krishna Gudipati5a54b1d2011-06-24 20:27:13 -07004191 case BFI_FLASH_I2H_EVENT:
Krishna Gudipati7826f302011-07-20 16:59:13 -07004192 status = be32_to_cpu(m.event->status);
4193 bfa_trc(flash, status);
4194 if (status == BFA_STATUS_BAD_FWCFG)
4195 bfa_ioc_aen_post(flash->ioc, BFA_IOC_AEN_FWCFG_ERROR);
4196 else if (status == BFA_STATUS_INVALID_VENDOR) {
4197 u32 param;
4198 param = be32_to_cpu(m.event->param);
4199 bfa_trc(flash, param);
4200 bfa_ioc_aen_post(flash->ioc,
4201 BFA_IOC_AEN_INVALID_VENDOR);
4202 }
Krishna Gudipati5a54b1d2011-06-24 20:27:13 -07004203 break;
4204
4205 default:
4206 WARN_ON(1);
4207 }
4208}
4209
4210/*
4211 * Flash memory info API.
4212 *
4213 * @param[in] mincfg - minimal cfg variable
4214 */
4215u32
4216bfa_flash_meminfo(bfa_boolean_t mincfg)
4217{
4218 /* min driver doesn't need flash */
4219 if (mincfg)
4220 return 0;
4221 return BFA_ROUNDUP(BFA_FLASH_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
4222}
4223
4224/*
4225 * Flash attach API.
4226 *
4227 * @param[in] flash - flash structure
4228 * @param[in] ioc - ioc structure
4229 * @param[in] dev - device structure
4230 * @param[in] trcmod - trace module
4231 * @param[in] mincfg - minimal cfg variable
4232 */
4233void
4234bfa_flash_attach(struct bfa_flash_s *flash, struct bfa_ioc_s *ioc, void *dev,
4235 struct bfa_trc_mod_s *trcmod, bfa_boolean_t mincfg)
4236{
4237 flash->ioc = ioc;
4238 flash->trcmod = trcmod;
4239 flash->cbfn = NULL;
4240 flash->cbarg = NULL;
4241 flash->op_busy = 0;
4242
4243 bfa_ioc_mbox_regisr(flash->ioc, BFI_MC_FLASH, bfa_flash_intr, flash);
4244 bfa_q_qe_init(&flash->ioc_notify);
4245 bfa_ioc_notify_init(&flash->ioc_notify, bfa_flash_notify, flash);
4246 list_add_tail(&flash->ioc_notify.qe, &flash->ioc->notify_q);
4247
4248 /* min driver doesn't need flash */
4249 if (mincfg) {
4250 flash->dbuf_kva = NULL;
4251 flash->dbuf_pa = 0;
4252 }
4253}
4254
4255/*
4256 * Claim memory for flash
4257 *
4258 * @param[in] flash - flash structure
4259 * @param[in] dm_kva - pointer to virtual memory address
4260 * @param[in] dm_pa - physical memory address
4261 * @param[in] mincfg - minimal cfg variable
4262 */
4263void
4264bfa_flash_memclaim(struct bfa_flash_s *flash, u8 *dm_kva, u64 dm_pa,
4265 bfa_boolean_t mincfg)
4266{
4267 if (mincfg)
4268 return;
4269
4270 flash->dbuf_kva = dm_kva;
4271 flash->dbuf_pa = dm_pa;
4272 memset(flash->dbuf_kva, 0, BFA_FLASH_DMA_BUF_SZ);
4273 dm_kva += BFA_ROUNDUP(BFA_FLASH_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
4274 dm_pa += BFA_ROUNDUP(BFA_FLASH_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
4275}
4276
4277/*
4278 * Get flash attribute.
4279 *
4280 * @param[in] flash - flash structure
4281 * @param[in] attr - flash attribute structure
4282 * @param[in] cbfn - callback function
4283 * @param[in] cbarg - callback argument
4284 *
4285 * Return status.
4286 */
4287bfa_status_t
4288bfa_flash_get_attr(struct bfa_flash_s *flash, struct bfa_flash_attr_s *attr,
4289 bfa_cb_flash_t cbfn, void *cbarg)
4290{
4291 bfa_trc(flash, BFI_FLASH_H2I_QUERY_REQ);
4292
4293 if (!bfa_ioc_is_operational(flash->ioc))
4294 return BFA_STATUS_IOC_NON_OP;
4295
4296 if (flash->op_busy) {
4297 bfa_trc(flash, flash->op_busy);
4298 return BFA_STATUS_DEVBUSY;
4299 }
4300
4301 flash->op_busy = 1;
4302 flash->cbfn = cbfn;
4303 flash->cbarg = cbarg;
4304 flash->ubuf = (u8 *) attr;
4305 bfa_flash_query_send(flash);
4306
4307 return BFA_STATUS_OK;
4308}
4309
4310/*
4311 * Erase flash partition.
4312 *
4313 * @param[in] flash - flash structure
4314 * @param[in] type - flash partition type
4315 * @param[in] instance - flash partition instance
4316 * @param[in] cbfn - callback function
4317 * @param[in] cbarg - callback argument
4318 *
4319 * Return status.
4320 */
4321bfa_status_t
4322bfa_flash_erase_part(struct bfa_flash_s *flash, enum bfa_flash_part_type type,
4323 u8 instance, bfa_cb_flash_t cbfn, void *cbarg)
4324{
4325 bfa_trc(flash, BFI_FLASH_H2I_ERASE_REQ);
4326 bfa_trc(flash, type);
4327 bfa_trc(flash, instance);
4328
4329 if (!bfa_ioc_is_operational(flash->ioc))
4330 return BFA_STATUS_IOC_NON_OP;
4331
4332 if (flash->op_busy) {
4333 bfa_trc(flash, flash->op_busy);
4334 return BFA_STATUS_DEVBUSY;
4335 }
4336
4337 flash->op_busy = 1;
4338 flash->cbfn = cbfn;
4339 flash->cbarg = cbarg;
4340 flash->type = type;
4341 flash->instance = instance;
4342
4343 bfa_flash_erase_send(flash);
Krishna Gudipati7826f302011-07-20 16:59:13 -07004344 bfa_flash_aen_audit_post(flash->ioc, BFA_AUDIT_AEN_FLASH_ERASE,
4345 instance, type);
Krishna Gudipati5a54b1d2011-06-24 20:27:13 -07004346 return BFA_STATUS_OK;
4347}
4348
4349/*
4350 * Update flash partition.
4351 *
4352 * @param[in] flash - flash structure
4353 * @param[in] type - flash partition type
4354 * @param[in] instance - flash partition instance
4355 * @param[in] buf - update data buffer
4356 * @param[in] len - data buffer length
4357 * @param[in] offset - offset relative to the partition starting address
4358 * @param[in] cbfn - callback function
4359 * @param[in] cbarg - callback argument
4360 *
4361 * Return status.
4362 */
4363bfa_status_t
4364bfa_flash_update_part(struct bfa_flash_s *flash, enum bfa_flash_part_type type,
4365 u8 instance, void *buf, u32 len, u32 offset,
4366 bfa_cb_flash_t cbfn, void *cbarg)
4367{
4368 bfa_trc(flash, BFI_FLASH_H2I_WRITE_REQ);
4369 bfa_trc(flash, type);
4370 bfa_trc(flash, instance);
4371 bfa_trc(flash, len);
4372 bfa_trc(flash, offset);
4373
4374 if (!bfa_ioc_is_operational(flash->ioc))
4375 return BFA_STATUS_IOC_NON_OP;
4376
4377 /*
4378 * 'len' must be on a word (4-byte) boundary
4379 * 'offset' must be on a sector (16KB) boundary
4380 */
4381 if (!len || (len & 0x03) || (offset & 0x00003FFF))
4382 return BFA_STATUS_FLASH_BAD_LEN;
4383
4384 if (type == BFA_FLASH_PART_MFG)
4385 return BFA_STATUS_EINVAL;
4386
4387 if (flash->op_busy) {
4388 bfa_trc(flash, flash->op_busy);
4389 return BFA_STATUS_DEVBUSY;
4390 }
4391
4392 flash->op_busy = 1;
4393 flash->cbfn = cbfn;
4394 flash->cbarg = cbarg;
4395 flash->type = type;
4396 flash->instance = instance;
4397 flash->residue = len;
4398 flash->offset = 0;
4399 flash->addr_off = offset;
4400 flash->ubuf = buf;
4401
4402 bfa_flash_write_send(flash);
4403 return BFA_STATUS_OK;
4404}
4405
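/*
 * Illustrative sketch (not part of the driver build): updating a flash
 * partition. 'len' must be a multiple of 4 and 'offset' a multiple of 16KB;
 * completion is reported through the callback once the last chunk has been
 * written. The callback signature is assumed to match bfa_cb_flash_t; the
 * names used here are placeholders for the example.
 */
#if 0
static void
example_flash_update_done(void *cbarg, bfa_status_t status)
{
    /* BFA_STATUS_OK means every chunk of the image was written */
}

static bfa_status_t
example_flash_update(struct bfa_flash_s *flash, enum bfa_flash_part_type type,
            u8 instance, void *img, u32 img_len)
{
    /* whole image, starting at offset 0 of the partition */
    return bfa_flash_update_part(flash, type, instance, img, img_len, 0,
            example_flash_update_done, NULL);
}
#endif
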
4406/*
4407 * Read flash partition.
4408 *
4409 * @param[in] flash - flash structure
4410 * @param[in] type - flash partition type
4411 * @param[in] instance - flash partition instance
4412 * @param[in] buf - read data buffer
4413 * @param[in] len - data buffer length
4414 * @param[in] offset - offset relative to the partition starting address
4415 * @param[in] cbfn - callback function
4416 * @param[in] cbarg - callback argument
4417 *
4418 * Return status.
4419 */
4420bfa_status_t
4421bfa_flash_read_part(struct bfa_flash_s *flash, enum bfa_flash_part_type type,
4422 u8 instance, void *buf, u32 len, u32 offset,
4423 bfa_cb_flash_t cbfn, void *cbarg)
4424{
4425 bfa_trc(flash, BFI_FLASH_H2I_READ_REQ);
4426 bfa_trc(flash, type);
4427 bfa_trc(flash, instance);
4428 bfa_trc(flash, len);
4429 bfa_trc(flash, offset);
4430
4431 if (!bfa_ioc_is_operational(flash->ioc))
4432 return BFA_STATUS_IOC_NON_OP;
4433
4434 /*
4435 * 'len' must be in word (4-byte) boundary
4436 * 'len' must be on a word (4-byte) boundary
4437 * 'offset' must be on a sector (16KB) boundary
4438 if (!len || (len & 0x03) || (offset & 0x00003FFF))
4439 return BFA_STATUS_FLASH_BAD_LEN;
4440
4441 if (flash->op_busy) {
4442 bfa_trc(flash, flash->op_busy);
4443 return BFA_STATUS_DEVBUSY;
4444 }
4445
4446 flash->op_busy = 1;
4447 flash->cbfn = cbfn;
4448 flash->cbarg = cbarg;
4449 flash->type = type;
4450 flash->instance = instance;
4451 flash->residue = len;
4452 flash->offset = 0;
4453 flash->addr_off = offset;
4454 flash->ubuf = buf;
4455 bfa_flash_read_send(flash);
4456
4457 return BFA_STATUS_OK;
4458}
Krishna Gudipati3d7fc662011-06-24 20:28:17 -07004459
4460/*
4461 * DIAG module specific
4462 */
4463
4464#define BFA_DIAG_MEMTEST_TOV 50000 /* memtest timeout in msec */
Krishna Gudipatibd5a0262012-03-13 17:41:02 -07004465#define CT2_BFA_DIAG_MEMTEST_TOV (9*30*1000) /* 4.5 min */
Krishna Gudipati3d7fc662011-06-24 20:28:17 -07004466
4467/* IOC event handler */
4468static void
4469bfa_diag_notify(void *diag_arg, enum bfa_ioc_event_e event)
4470{
4471 struct bfa_diag_s *diag = diag_arg;
4472
4473 bfa_trc(diag, event);
4474 bfa_trc(diag, diag->block);
4475 bfa_trc(diag, diag->fwping.lock);
4476 bfa_trc(diag, diag->tsensor.lock);
4477
4478 switch (event) {
4479 case BFA_IOC_E_DISABLED:
4480 case BFA_IOC_E_FAILED:
4481 if (diag->fwping.lock) {
4482 diag->fwping.status = BFA_STATUS_IOC_FAILURE;
4483 diag->fwping.cbfn(diag->fwping.cbarg,
4484 diag->fwping.status);
4485 diag->fwping.lock = 0;
4486 }
4487
4488 if (diag->tsensor.lock) {
4489 diag->tsensor.status = BFA_STATUS_IOC_FAILURE;
4490 diag->tsensor.cbfn(diag->tsensor.cbarg,
4491 diag->tsensor.status);
4492 diag->tsensor.lock = 0;
4493 }
4494
4495 if (diag->block) {
4496 if (diag->timer_active) {
4497 bfa_timer_stop(&diag->timer);
4498 diag->timer_active = 0;
4499 }
4500
4501 diag->status = BFA_STATUS_IOC_FAILURE;
4502 diag->cbfn(diag->cbarg, diag->status);
4503 diag->block = 0;
4504 }
4505 break;
4506
4507 default:
4508 break;
4509 }
4510}
4511
4512static void
4513bfa_diag_memtest_done(void *cbarg)
4514{
4515 struct bfa_diag_s *diag = cbarg;
4516 struct bfa_ioc_s *ioc = diag->ioc;
4517 struct bfa_diag_memtest_result *res = diag->result;
4518 u32 loff = BFI_BOOT_MEMTEST_RES_ADDR;
4519 u32 pgnum, pgoff, i;
4520
4521 pgnum = PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, loff);
4522 pgoff = PSS_SMEM_PGOFF(loff);
4523
4524 writel(pgnum, ioc->ioc_regs.host_page_num_fn);
4525
4526 for (i = 0; i < (sizeof(struct bfa_diag_memtest_result) /
4527 sizeof(u32)); i++) {
4528 /* read test result from smem */
4529 *((u32 *) res + i) =
4530 bfa_mem_read(ioc->ioc_regs.smem_page_start, loff);
4531 loff += sizeof(u32);
4532 }
4533
4534 /* Reset IOC fwstates to BFI_IOC_UNINIT */
4535 bfa_ioc_reset_fwstate(ioc);
4536
4537 res->status = swab32(res->status);
4538 bfa_trc(diag, res->status);
4539
4540 if (res->status == BFI_BOOT_MEMTEST_RES_SIG)
4541 diag->status = BFA_STATUS_OK;
4542 else {
4543 diag->status = BFA_STATUS_MEMTEST_FAILED;
4544 res->addr = swab32(res->addr);
4545 res->exp = swab32(res->exp);
4546 res->act = swab32(res->act);
4547 res->err_status = swab32(res->err_status);
4548 res->err_status1 = swab32(res->err_status1);
4549 res->err_addr = swab32(res->err_addr);
4550 bfa_trc(diag, res->addr);
4551 bfa_trc(diag, res->exp);
4552 bfa_trc(diag, res->act);
4553 bfa_trc(diag, res->err_status);
4554 bfa_trc(diag, res->err_status1);
4555 bfa_trc(diag, res->err_addr);
4556 }
4557 diag->timer_active = 0;
4558 diag->cbfn(diag->cbarg, diag->status);
4559 diag->block = 0;
4560}
4561
4562/*
4563 * Firmware ping
4564 */
4565
4566/*
4567 * Perform DMA test directly
4568 */
4569static void
4570diag_fwping_send(struct bfa_diag_s *diag)
4571{
4572 struct bfi_diag_fwping_req_s *fwping_req;
4573 u32 i;
4574
4575 bfa_trc(diag, diag->fwping.dbuf_pa);
4576
4577 /* fill DMA area with pattern */
4578 for (i = 0; i < (BFI_DIAG_DMA_BUF_SZ >> 2); i++)
4579 *((u32 *)diag->fwping.dbuf_kva + i) = diag->fwping.data;
4580
4581 /* Fill mbox msg */
4582 fwping_req = (struct bfi_diag_fwping_req_s *)diag->fwping.mbcmd.msg;
4583
4584 /* Setup SG list */
4585 bfa_alen_set(&fwping_req->alen, BFI_DIAG_DMA_BUF_SZ,
4586 diag->fwping.dbuf_pa);
4587 /* Set up dma count */
4588 fwping_req->count = cpu_to_be32(diag->fwping.count);
4589 /* Set up data pattern */
4590 fwping_req->data = diag->fwping.data;
4591
4592 /* build host command */
4593 bfi_h2i_set(fwping_req->mh, BFI_MC_DIAG, BFI_DIAG_H2I_FWPING,
4594 bfa_ioc_portid(diag->ioc));
4595
4596 /* send mbox cmd */
4597 bfa_ioc_mbox_queue(diag->ioc, &diag->fwping.mbcmd);
4598}
4599
4600static void
4601diag_fwping_comp(struct bfa_diag_s *diag,
4602 struct bfi_diag_fwping_rsp_s *diag_rsp)
4603{
4604 u32 rsp_data = diag_rsp->data;
4605 u8 rsp_dma_status = diag_rsp->dma_status;
4606
4607 bfa_trc(diag, rsp_data);
4608 bfa_trc(diag, rsp_dma_status);
4609
4610 if (rsp_dma_status == BFA_STATUS_OK) {
4611 u32 i, pat;
4612 pat = (diag->fwping.count & 0x1) ? ~(diag->fwping.data) :
4613 diag->fwping.data;
4614 /* Check mbox data */
4615 if (diag->fwping.data != rsp_data) {
4616 bfa_trc(diag, rsp_data);
4617 diag->fwping.result->dmastatus =
4618 BFA_STATUS_DATACORRUPTED;
4619 diag->fwping.status = BFA_STATUS_DATACORRUPTED;
4620 diag->fwping.cbfn(diag->fwping.cbarg,
4621 diag->fwping.status);
4622 diag->fwping.lock = 0;
4623 return;
4624 }
4625 /* Check dma pattern */
4626 for (i = 0; i < (BFI_DIAG_DMA_BUF_SZ >> 2); i++) {
4627 if (*((u32 *)diag->fwping.dbuf_kva + i) != pat) {
4628 bfa_trc(diag, i);
4629 bfa_trc(diag, pat);
4630 bfa_trc(diag,
4631 *((u32 *)diag->fwping.dbuf_kva + i));
4632 diag->fwping.result->dmastatus =
4633 BFA_STATUS_DATACORRUPTED;
4634 diag->fwping.status = BFA_STATUS_DATACORRUPTED;
4635 diag->fwping.cbfn(diag->fwping.cbarg,
4636 diag->fwping.status);
4637 diag->fwping.lock = 0;
4638 return;
4639 }
4640 }
4641 diag->fwping.result->dmastatus = BFA_STATUS_OK;
4642 diag->fwping.status = BFA_STATUS_OK;
4643 diag->fwping.cbfn(diag->fwping.cbarg, diag->fwping.status);
4644 diag->fwping.lock = 0;
4645 } else {
4646 diag->fwping.status = BFA_STATUS_HDMA_FAILED;
4647 diag->fwping.cbfn(diag->fwping.cbarg, diag->fwping.status);
4648 diag->fwping.lock = 0;
4649 }
4650}
4651
4652/*
4653 * Temperature Sensor
4654 */
4655
4656static void
4657diag_tempsensor_send(struct bfa_diag_s *diag)
4658{
4659 struct bfi_diag_ts_req_s *msg;
4660
4661 msg = (struct bfi_diag_ts_req_s *)diag->tsensor.mbcmd.msg;
4662 bfa_trc(diag, msg->temp);
4663 /* build host command */
4664 bfi_h2i_set(msg->mh, BFI_MC_DIAG, BFI_DIAG_H2I_TEMPSENSOR,
4665 bfa_ioc_portid(diag->ioc));
4666 /* send mbox cmd */
4667 bfa_ioc_mbox_queue(diag->ioc, &diag->tsensor.mbcmd);
4668}
4669
4670static void
4671diag_tempsensor_comp(struct bfa_diag_s *diag, bfi_diag_ts_rsp_t *rsp)
4672{
4673 if (!diag->tsensor.lock) {
4674 /* receiving response after ioc failure */
4675 bfa_trc(diag, diag->tsensor.lock);
4676 return;
4677 }
4678
4679 /*
4680 * The ASIC junction tempsensor is a register read operation,
4681 * so it will always return OK.
4682 */
4683 diag->tsensor.temp->temp = be16_to_cpu(rsp->temp);
4684 diag->tsensor.temp->ts_junc = rsp->ts_junc;
4685 diag->tsensor.temp->ts_brd = rsp->ts_brd;
4686 diag->tsensor.temp->status = BFA_STATUS_OK;
4687
4688 if (rsp->ts_brd) {
4689 if (rsp->status == BFA_STATUS_OK) {
4690 diag->tsensor.temp->brd_temp =
4691 be16_to_cpu(rsp->brd_temp);
4692 } else {
4693 bfa_trc(diag, rsp->status);
4694 diag->tsensor.temp->brd_temp = 0;
4695 diag->tsensor.temp->status = BFA_STATUS_DEVBUSY;
4696 }
4697 }
4698 bfa_trc(diag, rsp->ts_junc);
4699 bfa_trc(diag, rsp->temp);
4700 bfa_trc(diag, rsp->ts_brd);
4701 bfa_trc(diag, rsp->brd_temp);
4702 diag->tsensor.cbfn(diag->tsensor.cbarg, diag->tsensor.status);
4703 diag->tsensor.lock = 0;
4704}
4705
4706/*
4707 * LED Test command
4708 */
4709static void
4710diag_ledtest_send(struct bfa_diag_s *diag, struct bfa_diag_ledtest_s *ledtest)
4711{
4712 struct bfi_diag_ledtest_req_s *msg;
4713
4714 msg = (struct bfi_diag_ledtest_req_s *)diag->ledtest.mbcmd.msg;
4715 /* build host command */
4716 bfi_h2i_set(msg->mh, BFI_MC_DIAG, BFI_DIAG_H2I_LEDTEST,
4717 bfa_ioc_portid(diag->ioc));
4718
4719 /*
4720 * convert the freq from N blinks per 10 sec to
4721	 * crossbow ontime value. We do it here because division is needed.
4722 */
4723 if (ledtest->freq)
4724 ledtest->freq = 500 / ledtest->freq;
4725
4726 if (ledtest->freq == 0)
4727 ledtest->freq = 1;
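	/*
	 * e.g. a request of 10 blinks per 10 sec becomes an ontime value of
	 * 500 / 10 = 50; anything above 500 blinks clamps to 1.
	 */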
4728
4729 bfa_trc(diag, ledtest->freq);
4730 /* mcpy(&ledtest_req->req, ledtest, sizeof(bfa_diag_ledtest_t)); */
4731 msg->cmd = (u8) ledtest->cmd;
4732 msg->color = (u8) ledtest->color;
4733 msg->portid = bfa_ioc_portid(diag->ioc);
4734 msg->led = ledtest->led;
4735 msg->freq = cpu_to_be16(ledtest->freq);
4736
4737 /* send mbox cmd */
4738 bfa_ioc_mbox_queue(diag->ioc, &diag->ledtest.mbcmd);
4739}
4740
4741static void
4742diag_ledtest_comp(struct bfa_diag_s *diag, struct bfi_diag_ledtest_rsp_s *msg)
4743{
4744 bfa_trc(diag, diag->ledtest.lock);
4745 diag->ledtest.lock = BFA_FALSE;
4746 /* no bfa_cb_queue is needed because driver is not waiting */
4747}
4748
4749/*
4750 * Port beaconing
4751 */
4752static void
4753diag_portbeacon_send(struct bfa_diag_s *diag, bfa_boolean_t beacon, u32 sec)
4754{
4755 struct bfi_diag_portbeacon_req_s *msg;
4756
4757 msg = (struct bfi_diag_portbeacon_req_s *)diag->beacon.mbcmd.msg;
4758 /* build host command */
4759 bfi_h2i_set(msg->mh, BFI_MC_DIAG, BFI_DIAG_H2I_PORTBEACON,
4760 bfa_ioc_portid(diag->ioc));
4761 msg->beacon = beacon;
4762 msg->period = cpu_to_be32(sec);
4763 /* send mbox cmd */
4764 bfa_ioc_mbox_queue(diag->ioc, &diag->beacon.mbcmd);
4765}
4766
4767static void
4768diag_portbeacon_comp(struct bfa_diag_s *diag)
4769{
4770 bfa_trc(diag, diag->beacon.state);
4771 diag->beacon.state = BFA_FALSE;
4772 if (diag->cbfn_beacon)
4773 diag->cbfn_beacon(diag->dev, BFA_FALSE, diag->beacon.link_e2e);
4774}
4775
4776/*
4777 * Diag hmbox handler
4778 */
4779void
4780bfa_diag_intr(void *diagarg, struct bfi_mbmsg_s *msg)
4781{
4782 struct bfa_diag_s *diag = diagarg;
4783
4784 switch (msg->mh.msg_id) {
4785 case BFI_DIAG_I2H_PORTBEACON:
4786 diag_portbeacon_comp(diag);
4787 break;
4788 case BFI_DIAG_I2H_FWPING:
4789 diag_fwping_comp(diag, (struct bfi_diag_fwping_rsp_s *) msg);
4790 break;
4791 case BFI_DIAG_I2H_TEMPSENSOR:
4792 diag_tempsensor_comp(diag, (bfi_diag_ts_rsp_t *) msg);
4793 break;
4794 case BFI_DIAG_I2H_LEDTEST:
4795 diag_ledtest_comp(diag, (struct bfi_diag_ledtest_rsp_s *) msg);
4796 break;
4797 default:
4798 bfa_trc(diag, msg->mh.msg_id);
4799 WARN_ON(1);
4800 }
4801}
4802
4803/*
4804 * Gen RAM Test
4805 *
4806 * @param[in] *diag - diag data struct
4807 * @param[in] *memtest - mem test params input from upper layer,
4808 * @param[in] pattern - mem test pattern
4809 * @param[in] *result - mem test result
4810 * @param[in]	cbfn - mem test callback function
4811 * @param[in]	cbarg - callback function arg
4812 *
4813 * @param[out]
4814 */
4815bfa_status_t
4816bfa_diag_memtest(struct bfa_diag_s *diag, struct bfa_diag_memtest_s *memtest,
4817 u32 pattern, struct bfa_diag_memtest_result *result,
4818 bfa_cb_diag_t cbfn, void *cbarg)
4819{
4820	u32 memtest_tov;
4821
4822	bfa_trc(diag, pattern);
4823
4824 if (!bfa_ioc_adapter_is_disabled(diag->ioc))
4825 return BFA_STATUS_ADAPTER_ENABLED;
4826
4827 /* check to see if there is another destructive diag cmd running */
4828 if (diag->block) {
4829 bfa_trc(diag, diag->block);
4830 return BFA_STATUS_DEVBUSY;
4831 } else
4832 diag->block = 1;
4833
4834 diag->result = result;
4835 diag->cbfn = cbfn;
4836 diag->cbarg = cbarg;
4837
4838 /* download memtest code and take LPU0 out of reset */
4839 bfa_ioc_boot(diag->ioc, BFI_FWBOOT_TYPE_MEMTEST, BFI_FWBOOT_ENV_OS);
4840
4841	memtest_tov = (bfa_ioc_asic_gen(diag->ioc) == BFI_ASIC_GEN_CT2) ?
4842		       CT2_BFA_DIAG_MEMTEST_TOV : BFA_DIAG_MEMTEST_TOV;
4843	bfa_timer_begin(diag->ioc->timer_mod, &diag->timer,
4844			bfa_diag_memtest_done, diag, memtest_tov);
4845	diag->timer_active = 1;
4846 return BFA_STATUS_OK;
4847}
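/*
 * Illustrative sketch (not part of the driver): how an upper layer might
 * drive the RAM test. The drv_s structure, drv_memtest_done() and the
 * completion are hypothetical; bfa_cb_diag_t is assumed to be a
 * void (*)(void *cbarg, bfa_status_t status) callback, matching the way
 * the diag completions above invoke it.
 *
 *	static void drv_memtest_done(void *cbarg, bfa_status_t status)
 *	{
 *		struct drv_s *drv = cbarg;
 *
 *		drv->memtest_status = status;
 *		complete(&drv->memtest_comp);
 *	}
 *
 *	init_completion(&drv->memtest_comp);
 *	if (bfa_diag_memtest(&drv->diag, &drv->memtest, 0xa5a5a5a5,
 *			&drv->memtest_result, drv_memtest_done, drv) ==
 *			BFA_STATUS_OK)
 *		wait_for_completion(&drv->memtest_comp);
 */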
4848
4849/*
4850 * DIAG firmware ping command
4851 *
4852 * @param[in] *diag - diag data struct
4853 * @param[in] cnt - dma loop count for testing PCIE
4854 * @param[in]	data - data pattern to pass to the fw
4855 * @param[in]	*result - pointer to struct bfa_diag_results_fwping result
4856 * @param[in]	cbfn - callback function
4857 * @param[in]	*cbarg - callback function arg
4858 *
4859 * @param[out]
4860 */
4861bfa_status_t
4862bfa_diag_fwping(struct bfa_diag_s *diag, u32 cnt, u32 data,
4863 struct bfa_diag_results_fwping *result, bfa_cb_diag_t cbfn,
4864 void *cbarg)
4865{
4866 bfa_trc(diag, cnt);
4867 bfa_trc(diag, data);
4868
4869 if (!bfa_ioc_is_operational(diag->ioc))
4870 return BFA_STATUS_IOC_NON_OP;
4871
4872 if (bfa_asic_id_ct2(bfa_ioc_devid((diag->ioc))) &&
4873 ((diag->ioc)->clscode == BFI_PCIFN_CLASS_ETH))
4874 return BFA_STATUS_CMD_NOTSUPP;
4875
4876 /* check to see if there is another destructive diag cmd running */
4877 if (diag->block || diag->fwping.lock) {
4878 bfa_trc(diag, diag->block);
4879 bfa_trc(diag, diag->fwping.lock);
4880 return BFA_STATUS_DEVBUSY;
4881 }
4882
4883 /* Initialization */
4884 diag->fwping.lock = 1;
4885 diag->fwping.cbfn = cbfn;
4886 diag->fwping.cbarg = cbarg;
4887 diag->fwping.result = result;
4888 diag->fwping.data = data;
4889 diag->fwping.count = cnt;
4890
4891 /* Init test results */
4892 diag->fwping.result->data = 0;
4893 diag->fwping.result->status = BFA_STATUS_OK;
4894
4895 /* kick off the first ping */
4896 diag_fwping_send(diag);
4897 return BFA_STATUS_OK;
4898}
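/*
 * Illustrative sketch (not part of the driver): loop the firmware ping
 * ten times with a recognizable pattern. drv_fwping_done() and the drv_s
 * fields are hypothetical; a non-OK return means the IOC is not
 * operational, the command is unsupported on this personality, or another
 * destructive diag is running.
 *
 *	status = bfa_diag_fwping(&drv->diag, 10, 0xdeadbeef,
 *			&drv->fwping_result, drv_fwping_done, drv);
 *	if (status == BFA_STATUS_OK)
 *		wait_for_completion(&drv->fwping_comp);
 */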
4899
4900/*
4901 * Read Temperature Sensor
4902 *
4903 * @param[in] *diag - diag data struct
4904 * @param[in]	*result - pointer to struct bfa_diag_results_tempsensor_s result
4905 * @param[in]	cbfn - callback function
4906 * @param[in]	*cbarg - callback function arg
4907 *
4908 * @param[out]
4909 */
4910bfa_status_t
4911bfa_diag_tsensor_query(struct bfa_diag_s *diag,
4912 struct bfa_diag_results_tempsensor_s *result,
4913 bfa_cb_diag_t cbfn, void *cbarg)
4914{
4915 /* check to see if there is a destructive diag cmd running */
4916 if (diag->block || diag->tsensor.lock) {
4917 bfa_trc(diag, diag->block);
4918 bfa_trc(diag, diag->tsensor.lock);
4919 return BFA_STATUS_DEVBUSY;
4920 }
4921
4922 if (!bfa_ioc_is_operational(diag->ioc))
4923 return BFA_STATUS_IOC_NON_OP;
4924
4925 /* Init diag mod params */
4926 diag->tsensor.lock = 1;
4927 diag->tsensor.temp = result;
4928 diag->tsensor.cbfn = cbfn;
4929 diag->tsensor.cbarg = cbarg;
4930
4931 /* Send msg to fw */
4932 diag_tempsensor_send(diag);
4933
4934 return BFA_STATUS_OK;
4935}
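/*
 * Illustrative sketch (not part of the driver): read the temperature
 * sensor and wait for the mailbox response. drv_tsensor_done() and the
 * drv_s fields are hypothetical; on completion temp.temp holds the
 * junction temperature reported by the firmware.
 *
 *	struct bfa_diag_results_tempsensor_s temp;
 *
 *	if (bfa_diag_tsensor_query(&drv->diag, &temp,
 *			drv_tsensor_done, drv) == BFA_STATUS_OK)
 *		wait_for_completion(&drv->tsensor_comp);
 */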
4936
4937/*
4938 * LED Test command
4939 *
4940 * @param[in] *diag - diag data struct
4941 * @param[in]	*ledtest - pointer to the ledtest data structure
4942 *
4943 * @param[out]
4944 */
4945bfa_status_t
4946bfa_diag_ledtest(struct bfa_diag_s *diag, struct bfa_diag_ledtest_s *ledtest)
4947{
4948 bfa_trc(diag, ledtest->cmd);
4949
4950 if (!bfa_ioc_is_operational(diag->ioc))
4951 return BFA_STATUS_IOC_NON_OP;
4952
4953 if (diag->beacon.state)
4954 return BFA_STATUS_BEACON_ON;
4955
4956 if (diag->ledtest.lock)
4957 return BFA_STATUS_LEDTEST_OP;
4958
4959 /* Send msg to fw */
4960 diag->ledtest.lock = BFA_TRUE;
4961 diag_ledtest_send(diag, ledtest);
4962
4963 return BFA_STATUS_OK;
4964}
4965
4966/*
4967 * Port beaconing command
4968 *
4969 * @param[in] *diag - diag data struct
4970 * @param[in] beacon - port beaconing 1:ON 0:OFF
4971 * @param[in] link_e2e_beacon - link beaconing 1:ON 0:OFF
4972 * @param[in] sec - beaconing duration in seconds
4973 *
4974 * @param[out]
4975 */
4976bfa_status_t
4977bfa_diag_beacon_port(struct bfa_diag_s *diag, bfa_boolean_t beacon,
4978 bfa_boolean_t link_e2e_beacon, uint32_t sec)
4979{
4980 bfa_trc(diag, beacon);
4981 bfa_trc(diag, link_e2e_beacon);
4982 bfa_trc(diag, sec);
4983
4984 if (!bfa_ioc_is_operational(diag->ioc))
4985 return BFA_STATUS_IOC_NON_OP;
4986
4987 if (diag->ledtest.lock)
4988 return BFA_STATUS_LEDTEST_OP;
4989
4990	if (diag->beacon.state && beacon)	/* beacon already on */
4991 return BFA_STATUS_BEACON_ON;
4992
4993 diag->beacon.state = beacon;
4994 diag->beacon.link_e2e = link_e2e_beacon;
4995 if (diag->cbfn_beacon)
4996 diag->cbfn_beacon(diag->dev, beacon, link_e2e_beacon);
4997
4998 /* Send msg to fw */
4999 diag_portbeacon_send(diag, beacon, sec);
5000
5001 return BFA_STATUS_OK;
5002}
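/*
 * Illustrative sketch (not part of the driver): blink the port LED for
 * 30 seconds. The drv_s instance is hypothetical; the firmware signals
 * completion through BFI_DIAG_I2H_PORTBEACON, which clears beacon.state
 * in diag_portbeacon_comp() above.
 *
 *	bfa_diag_beacon_port(&drv->diag, BFA_TRUE, BFA_FALSE, 30);
 */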
5003
5004/*
5005 * Return DMA memory needed by diag module.
5006 */
5007u32
5008bfa_diag_meminfo(void)
5009{
5010 return BFA_ROUNDUP(BFI_DIAG_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
5011}
5012
5013/*
5014 * Attach virtual and physical memory for Diag.
5015 */
5016void
5017bfa_diag_attach(struct bfa_diag_s *diag, struct bfa_ioc_s *ioc, void *dev,
5018 bfa_cb_diag_beacon_t cbfn_beacon, struct bfa_trc_mod_s *trcmod)
5019{
5020 diag->dev = dev;
5021 diag->ioc = ioc;
5022 diag->trcmod = trcmod;
5023
5024 diag->block = 0;
5025 diag->cbfn = NULL;
5026 diag->cbarg = NULL;
5027 diag->result = NULL;
5028 diag->cbfn_beacon = cbfn_beacon;
5029
5030 bfa_ioc_mbox_regisr(diag->ioc, BFI_MC_DIAG, bfa_diag_intr, diag);
5031 bfa_q_qe_init(&diag->ioc_notify);
5032 bfa_ioc_notify_init(&diag->ioc_notify, bfa_diag_notify, diag);
5033 list_add_tail(&diag->ioc_notify.qe, &diag->ioc->notify_q);
5034}
5035
5036void
5037bfa_diag_memclaim(struct bfa_diag_s *diag, u8 *dm_kva, u64 dm_pa)
5038{
5039 diag->fwping.dbuf_kva = dm_kva;
5040 diag->fwping.dbuf_pa = dm_pa;
5041 memset(diag->fwping.dbuf_kva, 0, BFI_DIAG_DMA_BUF_SZ);
5042}
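/*
 * Illustrative sketch (not part of the driver): bfa_diag_meminfo() above
 * reports how much DMA memory the module needs, and the buffer is then
 * handed over through bfa_diag_memclaim(). In the bfa framework this
 * typically comes out of the pre-allocated DMA segments; a standalone
 * equivalent using the generic DMA API might look like this (dev, drv
 * and the error handling are hypothetical):
 *
 *	u32 sz = bfa_diag_meminfo();
 *	dma_addr_t dma_pa;
 *	void *kva = dma_alloc_coherent(dev, sz, &dma_pa, GFP_KERNEL);
 *
 *	if (kva)
 *		bfa_diag_memclaim(&drv->diag, kva, dma_pa);
 */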
5043
5044/*
5045 * PHY module specific
5046 */
5047#define BFA_PHY_DMA_BUF_SZ 0x02000 /* 8k dma buffer */
5048#define BFA_PHY_LOCK_STATUS 0x018878 /* phy semaphore status reg */
5049
5050static void
5051bfa_phy_ntoh32(u32 *obuf, u32 *ibuf, int sz)
5052{
5053 int i, m = sz >> 2;
5054
5055 for (i = 0; i < m; i++)
5056 obuf[i] = be32_to_cpu(ibuf[i]);
5057}
5058
5059static bfa_boolean_t
5060bfa_phy_present(struct bfa_phy_s *phy)
5061{
5062 return (phy->ioc->attr->card_type == BFA_MFG_TYPE_LIGHTNING);
5063}
5064
5065static void
5066bfa_phy_notify(void *cbarg, enum bfa_ioc_event_e event)
5067{
5068 struct bfa_phy_s *phy = cbarg;
5069
5070 bfa_trc(phy, event);
5071
5072 switch (event) {
5073 case BFA_IOC_E_DISABLED:
5074 case BFA_IOC_E_FAILED:
5075 if (phy->op_busy) {
5076 phy->status = BFA_STATUS_IOC_FAILURE;
5077 phy->cbfn(phy->cbarg, phy->status);
5078 phy->op_busy = 0;
5079 }
5080 break;
5081
5082 default:
5083 break;
5084 }
5085}
5086
5087/*
5088 * Send phy attribute query request.
5089 *
5090 * @param[in] cbarg - callback argument
5091 */
5092static void
5093bfa_phy_query_send(void *cbarg)
5094{
5095 struct bfa_phy_s *phy = cbarg;
5096 struct bfi_phy_query_req_s *msg =
5097 (struct bfi_phy_query_req_s *) phy->mb.msg;
5098
5099 msg->instance = phy->instance;
5100 bfi_h2i_set(msg->mh, BFI_MC_PHY, BFI_PHY_H2I_QUERY_REQ,
5101 bfa_ioc_portid(phy->ioc));
5102 bfa_alen_set(&msg->alen, sizeof(struct bfa_phy_attr_s), phy->dbuf_pa);
5103 bfa_ioc_mbox_queue(phy->ioc, &phy->mb);
5104}
5105
5106/*
5107 * Send phy write request.
5108 *
5109 * @param[in] cbarg - callback argument
5110 */
5111static void
5112bfa_phy_write_send(void *cbarg)
5113{
5114 struct bfa_phy_s *phy = cbarg;
5115 struct bfi_phy_write_req_s *msg =
5116 (struct bfi_phy_write_req_s *) phy->mb.msg;
5117 u32 len;
5118 u16 *buf, *dbuf;
5119 int i, sz;
5120
5121 msg->instance = phy->instance;
5122 msg->offset = cpu_to_be32(phy->addr_off + phy->offset);
5123 len = (phy->residue < BFA_PHY_DMA_BUF_SZ) ?
5124 phy->residue : BFA_PHY_DMA_BUF_SZ;
5125 msg->length = cpu_to_be32(len);
5126
5127 /* indicate if it's the last msg of the whole write operation */
5128 msg->last = (len == phy->residue) ? 1 : 0;
5129
5130 bfi_h2i_set(msg->mh, BFI_MC_PHY, BFI_PHY_H2I_WRITE_REQ,
5131 bfa_ioc_portid(phy->ioc));
5132 bfa_alen_set(&msg->alen, len, phy->dbuf_pa);
5133
5134 buf = (u16 *) (phy->ubuf + phy->offset);
5135 dbuf = (u16 *)phy->dbuf_kva;
5136 sz = len >> 1;
5137 for (i = 0; i < sz; i++)
5138		dbuf[i] = cpu_to_be16(buf[i]);	/* stage user data into the DMA buffer */
5139
5140 bfa_ioc_mbox_queue(phy->ioc, &phy->mb);
5141
5142 phy->residue -= len;
5143 phy->offset += len;
5144}
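/*
 * The write (and read) path above moves the image in BFA_PHY_DMA_BUF_SZ
 * chunks: each mailbox request covers min(residue, 8k), 'offset' walks
 * through the user buffer, and 'last' tells the firmware when the final
 * chunk has been queued. The response handler in bfa_phy_intr() below
 * re-arms bfa_phy_write_send() until 'residue' reaches zero.
 */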
5145
5146/*
5147 * Send phy read request.
5148 *
5149 * @param[in] cbarg - callback argument
5150 */
5151static void
5152bfa_phy_read_send(void *cbarg)
5153{
5154 struct bfa_phy_s *phy = cbarg;
5155 struct bfi_phy_read_req_s *msg =
5156 (struct bfi_phy_read_req_s *) phy->mb.msg;
5157 u32 len;
5158
5159 msg->instance = phy->instance;
5160 msg->offset = cpu_to_be32(phy->addr_off + phy->offset);
5161 len = (phy->residue < BFA_PHY_DMA_BUF_SZ) ?
5162 phy->residue : BFA_PHY_DMA_BUF_SZ;
5163 msg->length = cpu_to_be32(len);
5164 bfi_h2i_set(msg->mh, BFI_MC_PHY, BFI_PHY_H2I_READ_REQ,
5165 bfa_ioc_portid(phy->ioc));
5166 bfa_alen_set(&msg->alen, len, phy->dbuf_pa);
5167 bfa_ioc_mbox_queue(phy->ioc, &phy->mb);
5168}
5169
5170/*
5171 * Send phy stats request.
5172 *
5173 * @param[in] cbarg - callback argument
5174 */
5175static void
5176bfa_phy_stats_send(void *cbarg)
5177{
5178 struct bfa_phy_s *phy = cbarg;
5179 struct bfi_phy_stats_req_s *msg =
5180 (struct bfi_phy_stats_req_s *) phy->mb.msg;
5181
5182 msg->instance = phy->instance;
5183 bfi_h2i_set(msg->mh, BFI_MC_PHY, BFI_PHY_H2I_STATS_REQ,
5184 bfa_ioc_portid(phy->ioc));
5185 bfa_alen_set(&msg->alen, sizeof(struct bfa_phy_stats_s), phy->dbuf_pa);
5186 bfa_ioc_mbox_queue(phy->ioc, &phy->mb);
5187}
5188
5189/*
5190 * Phy memory info API.
5191 *
5192 * @param[in] mincfg - minimal cfg variable
5193 */
5194u32
5195bfa_phy_meminfo(bfa_boolean_t mincfg)
5196{
5197 /* min driver doesn't need phy */
5198 if (mincfg)
5199 return 0;
5200
5201 return BFA_ROUNDUP(BFA_PHY_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
5202}
5203
5204/*
5205 * Phy attach API.
5206 *
5207 * @param[in] phy - phy structure
5208 * @param[in] ioc - ioc structure
5209 * @param[in] dev - device structure
5210 * @param[in] trcmod - trace module
5211 * @param[in] mincfg - minimal cfg variable
5212 */
5213void
5214bfa_phy_attach(struct bfa_phy_s *phy, struct bfa_ioc_s *ioc, void *dev,
5215 struct bfa_trc_mod_s *trcmod, bfa_boolean_t mincfg)
5216{
5217 phy->ioc = ioc;
5218 phy->trcmod = trcmod;
5219 phy->cbfn = NULL;
5220 phy->cbarg = NULL;
5221 phy->op_busy = 0;
5222
5223 bfa_ioc_mbox_regisr(phy->ioc, BFI_MC_PHY, bfa_phy_intr, phy);
5224 bfa_q_qe_init(&phy->ioc_notify);
5225 bfa_ioc_notify_init(&phy->ioc_notify, bfa_phy_notify, phy);
5226 list_add_tail(&phy->ioc_notify.qe, &phy->ioc->notify_q);
5227
5228 /* min driver doesn't need phy */
5229 if (mincfg) {
5230 phy->dbuf_kva = NULL;
5231 phy->dbuf_pa = 0;
5232 }
5233}
5234
5235/*
5236 * Claim memory for phy
5237 *
5238 * @param[in] phy - phy structure
5239 * @param[in] dm_kva - pointer to virtual memory address
5240 * @param[in] dm_pa - physical memory address
5241 * @param[in] mincfg - minimal cfg variable
5242 */
5243void
5244bfa_phy_memclaim(struct bfa_phy_s *phy, u8 *dm_kva, u64 dm_pa,
5245 bfa_boolean_t mincfg)
5246{
5247 if (mincfg)
5248 return;
5249
5250 phy->dbuf_kva = dm_kva;
5251 phy->dbuf_pa = dm_pa;
5252 memset(phy->dbuf_kva, 0, BFA_PHY_DMA_BUF_SZ);
5253 dm_kva += BFA_ROUNDUP(BFA_PHY_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
5254 dm_pa += BFA_ROUNDUP(BFA_PHY_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
5255}
5256
5257bfa_boolean_t
5258bfa_phy_busy(struct bfa_ioc_s *ioc)
5259{
5260 void __iomem *rb;
5261
5262 rb = bfa_ioc_bar0(ioc);
5263 return readl(rb + BFA_PHY_LOCK_STATUS);
5264}
5265
5266/*
5267 * Get phy attribute.
5268 *
5269 * @param[in] phy - phy structure
5270 * @param[in] attr - phy attribute structure
5271 * @param[in] cbfn - callback function
5272 * @param[in] cbarg - callback argument
5273 *
5274 * Return status.
5275 */
5276bfa_status_t
5277bfa_phy_get_attr(struct bfa_phy_s *phy, u8 instance,
5278 struct bfa_phy_attr_s *attr, bfa_cb_phy_t cbfn, void *cbarg)
5279{
5280 bfa_trc(phy, BFI_PHY_H2I_QUERY_REQ);
5281 bfa_trc(phy, instance);
5282
5283 if (!bfa_phy_present(phy))
5284 return BFA_STATUS_PHY_NOT_PRESENT;
5285
5286 if (!bfa_ioc_is_operational(phy->ioc))
5287 return BFA_STATUS_IOC_NON_OP;
5288
5289 if (phy->op_busy || bfa_phy_busy(phy->ioc)) {
5290 bfa_trc(phy, phy->op_busy);
5291 return BFA_STATUS_DEVBUSY;
5292 }
5293
5294 phy->op_busy = 1;
5295 phy->cbfn = cbfn;
5296 phy->cbarg = cbarg;
5297 phy->instance = instance;
5298 phy->ubuf = (uint8_t *) attr;
5299 bfa_phy_query_send(phy);
5300
5301 return BFA_STATUS_OK;
5302}
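/*
 * Illustrative sketch (not part of the driver): query the attributes of
 * phy instance 0. drv_phy_done() and the drv_s fields are hypothetical;
 * bfa_cb_phy_t is assumed to be a void (*)(void *cbarg, bfa_status_t
 * status) callback, matching the completion calls in bfa_phy_intr() below.
 *
 *	struct bfa_phy_attr_s attr;
 *
 *	if (bfa_phy_get_attr(&drv->phy, 0, &attr,
 *			drv_phy_done, drv) == BFA_STATUS_OK)
 *		wait_for_completion(&drv->phy_comp);
 */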
5303
5304/*
5305 * Get phy stats.
5306 *
5307 * @param[in] phy - phy structure
5308 * @param[in] instance - phy image instance
5309 * @param[in] stats - pointer to phy stats
5310 * @param[in] cbfn - callback function
5311 * @param[in] cbarg - callback argument
5312 *
5313 * Return status.
5314 */
5315bfa_status_t
5316bfa_phy_get_stats(struct bfa_phy_s *phy, u8 instance,
5317 struct bfa_phy_stats_s *stats,
5318 bfa_cb_phy_t cbfn, void *cbarg)
5319{
5320 bfa_trc(phy, BFI_PHY_H2I_STATS_REQ);
5321 bfa_trc(phy, instance);
5322
5323 if (!bfa_phy_present(phy))
5324 return BFA_STATUS_PHY_NOT_PRESENT;
5325
5326 if (!bfa_ioc_is_operational(phy->ioc))
5327 return BFA_STATUS_IOC_NON_OP;
5328
5329 if (phy->op_busy || bfa_phy_busy(phy->ioc)) {
5330 bfa_trc(phy, phy->op_busy);
5331 return BFA_STATUS_DEVBUSY;
5332 }
5333
5334 phy->op_busy = 1;
5335 phy->cbfn = cbfn;
5336 phy->cbarg = cbarg;
5337 phy->instance = instance;
5338 phy->ubuf = (u8 *) stats;
5339 bfa_phy_stats_send(phy);
5340
5341 return BFA_STATUS_OK;
5342}
5343
5344/*
5345 * Update phy image.
5346 *
5347 * @param[in] phy - phy structure
5348 * @param[in] instance - phy image instance
5349 * @param[in] buf - update data buffer
5350 * @param[in] len - data buffer length
5351 * @param[in] offset - offset relative to starting address
5352 * @param[in] cbfn - callback function
5353 * @param[in] cbarg - callback argument
5354 *
5355 * Return status.
5356 */
5357bfa_status_t
5358bfa_phy_update(struct bfa_phy_s *phy, u8 instance,
5359 void *buf, u32 len, u32 offset,
5360 bfa_cb_phy_t cbfn, void *cbarg)
5361{
5362 bfa_trc(phy, BFI_PHY_H2I_WRITE_REQ);
5363 bfa_trc(phy, instance);
5364 bfa_trc(phy, len);
5365 bfa_trc(phy, offset);
5366
5367 if (!bfa_phy_present(phy))
5368 return BFA_STATUS_PHY_NOT_PRESENT;
5369
5370 if (!bfa_ioc_is_operational(phy->ioc))
5371 return BFA_STATUS_IOC_NON_OP;
5372
5373	/* 'len' must be non-zero and a multiple of the 4-byte word size */
5374 if (!len || (len & 0x03))
5375 return BFA_STATUS_FAILED;
5376
5377 if (phy->op_busy || bfa_phy_busy(phy->ioc)) {
5378 bfa_trc(phy, phy->op_busy);
5379 return BFA_STATUS_DEVBUSY;
5380 }
5381
5382 phy->op_busy = 1;
5383 phy->cbfn = cbfn;
5384 phy->cbarg = cbarg;
5385 phy->instance = instance;
5386 phy->residue = len;
5387 phy->offset = 0;
5388 phy->addr_off = offset;
5389 phy->ubuf = buf;
5390
5391 bfa_phy_write_send(phy);
5392 return BFA_STATUS_OK;
5393}
5394
5395/*
5396 * Read phy image.
5397 *
5398 * @param[in] phy - phy structure
5399 * @param[in] instance - phy image instance
5400 * @param[in] buf - read data buffer
5401 * @param[in] len - data buffer length
5402 * @param[in] offset - offset relative to starting address
5403 * @param[in] cbfn - callback function
5404 * @param[in] cbarg - callback argument
5405 *
5406 * Return status.
5407 */
5408bfa_status_t
5409bfa_phy_read(struct bfa_phy_s *phy, u8 instance,
5410 void *buf, u32 len, u32 offset,
5411 bfa_cb_phy_t cbfn, void *cbarg)
5412{
5413 bfa_trc(phy, BFI_PHY_H2I_READ_REQ);
5414 bfa_trc(phy, instance);
5415 bfa_trc(phy, len);
5416 bfa_trc(phy, offset);
5417
5418 if (!bfa_phy_present(phy))
5419 return BFA_STATUS_PHY_NOT_PRESENT;
5420
5421 if (!bfa_ioc_is_operational(phy->ioc))
5422 return BFA_STATUS_IOC_NON_OP;
5423
5424 /* 'len' must be in word (4-byte) boundary */
5425	/* 'len' must be non-zero and a multiple of the 4-byte word size */
5426 return BFA_STATUS_FAILED;
5427
5428 if (phy->op_busy || bfa_phy_busy(phy->ioc)) {
5429 bfa_trc(phy, phy->op_busy);
5430 return BFA_STATUS_DEVBUSY;
5431 }
5432
5433 phy->op_busy = 1;
5434 phy->cbfn = cbfn;
5435 phy->cbarg = cbarg;
5436 phy->instance = instance;
5437 phy->residue = len;
5438 phy->offset = 0;
5439 phy->addr_off = offset;
5440 phy->ubuf = buf;
5441 bfa_phy_read_send(phy);
5442
5443 return BFA_STATUS_OK;
5444}
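/*
 * Illustrative sketch (not part of the driver): read 1 KB of the phy
 * image of instance 0 starting at offset 0. The length must be non-zero
 * and a multiple of 4, as checked above; buf, drv_phy_done() and the
 * completion are hypothetical.
 *
 *	u8 buf[1024];
 *
 *	if (bfa_phy_read(&drv->phy, 0, buf, sizeof(buf), 0,
 *			drv_phy_done, drv) == BFA_STATUS_OK)
 *		wait_for_completion(&drv->phy_comp);
 */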
5445
5446/*
5447 * Process phy response messages upon receiving interrupts.
5448 *
5449 * @param[in] phyarg - phy structure
5450 * @param[in] msg - message structure
5451 */
5452void
5453bfa_phy_intr(void *phyarg, struct bfi_mbmsg_s *msg)
5454{
5455 struct bfa_phy_s *phy = phyarg;
5456 u32 status;
5457
5458 union {
5459 struct bfi_phy_query_rsp_s *query;
5460 struct bfi_phy_stats_rsp_s *stats;
5461 struct bfi_phy_write_rsp_s *write;
5462 struct bfi_phy_read_rsp_s *read;
5463 struct bfi_mbmsg_s *msg;
5464 } m;
5465
5466 m.msg = msg;
5467 bfa_trc(phy, msg->mh.msg_id);
5468
5469 if (!phy->op_busy) {
5470 /* receiving response after ioc failure */
5471 bfa_trc(phy, 0x9999);
5472 return;
5473 }
5474
5475 switch (msg->mh.msg_id) {
5476 case BFI_PHY_I2H_QUERY_RSP:
5477 status = be32_to_cpu(m.query->status);
5478 bfa_trc(phy, status);
5479
5480 if (status == BFA_STATUS_OK) {
5481 struct bfa_phy_attr_s *attr =
5482 (struct bfa_phy_attr_s *) phy->ubuf;
5483 bfa_phy_ntoh32((u32 *)attr, (u32 *)phy->dbuf_kva,
5484 sizeof(struct bfa_phy_attr_s));
5485 bfa_trc(phy, attr->status);
5486 bfa_trc(phy, attr->length);
5487 }
5488
5489 phy->status = status;
5490 phy->op_busy = 0;
5491 if (phy->cbfn)
5492 phy->cbfn(phy->cbarg, phy->status);
5493 break;
5494 case BFI_PHY_I2H_STATS_RSP:
5495 status = be32_to_cpu(m.stats->status);
5496 bfa_trc(phy, status);
5497
5498 if (status == BFA_STATUS_OK) {
5499 struct bfa_phy_stats_s *stats =
5500 (struct bfa_phy_stats_s *) phy->ubuf;
5501 bfa_phy_ntoh32((u32 *)stats, (u32 *)phy->dbuf_kva,
5502 sizeof(struct bfa_phy_stats_s));
5503 bfa_trc(phy, stats->status);
5504 }
5505
5506 phy->status = status;
5507 phy->op_busy = 0;
5508 if (phy->cbfn)
5509 phy->cbfn(phy->cbarg, phy->status);
5510 break;
5511 case BFI_PHY_I2H_WRITE_RSP:
5512 status = be32_to_cpu(m.write->status);
5513 bfa_trc(phy, status);
5514
5515 if (status != BFA_STATUS_OK || phy->residue == 0) {
5516 phy->status = status;
5517 phy->op_busy = 0;
5518 if (phy->cbfn)
5519 phy->cbfn(phy->cbarg, phy->status);
5520 } else {
5521 bfa_trc(phy, phy->offset);
5522 bfa_phy_write_send(phy);
5523 }
5524 break;
5525 case BFI_PHY_I2H_READ_RSP:
5526 status = be32_to_cpu(m.read->status);
5527 bfa_trc(phy, status);
5528
5529 if (status != BFA_STATUS_OK) {
5530 phy->status = status;
5531 phy->op_busy = 0;
5532 if (phy->cbfn)
5533 phy->cbfn(phy->cbarg, phy->status);
5534 } else {
5535 u32 len = be32_to_cpu(m.read->length);
5536 u16 *buf = (u16 *)(phy->ubuf + phy->offset);
5537 u16 *dbuf = (u16 *)phy->dbuf_kva;
5538 int i, sz = len >> 1;
5539
5540 bfa_trc(phy, phy->offset);
5541 bfa_trc(phy, len);
5542
5543 for (i = 0; i < sz; i++)
5544 buf[i] = be16_to_cpu(dbuf[i]);
5545
5546 phy->residue -= len;
5547 phy->offset += len;
5548
5549 if (phy->residue == 0) {
5550 phy->status = status;
5551 phy->op_busy = 0;
5552 if (phy->cbfn)
5553 phy->cbfn(phy->cbarg, phy->status);
5554 } else
5555 bfa_phy_read_send(phy);
5556 }
5557 break;
5558 default:
5559 WARN_ON(1);
5560 }
5561}
5562
5563/*
5564 * DCONF module specific
5565 */
5566
5567BFA_MODULE(dconf);
5568
5569/*
5570 * DCONF state machine events
5571 */
5572enum bfa_dconf_event {
5573 BFA_DCONF_SM_INIT = 1, /* dconf Init */
5574 BFA_DCONF_SM_FLASH_COMP = 2, /* read/write to flash */
5575 BFA_DCONF_SM_WR = 3, /* binding change, map */
5576 BFA_DCONF_SM_TIMEOUT = 4, /* Start timer */
5577 BFA_DCONF_SM_EXIT = 5, /* exit dconf module */
5578 BFA_DCONF_SM_IOCDISABLE = 6, /* IOC disable event */
5579};
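/*
 * Typical event flow, as implemented by the handlers below:
 *
 *   uninit --INIT--> flash_read --FLASH_COMP--> ready
 *   ready --WR--> dirty --TIMEOUT--> sync --FLASH_COMP--> ready
 *   dirty/sync --EXIT--> final_sync --FLASH_COMP/TIMEOUT--> uninit
 *
 * A WR event only (re)starts the BFA_DCONF_UPDATE_TOV timer; the flash
 * write itself happens when the timer fires, so back-to-back updates are
 * coalesced into a single flash operation.
 */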
5580
5581/* forward declaration of DCONF state machine */
5582static void bfa_dconf_sm_uninit(struct bfa_dconf_mod_s *dconf,
5583 enum bfa_dconf_event event);
5584static void bfa_dconf_sm_flash_read(struct bfa_dconf_mod_s *dconf,
5585 enum bfa_dconf_event event);
5586static void bfa_dconf_sm_ready(struct bfa_dconf_mod_s *dconf,
5587 enum bfa_dconf_event event);
5588static void bfa_dconf_sm_dirty(struct bfa_dconf_mod_s *dconf,
5589 enum bfa_dconf_event event);
5590static void bfa_dconf_sm_sync(struct bfa_dconf_mod_s *dconf,
5591 enum bfa_dconf_event event);
5592static void bfa_dconf_sm_final_sync(struct bfa_dconf_mod_s *dconf,
5593 enum bfa_dconf_event event);
5594static void bfa_dconf_sm_iocdown_dirty(struct bfa_dconf_mod_s *dconf,
5595 enum bfa_dconf_event event);
5596
5597static void bfa_dconf_cbfn(void *dconf, bfa_status_t status);
5598static void bfa_dconf_timer(void *cbarg);
5599static bfa_status_t bfa_dconf_flash_write(struct bfa_dconf_mod_s *dconf);
5600static void bfa_dconf_init_cb(void *arg, bfa_status_t status);
5601
5602/*
5603 * Beginning state of dconf module. Waiting for an event to start.
5604 */
5605static void
5606bfa_dconf_sm_uninit(struct bfa_dconf_mod_s *dconf, enum bfa_dconf_event event)
5607{
5608 bfa_status_t bfa_status;
5609 bfa_trc(dconf->bfa, event);
5610
5611 switch (event) {
5612 case BFA_DCONF_SM_INIT:
5613 if (dconf->min_cfg) {
5614 bfa_trc(dconf->bfa, dconf->min_cfg);
5615			bfa_fsm_send_event(&dconf->bfa->iocfc,
5616					IOCFC_E_DCONF_DONE);
5617			return;
5618 }
5619 bfa_sm_set_state(dconf, bfa_dconf_sm_flash_read);
5620		bfa_timer_start(dconf->bfa, &dconf->timer,
5621			bfa_dconf_timer, dconf, 2 * BFA_DCONF_UPDATE_TOV);
5622		bfa_status = bfa_flash_read_part(BFA_FLASH(dconf->bfa),
5623 BFA_FLASH_PART_DRV, dconf->instance,
5624 dconf->dconf,
5625 sizeof(struct bfa_dconf_s), 0,
5626 bfa_dconf_init_cb, dconf->bfa);
5627 if (bfa_status != BFA_STATUS_OK) {
5628			bfa_timer_stop(&dconf->timer);
5629			bfa_dconf_init_cb(dconf->bfa, BFA_STATUS_FAILED);
5630 bfa_sm_set_state(dconf, bfa_dconf_sm_uninit);
5631 return;
5632 }
5633 break;
5634 case BFA_DCONF_SM_EXIT:
5635		bfa_fsm_send_event(&dconf->bfa->iocfc, IOCFC_E_DCONF_DONE);
5636	case BFA_DCONF_SM_IOCDISABLE:
5637 case BFA_DCONF_SM_WR:
5638 case BFA_DCONF_SM_FLASH_COMP:
5639 break;
5640 default:
5641 bfa_sm_fault(dconf->bfa, event);
5642 }
5643}
5644
5645/*
5646 * Read flash for dconf entries and make a call back to the driver once done.
5647 */
5648static void
5649bfa_dconf_sm_flash_read(struct bfa_dconf_mod_s *dconf,
5650 enum bfa_dconf_event event)
5651{
5652 bfa_trc(dconf->bfa, event);
5653
5654 switch (event) {
5655 case BFA_DCONF_SM_FLASH_COMP:
5656		bfa_timer_stop(&dconf->timer);
5657		bfa_sm_set_state(dconf, bfa_dconf_sm_ready);
5658 break;
5659 case BFA_DCONF_SM_TIMEOUT:
5660 bfa_sm_set_state(dconf, bfa_dconf_sm_ready);
5661		bfa_ioc_suspend(&dconf->bfa->ioc);
5662		break;
5663 case BFA_DCONF_SM_EXIT:
5664		bfa_timer_stop(&dconf->timer);
5665 bfa_sm_set_state(dconf, bfa_dconf_sm_uninit);
5666 bfa_fsm_send_event(&dconf->bfa->iocfc, IOCFC_E_DCONF_DONE);
5667 break;
5668	case BFA_DCONF_SM_IOCDISABLE:
5669		bfa_timer_stop(&dconf->timer);
5670		bfa_sm_set_state(dconf, bfa_dconf_sm_uninit);
5671 break;
5672 default:
5673 bfa_sm_fault(dconf->bfa, event);
5674 }
5675}
5676
5677/*
5678 * DCONF Module is in ready state. Has completed the initialization.
5679 */
5680static void
5681bfa_dconf_sm_ready(struct bfa_dconf_mod_s *dconf, enum bfa_dconf_event event)
5682{
5683 bfa_trc(dconf->bfa, event);
5684
5685 switch (event) {
5686 case BFA_DCONF_SM_WR:
5687 bfa_timer_start(dconf->bfa, &dconf->timer,
5688 bfa_dconf_timer, dconf, BFA_DCONF_UPDATE_TOV);
5689 bfa_sm_set_state(dconf, bfa_dconf_sm_dirty);
5690 break;
5691 case BFA_DCONF_SM_EXIT:
5692		bfa_sm_set_state(dconf, bfa_dconf_sm_uninit);
5693		bfa_fsm_send_event(&dconf->bfa->iocfc, IOCFC_E_DCONF_DONE);
5694		break;
5695 case BFA_DCONF_SM_INIT:
5696 case BFA_DCONF_SM_IOCDISABLE:
5697 break;
5698 default:
5699 bfa_sm_fault(dconf->bfa, event);
5700 }
5701}
5702
5703/*
5704 * Entries are dirty; write them back to the flash.
5705 */
5706
5707static void
5708bfa_dconf_sm_dirty(struct bfa_dconf_mod_s *dconf, enum bfa_dconf_event event)
5709{
5710 bfa_trc(dconf->bfa, event);
5711
5712 switch (event) {
5713 case BFA_DCONF_SM_TIMEOUT:
5714 bfa_sm_set_state(dconf, bfa_dconf_sm_sync);
5715 bfa_dconf_flash_write(dconf);
5716 break;
5717 case BFA_DCONF_SM_WR:
5718 bfa_timer_stop(&dconf->timer);
5719 bfa_timer_start(dconf->bfa, &dconf->timer,
5720 bfa_dconf_timer, dconf, BFA_DCONF_UPDATE_TOV);
5721 break;
5722 case BFA_DCONF_SM_EXIT:
5723 bfa_timer_stop(&dconf->timer);
5724 bfa_timer_start(dconf->bfa, &dconf->timer,
5725 bfa_dconf_timer, dconf, BFA_DCONF_UPDATE_TOV);
5726 bfa_sm_set_state(dconf, bfa_dconf_sm_final_sync);
5727 bfa_dconf_flash_write(dconf);
5728 break;
5729 case BFA_DCONF_SM_FLASH_COMP:
5730 break;
5731 case BFA_DCONF_SM_IOCDISABLE:
5732 bfa_timer_stop(&dconf->timer);
5733 bfa_sm_set_state(dconf, bfa_dconf_sm_iocdown_dirty);
5734 break;
5735 default:
5736 bfa_sm_fault(dconf->bfa, event);
5737 }
5738}
5739
5740/*
5741 * Sync the dconf entries to the flash.
5742 */
5743static void
5744bfa_dconf_sm_final_sync(struct bfa_dconf_mod_s *dconf,
5745 enum bfa_dconf_event event)
5746{
5747 bfa_trc(dconf->bfa, event);
5748
5749 switch (event) {
5750 case BFA_DCONF_SM_IOCDISABLE:
5751 case BFA_DCONF_SM_FLASH_COMP:
5752 bfa_timer_stop(&dconf->timer);
5753 case BFA_DCONF_SM_TIMEOUT:
5754 bfa_sm_set_state(dconf, bfa_dconf_sm_uninit);
5755		bfa_fsm_send_event(&dconf->bfa->iocfc, IOCFC_E_DCONF_DONE);
5756		break;
5757 default:
5758 bfa_sm_fault(dconf->bfa, event);
5759 }
5760}
5761
5762static void
5763bfa_dconf_sm_sync(struct bfa_dconf_mod_s *dconf, enum bfa_dconf_event event)
5764{
5765 bfa_trc(dconf->bfa, event);
5766
5767 switch (event) {
5768 case BFA_DCONF_SM_FLASH_COMP:
5769 bfa_sm_set_state(dconf, bfa_dconf_sm_ready);
5770 break;
5771 case BFA_DCONF_SM_WR:
5772 bfa_timer_start(dconf->bfa, &dconf->timer,
5773 bfa_dconf_timer, dconf, BFA_DCONF_UPDATE_TOV);
5774 bfa_sm_set_state(dconf, bfa_dconf_sm_dirty);
5775 break;
5776 case BFA_DCONF_SM_EXIT:
5777 bfa_timer_start(dconf->bfa, &dconf->timer,
5778 bfa_dconf_timer, dconf, BFA_DCONF_UPDATE_TOV);
5779 bfa_sm_set_state(dconf, bfa_dconf_sm_final_sync);
5780 break;
5781 case BFA_DCONF_SM_IOCDISABLE:
5782 bfa_sm_set_state(dconf, bfa_dconf_sm_iocdown_dirty);
5783 break;
5784 default:
5785 bfa_sm_fault(dconf->bfa, event);
5786 }
5787}
5788
5789static void
5790bfa_dconf_sm_iocdown_dirty(struct bfa_dconf_mod_s *dconf,
5791 enum bfa_dconf_event event)
5792{
5793 bfa_trc(dconf->bfa, event);
5794
5795 switch (event) {
5796 case BFA_DCONF_SM_INIT:
5797 bfa_timer_start(dconf->bfa, &dconf->timer,
5798 bfa_dconf_timer, dconf, BFA_DCONF_UPDATE_TOV);
5799 bfa_sm_set_state(dconf, bfa_dconf_sm_dirty);
5800 break;
5801 case BFA_DCONF_SM_EXIT:
5802		bfa_sm_set_state(dconf, bfa_dconf_sm_uninit);
5803		bfa_fsm_send_event(&dconf->bfa->iocfc, IOCFC_E_DCONF_DONE);
5804		break;
5805 case BFA_DCONF_SM_IOCDISABLE:
5806 break;
5807 default:
5808 bfa_sm_fault(dconf->bfa, event);
5809 }
5810}
5811
5812/*
5813 * Compute memory needed by the dconf (driver config) module.
5814 */
5815static void
5816bfa_dconf_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *meminfo,
5817 struct bfa_s *bfa)
5818{
5819 struct bfa_mem_kva_s *dconf_kva = BFA_MEM_DCONF_KVA(bfa);
5820
5821 if (cfg->drvcfg.min_cfg)
5822 bfa_mem_kva_setup(meminfo, dconf_kva,
5823 sizeof(struct bfa_dconf_hdr_s));
5824 else
5825 bfa_mem_kva_setup(meminfo, dconf_kva,
5826 sizeof(struct bfa_dconf_s));
5827}
5828
5829static void
5830bfa_dconf_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
5831 struct bfa_pcidev_s *pcidev)
5832{
5833 struct bfa_dconf_mod_s *dconf = BFA_DCONF_MOD(bfa);
5834
5835 dconf->bfad = bfad;
5836 dconf->bfa = bfa;
5837 dconf->instance = bfa->ioc.port_id;
5838 bfa_trc(bfa, dconf->instance);
5839
5840 dconf->dconf = (struct bfa_dconf_s *) bfa_mem_kva_curp(dconf);
5841 if (cfg->drvcfg.min_cfg) {
5842 bfa_mem_kva_curp(dconf) += sizeof(struct bfa_dconf_hdr_s);
5843 dconf->min_cfg = BFA_TRUE;
5844	} else {
5845 dconf->min_cfg = BFA_FALSE;
5846 bfa_mem_kva_curp(dconf) += sizeof(struct bfa_dconf_s);
5847 }
5848
5849 bfa_dconf_read_data_valid(bfa) = BFA_FALSE;
5850 bfa_sm_set_state(dconf, bfa_dconf_sm_uninit);
5851}
5852
5853static void
5854bfa_dconf_init_cb(void *arg, bfa_status_t status)
5855{
5856 struct bfa_s *bfa = arg;
5857 struct bfa_dconf_mod_s *dconf = BFA_DCONF_MOD(bfa);
5858
5859	if (status == BFA_STATUS_OK) {
5860 bfa_dconf_read_data_valid(bfa) = BFA_TRUE;
5861 if (dconf->dconf->hdr.signature != BFI_DCONF_SIGNATURE)
5862 dconf->dconf->hdr.signature = BFI_DCONF_SIGNATURE;
5863 if (dconf->dconf->hdr.version != BFI_DCONF_VERSION)
5864 dconf->dconf->hdr.version = BFI_DCONF_VERSION;
5865 }
5866	bfa_sm_send_event(dconf, BFA_DCONF_SM_FLASH_COMP);
5867	bfa_fsm_send_event(&bfa->iocfc, IOCFC_E_DCONF_DONE);
5868}
5869
5870void
5871bfa_dconf_modinit(struct bfa_s *bfa)
5872{
5873 struct bfa_dconf_mod_s *dconf = BFA_DCONF_MOD(bfa);
5874 bfa_sm_send_event(dconf, BFA_DCONF_SM_INIT);
5875}
5876static void
5877bfa_dconf_start(struct bfa_s *bfa)
5878{
5879}
5880
5881static void
5882bfa_dconf_stop(struct bfa_s *bfa)
5883{
5884}
5885
5886static void bfa_dconf_timer(void *cbarg)
5887{
5888 struct bfa_dconf_mod_s *dconf = cbarg;
5889 bfa_sm_send_event(dconf, BFA_DCONF_SM_TIMEOUT);
5890}
5891static void
5892bfa_dconf_iocdisable(struct bfa_s *bfa)
5893{
5894 struct bfa_dconf_mod_s *dconf = BFA_DCONF_MOD(bfa);
5895 bfa_sm_send_event(dconf, BFA_DCONF_SM_IOCDISABLE);
5896}
5897
5898static void
5899bfa_dconf_detach(struct bfa_s *bfa)
5900{
5901}
5902
5903static bfa_status_t
5904bfa_dconf_flash_write(struct bfa_dconf_mod_s *dconf)
5905{
5906 bfa_status_t bfa_status;
5907 bfa_trc(dconf->bfa, 0);
5908
5909 bfa_status = bfa_flash_update_part(BFA_FLASH(dconf->bfa),
5910 BFA_FLASH_PART_DRV, dconf->instance,
5911 dconf->dconf, sizeof(struct bfa_dconf_s), 0,
5912 bfa_dconf_cbfn, dconf);
5913 if (bfa_status != BFA_STATUS_OK)
5914 WARN_ON(bfa_status);
5915 bfa_trc(dconf->bfa, bfa_status);
5916
5917 return bfa_status;
5918}
5919
5920bfa_status_t
5921bfa_dconf_update(struct bfa_s *bfa)
5922{
5923 struct bfa_dconf_mod_s *dconf = BFA_DCONF_MOD(bfa);
5924 bfa_trc(dconf->bfa, 0);
5925 if (bfa_sm_cmp_state(dconf, bfa_dconf_sm_iocdown_dirty))
5926 return BFA_STATUS_FAILED;
5927
5928 if (dconf->min_cfg) {
5929 bfa_trc(dconf->bfa, dconf->min_cfg);
5930 return BFA_STATUS_FAILED;
5931 }
5932
5933 bfa_sm_send_event(dconf, BFA_DCONF_SM_WR);
5934 return BFA_STATUS_OK;
5935}
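/*
 * Illustrative usage (sketch, not part of the driver): a caller that has
 * modified the in-memory dconf contents marks them dirty with
 * bfa_dconf_update(); the state machine above defers the actual flash
 * write by BFA_DCONF_UPDATE_TOV so that a burst of updates ends up as a
 * single write. BFA_STATUS_FAILED means the IOC went down with unsynced
 * data or the module is running in min_cfg mode.
 *
 *	status = bfa_dconf_update(bfa);
 */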
5936
5937static void
5938bfa_dconf_cbfn(void *arg, bfa_status_t status)
5939{
5940 struct bfa_dconf_mod_s *dconf = arg;
5941 WARN_ON(status);
5942 bfa_sm_send_event(dconf, BFA_DCONF_SM_FLASH_COMP);
5943}
5944
5945void
5946bfa_dconf_modexit(struct bfa_s *bfa)
5947{
5948 struct bfa_dconf_mod_s *dconf = BFA_DCONF_MOD(bfa);
5949	bfa_sm_send_event(dconf, BFA_DCONF_SM_EXIT);
5950}