/*
 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
 * All rights reserved
 * www.brocade.com
 *
 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License (GPL) Version 2 as
 * published by the Free Software Foundation
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */

#include "bfad_drv.h"
#include "bfad_im.h"
#include "bfa_ioc.h"
#include "bfi_reg.h"
#include "bfa_defs.h"
#include "bfa_defs_svc.h"

BFA_TRC_FILE(CNA, IOC);

/*
 * IOC local definitions
 */
#define BFA_IOC_TOV		3000	/* msecs */
#define BFA_IOC_HWSEM_TOV	500	/* msecs */
#define BFA_IOC_HB_TOV		500	/* msecs */
#define BFA_IOC_TOV_RECOVER	BFA_IOC_HB_TOV
#define BFA_IOC_POLL_TOV	BFA_TIMER_FREQ

#define bfa_ioc_timer_start(__ioc)					\
	bfa_timer_begin((__ioc)->timer_mod, &(__ioc)->ioc_timer,	\
			bfa_ioc_timeout, (__ioc), BFA_IOC_TOV)
#define bfa_ioc_timer_stop(__ioc)	bfa_timer_stop(&(__ioc)->ioc_timer)

#define bfa_hb_timer_start(__ioc)					\
	bfa_timer_begin((__ioc)->timer_mod, &(__ioc)->hb_timer,	\
			bfa_ioc_hb_check, (__ioc), BFA_IOC_HB_TOV)
#define bfa_hb_timer_stop(__ioc)	bfa_timer_stop(&(__ioc)->hb_timer)

#define BFA_DBG_FWTRC_OFF(_fn)	(BFI_IOC_TRC_OFF + BFA_DBG_FWTRC_LEN * (_fn))

/*
 * Asic specific macros : see bfa_hw_cb.c and bfa_hw_ct.c for details.
 */

#define bfa_ioc_firmware_lock(__ioc)			\
			((__ioc)->ioc_hwif->ioc_firmware_lock(__ioc))
#define bfa_ioc_firmware_unlock(__ioc)			\
			((__ioc)->ioc_hwif->ioc_firmware_unlock(__ioc))
#define bfa_ioc_reg_init(__ioc) ((__ioc)->ioc_hwif->ioc_reg_init(__ioc))
#define bfa_ioc_map_port(__ioc) ((__ioc)->ioc_hwif->ioc_map_port(__ioc))
#define bfa_ioc_notify_fail(__ioc)		\
			((__ioc)->ioc_hwif->ioc_notify_fail(__ioc))
#define bfa_ioc_sync_start(__ioc)		\
			((__ioc)->ioc_hwif->ioc_sync_start(__ioc))
#define bfa_ioc_sync_join(__ioc)		\
			((__ioc)->ioc_hwif->ioc_sync_join(__ioc))
#define bfa_ioc_sync_leave(__ioc)		\
			((__ioc)->ioc_hwif->ioc_sync_leave(__ioc))
#define bfa_ioc_sync_ack(__ioc)			\
			((__ioc)->ioc_hwif->ioc_sync_ack(__ioc))
#define bfa_ioc_sync_complete(__ioc)		\
			((__ioc)->ioc_hwif->ioc_sync_complete(__ioc))

#define bfa_ioc_mbox_cmd_pending(__ioc)		\
			(!list_empty(&((__ioc)->mbox_mod.cmd_q)) || \
			readl((__ioc)->ioc_regs.hfn_mbox_cmd))

bfa_boolean_t bfa_auto_recover = BFA_TRUE;

/*
 * forward declarations
 */
static void bfa_ioc_hw_sem_get(struct bfa_ioc_s *ioc);
static void bfa_ioc_hwinit(struct bfa_ioc_s *ioc, bfa_boolean_t force);
static void bfa_ioc_timeout(void *ioc);
static void bfa_ioc_poll_fwinit(struct bfa_ioc_s *ioc);
static void bfa_ioc_send_enable(struct bfa_ioc_s *ioc);
static void bfa_ioc_send_disable(struct bfa_ioc_s *ioc);
static void bfa_ioc_send_getattr(struct bfa_ioc_s *ioc);
static void bfa_ioc_hb_monitor(struct bfa_ioc_s *ioc);
static void bfa_ioc_mbox_poll(struct bfa_ioc_s *ioc);
static void bfa_ioc_mbox_flush(struct bfa_ioc_s *ioc);
static void bfa_ioc_recover(struct bfa_ioc_s *ioc);
static void bfa_ioc_check_attr_wwns(struct bfa_ioc_s *ioc);
static void bfa_ioc_event_notify(struct bfa_ioc_s *ioc,
				enum bfa_ioc_event_e event);
static void bfa_ioc_disable_comp(struct bfa_ioc_s *ioc);
static void bfa_ioc_lpu_stop(struct bfa_ioc_s *ioc);
static void bfa_ioc_debug_save_ftrc(struct bfa_ioc_s *ioc);
static void bfa_ioc_fail_notify(struct bfa_ioc_s *ioc);
static void bfa_ioc_pf_fwmismatch(struct bfa_ioc_s *ioc);

/*
 * IOC state machine definitions/declarations
 */
enum ioc_event {
	IOC_E_RESET		= 1,	/* IOC reset request		*/
	IOC_E_ENABLE		= 2,	/* IOC enable request		*/
	IOC_E_DISABLE		= 3,	/* IOC disable request		*/
	IOC_E_DETACH		= 4,	/* driver detach cleanup	*/
	IOC_E_ENABLED		= 5,	/* f/w enabled			*/
	IOC_E_FWRSP_GETATTR	= 6,	/* IOC get attribute response	*/
	IOC_E_DISABLED		= 7,	/* f/w disabled			*/
	IOC_E_PFFAILED		= 8,	/* failure notice by iocpf sm	*/
	IOC_E_HBFAIL		= 9,	/* heartbeat failure		*/
	IOC_E_HWERROR		= 10,	/* hardware error interrupt	*/
	IOC_E_TIMEOUT		= 11,	/* timeout			*/
	IOC_E_HWFAILED		= 12,	/* PCI mapping failure notice	*/
	IOC_E_FWRSP_ACQ_ADDR	= 13,	/* Acquiring address		*/
};

bfa_fsm_state_decl(bfa_ioc, uninit, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, reset, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, enabling, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, getattr, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, op, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, fail_retry, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, fail, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, disabling, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, disabled, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, hwfail, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, acq_addr, struct bfa_ioc_s, enum ioc_event);

static struct bfa_sm_table_s ioc_sm_table[] = {
	{BFA_SM(bfa_ioc_sm_uninit), BFA_IOC_UNINIT},
	{BFA_SM(bfa_ioc_sm_reset), BFA_IOC_RESET},
	{BFA_SM(bfa_ioc_sm_enabling), BFA_IOC_ENABLING},
	{BFA_SM(bfa_ioc_sm_getattr), BFA_IOC_GETATTR},
	{BFA_SM(bfa_ioc_sm_op), BFA_IOC_OPERATIONAL},
	{BFA_SM(bfa_ioc_sm_fail_retry), BFA_IOC_INITFAIL},
	{BFA_SM(bfa_ioc_sm_fail), BFA_IOC_FAIL},
	{BFA_SM(bfa_ioc_sm_disabling), BFA_IOC_DISABLING},
	{BFA_SM(bfa_ioc_sm_disabled), BFA_IOC_DISABLED},
	{BFA_SM(bfa_ioc_sm_hwfail), BFA_IOC_HWFAIL},
	{BFA_SM(bfa_ioc_sm_acq_addr), BFA_IOC_ACQ_ADDR},
};

/*
 * IOCPF state machine definitions/declarations
 */

#define bfa_iocpf_timer_start(__ioc)					\
	bfa_timer_begin((__ioc)->timer_mod, &(__ioc)->ioc_timer,	\
			bfa_iocpf_timeout, (__ioc), BFA_IOC_TOV)
#define bfa_iocpf_timer_stop(__ioc)	bfa_timer_stop(&(__ioc)->ioc_timer)

#define bfa_iocpf_poll_timer_start(__ioc)				\
	bfa_timer_begin((__ioc)->timer_mod, &(__ioc)->ioc_timer,	\
			bfa_iocpf_poll_timeout, (__ioc), BFA_IOC_POLL_TOV)

#define bfa_sem_timer_start(__ioc)					\
	bfa_timer_begin((__ioc)->timer_mod, &(__ioc)->sem_timer,	\
			bfa_iocpf_sem_timeout, (__ioc), BFA_IOC_HWSEM_TOV)
#define bfa_sem_timer_stop(__ioc)	bfa_timer_stop(&(__ioc)->sem_timer)

/*
 * Forward declarations for iocpf state machine
 */
static void bfa_iocpf_timeout(void *ioc_arg);
static void bfa_iocpf_sem_timeout(void *ioc_arg);
static void bfa_iocpf_poll_timeout(void *ioc_arg);

/*
 * IOCPF state machine events
 */
enum iocpf_event {
	IOCPF_E_ENABLE		= 1,	/* IOCPF enable request		*/
	IOCPF_E_DISABLE		= 2,	/* IOCPF disable request	*/
	IOCPF_E_STOP		= 3,	/* stop on driver detach	*/
	IOCPF_E_FWREADY		= 4,	/* f/w initialization done	*/
	IOCPF_E_FWRSP_ENABLE	= 5,	/* enable f/w response		*/
	IOCPF_E_FWRSP_DISABLE	= 6,	/* disable f/w response		*/
	IOCPF_E_FAIL		= 7,	/* failure notice by ioc sm	*/
	IOCPF_E_INITFAIL	= 8,	/* init fail notice by ioc sm	*/
	IOCPF_E_GETATTRFAIL	= 9,	/* init fail notice by ioc sm	*/
	IOCPF_E_SEMLOCKED	= 10,	/* h/w semaphore is locked	*/
	IOCPF_E_TIMEOUT		= 11,	/* f/w response timeout		*/
	IOCPF_E_SEM_ERROR	= 12,	/* h/w sem mapping error	*/
};

/*
 * IOCPF states
 */
enum bfa_iocpf_state {
	BFA_IOCPF_RESET		= 1,	/* IOC is in reset state */
	BFA_IOCPF_SEMWAIT	= 2,	/* Waiting for IOC h/w semaphore */
	BFA_IOCPF_HWINIT	= 3,	/* IOC h/w is being initialized */
	BFA_IOCPF_READY		= 4,	/* IOCPF is initialized */
	BFA_IOCPF_INITFAIL	= 5,	/* IOCPF failed */
	BFA_IOCPF_FAIL		= 6,	/* IOCPF failed */
	BFA_IOCPF_DISABLING	= 7,	/* IOCPF is being disabled */
	BFA_IOCPF_DISABLED	= 8,	/* IOCPF is disabled */
	BFA_IOCPF_FWMISMATCH	= 9,	/* IOC f/w different from drivers */
};

bfa_fsm_state_decl(bfa_iocpf, reset, struct bfa_iocpf_s, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, fwcheck, struct bfa_iocpf_s, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, mismatch, struct bfa_iocpf_s, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, semwait, struct bfa_iocpf_s, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, hwinit, struct bfa_iocpf_s, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, enabling, struct bfa_iocpf_s, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, ready, struct bfa_iocpf_s, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, initfail_sync, struct bfa_iocpf_s,
						enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, initfail, struct bfa_iocpf_s, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, fail_sync, struct bfa_iocpf_s, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, fail, struct bfa_iocpf_s, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, disabling, struct bfa_iocpf_s, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, disabling_sync, struct bfa_iocpf_s,
						enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, disabled, struct bfa_iocpf_s, enum iocpf_event);

static struct bfa_sm_table_s iocpf_sm_table[] = {
	{BFA_SM(bfa_iocpf_sm_reset), BFA_IOCPF_RESET},
	{BFA_SM(bfa_iocpf_sm_fwcheck), BFA_IOCPF_FWMISMATCH},
	{BFA_SM(bfa_iocpf_sm_mismatch), BFA_IOCPF_FWMISMATCH},
	{BFA_SM(bfa_iocpf_sm_semwait), BFA_IOCPF_SEMWAIT},
	{BFA_SM(bfa_iocpf_sm_hwinit), BFA_IOCPF_HWINIT},
	{BFA_SM(bfa_iocpf_sm_enabling), BFA_IOCPF_HWINIT},
	{BFA_SM(bfa_iocpf_sm_ready), BFA_IOCPF_READY},
	{BFA_SM(bfa_iocpf_sm_initfail_sync), BFA_IOCPF_INITFAIL},
	{BFA_SM(bfa_iocpf_sm_initfail), BFA_IOCPF_INITFAIL},
	{BFA_SM(bfa_iocpf_sm_fail_sync), BFA_IOCPF_FAIL},
	{BFA_SM(bfa_iocpf_sm_fail), BFA_IOCPF_FAIL},
	{BFA_SM(bfa_iocpf_sm_disabling), BFA_IOCPF_DISABLING},
	{BFA_SM(bfa_iocpf_sm_disabling_sync), BFA_IOCPF_DISABLING},
	{BFA_SM(bfa_iocpf_sm_disabled), BFA_IOCPF_DISABLED},
};

/*
 * IOC State Machine
 */

/*
 * Beginning state. IOC uninit state.
 */

static void
bfa_ioc_sm_uninit_entry(struct bfa_ioc_s *ioc)
{
}

/*
 * IOC is in uninit state.
 */
static void
bfa_ioc_sm_uninit(struct bfa_ioc_s *ioc, enum ioc_event event)
{
	bfa_trc(ioc, event);

	switch (event) {
	case IOC_E_RESET:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_reset);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}
/*
 * Reset entry actions -- initialize state machine
 */
static void
bfa_ioc_sm_reset_entry(struct bfa_ioc_s *ioc)
{
	bfa_fsm_set_state(&ioc->iocpf, bfa_iocpf_sm_reset);
}

/*
 * IOC is in reset state.
 */
static void
bfa_ioc_sm_reset(struct bfa_ioc_s *ioc, enum ioc_event event)
{
	bfa_trc(ioc, event);

	switch (event) {
	case IOC_E_ENABLE:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_enabling);
		break;

	case IOC_E_DISABLE:
		bfa_ioc_disable_comp(ioc);
		break;

	case IOC_E_DETACH:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}

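/*
 * IOC is being enabled: hand the enable request to the IOCPF state
 * machine, which drives the hardware/firmware initialization handshake.
 */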
static void
bfa_ioc_sm_enabling_entry(struct bfa_ioc_s *ioc)
{
	bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_ENABLE);
}

/*
 * Host IOC function is being enabled, awaiting response from firmware.
 * Semaphore is acquired.
 */
static void
bfa_ioc_sm_enabling(struct bfa_ioc_s *ioc, enum ioc_event event)
{
	bfa_trc(ioc, event);

	switch (event) {
	case IOC_E_ENABLED:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_getattr);
		break;

	case IOC_E_PFFAILED:
		/* !!! fall through !!! */
	case IOC_E_HWERROR:
		ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_fail);
		if (event != IOC_E_PFFAILED)
			bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_INITFAIL);
		break;

	case IOC_E_HWFAILED:
		ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_hwfail);
		break;

	case IOC_E_DISABLE:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
		break;

	case IOC_E_DETACH:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
		bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_STOP);
		break;

	case IOC_E_ENABLE:
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}

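/*
 * Firmware reports the IOC enabled: request IOC attributes from firmware
 * under the IOC timer.
 */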
static void
bfa_ioc_sm_getattr_entry(struct bfa_ioc_s *ioc)
{
	bfa_ioc_timer_start(ioc);
	bfa_ioc_send_getattr(ioc);
}

/*
 * IOC configuration in progress. Timer is active.
 */
static void
bfa_ioc_sm_getattr(struct bfa_ioc_s *ioc, enum ioc_event event)
{
	bfa_trc(ioc, event);

	switch (event) {
	case IOC_E_FWRSP_GETATTR:
		bfa_ioc_timer_stop(ioc);
		bfa_ioc_check_attr_wwns(ioc);
		bfa_ioc_hb_monitor(ioc);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_op);
		break;

	case IOC_E_FWRSP_ACQ_ADDR:
		bfa_ioc_timer_stop(ioc);
		bfa_ioc_hb_monitor(ioc);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_acq_addr);
		break;

	case IOC_E_PFFAILED:
	case IOC_E_HWERROR:
		bfa_ioc_timer_stop(ioc);
		/* !!! fall through !!! */
	case IOC_E_TIMEOUT:
		ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_fail);
		if (event != IOC_E_PFFAILED)
			bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_GETATTRFAIL);
		break;

	case IOC_E_DISABLE:
		bfa_ioc_timer_stop(ioc);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
		break;

	case IOC_E_ENABLE:
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}

/*
 * Acquiring address from fabric (entry function)
 */
static void
bfa_ioc_sm_acq_addr_entry(struct bfa_ioc_s *ioc)
{
}

/*
 * Acquiring address from the fabric
 */
static void
bfa_ioc_sm_acq_addr(struct bfa_ioc_s *ioc, enum ioc_event event)
{
	bfa_trc(ioc, event);

	switch (event) {
	case IOC_E_FWRSP_GETATTR:
		bfa_ioc_check_attr_wwns(ioc);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_op);
		break;

	case IOC_E_PFFAILED:
	case IOC_E_HWERROR:
		bfa_hb_timer_stop(ioc);
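		/* !!! fall through !!! */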
	case IOC_E_HBFAIL:
		ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_fail);
		if (event != IOC_E_PFFAILED)
			bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_GETATTRFAIL);
		break;

	case IOC_E_DISABLE:
		bfa_hb_timer_stop(ioc);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
		break;

	case IOC_E_ENABLE:
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}

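/*
 * IOC is operational: complete the pending enable callback, notify
 * registered modules, and log/post the IOC enable event.
 */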
static void
bfa_ioc_sm_op_entry(struct bfa_ioc_s *ioc)
{
	struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad;

	ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_OK);
	bfa_ioc_event_notify(ioc, BFA_IOC_E_ENABLED);
	BFA_LOG(KERN_INFO, bfad, bfa_log_level, "IOC enabled\n");
	bfa_ioc_aen_post(ioc, BFA_IOC_AEN_ENABLE);
}

static void
bfa_ioc_sm_op(struct bfa_ioc_s *ioc, enum ioc_event event)
{
	bfa_trc(ioc, event);

	switch (event) {
	case IOC_E_ENABLE:
		break;

	case IOC_E_DISABLE:
		bfa_hb_timer_stop(ioc);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
		break;

	case IOC_E_PFFAILED:
	case IOC_E_HWERROR:
		bfa_hb_timer_stop(ioc);
		/* !!! fall through !!! */
	case IOC_E_HBFAIL:
		if (ioc->iocpf.auto_recover)
			bfa_fsm_set_state(ioc, bfa_ioc_sm_fail_retry);
		else
			bfa_fsm_set_state(ioc, bfa_ioc_sm_fail);

		bfa_ioc_fail_notify(ioc);

		if (event != IOC_E_PFFAILED)
			bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_FAIL);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}


static void
bfa_ioc_sm_disabling_entry(struct bfa_ioc_s *ioc)
{
	struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad;
	bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_DISABLE);
	BFA_LOG(KERN_INFO, bfad, bfa_log_level, "IOC disabled\n");
	bfa_ioc_aen_post(ioc, BFA_IOC_AEN_DISABLE);
}

/*
 * IOC is being disabled
 */
static void
bfa_ioc_sm_disabling(struct bfa_ioc_s *ioc, enum ioc_event event)
{
	bfa_trc(ioc, event);

	switch (event) {
	case IOC_E_DISABLED:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled);
		break;

	case IOC_E_HWERROR:
		/*
		 * No state change. Will move to disabled state
		 * after iocpf sm completes failure processing and
		 * moves to disabled state.
		 */
		bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_FAIL);
		break;

	case IOC_E_HWFAILED:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_hwfail);
		bfa_ioc_disable_comp(ioc);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}

/*
 * IOC disable completion entry.
 */
static void
bfa_ioc_sm_disabled_entry(struct bfa_ioc_s *ioc)
{
	bfa_ioc_disable_comp(ioc);
}

static void
bfa_ioc_sm_disabled(struct bfa_ioc_s *ioc, enum ioc_event event)
{
	bfa_trc(ioc, event);

	switch (event) {
	case IOC_E_ENABLE:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_enabling);
		break;

	case IOC_E_DISABLE:
		ioc->cbfn->disable_cbfn(ioc->bfa);
		break;

	case IOC_E_DETACH:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
		bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_STOP);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}


static void
bfa_ioc_sm_fail_retry_entry(struct bfa_ioc_s *ioc)
{
	bfa_trc(ioc, 0);
}

/*
 * Hardware initialization retry.
 */
static void
bfa_ioc_sm_fail_retry(struct bfa_ioc_s *ioc, enum ioc_event event)
{
	bfa_trc(ioc, event);

	switch (event) {
	case IOC_E_ENABLED:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_getattr);
		break;

	case IOC_E_PFFAILED:
	case IOC_E_HWERROR:
		/*
		 * Initialization retry failed.
		 */
		ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_fail);
		if (event != IOC_E_PFFAILED)
			bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_INITFAIL);
		break;

	case IOC_E_HWFAILED:
		ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_hwfail);
		break;

	case IOC_E_ENABLE:
		break;

	case IOC_E_DISABLE:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
		break;

	case IOC_E_DETACH:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
		bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_STOP);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}


static void
bfa_ioc_sm_fail_entry(struct bfa_ioc_s *ioc)
{
	bfa_trc(ioc, 0);
}

/*
 * IOC failure.
 */
static void
bfa_ioc_sm_fail(struct bfa_ioc_s *ioc, enum ioc_event event)
{
	bfa_trc(ioc, event);

	switch (event) {

	case IOC_E_ENABLE:
		ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
		break;

	case IOC_E_DISABLE:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
		break;

	case IOC_E_DETACH:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
		bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_STOP);
		break;

	case IOC_E_HWERROR:
		/*
		 * HB failure notification, ignore.
		 */
		break;
	default:
		bfa_sm_fault(ioc, event);
	}
}

static void
bfa_ioc_sm_hwfail_entry(struct bfa_ioc_s *ioc)
{
	bfa_trc(ioc, 0);
}

static void
bfa_ioc_sm_hwfail(struct bfa_ioc_s *ioc, enum ioc_event event)
{
	bfa_trc(ioc, event);

	switch (event) {
	case IOC_E_ENABLE:
		ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
		break;

	case IOC_E_DISABLE:
		ioc->cbfn->disable_cbfn(ioc->bfa);
		break;

	case IOC_E_DETACH:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}

/*
 * IOCPF State Machine
 */

/*
 * Reset entry actions -- initialize state machine
 */
static void
bfa_iocpf_sm_reset_entry(struct bfa_iocpf_s *iocpf)
{
	iocpf->fw_mismatch_notified = BFA_FALSE;
	iocpf->auto_recover = bfa_auto_recover;
}

/*
 * Beginning state. IOC is in reset state.
 */
static void
bfa_iocpf_sm_reset(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
	struct bfa_ioc_s *ioc = iocpf->ioc;

	bfa_trc(ioc, event);

	switch (event) {
	case IOCPF_E_ENABLE:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fwcheck);
		break;

	case IOCPF_E_STOP:
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}

/*
 * Semaphore should be acquired for version check.
 */
static void
bfa_iocpf_sm_fwcheck_entry(struct bfa_iocpf_s *iocpf)
{
	struct bfi_ioc_image_hdr_s	fwhdr;
	u32	r32, fwstate, pgnum, pgoff, loff = 0;
	int	i;

	/*
	 * Spin on init semaphore to serialize.
	 */
	r32 = readl(iocpf->ioc->ioc_regs.ioc_init_sem_reg);
	while (r32 & 0x1) {
		udelay(20);
		r32 = readl(iocpf->ioc->ioc_regs.ioc_init_sem_reg);
	}

	/* h/w sem init */
	fwstate = readl(iocpf->ioc->ioc_regs.ioc_fwstate);
	if (fwstate == BFI_IOC_UNINIT) {
		writel(1, iocpf->ioc->ioc_regs.ioc_init_sem_reg);
		goto sem_get;
	}

	bfa_ioc_fwver_get(iocpf->ioc, &fwhdr);

	if (swab32(fwhdr.exec) == BFI_FWBOOT_TYPE_NORMAL) {
		writel(1, iocpf->ioc->ioc_regs.ioc_init_sem_reg);
		goto sem_get;
	}

	/*
	 * Clear fwver hdr
	 */
	pgnum = PSS_SMEM_PGNUM(iocpf->ioc->ioc_regs.smem_pg0, loff);
	pgoff = PSS_SMEM_PGOFF(loff);
	writel(pgnum, iocpf->ioc->ioc_regs.host_page_num_fn);

	for (i = 0; i < sizeof(struct bfi_ioc_image_hdr_s) / sizeof(u32); i++) {
		bfa_mem_write(iocpf->ioc->ioc_regs.smem_page_start, loff, 0);
		loff += sizeof(u32);
	}

	bfa_trc(iocpf->ioc, fwstate);
	bfa_trc(iocpf->ioc, swab32(fwhdr.exec));
	writel(BFI_IOC_UNINIT, iocpf->ioc->ioc_regs.ioc_fwstate);
	writel(BFI_IOC_UNINIT, iocpf->ioc->ioc_regs.alt_ioc_fwstate);

	/*
	 * Unlock the hw semaphore. Should be here only once per boot.
	 */
	readl(iocpf->ioc->ioc_regs.ioc_sem_reg);
	writel(1, iocpf->ioc->ioc_regs.ioc_sem_reg);

	/*
	 * unlock init semaphore.
	 */
	writel(1, iocpf->ioc->ioc_regs.ioc_init_sem_reg);

sem_get:
	bfa_ioc_hw_sem_get(iocpf->ioc);
}

/*
 * Awaiting h/w semaphore to continue with version check.
 */
static void
bfa_iocpf_sm_fwcheck(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
	struct bfa_ioc_s *ioc = iocpf->ioc;

	bfa_trc(ioc, event);

	switch (event) {
	case IOCPF_E_SEMLOCKED:
		if (bfa_ioc_firmware_lock(ioc)) {
			if (bfa_ioc_sync_start(ioc)) {
				bfa_ioc_sync_join(ioc);
				bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit);
			} else {
				bfa_ioc_firmware_unlock(ioc);
				writel(1, ioc->ioc_regs.ioc_sem_reg);
				bfa_sem_timer_start(ioc);
			}
		} else {
			writel(1, ioc->ioc_regs.ioc_sem_reg);
			bfa_fsm_set_state(iocpf, bfa_iocpf_sm_mismatch);
		}
		break;

	case IOCPF_E_SEM_ERROR:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
		bfa_fsm_send_event(ioc, IOC_E_HWFAILED);
		break;

	case IOCPF_E_DISABLE:
		bfa_sem_timer_stop(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
		bfa_fsm_send_event(ioc, IOC_E_DISABLED);
		break;

	case IOCPF_E_STOP:
		bfa_sem_timer_stop(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}

/*
 * Notify enable completion callback.
 */
static void
bfa_iocpf_sm_mismatch_entry(struct bfa_iocpf_s *iocpf)
{
	/*
	 * Call only the first time sm enters fwmismatch state.
	 */
	if (iocpf->fw_mismatch_notified == BFA_FALSE)
		bfa_ioc_pf_fwmismatch(iocpf->ioc);

	iocpf->fw_mismatch_notified = BFA_TRUE;
	bfa_iocpf_timer_start(iocpf->ioc);
}

/*
 * Awaiting firmware version match.
 */
static void
bfa_iocpf_sm_mismatch(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
	struct bfa_ioc_s *ioc = iocpf->ioc;

	bfa_trc(ioc, event);

	switch (event) {
	case IOCPF_E_TIMEOUT:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fwcheck);
		break;

	case IOCPF_E_DISABLE:
		bfa_iocpf_timer_stop(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
		bfa_fsm_send_event(ioc, IOC_E_DISABLED);
		break;

	case IOCPF_E_STOP:
		bfa_iocpf_timer_stop(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}

/*
 * Request for semaphore.
 */
static void
bfa_iocpf_sm_semwait_entry(struct bfa_iocpf_s *iocpf)
{
	bfa_ioc_hw_sem_get(iocpf->ioc);
}

/*
 * Awaiting semaphore for h/w initialization.
 */
static void
bfa_iocpf_sm_semwait(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
	struct bfa_ioc_s *ioc = iocpf->ioc;

	bfa_trc(ioc, event);

	switch (event) {
	case IOCPF_E_SEMLOCKED:
		if (bfa_ioc_sync_complete(ioc)) {
			bfa_ioc_sync_join(ioc);
			bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit);
		} else {
			writel(1, ioc->ioc_regs.ioc_sem_reg);
			bfa_sem_timer_start(ioc);
		}
		break;

	case IOCPF_E_SEM_ERROR:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
		bfa_fsm_send_event(ioc, IOC_E_HWFAILED);
		break;

	case IOCPF_E_DISABLE:
		bfa_sem_timer_stop(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}

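/*
 * Semaphore acquired: reset the firmware-ready poll count and kick off
 * hardware initialization.
 */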
static void
bfa_iocpf_sm_hwinit_entry(struct bfa_iocpf_s *iocpf)
{
	iocpf->poll_time = 0;
	bfa_ioc_hwinit(iocpf->ioc, BFA_FALSE);
}

/*
 * Hardware is being initialized. Interrupts are enabled.
 * Holding hardware semaphore lock.
 */
static void
bfa_iocpf_sm_hwinit(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
	struct bfa_ioc_s *ioc = iocpf->ioc;

	bfa_trc(ioc, event);

	switch (event) {
	case IOCPF_E_FWREADY:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_enabling);
		break;

	case IOCPF_E_TIMEOUT:
		writel(1, ioc->ioc_regs.ioc_sem_reg);
		bfa_fsm_send_event(ioc, IOC_E_PFFAILED);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail_sync);
		break;

	case IOCPF_E_DISABLE:
		bfa_iocpf_timer_stop(ioc);
		bfa_ioc_sync_leave(ioc);
		writel(1, ioc->ioc_regs.ioc_sem_reg);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}

static void
bfa_iocpf_sm_enabling_entry(struct bfa_iocpf_s *iocpf)
{
	bfa_iocpf_timer_start(iocpf->ioc);
	/*
	 * Enable Interrupts before sending fw IOC ENABLE cmd.
	 */
	iocpf->ioc->cbfn->reset_cbfn(iocpf->ioc->bfa);
	bfa_ioc_send_enable(iocpf->ioc);
}

/*
 * Host IOC function is being enabled, awaiting response from firmware.
 * Semaphore is acquired.
 */
static void
bfa_iocpf_sm_enabling(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
	struct bfa_ioc_s *ioc = iocpf->ioc;

	bfa_trc(ioc, event);

	switch (event) {
	case IOCPF_E_FWRSP_ENABLE:
		bfa_iocpf_timer_stop(ioc);
		writel(1, ioc->ioc_regs.ioc_sem_reg);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_ready);
		break;

	case IOCPF_E_INITFAIL:
		bfa_iocpf_timer_stop(ioc);
		/*
		 * !!! fall through !!!
		 */

	case IOCPF_E_TIMEOUT:
		writel(1, ioc->ioc_regs.ioc_sem_reg);
		if (event == IOCPF_E_TIMEOUT)
			bfa_fsm_send_event(ioc, IOC_E_PFFAILED);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail_sync);
		break;

	case IOCPF_E_DISABLE:
		bfa_iocpf_timer_stop(ioc);
		writel(1, ioc->ioc_regs.ioc_sem_reg);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}

static void
bfa_iocpf_sm_ready_entry(struct bfa_iocpf_s *iocpf)
{
	bfa_fsm_send_event(iocpf->ioc, IOC_E_ENABLED);
}

static void
bfa_iocpf_sm_ready(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
	struct bfa_ioc_s *ioc = iocpf->ioc;

	bfa_trc(ioc, event);

	switch (event) {
	case IOCPF_E_DISABLE:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling);
		break;

	case IOCPF_E_GETATTRFAIL:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail_sync);
		break;

	case IOCPF_E_FAIL:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail_sync);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}

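/*
 * Send a disable request to firmware and wait for the response under
 * the IOCPF timer.
 */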
static void
bfa_iocpf_sm_disabling_entry(struct bfa_iocpf_s *iocpf)
{
	bfa_iocpf_timer_start(iocpf->ioc);
	bfa_ioc_send_disable(iocpf->ioc);
}

/*
 * IOC is being disabled
 */
static void
bfa_iocpf_sm_disabling(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
	struct bfa_ioc_s *ioc = iocpf->ioc;

	bfa_trc(ioc, event);

	switch (event) {
	case IOCPF_E_FWRSP_DISABLE:
		bfa_iocpf_timer_stop(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
		break;

	case IOCPF_E_FAIL:
		bfa_iocpf_timer_stop(ioc);
		/*
		 * !!! fall through !!!
		 */

	case IOCPF_E_TIMEOUT:
		writel(BFI_IOC_FAIL, ioc->ioc_regs.ioc_fwstate);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
		break;

	case IOCPF_E_FWRSP_ENABLE:
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}

static void
bfa_iocpf_sm_disabling_sync_entry(struct bfa_iocpf_s *iocpf)
{
	bfa_ioc_hw_sem_get(iocpf->ioc);
}

/*
 * IOC hb ack request is being removed.
 */
static void
bfa_iocpf_sm_disabling_sync(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
	struct bfa_ioc_s *ioc = iocpf->ioc;

	bfa_trc(ioc, event);

	switch (event) {
	case IOCPF_E_SEMLOCKED:
		bfa_ioc_sync_leave(ioc);
		writel(1, ioc->ioc_regs.ioc_sem_reg);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
		break;

	case IOCPF_E_SEM_ERROR:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
		bfa_fsm_send_event(ioc, IOC_E_HWFAILED);
		break;

	case IOCPF_E_FAIL:
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}

/*
 * IOC disable completion entry.
 */
static void
bfa_iocpf_sm_disabled_entry(struct bfa_iocpf_s *iocpf)
{
	bfa_ioc_mbox_flush(iocpf->ioc);
	bfa_fsm_send_event(iocpf->ioc, IOC_E_DISABLED);
}

static void
bfa_iocpf_sm_disabled(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
	struct bfa_ioc_s *ioc = iocpf->ioc;

	bfa_trc(ioc, event);

	switch (event) {
	case IOCPF_E_ENABLE:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_semwait);
		break;

	case IOCPF_E_STOP:
		bfa_ioc_firmware_unlock(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}

static void
bfa_iocpf_sm_initfail_sync_entry(struct bfa_iocpf_s *iocpf)
{
	bfa_ioc_debug_save_ftrc(iocpf->ioc);
	bfa_ioc_hw_sem_get(iocpf->ioc);
}

/*
 * Hardware initialization failed.
 */
static void
bfa_iocpf_sm_initfail_sync(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
	struct bfa_ioc_s *ioc = iocpf->ioc;

	bfa_trc(ioc, event);

	switch (event) {
	case IOCPF_E_SEMLOCKED:
		bfa_ioc_notify_fail(ioc);
		bfa_ioc_sync_leave(ioc);
		writel(BFI_IOC_FAIL, ioc->ioc_regs.ioc_fwstate);
		writel(1, ioc->ioc_regs.ioc_sem_reg);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail);
		break;

	case IOCPF_E_SEM_ERROR:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
		bfa_fsm_send_event(ioc, IOC_E_HWFAILED);
		break;

	case IOCPF_E_DISABLE:
		bfa_sem_timer_stop(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
		break;

	case IOCPF_E_STOP:
		bfa_sem_timer_stop(ioc);
		bfa_ioc_firmware_unlock(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
		break;

	case IOCPF_E_FAIL:
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}

static void
bfa_iocpf_sm_initfail_entry(struct bfa_iocpf_s *iocpf)
{
	bfa_trc(iocpf->ioc, 0);
}

/*
 * Hardware initialization failed.
 */
static void
bfa_iocpf_sm_initfail(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
	struct bfa_ioc_s *ioc = iocpf->ioc;

	bfa_trc(ioc, event);

	switch (event) {
	case IOCPF_E_DISABLE:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
		break;

	case IOCPF_E_STOP:
		bfa_ioc_firmware_unlock(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}

static void
bfa_iocpf_sm_fail_sync_entry(struct bfa_iocpf_s *iocpf)
{
	/*
	 * Mark IOC as failed in hardware and stop firmware.
	 */
	bfa_ioc_lpu_stop(iocpf->ioc);

	/*
	 * Flush any queued up mailbox requests.
	 */
	bfa_ioc_mbox_flush(iocpf->ioc);

	bfa_ioc_hw_sem_get(iocpf->ioc);
}

static void
bfa_iocpf_sm_fail_sync(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
	struct bfa_ioc_s *ioc = iocpf->ioc;

	bfa_trc(ioc, event);

	switch (event) {
	case IOCPF_E_SEMLOCKED:
		bfa_ioc_sync_ack(ioc);
		bfa_ioc_notify_fail(ioc);
		if (!iocpf->auto_recover) {
			bfa_ioc_sync_leave(ioc);
			writel(BFI_IOC_FAIL, ioc->ioc_regs.ioc_fwstate);
			writel(1, ioc->ioc_regs.ioc_sem_reg);
			bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
		} else {
			if (bfa_ioc_sync_complete(ioc))
				bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit);
			else {
				writel(1, ioc->ioc_regs.ioc_sem_reg);
				bfa_fsm_set_state(iocpf, bfa_iocpf_sm_semwait);
			}
		}
		break;

	case IOCPF_E_SEM_ERROR:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
		bfa_fsm_send_event(ioc, IOC_E_HWFAILED);
		break;

	case IOCPF_E_DISABLE:
		bfa_sem_timer_stop(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
		break;

	case IOCPF_E_FAIL:
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}

static void
bfa_iocpf_sm_fail_entry(struct bfa_iocpf_s *iocpf)
{
	bfa_trc(iocpf->ioc, 0);
}

/*
 * IOC is in failed state.
 */
static void
bfa_iocpf_sm_fail(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
	struct bfa_ioc_s *ioc = iocpf->ioc;

	bfa_trc(ioc, event);

	switch (event) {
	case IOCPF_E_DISABLE:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}

/*
 * BFA IOC private functions
 */

/*
 * Notify common modules registered for notification.
 */
static void
bfa_ioc_event_notify(struct bfa_ioc_s *ioc, enum bfa_ioc_event_e event)
{
	struct bfa_ioc_notify_s	*notify;
	struct list_head	*qe;

	list_for_each(qe, &ioc->notify_q) {
		notify = (struct bfa_ioc_notify_s *)qe;
		notify->cbfn(notify->cbarg, event);
	}
}

static void
bfa_ioc_disable_comp(struct bfa_ioc_s *ioc)
{
	ioc->cbfn->disable_cbfn(ioc->bfa);
	bfa_ioc_event_notify(ioc, BFA_IOC_E_DISABLED);
}

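/*
 * Try to acquire a hardware semaphore register, spinning for a bounded
 * number of attempts; returns BFA_TRUE if the semaphore was obtained.
 */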
Krishna Gudipati0a20de42010-03-05 19:34:20 -08001366bfa_boolean_t
Jing Huang53440262010-10-18 17:12:29 -07001367bfa_ioc_sem_get(void __iomem *sem_reg)
Jing Huang7725ccf2009-09-23 17:46:15 -07001368{
Krishna Gudipati0a20de42010-03-05 19:34:20 -08001369 u32 r32;
1370 int cnt = 0;
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001371#define BFA_SEM_SPINCNT 3000
Jing Huang7725ccf2009-09-23 17:46:15 -07001372
Jing Huang53440262010-10-18 17:12:29 -07001373 r32 = readl(sem_reg);
Krishna Gudipati0a20de42010-03-05 19:34:20 -08001374
Krishna Gudipati11189202011-06-13 15:50:35 -07001375 while ((r32 & 1) && (cnt < BFA_SEM_SPINCNT)) {
Jing Huang7725ccf2009-09-23 17:46:15 -07001376 cnt++;
Jing Huang6a18b162010-10-18 17:08:54 -07001377 udelay(2);
Jing Huang53440262010-10-18 17:12:29 -07001378 r32 = readl(sem_reg);
Krishna Gudipati0a20de42010-03-05 19:34:20 -08001379 }
1380
Krishna Gudipati11189202011-06-13 15:50:35 -07001381 if (!(r32 & 1))
Krishna Gudipati0a20de42010-03-05 19:34:20 -08001382 return BFA_TRUE;
1383
Krishna Gudipati0a20de42010-03-05 19:34:20 -08001384 return BFA_FALSE;
Jing Huang7725ccf2009-09-23 17:46:15 -07001385}
1386
Jing Huang7725ccf2009-09-23 17:46:15 -07001387static void
1388bfa_ioc_hw_sem_get(struct bfa_ioc_s *ioc)
1389{
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001390 u32 r32;
Jing Huang7725ccf2009-09-23 17:46:15 -07001391
Jing Huang5fbe25c2010-10-18 17:17:23 -07001392 /*
Jing Huang7725ccf2009-09-23 17:46:15 -07001393 * First read to the semaphore register will return 0, subsequent reads
Krishna Gudipati0a20de42010-03-05 19:34:20 -08001394 * will return 1. Semaphore is released by writing 1 to the register
Jing Huang7725ccf2009-09-23 17:46:15 -07001395 */
Jing Huang53440262010-10-18 17:12:29 -07001396 r32 = readl(ioc->ioc_regs.ioc_sem_reg);
Krishna Gudipati5a0adae2011-06-24 20:22:56 -07001397 if (r32 == ~0) {
1398 WARN_ON(r32 == ~0);
1399 bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_SEM_ERROR);
1400 return;
1401 }
Krishna Gudipati11189202011-06-13 15:50:35 -07001402 if (!(r32 & 1)) {
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001403 bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_SEMLOCKED);
Jing Huang7725ccf2009-09-23 17:46:15 -07001404 return;
1405 }
1406
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001407 bfa_sem_timer_start(ioc);
Jing Huang7725ccf2009-09-23 17:46:15 -07001408}
1409
Jing Huang5fbe25c2010-10-18 17:17:23 -07001410/*
Jing Huang7725ccf2009-09-23 17:46:15 -07001411 * Initialize LPU local memory (aka secondary memory / SRAM)
1412 */
1413static void
1414bfa_ioc_lmem_init(struct bfa_ioc_s *ioc)
1415{
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001416 u32 pss_ctl;
1417 int i;
Jing Huang7725ccf2009-09-23 17:46:15 -07001418#define PSS_LMEM_INIT_TIME 10000
1419
Jing Huang53440262010-10-18 17:12:29 -07001420 pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg);
Jing Huang7725ccf2009-09-23 17:46:15 -07001421 pss_ctl &= ~__PSS_LMEM_RESET;
1422 pss_ctl |= __PSS_LMEM_INIT_EN;
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001423
1424 /*
	1425	 * I2C workaround: 12.5 KHz clock
1426 */
1427 pss_ctl |= __PSS_I2C_CLK_DIV(3UL);
Jing Huang53440262010-10-18 17:12:29 -07001428 writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);
Jing Huang7725ccf2009-09-23 17:46:15 -07001429
Jing Huang5fbe25c2010-10-18 17:17:23 -07001430 /*
Jing Huang7725ccf2009-09-23 17:46:15 -07001431 * wait for memory initialization to be complete
1432 */
1433 i = 0;
1434 do {
Jing Huang53440262010-10-18 17:12:29 -07001435 pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg);
Jing Huang7725ccf2009-09-23 17:46:15 -07001436 i++;
1437 } while (!(pss_ctl & __PSS_LMEM_INIT_DONE) && (i < PSS_LMEM_INIT_TIME));
1438
Jing Huang5fbe25c2010-10-18 17:17:23 -07001439 /*
Jing Huang7725ccf2009-09-23 17:46:15 -07001440 * If memory initialization is not successful, IOC timeout will catch
1441 * such failures.
1442 */
Jing Huangd4b671c2010-12-26 21:46:35 -08001443 WARN_ON(!(pss_ctl & __PSS_LMEM_INIT_DONE));
Jing Huang7725ccf2009-09-23 17:46:15 -07001444 bfa_trc(ioc, pss_ctl);
1445
1446 pss_ctl &= ~(__PSS_LMEM_INIT_DONE | __PSS_LMEM_INIT_EN);
Jing Huang53440262010-10-18 17:12:29 -07001447 writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);
Jing Huang7725ccf2009-09-23 17:46:15 -07001448}
1449
1450static void
1451bfa_ioc_lpu_start(struct bfa_ioc_s *ioc)
1452{
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001453 u32 pss_ctl;
Jing Huang7725ccf2009-09-23 17:46:15 -07001454
Jing Huang5fbe25c2010-10-18 17:17:23 -07001455 /*
Jing Huang7725ccf2009-09-23 17:46:15 -07001456 * Take processor out of reset.
1457 */
Jing Huang53440262010-10-18 17:12:29 -07001458 pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg);
Jing Huang7725ccf2009-09-23 17:46:15 -07001459 pss_ctl &= ~__PSS_LPU0_RESET;
1460
Jing Huang53440262010-10-18 17:12:29 -07001461 writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);
Jing Huang7725ccf2009-09-23 17:46:15 -07001462}
1463
1464static void
1465bfa_ioc_lpu_stop(struct bfa_ioc_s *ioc)
1466{
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001467 u32 pss_ctl;
Jing Huang7725ccf2009-09-23 17:46:15 -07001468
Jing Huang5fbe25c2010-10-18 17:17:23 -07001469 /*
Jing Huang7725ccf2009-09-23 17:46:15 -07001470 * Put processors in reset.
1471 */
Jing Huang53440262010-10-18 17:12:29 -07001472 pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg);
Jing Huang7725ccf2009-09-23 17:46:15 -07001473 pss_ctl |= (__PSS_LPU0_RESET | __PSS_LPU1_RESET);
1474
Jing Huang53440262010-10-18 17:12:29 -07001475 writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);
Jing Huang7725ccf2009-09-23 17:46:15 -07001476}
1477
Jing Huang5fbe25c2010-10-18 17:17:23 -07001478/*
Jing Huang7725ccf2009-09-23 17:46:15 -07001479 * Get driver and firmware versions.
1480 */
Krishna Gudipati0a20de42010-03-05 19:34:20 -08001481void
Jing Huang7725ccf2009-09-23 17:46:15 -07001482bfa_ioc_fwver_get(struct bfa_ioc_s *ioc, struct bfi_ioc_image_hdr_s *fwhdr)
1483{
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001484 u32 pgnum, pgoff;
1485 u32 loff = 0;
1486 int i;
1487 u32 *fwsig = (u32 *) fwhdr;
Jing Huang7725ccf2009-09-23 17:46:15 -07001488
Maggie Zhangf7f73812010-12-09 19:08:43 -08001489 pgnum = PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, loff);
1490 pgoff = PSS_SMEM_PGOFF(loff);
Jing Huang53440262010-10-18 17:12:29 -07001491 writel(pgnum, ioc->ioc_regs.host_page_num_fn);
Jing Huang7725ccf2009-09-23 17:46:15 -07001492
1493 for (i = 0; i < (sizeof(struct bfi_ioc_image_hdr_s) / sizeof(u32));
1494 i++) {
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001495 fwsig[i] =
1496 bfa_mem_read(ioc->ioc_regs.smem_page_start, loff);
Jing Huang7725ccf2009-09-23 17:46:15 -07001497 loff += sizeof(u32);
1498 }
1499}
1500
Jing Huang5fbe25c2010-10-18 17:17:23 -07001501/*
Jing Huang7725ccf2009-09-23 17:46:15 -07001502 * Returns TRUE if the two firmware headers carry the same md5 signature.
1503 */
Krishna Gudipati0a20de42010-03-05 19:34:20 -08001504bfa_boolean_t
Jing Huang7725ccf2009-09-23 17:46:15 -07001505bfa_ioc_fwver_cmp(struct bfa_ioc_s *ioc, struct bfi_ioc_image_hdr_s *fwhdr)
1506{
1507 struct bfi_ioc_image_hdr_s *drv_fwhdr;
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001508 int i;
Jing Huang7725ccf2009-09-23 17:46:15 -07001509
Jing Huang293f82d2010-07-08 19:45:20 -07001510 drv_fwhdr = (struct bfi_ioc_image_hdr_s *)
Krishna Gudipati11189202011-06-13 15:50:35 -07001511 bfa_cb_image_get_chunk(bfa_ioc_asic_gen(ioc), 0);
Jing Huang7725ccf2009-09-23 17:46:15 -07001512
1513 for (i = 0; i < BFI_IOC_MD5SUM_SZ; i++) {
1514 if (fwhdr->md5sum[i] != drv_fwhdr->md5sum[i]) {
1515 bfa_trc(ioc, i);
1516 bfa_trc(ioc, fwhdr->md5sum[i]);
1517 bfa_trc(ioc, drv_fwhdr->md5sum[i]);
1518 return BFA_FALSE;
1519 }
1520 }
1521
1522 bfa_trc(ioc, fwhdr->md5sum[0]);
1523 return BFA_TRUE;
1524}
1525
Jing Huang5fbe25c2010-10-18 17:17:23 -07001526/*
Jing Huang7725ccf2009-09-23 17:46:15 -07001527 * Return TRUE if the currently running firmware version is valid. Firmware signature and
	1528 * execution context (driver/BIOS) must match.
1529 */
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001530static bfa_boolean_t
1531bfa_ioc_fwver_valid(struct bfa_ioc_s *ioc, u32 boot_env)
Jing Huang7725ccf2009-09-23 17:46:15 -07001532{
1533 struct bfi_ioc_image_hdr_s fwhdr, *drv_fwhdr;
1534
Jing Huang7725ccf2009-09-23 17:46:15 -07001535 bfa_ioc_fwver_get(ioc, &fwhdr);
Jing Huang293f82d2010-07-08 19:45:20 -07001536 drv_fwhdr = (struct bfi_ioc_image_hdr_s *)
Krishna Gudipati11189202011-06-13 15:50:35 -07001537 bfa_cb_image_get_chunk(bfa_ioc_asic_gen(ioc), 0);
Jing Huang7725ccf2009-09-23 17:46:15 -07001538
1539 if (fwhdr.signature != drv_fwhdr->signature) {
1540 bfa_trc(ioc, fwhdr.signature);
1541 bfa_trc(ioc, drv_fwhdr->signature);
1542 return BFA_FALSE;
1543 }
1544
Krishna Gudipati11189202011-06-13 15:50:35 -07001545 if (swab32(fwhdr.bootenv) != boot_env) {
1546 bfa_trc(ioc, fwhdr.bootenv);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001547 bfa_trc(ioc, boot_env);
Jing Huang7725ccf2009-09-23 17:46:15 -07001548 return BFA_FALSE;
1549 }
1550
1551 return bfa_ioc_fwver_cmp(ioc, &fwhdr);
1552}
1553
Jing Huang5fbe25c2010-10-18 17:17:23 -07001554/*
Jing Huang7725ccf2009-09-23 17:46:15 -07001555 * Conditionally flush any pending message from firmware at start.
1556 */
1557static void
1558bfa_ioc_msgflush(struct bfa_ioc_s *ioc)
1559{
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001560 u32 r32;
Jing Huang7725ccf2009-09-23 17:46:15 -07001561
Jing Huang53440262010-10-18 17:12:29 -07001562 r32 = readl(ioc->ioc_regs.lpu_mbox_cmd);
Jing Huang7725ccf2009-09-23 17:46:15 -07001563 if (r32)
Jing Huang53440262010-10-18 17:12:29 -07001564 writel(1, ioc->ioc_regs.lpu_mbox_cmd);
Jing Huang7725ccf2009-09-23 17:46:15 -07001565}
1566
Jing Huang7725ccf2009-09-23 17:46:15 -07001567static void
1568bfa_ioc_hwinit(struct bfa_ioc_s *ioc, bfa_boolean_t force)
1569{
1570 enum bfi_ioc_state ioc_fwstate;
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001571 bfa_boolean_t fwvalid;
1572 u32 boot_type;
1573 u32 boot_env;
Jing Huang7725ccf2009-09-23 17:46:15 -07001574
Jing Huang53440262010-10-18 17:12:29 -07001575 ioc_fwstate = readl(ioc->ioc_regs.ioc_fwstate);
Jing Huang7725ccf2009-09-23 17:46:15 -07001576
1577 if (force)
1578 ioc_fwstate = BFI_IOC_UNINIT;
1579
1580 bfa_trc(ioc, ioc_fwstate);
1581
Krishna Gudipati11189202011-06-13 15:50:35 -07001582 boot_type = BFI_FWBOOT_TYPE_NORMAL;
1583 boot_env = BFI_FWBOOT_ENV_OS;
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001584
Jing Huang5fbe25c2010-10-18 17:17:23 -07001585 /*
Jing Huang7725ccf2009-09-23 17:46:15 -07001586 * check if firmware is valid
1587 */
1588 fwvalid = (ioc_fwstate == BFI_IOC_UNINIT) ?
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001589 BFA_FALSE : bfa_ioc_fwver_valid(ioc, boot_env);
Jing Huang7725ccf2009-09-23 17:46:15 -07001590
1591 if (!fwvalid) {
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001592 bfa_ioc_boot(ioc, boot_type, boot_env);
Krishna Gudipati8b070b42011-06-13 15:52:40 -07001593 bfa_ioc_poll_fwinit(ioc);
Jing Huang7725ccf2009-09-23 17:46:15 -07001594 return;
1595 }
1596
Jing Huang5fbe25c2010-10-18 17:17:23 -07001597 /*
Jing Huang7725ccf2009-09-23 17:46:15 -07001598 * If hardware initialization is in progress (initiated by the other IOC),
1599 * just wait for an initialization completion interrupt.
1600 */
1601 if (ioc_fwstate == BFI_IOC_INITING) {
Krishna Gudipati775c7742011-06-13 15:52:12 -07001602 bfa_ioc_poll_fwinit(ioc);
Jing Huang7725ccf2009-09-23 17:46:15 -07001603 return;
1604 }
1605
Jing Huang5fbe25c2010-10-18 17:17:23 -07001606 /*
Jing Huang7725ccf2009-09-23 17:46:15 -07001607 * If IOC function is disabled and firmware version is same,
1608 * just re-enable IOC.
Jing Huang07b28382010-07-08 19:59:24 -07001609 *
1610 * If option rom, IOC must not be in operational state. With
1611 * convergence, IOC will be in operational state when 2nd driver
1612 * is loaded.
Jing Huang7725ccf2009-09-23 17:46:15 -07001613 */
Jing Huang8f4bfad2010-12-26 21:50:10 -08001614 if (ioc_fwstate == BFI_IOC_DISABLED || ioc_fwstate == BFI_IOC_OP) {
Jing Huang7725ccf2009-09-23 17:46:15 -07001615
Jing Huang5fbe25c2010-10-18 17:17:23 -07001616 /*
Jing Huang7725ccf2009-09-23 17:46:15 -07001617 * When using MSI-X any pending firmware ready event should
1618 * be flushed. Otherwise MSI-X interrupts are not delivered.
1619 */
1620 bfa_ioc_msgflush(ioc);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001621 bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_FWREADY);
Jing Huang7725ccf2009-09-23 17:46:15 -07001622 return;
1623 }
1624
Jing Huang5fbe25c2010-10-18 17:17:23 -07001625 /*
Jing Huang7725ccf2009-09-23 17:46:15 -07001626 * Initialize the h/w for any other states.
1627 */
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001628 bfa_ioc_boot(ioc, boot_type, boot_env);
Krishna Gudipati8b070b42011-06-13 15:52:40 -07001629 bfa_ioc_poll_fwinit(ioc);
Jing Huang7725ccf2009-09-23 17:46:15 -07001630}
1631
1632static void
1633bfa_ioc_timeout(void *ioc_arg)
1634{
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001635 struct bfa_ioc_s *ioc = (struct bfa_ioc_s *) ioc_arg;
Jing Huang7725ccf2009-09-23 17:46:15 -07001636
1637 bfa_trc(ioc, 0);
1638 bfa_fsm_send_event(ioc, IOC_E_TIMEOUT);
1639}
1640
1641void
1642bfa_ioc_mbox_send(struct bfa_ioc_s *ioc, void *ioc_msg, int len)
1643{
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001644 u32 *msgp = (u32 *) ioc_msg;
1645 u32 i;
Jing Huang7725ccf2009-09-23 17:46:15 -07001646
1647 bfa_trc(ioc, msgp[0]);
1648 bfa_trc(ioc, len);
1649
Jing Huangd4b671c2010-12-26 21:46:35 -08001650 WARN_ON(len > BFI_IOC_MSGLEN_MAX);
Jing Huang7725ccf2009-09-23 17:46:15 -07001651
1652 /*
1653 * first write msg to mailbox registers
1654 */
1655 for (i = 0; i < len / sizeof(u32); i++)
Jing Huang53440262010-10-18 17:12:29 -07001656 writel(cpu_to_le32(msgp[i]),
1657 ioc->ioc_regs.hfn_mbox + i * sizeof(u32));
Jing Huang7725ccf2009-09-23 17:46:15 -07001658
1659 for (; i < BFI_IOC_MSGLEN_MAX / sizeof(u32); i++)
Jing Huang53440262010-10-18 17:12:29 -07001660 writel(0, ioc->ioc_regs.hfn_mbox + i * sizeof(u32));
Jing Huang7725ccf2009-09-23 17:46:15 -07001661
1662 /*
1663 * write 1 to mailbox CMD to trigger LPU event
1664 */
Jing Huang53440262010-10-18 17:12:29 -07001665 writel(1, ioc->ioc_regs.hfn_mbox_cmd);
1666 (void) readl(ioc->ioc_regs.hfn_mbox_cmd);
Jing Huang7725ccf2009-09-23 17:46:15 -07001667}
1668
1669static void
1670bfa_ioc_send_enable(struct bfa_ioc_s *ioc)
1671{
1672 struct bfi_ioc_ctrl_req_s enable_req;
Maggie Zhangf16a1752010-12-09 19:12:32 -08001673 struct timeval tv;
Jing Huang7725ccf2009-09-23 17:46:15 -07001674
1675 bfi_h2i_set(enable_req.mh, BFI_MC_IOC, BFI_IOC_H2I_ENABLE_REQ,
1676 bfa_ioc_portid(ioc));
Krishna Gudipatid37779f2011-06-13 15:42:10 -07001677 enable_req.clscode = cpu_to_be16(ioc->clscode);
Maggie Zhangf16a1752010-12-09 19:12:32 -08001678 do_gettimeofday(&tv);
Jing Huangba816ea2010-10-18 17:10:50 -07001679 enable_req.tv_sec = be32_to_cpu(tv.tv_sec);
Jing Huang7725ccf2009-09-23 17:46:15 -07001680 bfa_ioc_mbox_send(ioc, &enable_req, sizeof(struct bfi_ioc_ctrl_req_s));
1681}
1682
1683static void
1684bfa_ioc_send_disable(struct bfa_ioc_s *ioc)
1685{
1686 struct bfi_ioc_ctrl_req_s disable_req;
1687
1688 bfi_h2i_set(disable_req.mh, BFI_MC_IOC, BFI_IOC_H2I_DISABLE_REQ,
1689 bfa_ioc_portid(ioc));
1690 bfa_ioc_mbox_send(ioc, &disable_req, sizeof(struct bfi_ioc_ctrl_req_s));
1691}
1692
1693static void
1694bfa_ioc_send_getattr(struct bfa_ioc_s *ioc)
1695{
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001696 struct bfi_ioc_getattr_req_s attr_req;
Jing Huang7725ccf2009-09-23 17:46:15 -07001697
1698 bfi_h2i_set(attr_req.mh, BFI_MC_IOC, BFI_IOC_H2I_GETATTR_REQ,
1699 bfa_ioc_portid(ioc));
1700 bfa_dma_be_addr_set(attr_req.attr_addr, ioc->attr_dma.pa);
1701 bfa_ioc_mbox_send(ioc, &attr_req, sizeof(attr_req));
1702}
1703
1704static void
1705bfa_ioc_hb_check(void *cbarg)
1706{
Krishna Gudipati0a20de42010-03-05 19:34:20 -08001707 struct bfa_ioc_s *ioc = cbarg;
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001708 u32 hb_count;
Jing Huang7725ccf2009-09-23 17:46:15 -07001709
Jing Huang53440262010-10-18 17:12:29 -07001710 hb_count = readl(ioc->ioc_regs.heartbeat);
Jing Huang7725ccf2009-09-23 17:46:15 -07001711 if (ioc->hb_count == hb_count) {
Jing Huang7725ccf2009-09-23 17:46:15 -07001712 bfa_ioc_recover(ioc);
1713 return;
Krishna Gudipati0a20de42010-03-05 19:34:20 -08001714 } else {
1715 ioc->hb_count = hb_count;
Jing Huang7725ccf2009-09-23 17:46:15 -07001716 }
1717
1718 bfa_ioc_mbox_poll(ioc);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001719 bfa_hb_timer_start(ioc);
Jing Huang7725ccf2009-09-23 17:46:15 -07001720}
1721
1722static void
1723bfa_ioc_hb_monitor(struct bfa_ioc_s *ioc)
1724{
Jing Huang53440262010-10-18 17:12:29 -07001725 ioc->hb_count = readl(ioc->ioc_regs.heartbeat);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001726 bfa_hb_timer_start(ioc);
Jing Huang7725ccf2009-09-23 17:46:15 -07001727}
1728
Jing Huang5fbe25c2010-10-18 17:17:23 -07001729/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001730 * Initiate a full firmware download.
Jing Huang7725ccf2009-09-23 17:46:15 -07001731 */
1732static void
1733bfa_ioc_download_fw(struct bfa_ioc_s *ioc, u32 boot_type,
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001734 u32 boot_env)
Jing Huang7725ccf2009-09-23 17:46:15 -07001735{
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001736 u32 *fwimg;
1737 u32 pgnum, pgoff;
1738 u32 loff = 0;
1739 u32 chunkno = 0;
1740 u32 i;
Krishna Gudipati11189202011-06-13 15:50:35 -07001741 u32 asicmode;
Jing Huang7725ccf2009-09-23 17:46:15 -07001742
Krishna Gudipati11189202011-06-13 15:50:35 -07001743 bfa_trc(ioc, bfa_cb_image_get_size(bfa_ioc_asic_gen(ioc)));
1744 fwimg = bfa_cb_image_get_chunk(bfa_ioc_asic_gen(ioc), chunkno);
Jing Huang7725ccf2009-09-23 17:46:15 -07001745
Maggie Zhangf7f73812010-12-09 19:08:43 -08001746 pgnum = PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, loff);
1747 pgoff = PSS_SMEM_PGOFF(loff);
Jing Huang7725ccf2009-09-23 17:46:15 -07001748
Jing Huang53440262010-10-18 17:12:29 -07001749 writel(pgnum, ioc->ioc_regs.host_page_num_fn);
Jing Huang7725ccf2009-09-23 17:46:15 -07001750
Krishna Gudipati11189202011-06-13 15:50:35 -07001751 for (i = 0; i < bfa_cb_image_get_size(bfa_ioc_asic_gen(ioc)); i++) {
Jing Huang7725ccf2009-09-23 17:46:15 -07001752
Krishna Gudipati0a20de42010-03-05 19:34:20 -08001753 if (BFA_IOC_FLASH_CHUNK_NO(i) != chunkno) {
1754 chunkno = BFA_IOC_FLASH_CHUNK_NO(i);
Krishna Gudipati11189202011-06-13 15:50:35 -07001755 fwimg = bfa_cb_image_get_chunk(bfa_ioc_asic_gen(ioc),
Krishna Gudipati0a20de42010-03-05 19:34:20 -08001756 BFA_IOC_FLASH_CHUNK_ADDR(chunkno));
Jing Huang7725ccf2009-09-23 17:46:15 -07001757 }
1758
Jing Huang5fbe25c2010-10-18 17:17:23 -07001759 /*
Jing Huang7725ccf2009-09-23 17:46:15 -07001760 * write smem
1761 */
1762 bfa_mem_write(ioc->ioc_regs.smem_page_start, loff,
Krishna Gudipati0a20de42010-03-05 19:34:20 -08001763 fwimg[BFA_IOC_FLASH_OFFSET_IN_CHUNK(i)]);
Jing Huang7725ccf2009-09-23 17:46:15 -07001764
1765 loff += sizeof(u32);
1766
Jing Huang5fbe25c2010-10-18 17:17:23 -07001767 /*
Jing Huang7725ccf2009-09-23 17:46:15 -07001768 * handle page offset wrap around
1769 */
1770 loff = PSS_SMEM_PGOFF(loff);
1771 if (loff == 0) {
1772 pgnum++;
Jing Huang53440262010-10-18 17:12:29 -07001773 writel(pgnum, ioc->ioc_regs.host_page_num_fn);
Jing Huang7725ccf2009-09-23 17:46:15 -07001774 }
1775 }
1776
Maggie Zhangf7f73812010-12-09 19:08:43 -08001777 writel(PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, 0),
1778 ioc->ioc_regs.host_page_num_fn);
Krishna Gudipati13cc20c2010-03-05 19:37:29 -08001779
1780 /*
Krishna Gudipati11189202011-06-13 15:50:35 -07001781 * Set boot type and device mode at the end.
1782 */
1783 asicmode = BFI_FWBOOT_DEVMODE(ioc->asic_gen, ioc->asic_mode,
1784 ioc->port0_mode, ioc->port1_mode);
1785 bfa_mem_write(ioc->ioc_regs.smem_page_start, BFI_FWBOOT_DEVMODE_OFF,
1786 swab32(asicmode));
1787 bfa_mem_write(ioc->ioc_regs.smem_page_start, BFI_FWBOOT_TYPE_OFF,
Jing Huang53440262010-10-18 17:12:29 -07001788 swab32(boot_type));
Krishna Gudipati11189202011-06-13 15:50:35 -07001789 bfa_mem_write(ioc->ioc_regs.smem_page_start, BFI_FWBOOT_ENV_OFF,
Jing Huang53440262010-10-18 17:12:29 -07001790 swab32(boot_env));
Jing Huang7725ccf2009-09-23 17:46:15 -07001791}
1792
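/*
 * Sketch of the SMEM addressing pattern used above (and again by the smem
 * read/clear helpers below): a flat offset is split into a page number and a
 * page offset, the page is selected through host_page_num_fn, and the page
 * number is bumped whenever the offset wraps to 0. The page size itself stays
 * hidden behind the PSS_SMEM_PGNUM()/PSS_SMEM_PGOFF() macros; nwords/word[]
 * below are placeholders.
 *
 *	pgnum = PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, off);
 *	loff  = PSS_SMEM_PGOFF(off);
 *	writel(pgnum, ioc->ioc_regs.host_page_num_fn);
 *	for (i = 0; i < nwords; i++) {
 *		bfa_mem_write(ioc->ioc_regs.smem_page_start, loff, word[i]);
 *		loff = PSS_SMEM_PGOFF(loff + sizeof(u32));
 *		if (loff == 0)
 *			writel(++pgnum, ioc->ioc_regs.host_page_num_fn);
 *	}
 */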
Jing Huang7725ccf2009-09-23 17:46:15 -07001793
Jing Huang5fbe25c2010-10-18 17:17:23 -07001794/*
Jing Huang7725ccf2009-09-23 17:46:15 -07001795 * Update BFA configuration from firmware configuration.
1796 */
1797static void
1798bfa_ioc_getattr_reply(struct bfa_ioc_s *ioc)
1799{
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001800 struct bfi_ioc_attr_s *attr = ioc->attr;
Jing Huang7725ccf2009-09-23 17:46:15 -07001801
Jing Huangba816ea2010-10-18 17:10:50 -07001802 attr->adapter_prop = be32_to_cpu(attr->adapter_prop);
1803 attr->card_type = be32_to_cpu(attr->card_type);
1804 attr->maxfrsize = be16_to_cpu(attr->maxfrsize);
Krishna Gudipati5a0adae2011-06-24 20:22:56 -07001805 ioc->fcmode = (attr->port_mode == BFI_PORT_MODE_FC);
Jing Huang7725ccf2009-09-23 17:46:15 -07001806
1807 bfa_fsm_send_event(ioc, IOC_E_FWRSP_GETATTR);
1808}
1809
Jing Huang5fbe25c2010-10-18 17:17:23 -07001810/*
Jing Huang7725ccf2009-09-23 17:46:15 -07001811 * Attach time initialization of mbox logic.
1812 */
1813static void
1814bfa_ioc_mbox_attach(struct bfa_ioc_s *ioc)
1815{
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001816 struct bfa_ioc_mbox_mod_s *mod = &ioc->mbox_mod;
1817 int mc;
Jing Huang7725ccf2009-09-23 17:46:15 -07001818
1819 INIT_LIST_HEAD(&mod->cmd_q);
1820 for (mc = 0; mc < BFI_MC_MAX; mc++) {
1821 mod->mbhdlr[mc].cbfn = NULL;
1822 mod->mbhdlr[mc].cbarg = ioc->bfa;
1823 }
1824}
1825
Jing Huang5fbe25c2010-10-18 17:17:23 -07001826/*
Jing Huang7725ccf2009-09-23 17:46:15 -07001827 * Mbox poll timer -- restarts any pending mailbox requests.
1828 */
1829static void
1830bfa_ioc_mbox_poll(struct bfa_ioc_s *ioc)
1831{
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001832 struct bfa_ioc_mbox_mod_s *mod = &ioc->mbox_mod;
1833 struct bfa_mbox_cmd_s *cmd;
1834 u32 stat;
Jing Huang7725ccf2009-09-23 17:46:15 -07001835
Jing Huang5fbe25c2010-10-18 17:17:23 -07001836 /*
Jing Huang7725ccf2009-09-23 17:46:15 -07001837 * If no command pending, do nothing
1838 */
1839 if (list_empty(&mod->cmd_q))
1840 return;
1841
Jing Huang5fbe25c2010-10-18 17:17:23 -07001842 /*
Jing Huang7725ccf2009-09-23 17:46:15 -07001843 * If previous command is not yet fetched by firmware, do nothing
1844 */
Jing Huang53440262010-10-18 17:12:29 -07001845 stat = readl(ioc->ioc_regs.hfn_mbox_cmd);
Jing Huang7725ccf2009-09-23 17:46:15 -07001846 if (stat)
1847 return;
1848
Jing Huang5fbe25c2010-10-18 17:17:23 -07001849 /*
Jing Huang7725ccf2009-09-23 17:46:15 -07001850 * Enqueue command to firmware.
1851 */
1852 bfa_q_deq(&mod->cmd_q, &cmd);
1853 bfa_ioc_mbox_send(ioc, cmd->msg, sizeof(cmd->msg));
1854}
1855
Jing Huang5fbe25c2010-10-18 17:17:23 -07001856/*
Jing Huang7725ccf2009-09-23 17:46:15 -07001857 * Cleanup any pending requests.
1858 */
1859static void
Krishna Gudipati8b070b42011-06-13 15:52:40 -07001860bfa_ioc_mbox_flush(struct bfa_ioc_s *ioc)
Jing Huang7725ccf2009-09-23 17:46:15 -07001861{
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001862 struct bfa_ioc_mbox_mod_s *mod = &ioc->mbox_mod;
1863 struct bfa_mbox_cmd_s *cmd;
Jing Huang7725ccf2009-09-23 17:46:15 -07001864
1865 while (!list_empty(&mod->cmd_q))
1866 bfa_q_deq(&mod->cmd_q, &cmd);
1867}
1868
Jing Huang5fbe25c2010-10-18 17:17:23 -07001869/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001870 * Read data from SMEM to host through PCI memmap
1871 *
1872 * @param[in] ioc memory for IOC
1873 * @param[in] tbuf app memory to store data from smem
1874 * @param[in] soff smem offset
	1875 * @param[in]	sz	size of data to read, in bytes
Jing Huang7725ccf2009-09-23 17:46:15 -07001876 */
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001877static bfa_status_t
1878bfa_ioc_smem_read(struct bfa_ioc_s *ioc, void *tbuf, u32 soff, u32 sz)
1879{
Maggie50444a32010-11-29 18:26:32 -08001880 u32 pgnum, loff;
1881 __be32 r32;
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001882 int i, len;
1883 u32 *buf = tbuf;
1884
Maggie Zhangf7f73812010-12-09 19:08:43 -08001885 pgnum = PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, soff);
1886 loff = PSS_SMEM_PGOFF(soff);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001887 bfa_trc(ioc, pgnum);
1888 bfa_trc(ioc, loff);
1889 bfa_trc(ioc, sz);
1890
1891 /*
1892 * Hold semaphore to serialize pll init and fwtrc.
1893 */
1894 if (BFA_FALSE == bfa_ioc_sem_get(ioc->ioc_regs.ioc_init_sem_reg)) {
1895 bfa_trc(ioc, 0);
1896 return BFA_STATUS_FAILED;
1897 }
1898
Jing Huang53440262010-10-18 17:12:29 -07001899 writel(pgnum, ioc->ioc_regs.host_page_num_fn);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001900
1901 len = sz/sizeof(u32);
1902 bfa_trc(ioc, len);
1903 for (i = 0; i < len; i++) {
1904 r32 = bfa_mem_read(ioc->ioc_regs.smem_page_start, loff);
Jing Huangba816ea2010-10-18 17:10:50 -07001905 buf[i] = be32_to_cpu(r32);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001906 loff += sizeof(u32);
1907
Jing Huang5fbe25c2010-10-18 17:17:23 -07001908 /*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001909 * handle page offset wrap around
1910 */
1911 loff = PSS_SMEM_PGOFF(loff);
1912 if (loff == 0) {
1913 pgnum++;
Jing Huang53440262010-10-18 17:12:29 -07001914 writel(pgnum, ioc->ioc_regs.host_page_num_fn);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001915 }
1916 }
Maggie Zhangf7f73812010-12-09 19:08:43 -08001917 writel(PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, 0),
1918 ioc->ioc_regs.host_page_num_fn);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001919 /*
1920 * release semaphore.
1921 */
Krishna Gudipati5a0adae2011-06-24 20:22:56 -07001922 readl(ioc->ioc_regs.ioc_init_sem_reg);
Maggie Zhangf7f73812010-12-09 19:08:43 -08001923 writel(1, ioc->ioc_regs.ioc_init_sem_reg);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001924
1925 bfa_trc(ioc, pgnum);
1926 return BFA_STATUS_OK;
1927}
1928
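/*
 * Usage sketch: bfa_ioc_debug_fwtrc() below reads the firmware trace region
 * through this helper. The buffer is the caller's; the offset/length macros
 * are the ones the trace code elsewhere in this file already uses.
 *
 *	u32 loff = BFA_DBG_FWTRC_OFF(bfa_ioc_portid(ioc));
 *	int tlen = BFA_DBG_FWTRC_LEN;
 *
 *	if (bfa_ioc_smem_read(ioc, buf, loff, tlen) != BFA_STATUS_OK)
 *		return;		(init semaphore was busy)
 */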
Jing Huang5fbe25c2010-10-18 17:17:23 -07001929/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001930 * Clear SMEM data from host through PCI memmap
1931 *
1932 * @param[in] ioc memory for IOC
1933 * @param[in] soff smem offset
	1934 * @param[in]	sz	size of region to clear, in bytes
1935 */
1936static bfa_status_t
1937bfa_ioc_smem_clr(struct bfa_ioc_s *ioc, u32 soff, u32 sz)
1938{
1939 int i, len;
1940 u32 pgnum, loff;
1941
Maggie Zhangf7f73812010-12-09 19:08:43 -08001942 pgnum = PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, soff);
1943 loff = PSS_SMEM_PGOFF(soff);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001944 bfa_trc(ioc, pgnum);
1945 bfa_trc(ioc, loff);
1946 bfa_trc(ioc, sz);
1947
1948 /*
1949 * Hold semaphore to serialize pll init and fwtrc.
1950 */
1951 if (BFA_FALSE == bfa_ioc_sem_get(ioc->ioc_regs.ioc_init_sem_reg)) {
1952 bfa_trc(ioc, 0);
1953 return BFA_STATUS_FAILED;
1954 }
1955
Jing Huang53440262010-10-18 17:12:29 -07001956 writel(pgnum, ioc->ioc_regs.host_page_num_fn);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001957
1958 len = sz/sizeof(u32); /* len in words */
1959 bfa_trc(ioc, len);
1960 for (i = 0; i < len; i++) {
1961 bfa_mem_write(ioc->ioc_regs.smem_page_start, loff, 0);
1962 loff += sizeof(u32);
1963
Jing Huang5fbe25c2010-10-18 17:17:23 -07001964 /*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001965 * handle page offset wrap around
1966 */
1967 loff = PSS_SMEM_PGOFF(loff);
1968 if (loff == 0) {
1969 pgnum++;
Jing Huang53440262010-10-18 17:12:29 -07001970 writel(pgnum, ioc->ioc_regs.host_page_num_fn);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001971 }
1972 }
Maggie Zhangf7f73812010-12-09 19:08:43 -08001973 writel(PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, 0),
1974 ioc->ioc_regs.host_page_num_fn);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001975
1976 /*
1977 * release semaphore.
1978 */
Krishna Gudipati5a0adae2011-06-24 20:22:56 -07001979 readl(ioc->ioc_regs.ioc_init_sem_reg);
Maggie Zhangf7f73812010-12-09 19:08:43 -08001980 writel(1, ioc->ioc_regs.ioc_init_sem_reg);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001981 bfa_trc(ioc, pgnum);
1982 return BFA_STATUS_OK;
1983}
1984
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001985static void
Krishna Gudipati4e78efe2010-12-13 16:16:09 -08001986bfa_ioc_fail_notify(struct bfa_ioc_s *ioc)
1987{
Krishna Gudipati4e78efe2010-12-13 16:16:09 -08001988 struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad;
1989
Jing Huang8f4bfad2010-12-26 21:50:10 -08001990 /*
Krishna Gudipati4e78efe2010-12-13 16:16:09 -08001991 * Notify driver and common modules registered for notification.
1992 */
1993 ioc->cbfn->hbfail_cbfn(ioc->bfa);
Krishna Gudipatid37779f2011-06-13 15:42:10 -07001994 bfa_ioc_event_notify(ioc, BFA_IOC_E_FAILED);
Krishna Gudipati4e78efe2010-12-13 16:16:09 -08001995
1996 bfa_ioc_debug_save_ftrc(ioc);
1997
1998 BFA_LOG(KERN_CRIT, bfad, bfa_log_level,
1999 "Heart Beat of IOC has failed\n");
Krishna Gudipati7826f302011-07-20 16:59:13 -07002000 bfa_ioc_aen_post(ioc, BFA_IOC_AEN_HBFAIL);
Krishna Gudipati4e78efe2010-12-13 16:16:09 -08002001
2002}
2003
2004static void
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002005bfa_ioc_pf_fwmismatch(struct bfa_ioc_s *ioc)
2006{
2007 struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad;
Jing Huang5fbe25c2010-10-18 17:17:23 -07002008 /*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002009 * Provide enable completion callback.
2010 */
2011 ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
Jing Huang88166242010-12-09 17:11:53 -08002012 BFA_LOG(KERN_WARNING, bfad, bfa_log_level,
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002013 "Running firmware version is incompatible "
2014 "with the driver version\n");
Krishna Gudipati7826f302011-07-20 16:59:13 -07002015 bfa_ioc_aen_post(ioc, BFA_IOC_AEN_FWMISMATCH);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002016}
2017
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002018bfa_status_t
2019bfa_ioc_pll_init(struct bfa_ioc_s *ioc)
2020{
2021
2022 /*
2023 * Hold semaphore so that nobody can access the chip during init.
2024 */
2025 bfa_ioc_sem_get(ioc->ioc_regs.ioc_init_sem_reg);
2026
2027 bfa_ioc_pll_init_asic(ioc);
2028
2029 ioc->pllinit = BFA_TRUE;
Krishna Gudipati89196782012-03-13 17:38:56 -07002030
2031 /*
2032 * Initialize LMEM
2033 */
2034 bfa_ioc_lmem_init(ioc);
2035
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002036 /*
2037 * release semaphore.
2038 */
Krishna Gudipati5a0adae2011-06-24 20:22:56 -07002039 readl(ioc->ioc_regs.ioc_init_sem_reg);
Maggie Zhangf7f73812010-12-09 19:08:43 -08002040 writel(1, ioc->ioc_regs.ioc_init_sem_reg);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002041
2042 return BFA_STATUS_OK;
2043}
Jing Huang7725ccf2009-09-23 17:46:15 -07002044
Jing Huang5fbe25c2010-10-18 17:17:23 -07002045/*
Jing Huang7725ccf2009-09-23 17:46:15 -07002046 * Interface used by diag module to do firmware boot with memory test
2047 * as the entry vector.
2048 */
2049void
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002050bfa_ioc_boot(struct bfa_ioc_s *ioc, u32 boot_type, u32 boot_env)
Jing Huang7725ccf2009-09-23 17:46:15 -07002051{
Jing Huang7725ccf2009-09-23 17:46:15 -07002052 bfa_ioc_stats(ioc, ioc_boots);
2053
2054 if (bfa_ioc_pll_init(ioc) != BFA_STATUS_OK)
2055 return;
2056
Jing Huang5fbe25c2010-10-18 17:17:23 -07002057 /*
Jing Huang7725ccf2009-09-23 17:46:15 -07002058 * Initialize IOC state of all functions on a chip reset.
2059 */
Krishna Gudipati11189202011-06-13 15:50:35 -07002060 if (boot_type == BFI_FWBOOT_TYPE_MEMTEST) {
2061 writel(BFI_IOC_MEMTEST, ioc->ioc_regs.ioc_fwstate);
2062 writel(BFI_IOC_MEMTEST, ioc->ioc_regs.alt_ioc_fwstate);
Jing Huang7725ccf2009-09-23 17:46:15 -07002063 } else {
Krishna Gudipati11189202011-06-13 15:50:35 -07002064 writel(BFI_IOC_INITING, ioc->ioc_regs.ioc_fwstate);
2065 writel(BFI_IOC_INITING, ioc->ioc_regs.alt_ioc_fwstate);
Jing Huang7725ccf2009-09-23 17:46:15 -07002066 }
2067
Jing Huang07b28382010-07-08 19:59:24 -07002068 bfa_ioc_msgflush(ioc);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002069 bfa_ioc_download_fw(ioc, boot_type, boot_env);
Jing Huang7725ccf2009-09-23 17:46:15 -07002070 bfa_ioc_lpu_start(ioc);
2071}
2072
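/*
 * Usage sketch: bfa_ioc_hwinit() above boots with BFI_FWBOOT_TYPE_NORMAL and
 * BFI_FWBOOT_ENV_OS; a diagnostics caller would instead request the memory
 * test entry vector. Whether the diag path follows the boot with
 * bfa_ioc_poll_fwinit() exactly as shown is an assumption modelled on the
 * hwinit path.
 *
 *	bfa_ioc_boot(ioc, BFI_FWBOOT_TYPE_MEMTEST, BFI_FWBOOT_ENV_OS);
 *	bfa_ioc_poll_fwinit(ioc);
 */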
Jing Huang5fbe25c2010-10-18 17:17:23 -07002073/*
Jing Huang7725ccf2009-09-23 17:46:15 -07002074 * Enable/disable IOC failure auto recovery.
2075 */
2076void
2077bfa_ioc_auto_recover(bfa_boolean_t auto_recover)
2078{
Krishna Gudipati2f9b8852010-03-03 17:42:51 -08002079 bfa_auto_recover = auto_recover;
Jing Huang7725ccf2009-09-23 17:46:15 -07002080}
2081
2082
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002083
Jing Huang7725ccf2009-09-23 17:46:15 -07002084bfa_boolean_t
2085bfa_ioc_is_operational(struct bfa_ioc_s *ioc)
2086{
2087 return bfa_fsm_cmp_state(ioc, bfa_ioc_sm_op);
2088}
2089
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002090bfa_boolean_t
2091bfa_ioc_is_initialized(struct bfa_ioc_s *ioc)
2092{
Jing Huang53440262010-10-18 17:12:29 -07002093 u32 r32 = readl(ioc->ioc_regs.ioc_fwstate);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002094
2095 return ((r32 != BFI_IOC_UNINIT) &&
2096 (r32 != BFI_IOC_INITING) &&
2097 (r32 != BFI_IOC_MEMTEST));
2098}
2099
Krishna Gudipati11189202011-06-13 15:50:35 -07002100bfa_boolean_t
Jing Huang7725ccf2009-09-23 17:46:15 -07002101bfa_ioc_msgget(struct bfa_ioc_s *ioc, void *mbmsg)
2102{
Maggie50444a32010-11-29 18:26:32 -08002103 __be32 *msgp = mbmsg;
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002104 u32 r32;
2105 int i;
Jing Huang7725ccf2009-09-23 17:46:15 -07002106
Krishna Gudipati11189202011-06-13 15:50:35 -07002107 r32 = readl(ioc->ioc_regs.lpu_mbox_cmd);
2108 if ((r32 & 1) == 0)
2109 return BFA_FALSE;
2110
Jing Huang5fbe25c2010-10-18 17:17:23 -07002111 /*
Jing Huang7725ccf2009-09-23 17:46:15 -07002112 * read the MBOX msg
2113 */
2114 for (i = 0; i < (sizeof(union bfi_ioc_i2h_msg_u) / sizeof(u32));
2115 i++) {
Jing Huang53440262010-10-18 17:12:29 -07002116 r32 = readl(ioc->ioc_regs.lpu_mbox +
Jing Huang7725ccf2009-09-23 17:46:15 -07002117 i * sizeof(u32));
Jing Huangba816ea2010-10-18 17:10:50 -07002118 msgp[i] = cpu_to_be32(r32);
Jing Huang7725ccf2009-09-23 17:46:15 -07002119 }
2120
Jing Huang5fbe25c2010-10-18 17:17:23 -07002121 /*
Jing Huang7725ccf2009-09-23 17:46:15 -07002122 * turn off mailbox interrupt by clearing mailbox status
2123 */
Jing Huang53440262010-10-18 17:12:29 -07002124 writel(1, ioc->ioc_regs.lpu_mbox_cmd);
2125 readl(ioc->ioc_regs.lpu_mbox_cmd);
Krishna Gudipati11189202011-06-13 15:50:35 -07002126
2127 return BFA_TRUE;
Jing Huang7725ccf2009-09-23 17:46:15 -07002128}
2129
2130void
2131bfa_ioc_isr(struct bfa_ioc_s *ioc, struct bfi_mbmsg_s *m)
2132{
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002133 union bfi_ioc_i2h_msg_u *msg;
2134 struct bfa_iocpf_s *iocpf = &ioc->iocpf;
Jing Huang7725ccf2009-09-23 17:46:15 -07002135
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002136 msg = (union bfi_ioc_i2h_msg_u *) m;
Jing Huang7725ccf2009-09-23 17:46:15 -07002137
2138 bfa_ioc_stats(ioc, ioc_isrs);
2139
2140 switch (msg->mh.msg_id) {
2141 case BFI_IOC_I2H_HBEAT:
2142 break;
2143
Jing Huang7725ccf2009-09-23 17:46:15 -07002144 case BFI_IOC_I2H_ENABLE_REPLY:
Krishna Gudipati1a4d8e12011-06-24 20:22:28 -07002145 ioc->port_mode = ioc->port_mode_cfg =
2146 (enum bfa_mode_s)msg->fw_event.port_mode;
2147 ioc->ad_cap_bm = msg->fw_event.cap_bm;
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002148 bfa_fsm_send_event(iocpf, IOCPF_E_FWRSP_ENABLE);
Jing Huang7725ccf2009-09-23 17:46:15 -07002149 break;
2150
2151 case BFI_IOC_I2H_DISABLE_REPLY:
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002152 bfa_fsm_send_event(iocpf, IOCPF_E_FWRSP_DISABLE);
Jing Huang7725ccf2009-09-23 17:46:15 -07002153 break;
2154
2155 case BFI_IOC_I2H_GETATTR_REPLY:
2156 bfa_ioc_getattr_reply(ioc);
2157 break;
2158
Krishna Gudipatia7141342011-06-24 20:23:19 -07002159 case BFI_IOC_I2H_ACQ_ADDR_REPLY:
2160 bfa_fsm_send_event(ioc, IOC_E_FWRSP_ACQ_ADDR);
2161 break;
2162
Jing Huang7725ccf2009-09-23 17:46:15 -07002163 default:
2164 bfa_trc(ioc, msg->mh.msg_id);
Jing Huangd4b671c2010-12-26 21:46:35 -08002165 WARN_ON(1);
Jing Huang7725ccf2009-09-23 17:46:15 -07002166 }
2167}
2168
Jing Huang5fbe25c2010-10-18 17:17:23 -07002169/*
Jing Huang7725ccf2009-09-23 17:46:15 -07002170 * IOC attach time initialization and setup.
2171 *
2172 * @param[in] ioc memory for IOC
2173 * @param[in] bfa driver instance structure
Jing Huang7725ccf2009-09-23 17:46:15 -07002174 */
2175void
2176bfa_ioc_attach(struct bfa_ioc_s *ioc, void *bfa, struct bfa_ioc_cbfn_s *cbfn,
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002177 struct bfa_timer_mod_s *timer_mod)
Jing Huang7725ccf2009-09-23 17:46:15 -07002178{
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002179 ioc->bfa = bfa;
2180 ioc->cbfn = cbfn;
2181 ioc->timer_mod = timer_mod;
2182 ioc->fcmode = BFA_FALSE;
2183 ioc->pllinit = BFA_FALSE;
Jing Huang7725ccf2009-09-23 17:46:15 -07002184 ioc->dbg_fwsave_once = BFA_TRUE;
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002185 ioc->iocpf.ioc = ioc;
Jing Huang7725ccf2009-09-23 17:46:15 -07002186
2187 bfa_ioc_mbox_attach(ioc);
Krishna Gudipatid37779f2011-06-13 15:42:10 -07002188 INIT_LIST_HEAD(&ioc->notify_q);
Jing Huang7725ccf2009-09-23 17:46:15 -07002189
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002190 bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
2191 bfa_fsm_send_event(ioc, IOC_E_RESET);
Jing Huang7725ccf2009-09-23 17:46:15 -07002192}
2193
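/*
 * Bring-up sketch, pieced together from the entry points in this file: the
 * driver attaches the IOC, describes the PCI function, claims DMA and trace
 * memory, and only then enables it. The exact ordering of the middle steps is
 * an assumption; the only ordering stated in this file is that trace memory
 * must be claimed before bfa_ioc_enable() (see bfa_ioc_debug_memclaim()).
 *
 *	bfa_ioc_attach(ioc, bfa, cbfn, timer_mod);
 *	bfa_ioc_pci_init(ioc, pcidev, clscode);
 *	bfa_ioc_mem_claim(ioc, dm_kva, dm_pa);
 *	bfa_ioc_debug_memclaim(ioc, dbg_fwsave);
 *	bfa_ioc_enable(ioc);
 */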
Jing Huang5fbe25c2010-10-18 17:17:23 -07002194/*
Jing Huang7725ccf2009-09-23 17:46:15 -07002195 * Driver detach time IOC cleanup.
2196 */
2197void
2198bfa_ioc_detach(struct bfa_ioc_s *ioc)
2199{
2200 bfa_fsm_send_event(ioc, IOC_E_DETACH);
Krishna Gudipati3350d982011-06-24 20:28:37 -07002201 INIT_LIST_HEAD(&ioc->notify_q);
Jing Huang7725ccf2009-09-23 17:46:15 -07002202}
2203
Jing Huang5fbe25c2010-10-18 17:17:23 -07002204/*
Jing Huang7725ccf2009-09-23 17:46:15 -07002205 * Setup IOC PCI properties.
2206 *
	2207 * @param[in]	pcidev	PCI device information for this IOC
 * @param[in]	clscode	PCI function class code (FC or Ethernet)
2208 */
2209void
2210bfa_ioc_pci_init(struct bfa_ioc_s *ioc, struct bfa_pcidev_s *pcidev,
Krishna Gudipatid37779f2011-06-13 15:42:10 -07002211 enum bfi_pcifn_class clscode)
Jing Huang7725ccf2009-09-23 17:46:15 -07002212{
Krishna Gudipatid37779f2011-06-13 15:42:10 -07002213 ioc->clscode = clscode;
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002214 ioc->pcidev = *pcidev;
Krishna Gudipati11189202011-06-13 15:50:35 -07002215
2216 /*
2217 * Initialize IOC and device personality
2218 */
2219 ioc->port0_mode = ioc->port1_mode = BFI_PORT_MODE_FC;
2220 ioc->asic_mode = BFI_ASIC_MODE_FC;
2221
2222 switch (pcidev->device_id) {
2223 case BFA_PCI_DEVICE_ID_FC_8G1P:
2224 case BFA_PCI_DEVICE_ID_FC_8G2P:
2225 ioc->asic_gen = BFI_ASIC_GEN_CB;
Krishna Gudipati1a4d8e12011-06-24 20:22:28 -07002226 ioc->fcmode = BFA_TRUE;
2227 ioc->port_mode = ioc->port_mode_cfg = BFA_MODE_HBA;
2228 ioc->ad_cap_bm = BFA_CM_HBA;
Krishna Gudipati11189202011-06-13 15:50:35 -07002229 break;
2230
2231 case BFA_PCI_DEVICE_ID_CT:
2232 ioc->asic_gen = BFI_ASIC_GEN_CT;
2233 ioc->port0_mode = ioc->port1_mode = BFI_PORT_MODE_ETH;
2234 ioc->asic_mode = BFI_ASIC_MODE_ETH;
Krishna Gudipati1a4d8e12011-06-24 20:22:28 -07002235 ioc->port_mode = ioc->port_mode_cfg = BFA_MODE_CNA;
2236 ioc->ad_cap_bm = BFA_CM_CNA;
Krishna Gudipati11189202011-06-13 15:50:35 -07002237 break;
2238
2239 case BFA_PCI_DEVICE_ID_CT_FC:
2240 ioc->asic_gen = BFI_ASIC_GEN_CT;
Krishna Gudipati1a4d8e12011-06-24 20:22:28 -07002241 ioc->fcmode = BFA_TRUE;
2242 ioc->port_mode = ioc->port_mode_cfg = BFA_MODE_HBA;
2243 ioc->ad_cap_bm = BFA_CM_HBA;
Krishna Gudipati11189202011-06-13 15:50:35 -07002244 break;
2245
2246 case BFA_PCI_DEVICE_ID_CT2:
2247 ioc->asic_gen = BFI_ASIC_GEN_CT2;
Krishna Gudipati1a4d8e12011-06-24 20:22:28 -07002248 if (clscode == BFI_PCIFN_CLASS_FC &&
2249 pcidev->ssid == BFA_PCI_CT2_SSID_FC) {
Krishna Gudipati11189202011-06-13 15:50:35 -07002250 ioc->asic_mode = BFI_ASIC_MODE_FC16;
Krishna Gudipati1a4d8e12011-06-24 20:22:28 -07002251 ioc->fcmode = BFA_TRUE;
2252 ioc->port_mode = ioc->port_mode_cfg = BFA_MODE_HBA;
2253 ioc->ad_cap_bm = BFA_CM_HBA;
2254 } else {
Krishna Gudipati11189202011-06-13 15:50:35 -07002255 ioc->port0_mode = ioc->port1_mode = BFI_PORT_MODE_ETH;
Krishna Gudipati1a4d8e12011-06-24 20:22:28 -07002256 ioc->asic_mode = BFI_ASIC_MODE_ETH;
2257 if (pcidev->ssid == BFA_PCI_CT2_SSID_FCoE) {
2258 ioc->port_mode =
2259 ioc->port_mode_cfg = BFA_MODE_CNA;
2260 ioc->ad_cap_bm = BFA_CM_CNA;
2261 } else {
2262 ioc->port_mode =
2263 ioc->port_mode_cfg = BFA_MODE_NIC;
2264 ioc->ad_cap_bm = BFA_CM_NIC;
2265 }
Krishna Gudipati11189202011-06-13 15:50:35 -07002266 }
2267 break;
2268
2269 default:
2270 WARN_ON(1);
2271 }
Jing Huang7725ccf2009-09-23 17:46:15 -07002272
Jing Huang5fbe25c2010-10-18 17:17:23 -07002273 /*
Krishna Gudipati0a20de42010-03-05 19:34:20 -08002274 * Set asic specific interfaces. See bfa_ioc_cb.c and bfa_ioc_ct.c
2275 */
Krishna Gudipati11189202011-06-13 15:50:35 -07002276 if (ioc->asic_gen == BFI_ASIC_GEN_CB)
Krishna Gudipati0a20de42010-03-05 19:34:20 -08002277 bfa_ioc_set_cb_hwif(ioc);
Krishna Gudipati11189202011-06-13 15:50:35 -07002278 else if (ioc->asic_gen == BFI_ASIC_GEN_CT)
2279 bfa_ioc_set_ct_hwif(ioc);
2280 else {
2281 WARN_ON(ioc->asic_gen != BFI_ASIC_GEN_CT2);
2282 bfa_ioc_set_ct2_hwif(ioc);
2283 bfa_ioc_ct2_poweron(ioc);
2284 }
Krishna Gudipati0a20de42010-03-05 19:34:20 -08002285
Jing Huang7725ccf2009-09-23 17:46:15 -07002286 bfa_ioc_map_port(ioc);
2287 bfa_ioc_reg_init(ioc);
2288}
2289
Jing Huang5fbe25c2010-10-18 17:17:23 -07002290/*
Jing Huang7725ccf2009-09-23 17:46:15 -07002291 * Initialize IOC dma memory
2292 *
2293 * @param[in] dm_kva kernel virtual address of IOC dma memory
2294 * @param[in] dm_pa physical address of IOC dma memory
2295 */
2296void
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002297bfa_ioc_mem_claim(struct bfa_ioc_s *ioc, u8 *dm_kva, u64 dm_pa)
Jing Huang7725ccf2009-09-23 17:46:15 -07002298{
Jing Huang5fbe25c2010-10-18 17:17:23 -07002299 /*
Jing Huang7725ccf2009-09-23 17:46:15 -07002300 * dma memory for firmware attribute
2301 */
2302 ioc->attr_dma.kva = dm_kva;
2303 ioc->attr_dma.pa = dm_pa;
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002304 ioc->attr = (struct bfi_ioc_attr_s *) dm_kva;
Jing Huang7725ccf2009-09-23 17:46:15 -07002305}
2306
Jing Huang7725ccf2009-09-23 17:46:15 -07002307void
2308bfa_ioc_enable(struct bfa_ioc_s *ioc)
2309{
2310 bfa_ioc_stats(ioc, ioc_enables);
2311 ioc->dbg_fwsave_once = BFA_TRUE;
2312
2313 bfa_fsm_send_event(ioc, IOC_E_ENABLE);
2314}
2315
2316void
2317bfa_ioc_disable(struct bfa_ioc_s *ioc)
2318{
2319 bfa_ioc_stats(ioc, ioc_disables);
2320 bfa_fsm_send_event(ioc, IOC_E_DISABLE);
2321}
2322
Jing Huang7725ccf2009-09-23 17:46:15 -07002323
Jing Huang5fbe25c2010-10-18 17:17:23 -07002324/*
Jing Huang7725ccf2009-09-23 17:46:15 -07002325 * Initialize memory for saving firmware trace. The driver must initialize
	2326 * trace memory before calling bfa_ioc_enable().
2327 */
2328void
2329bfa_ioc_debug_memclaim(struct bfa_ioc_s *ioc, void *dbg_fwsave)
2330{
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002331 ioc->dbg_fwsave = dbg_fwsave;
Maggie Zhangf7f73812010-12-09 19:08:43 -08002332 ioc->dbg_fwsave_len = (ioc->iocpf.auto_recover) ? BFA_DBG_FWTRC_LEN : 0;
Jing Huang7725ccf2009-09-23 17:46:15 -07002333}
2334
Jing Huang5fbe25c2010-10-18 17:17:23 -07002335/*
Jing Huang7725ccf2009-09-23 17:46:15 -07002336 * Register mailbox message handler functions
2337 *
2338 * @param[in] ioc IOC instance
2339 * @param[in] mcfuncs message class handler functions
2340 */
2341void
2342bfa_ioc_mbox_register(struct bfa_ioc_s *ioc, bfa_ioc_mbox_mcfunc_t *mcfuncs)
2343{
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002344 struct bfa_ioc_mbox_mod_s *mod = &ioc->mbox_mod;
2345 int mc;
Jing Huang7725ccf2009-09-23 17:46:15 -07002346
2347 for (mc = 0; mc < BFI_MC_MAX; mc++)
2348 mod->mbhdlr[mc].cbfn = mcfuncs[mc];
2349}
2350
Jing Huang5fbe25c2010-10-18 17:17:23 -07002351/*
Jing Huang7725ccf2009-09-23 17:46:15 -07002352 * Register mailbox message handler function, to be called by common modules
2353 */
2354void
2355bfa_ioc_mbox_regisr(struct bfa_ioc_s *ioc, enum bfi_mclass mc,
2356 bfa_ioc_mbox_mcfunc_t cbfn, void *cbarg)
2357{
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002358 struct bfa_ioc_mbox_mod_s *mod = &ioc->mbox_mod;
Jing Huang7725ccf2009-09-23 17:46:15 -07002359
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002360 mod->mbhdlr[mc].cbfn = cbfn;
2361 mod->mbhdlr[mc].cbarg = cbarg;
Jing Huang7725ccf2009-09-23 17:46:15 -07002362}
2363
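/*
 * Usage sketch: a common module registers one handler per message class it
 * owns; bfa_ioc_mbox_isr() below then dispatches incoming messages of that
 * class to it. The handler signature follows the cbfn(cbarg, &m) call in the
 * ISR; the class constant and the mymod_* names are illustrative only.
 *
 *	static void mymod_isr(void *cbarg, struct bfi_mbmsg_s *m)
 *	{
 *		... decode m->mh.msg_id and handle the reply ...
 *	}
 *
 *	bfa_ioc_mbox_regisr(ioc, BFI_MC_PORT, mymod_isr, mymod);
 */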
Jing Huang5fbe25c2010-10-18 17:17:23 -07002364/*
Jing Huang7725ccf2009-09-23 17:46:15 -07002365 * Queue a mailbox command request to firmware. If the mailbox is busy, the command
	2366 * is queued for later delivery; it is the caller's responsibility to serialize.
2367 *
2368 * @param[in] ioc IOC instance
	2369 * @param[in]	cmd	Mailbox command
2370 */
2371void
2372bfa_ioc_mbox_queue(struct bfa_ioc_s *ioc, struct bfa_mbox_cmd_s *cmd)
2373{
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002374 struct bfa_ioc_mbox_mod_s *mod = &ioc->mbox_mod;
2375 u32 stat;
Jing Huang7725ccf2009-09-23 17:46:15 -07002376
Jing Huang5fbe25c2010-10-18 17:17:23 -07002377 /*
Jing Huang7725ccf2009-09-23 17:46:15 -07002378 * If a previous command is pending, queue new command
2379 */
2380 if (!list_empty(&mod->cmd_q)) {
2381 list_add_tail(&cmd->qe, &mod->cmd_q);
2382 return;
2383 }
2384
Jing Huang5fbe25c2010-10-18 17:17:23 -07002385 /*
Jing Huang7725ccf2009-09-23 17:46:15 -07002386 * If mailbox is busy, queue command for poll timer
2387 */
Jing Huang53440262010-10-18 17:12:29 -07002388 stat = readl(ioc->ioc_regs.hfn_mbox_cmd);
Jing Huang7725ccf2009-09-23 17:46:15 -07002389 if (stat) {
2390 list_add_tail(&cmd->qe, &mod->cmd_q);
2391 return;
2392 }
2393
Jing Huang5fbe25c2010-10-18 17:17:23 -07002394 /*
Jing Huang7725ccf2009-09-23 17:46:15 -07002395 * mailbox is free -- queue command to firmware
2396 */
2397 bfa_ioc_mbox_send(ioc, cmd->msg, sizeof(cmd->msg));
2398}
2399
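/*
 * Usage sketch: bfa_ioc_send_fwsync() later in this file queues a request in
 * exactly this way -- build the message inside a bfa_mbox_cmd_s, set the
 * header with bfi_h2i_set(), and hand it to bfa_ioc_mbox_queue(). The command
 * goes out immediately if the mailbox is free, otherwise it is sent later
 * from the poll timer or the mailbox ISR.
 *
 *	struct bfa_mbox_cmd_s cmd;
 *	struct bfi_ioc_ctrl_req_s *req = (struct bfi_ioc_ctrl_req_s *) cmd.msg;
 *
 *	bfi_h2i_set(req->mh, BFI_MC_IOC, BFI_IOC_H2I_DBG_SYNC,
 *		    bfa_ioc_portid(ioc));
 *	bfa_ioc_mbox_queue(ioc, &cmd);
 */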
Jing Huang5fbe25c2010-10-18 17:17:23 -07002400/*
Jing Huang7725ccf2009-09-23 17:46:15 -07002401 * Handle mailbox interrupts
2402 */
2403void
2404bfa_ioc_mbox_isr(struct bfa_ioc_s *ioc)
2405{
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002406 struct bfa_ioc_mbox_mod_s *mod = &ioc->mbox_mod;
2407 struct bfi_mbmsg_s m;
2408 int mc;
Jing Huang7725ccf2009-09-23 17:46:15 -07002409
Krishna Gudipati8b070b42011-06-13 15:52:40 -07002410 if (bfa_ioc_msgget(ioc, &m)) {
2411 /*
2412 * Treat IOC message class as special.
2413 */
2414 mc = m.mh.msg_class;
2415 if (mc == BFI_MC_IOC) {
2416 bfa_ioc_isr(ioc, &m);
2417 return;
2418 }
Jing Huang7725ccf2009-09-23 17:46:15 -07002419
Krishna Gudipati8b070b42011-06-13 15:52:40 -07002420 if ((mc > BFI_MC_MAX) || (mod->mbhdlr[mc].cbfn == NULL))
2421 return;
2422
2423 mod->mbhdlr[mc].cbfn(mod->mbhdlr[mc].cbarg, &m);
Jing Huang7725ccf2009-09-23 17:46:15 -07002424 }
2425
Krishna Gudipati8b070b42011-06-13 15:52:40 -07002426 bfa_ioc_lpu_read_stat(ioc);
Jing Huang7725ccf2009-09-23 17:46:15 -07002427
Krishna Gudipati8b070b42011-06-13 15:52:40 -07002428 /*
2429 * Try to send pending mailbox commands
2430 */
2431 bfa_ioc_mbox_poll(ioc);
Jing Huang7725ccf2009-09-23 17:46:15 -07002432}
2433
2434void
2435bfa_ioc_error_isr(struct bfa_ioc_s *ioc)
2436{
Krishna Gudipati5a0adae2011-06-24 20:22:56 -07002437 bfa_ioc_stats(ioc, ioc_hbfails);
2438 ioc->stats.hb_count = ioc->hb_count;
Jing Huang7725ccf2009-09-23 17:46:15 -07002439 bfa_fsm_send_event(ioc, IOC_E_HWERROR);
2440}
2441
Jing Huang5fbe25c2010-10-18 17:17:23 -07002442/*
Jing Huang7725ccf2009-09-23 17:46:15 -07002443 * Return TRUE if IOC is disabled.
2444 */
2445bfa_boolean_t
2446bfa_ioc_is_disabled(struct bfa_ioc_s *ioc)
2447{
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002448 return bfa_fsm_cmp_state(ioc, bfa_ioc_sm_disabling) ||
2449 bfa_fsm_cmp_state(ioc, bfa_ioc_sm_disabled);
Jing Huang7725ccf2009-09-23 17:46:15 -07002450}
2451
Jing Huang5fbe25c2010-10-18 17:17:23 -07002452/*
Krishna Gudipatia7141342011-06-24 20:23:19 -07002453 * Return TRUE if IOC is in acquiring address state
2454 */
2455bfa_boolean_t
2456bfa_ioc_is_acq_addr(struct bfa_ioc_s *ioc)
2457{
2458 return bfa_fsm_cmp_state(ioc, bfa_ioc_sm_acq_addr);
2459}
2460
2461/*
Jing Huang7725ccf2009-09-23 17:46:15 -07002462 * Return TRUE if IOC firmware is different.
2463 */
2464bfa_boolean_t
2465bfa_ioc_fw_mismatch(struct bfa_ioc_s *ioc)
2466{
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002467 return bfa_fsm_cmp_state(ioc, bfa_ioc_sm_reset) ||
2468 bfa_fsm_cmp_state(&ioc->iocpf, bfa_iocpf_sm_fwcheck) ||
2469 bfa_fsm_cmp_state(&ioc->iocpf, bfa_iocpf_sm_mismatch);
Jing Huang7725ccf2009-09-23 17:46:15 -07002470}
2471
2472#define bfa_ioc_state_disabled(__sm) \
2473 (((__sm) == BFI_IOC_UNINIT) || \
2474 ((__sm) == BFI_IOC_INITING) || \
2475 ((__sm) == BFI_IOC_HWINIT) || \
2476 ((__sm) == BFI_IOC_DISABLED) || \
Krishna Gudipati0a20de42010-03-05 19:34:20 -08002477 ((__sm) == BFI_IOC_FAIL) || \
Jing Huang7725ccf2009-09-23 17:46:15 -07002478 ((__sm) == BFI_IOC_CFG_DISABLED))
2479
Jing Huang5fbe25c2010-10-18 17:17:23 -07002480/*
Jing Huang7725ccf2009-09-23 17:46:15 -07002481 * Check if adapter is disabled -- both IOCs should be in a disabled
2482 * state.
2483 */
2484bfa_boolean_t
2485bfa_ioc_adapter_is_disabled(struct bfa_ioc_s *ioc)
2486{
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002487 u32 ioc_state;
Jing Huang7725ccf2009-09-23 17:46:15 -07002488
2489 if (!bfa_fsm_cmp_state(ioc, bfa_ioc_sm_disabled))
2490 return BFA_FALSE;
2491
Krishna Gudipati11189202011-06-13 15:50:35 -07002492 ioc_state = readl(ioc->ioc_regs.ioc_fwstate);
Jing Huang7725ccf2009-09-23 17:46:15 -07002493 if (!bfa_ioc_state_disabled(ioc_state))
2494 return BFA_FALSE;
2495
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002496 if (ioc->pcidev.device_id != BFA_PCI_DEVICE_ID_FC_8G1P) {
Krishna Gudipati11189202011-06-13 15:50:35 -07002497 ioc_state = readl(ioc->ioc_regs.alt_ioc_fwstate);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002498 if (!bfa_ioc_state_disabled(ioc_state))
2499 return BFA_FALSE;
2500 }
Jing Huang7725ccf2009-09-23 17:46:15 -07002501
2502 return BFA_TRUE;
2503}
2504
Jing Huang8f4bfad2010-12-26 21:50:10 -08002505/*
Krishna Gudipatif1d584d2010-12-13 16:17:11 -08002506 * Reset IOC fwstate registers.
2507 */
2508void
2509bfa_ioc_reset_fwstate(struct bfa_ioc_s *ioc)
2510{
2511 writel(BFI_IOC_UNINIT, ioc->ioc_regs.ioc_fwstate);
2512 writel(BFI_IOC_UNINIT, ioc->ioc_regs.alt_ioc_fwstate);
2513}
2514
Jing Huang7725ccf2009-09-23 17:46:15 -07002515#define BFA_MFG_NAME "Brocade"
2516void
2517bfa_ioc_get_adapter_attr(struct bfa_ioc_s *ioc,
2518 struct bfa_adapter_attr_s *ad_attr)
2519{
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002520 struct bfi_ioc_attr_s *ioc_attr;
Jing Huang7725ccf2009-09-23 17:46:15 -07002521
2522 ioc_attr = ioc->attr;
Jing Huang7725ccf2009-09-23 17:46:15 -07002523
Krishna Gudipati0a4b1fc2010-03-05 19:37:57 -08002524 bfa_ioc_get_adapter_serial_num(ioc, ad_attr->serial_num);
2525 bfa_ioc_get_adapter_fw_ver(ioc, ad_attr->fw_ver);
2526 bfa_ioc_get_adapter_optrom_ver(ioc, ad_attr->optrom_ver);
2527 bfa_ioc_get_adapter_manufacturer(ioc, ad_attr->manufacturer);
Jing Huang6a18b162010-10-18 17:08:54 -07002528 memcpy(&ad_attr->vpd, &ioc_attr->vpd,
Jing Huang7725ccf2009-09-23 17:46:15 -07002529 sizeof(struct bfa_mfg_vpd_s));
2530
Krishna Gudipati0a4b1fc2010-03-05 19:37:57 -08002531 ad_attr->nports = bfa_ioc_get_nports(ioc);
2532 ad_attr->max_speed = bfa_ioc_speed_sup(ioc);
Jing Huang7725ccf2009-09-23 17:46:15 -07002533
Krishna Gudipati0a4b1fc2010-03-05 19:37:57 -08002534 bfa_ioc_get_adapter_model(ioc, ad_attr->model);
2535 /* For now, model descr uses same model string */
2536 bfa_ioc_get_adapter_model(ioc, ad_attr->model_descr);
Jing Huang7725ccf2009-09-23 17:46:15 -07002537
Jing Huanged969322010-07-08 19:45:56 -07002538 ad_attr->card_type = ioc_attr->card_type;
2539 ad_attr->is_mezz = bfa_mfg_is_mezz(ioc_attr->card_type);
2540
Jing Huang7725ccf2009-09-23 17:46:15 -07002541 if (BFI_ADAPTER_IS_SPECIAL(ioc_attr->adapter_prop))
2542 ad_attr->prototype = 1;
2543 else
2544 ad_attr->prototype = 0;
2545
Maggie Zhangf7f73812010-12-09 19:08:43 -08002546 ad_attr->pwwn = ioc->attr->pwwn;
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002547 ad_attr->mac = bfa_ioc_get_mac(ioc);
Jing Huang7725ccf2009-09-23 17:46:15 -07002548
2549 ad_attr->pcie_gen = ioc_attr->pcie_gen;
2550 ad_attr->pcie_lanes = ioc_attr->pcie_lanes;
2551 ad_attr->pcie_lanes_orig = ioc_attr->pcie_lanes_orig;
2552 ad_attr->asic_rev = ioc_attr->asic_rev;
Krishna Gudipati0a4b1fc2010-03-05 19:37:57 -08002553
2554 bfa_ioc_get_pci_chip_rev(ioc, ad_attr->hw_ver);
Jing Huang7725ccf2009-09-23 17:46:15 -07002555
Krishna Gudipati11189202011-06-13 15:50:35 -07002556 ad_attr->cna_capable = bfa_ioc_is_cna(ioc);
2557 ad_attr->trunk_capable = (ad_attr->nports > 1) &&
2558 !bfa_ioc_is_cna(ioc) && !ad_attr->is_mezz;
Jing Huang7725ccf2009-09-23 17:46:15 -07002559}
2560
Krishna Gudipati2993cc72010-03-05 19:36:47 -08002561enum bfa_ioc_type_e
2562bfa_ioc_get_type(struct bfa_ioc_s *ioc)
2563{
Krishna Gudipati11189202011-06-13 15:50:35 -07002564 if (ioc->clscode == BFI_PCIFN_CLASS_ETH)
Krishna Gudipati2993cc72010-03-05 19:36:47 -08002565 return BFA_IOC_TYPE_LL;
Krishna Gudipati11189202011-06-13 15:50:35 -07002566
2567 WARN_ON(ioc->clscode != BFI_PCIFN_CLASS_FC);
2568
Krishna Gudipati5a0adae2011-06-24 20:22:56 -07002569 return (ioc->attr->port_mode == BFI_PORT_MODE_FC)
Krishna Gudipati11189202011-06-13 15:50:35 -07002570 ? BFA_IOC_TYPE_FC : BFA_IOC_TYPE_FCoE;
Krishna Gudipati2993cc72010-03-05 19:36:47 -08002571}
2572
Jing Huang7725ccf2009-09-23 17:46:15 -07002573void
Krishna Gudipati0a4b1fc2010-03-05 19:37:57 -08002574bfa_ioc_get_adapter_serial_num(struct bfa_ioc_s *ioc, char *serial_num)
2575{
Jing Huang6a18b162010-10-18 17:08:54 -07002576 memset((void *)serial_num, 0, BFA_ADAPTER_SERIAL_NUM_LEN);
2577 memcpy((void *)serial_num,
Krishna Gudipati0a4b1fc2010-03-05 19:37:57 -08002578 (void *)ioc->attr->brcd_serialnum,
2579 BFA_ADAPTER_SERIAL_NUM_LEN);
2580}
2581
2582void
2583bfa_ioc_get_adapter_fw_ver(struct bfa_ioc_s *ioc, char *fw_ver)
2584{
Jing Huang6a18b162010-10-18 17:08:54 -07002585 memset((void *)fw_ver, 0, BFA_VERSION_LEN);
2586 memcpy(fw_ver, ioc->attr->fw_version, BFA_VERSION_LEN);
Krishna Gudipati0a4b1fc2010-03-05 19:37:57 -08002587}
2588
2589void
2590bfa_ioc_get_pci_chip_rev(struct bfa_ioc_s *ioc, char *chip_rev)
2591{
Jing Huangd4b671c2010-12-26 21:46:35 -08002592 WARN_ON(!chip_rev);
Krishna Gudipati0a4b1fc2010-03-05 19:37:57 -08002593
Jing Huang6a18b162010-10-18 17:08:54 -07002594 memset((void *)chip_rev, 0, BFA_IOC_CHIP_REV_LEN);
Krishna Gudipati0a4b1fc2010-03-05 19:37:57 -08002595
2596 chip_rev[0] = 'R';
2597 chip_rev[1] = 'e';
2598 chip_rev[2] = 'v';
2599 chip_rev[3] = '-';
2600 chip_rev[4] = ioc->attr->asic_rev;
2601 chip_rev[5] = '\0';
2602}
2603
2604void
2605bfa_ioc_get_adapter_optrom_ver(struct bfa_ioc_s *ioc, char *optrom_ver)
2606{
Jing Huang6a18b162010-10-18 17:08:54 -07002607 memset((void *)optrom_ver, 0, BFA_VERSION_LEN);
2608 memcpy(optrom_ver, ioc->attr->optrom_version,
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002609 BFA_VERSION_LEN);
Krishna Gudipati0a4b1fc2010-03-05 19:37:57 -08002610}
2611
2612void
2613bfa_ioc_get_adapter_manufacturer(struct bfa_ioc_s *ioc, char *manufacturer)
2614{
Jing Huang6a18b162010-10-18 17:08:54 -07002615 memset((void *)manufacturer, 0, BFA_ADAPTER_MFG_NAME_LEN);
2616 memcpy(manufacturer, BFA_MFG_NAME, BFA_ADAPTER_MFG_NAME_LEN);
Krishna Gudipati0a4b1fc2010-03-05 19:37:57 -08002617}
2618
2619void
2620bfa_ioc_get_adapter_model(struct bfa_ioc_s *ioc, char *model)
2621{
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002622 struct bfi_ioc_attr_s *ioc_attr;
Krishna Gudipati0a4b1fc2010-03-05 19:37:57 -08002623
Jing Huangd4b671c2010-12-26 21:46:35 -08002624 WARN_ON(!model);
Jing Huang6a18b162010-10-18 17:08:54 -07002625 memset((void *)model, 0, BFA_ADAPTER_MODEL_NAME_LEN);
Krishna Gudipati0a4b1fc2010-03-05 19:37:57 -08002626
2627 ioc_attr = ioc->attr;
2628
Krishna Gudipati10a07372011-06-24 20:23:38 -07002629 snprintf(model, BFA_ADAPTER_MODEL_NAME_LEN, "%s-%u",
Krishna Gudipati8b070b42011-06-13 15:52:40 -07002630 BFA_MFG_NAME, ioc_attr->card_type);
Krishna Gudipati0a4b1fc2010-03-05 19:37:57 -08002631}
2632
2633enum bfa_ioc_state
2634bfa_ioc_get_state(struct bfa_ioc_s *ioc)
2635{
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002636 enum bfa_iocpf_state iocpf_st;
2637 enum bfa_ioc_state ioc_st = bfa_sm_to_state(ioc_sm_table, ioc->fsm);
2638
2639 if (ioc_st == BFA_IOC_ENABLING ||
2640 ioc_st == BFA_IOC_FAIL || ioc_st == BFA_IOC_INITFAIL) {
2641
2642 iocpf_st = bfa_sm_to_state(iocpf_sm_table, ioc->iocpf.fsm);
2643
2644 switch (iocpf_st) {
2645 case BFA_IOCPF_SEMWAIT:
2646 ioc_st = BFA_IOC_SEMWAIT;
2647 break;
2648
2649 case BFA_IOCPF_HWINIT:
2650 ioc_st = BFA_IOC_HWINIT;
2651 break;
2652
2653 case BFA_IOCPF_FWMISMATCH:
2654 ioc_st = BFA_IOC_FWMISMATCH;
2655 break;
2656
2657 case BFA_IOCPF_FAIL:
2658 ioc_st = BFA_IOC_FAIL;
2659 break;
2660
2661 case BFA_IOCPF_INITFAIL:
2662 ioc_st = BFA_IOC_INITFAIL;
2663 break;
2664
2665 default:
2666 break;
2667 }
2668 }
2669
2670 return ioc_st;
Krishna Gudipati0a4b1fc2010-03-05 19:37:57 -08002671}
2672
2673void
Jing Huang7725ccf2009-09-23 17:46:15 -07002674bfa_ioc_get_attr(struct bfa_ioc_s *ioc, struct bfa_ioc_attr_s *ioc_attr)
2675{
Jing Huang6a18b162010-10-18 17:08:54 -07002676 memset((void *)ioc_attr, 0, sizeof(struct bfa_ioc_attr_s));
Jing Huang7725ccf2009-09-23 17:46:15 -07002677
Krishna Gudipati0a4b1fc2010-03-05 19:37:57 -08002678 ioc_attr->state = bfa_ioc_get_state(ioc);
Jing Huang7725ccf2009-09-23 17:46:15 -07002679 ioc_attr->port_id = ioc->port_id;
Krishna Gudipati1a4d8e12011-06-24 20:22:28 -07002680 ioc_attr->port_mode = ioc->port_mode;
2681 ioc_attr->port_mode_cfg = ioc->port_mode_cfg;
2682 ioc_attr->cap_bm = ioc->ad_cap_bm;
Jing Huang7725ccf2009-09-23 17:46:15 -07002683
Krishna Gudipati2993cc72010-03-05 19:36:47 -08002684 ioc_attr->ioc_type = bfa_ioc_get_type(ioc);
Jing Huang7725ccf2009-09-23 17:46:15 -07002685
2686 bfa_ioc_get_adapter_attr(ioc, &ioc_attr->adapter_attr);
2687
2688 ioc_attr->pci_attr.device_id = ioc->pcidev.device_id;
2689 ioc_attr->pci_attr.pcifn = ioc->pcidev.pci_func;
Krishna Gudipati0a4b1fc2010-03-05 19:37:57 -08002690 bfa_ioc_get_pci_chip_rev(ioc, ioc_attr->pci_attr.chip_rev);
Jing Huang7725ccf2009-09-23 17:46:15 -07002691}
2692
Jing Huang7725ccf2009-09-23 17:46:15 -07002693mac_t
2694bfa_ioc_get_mac(struct bfa_ioc_s *ioc)
2695{
Jing Huang15b64a82010-07-08 19:48:12 -07002696 /*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002697 * Check the IOC type and return the appropriate MAC
Jing Huang15b64a82010-07-08 19:48:12 -07002698 */
2699 if (bfa_ioc_get_type(ioc) == BFA_IOC_TYPE_FCoE)
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002700 return ioc->attr->fcoe_mac;
Jing Huang15b64a82010-07-08 19:48:12 -07002701 else
2702 return ioc->attr->mac;
2703}
2704
Jing Huang15b64a82010-07-08 19:48:12 -07002705mac_t
2706bfa_ioc_get_mfg_mac(struct bfa_ioc_s *ioc)
2707{
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002708 mac_t m;
Jing Huang7725ccf2009-09-23 17:46:15 -07002709
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002710 m = ioc->attr->mfg_mac;
2711 if (bfa_mfg_is_old_wwn_mac_model(ioc->attr->card_type))
2712 m.mac[MAC_ADDRLEN - 1] += bfa_ioc_pcifn(ioc);
2713 else
2714 bfa_mfg_increment_wwn_mac(&(m.mac[MAC_ADDRLEN-3]),
2715 bfa_ioc_pcifn(ioc));
Jing Huang7725ccf2009-09-23 17:46:15 -07002716
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002717 return m;
Jing Huang7725ccf2009-09-23 17:46:15 -07002718}
2719
Jing Huang5fbe25c2010-10-18 17:17:23 -07002720/*
Krishna Gudipati7826f302011-07-20 16:59:13 -07002721 * Send AEN notification
2722 */
2723void
2724bfa_ioc_aen_post(struct bfa_ioc_s *ioc, enum bfa_ioc_aen_event event)
2725{
2726 struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad;
2727 struct bfa_aen_entry_s *aen_entry;
2728 enum bfa_ioc_type_e ioc_type;
2729
2730 bfad_get_aen_entry(bfad, aen_entry);
2731 if (!aen_entry)
2732 return;
2733
2734 ioc_type = bfa_ioc_get_type(ioc);
2735 switch (ioc_type) {
2736 case BFA_IOC_TYPE_FC:
2737 aen_entry->aen_data.ioc.pwwn = ioc->attr->pwwn;
2738 break;
2739 case BFA_IOC_TYPE_FCoE:
2740 aen_entry->aen_data.ioc.pwwn = ioc->attr->pwwn;
2741 aen_entry->aen_data.ioc.mac = bfa_ioc_get_mac(ioc);
2742 break;
2743 case BFA_IOC_TYPE_LL:
2744 aen_entry->aen_data.ioc.mac = bfa_ioc_get_mac(ioc);
2745 break;
2746 default:
2747 WARN_ON(ioc_type != BFA_IOC_TYPE_FC);
2748 break;
2749 }
2750
2751 /* Send the AEN notification */
2752 aen_entry->aen_data.ioc.ioc_type = ioc_type;
2753 bfad_im_post_vendor_event(aen_entry, bfad, ++ioc->ioc_aen_seq,
2754 BFA_AEN_CAT_IOC, event);
2755}
2756
2757/*
Jing Huang7725ccf2009-09-23 17:46:15 -07002758 * Retrieve saved firmware trace from a prior IOC failure.
2759 */
2760bfa_status_t
2761bfa_ioc_debug_fwsave(struct bfa_ioc_s *ioc, void *trcdata, int *trclen)
2762{
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002763 int tlen;
Jing Huang7725ccf2009-09-23 17:46:15 -07002764
2765 if (ioc->dbg_fwsave_len == 0)
2766 return BFA_STATUS_ENOFSAVE;
2767
2768 tlen = *trclen;
2769 if (tlen > ioc->dbg_fwsave_len)
2770 tlen = ioc->dbg_fwsave_len;
2771
Jing Huang6a18b162010-10-18 17:08:54 -07002772 memcpy(trcdata, ioc->dbg_fwsave, tlen);
Jing Huang7725ccf2009-09-23 17:46:15 -07002773 *trclen = tlen;
2774 return BFA_STATUS_OK;
2775}
2776
Krishna Gudipati738c9e62010-03-05 19:36:19 -08002777
Jing Huang5fbe25c2010-10-18 17:17:23 -07002778/*
Jing Huang7725ccf2009-09-23 17:46:15 -07002779 * Retrieve the firmware trace from IOC smem.
2780 */
2781bfa_status_t
2782bfa_ioc_debug_fwtrc(struct bfa_ioc_s *ioc, void *trcdata, int *trclen)
2783{
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002784 u32 loff = BFA_DBG_FWTRC_OFF(bfa_ioc_portid(ioc));
2785 int tlen;
2786 bfa_status_t status;
Jing Huang7725ccf2009-09-23 17:46:15 -07002787
2788 bfa_trc(ioc, *trclen);
2789
Jing Huang7725ccf2009-09-23 17:46:15 -07002790 tlen = *trclen;
2791 if (tlen > BFA_DBG_FWTRC_LEN)
2792 tlen = BFA_DBG_FWTRC_LEN;
Jing Huang7725ccf2009-09-23 17:46:15 -07002793
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002794 status = bfa_ioc_smem_read(ioc, trcdata, loff, tlen);
2795 *trclen = tlen;
2796 return status;
2797}
Jing Huang7725ccf2009-09-23 17:46:15 -07002798
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002799static void
2800bfa_ioc_send_fwsync(struct bfa_ioc_s *ioc)
2801{
2802 struct bfa_mbox_cmd_s cmd;
2803 struct bfi_ioc_ctrl_req_s *req = (struct bfi_ioc_ctrl_req_s *) cmd.msg;
Jing Huang7725ccf2009-09-23 17:46:15 -07002804
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002805 bfi_h2i_set(req->mh, BFI_MC_IOC, BFI_IOC_H2I_DBG_SYNC,
2806 bfa_ioc_portid(ioc));
Krishna Gudipatid37779f2011-06-13 15:42:10 -07002807 req->clscode = cpu_to_be16(ioc->clscode);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002808 bfa_ioc_mbox_queue(ioc, &cmd);
2809}
Krishna Gudipati0a20de42010-03-05 19:34:20 -08002810
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002811static void
2812bfa_ioc_fwsync(struct bfa_ioc_s *ioc)
2813{
2814 u32 fwsync_iter = 1000;
2815
2816 bfa_ioc_send_fwsync(ioc);
2817
Jing Huang5fbe25c2010-10-18 17:17:23 -07002818 /*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002819 * After sending a fw sync mbox command wait for it to
2820 * take effect. We will not wait for a response because
2821 * 1. fw_sync mbox cmd doesn't have a response.
2822 * 2. Even if we implement that, interrupts might not
2823 * be enabled when we call this function.
2824 * So, just keep checking if any mbox cmd is pending, and
2825 * after waiting for a reasonable amount of time, go ahead.
2826 * It is possible that fw has crashed and the mbox command
2827 * is never acknowledged.
Krishna Gudipati0a20de42010-03-05 19:34:20 -08002828 */
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002829 while (bfa_ioc_mbox_cmd_pending(ioc) && fwsync_iter > 0)
2830 fwsync_iter--;
2831}
Krishna Gudipati0a20de42010-03-05 19:34:20 -08002832
Jing Huang5fbe25c2010-10-18 17:17:23 -07002833/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002834 * Dump firmware smem
2835 */
2836bfa_status_t
2837bfa_ioc_debug_fwcore(struct bfa_ioc_s *ioc, void *buf,
2838 u32 *offset, int *buflen)
2839{
2840 u32 loff;
2841 int dlen;
2842 bfa_status_t status;
2843 u32 smem_len = BFA_IOC_FW_SMEM_SIZE(ioc);
Jing Huang7725ccf2009-09-23 17:46:15 -07002844
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002845 if (*offset >= smem_len) {
2846 *offset = *buflen = 0;
2847 return BFA_STATUS_EINVAL;
2848 }
2849
2850 loff = *offset;
2851 dlen = *buflen;
2852
Jing Huang5fbe25c2010-10-18 17:17:23 -07002853 /*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002854 * First smem read, sync smem before proceeding
2855 * No need to sync before reading every chunk.
2856 */
2857 if (loff == 0)
2858 bfa_ioc_fwsync(ioc);
2859
2860 if ((loff + dlen) >= smem_len)
2861 dlen = smem_len - loff;
2862
2863 status = bfa_ioc_smem_read(ioc, buf, loff, dlen);
2864
2865 if (status != BFA_STATUS_OK) {
2866 *offset = *buflen = 0;
2867 return status;
2868 }
2869
2870 *offset += dlen;
2871
2872 if (*offset >= smem_len)
2873 *offset = 0;
2874
2875 *buflen = dlen;
2876
2877 return status;
2878}
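/*
 * Illustrative call pattern (hypothetical caller, not part of the driver):
 * dump the whole smem by calling bfa_ioc_debug_fwcore() repeatedly with the
 * returned offset; the offset wraps back to zero once the end of smem has
 * been read.
 *
 *	u32 off = 0;
 *	int len;
 *	do {
 *		len = sizeof(chunk_buf);
 *		if (bfa_ioc_debug_fwcore(ioc, chunk_buf, &off, &len) !=
 *		    BFA_STATUS_OK)
 *			break;
 *	} while (off != 0);
 */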
2879
Jing Huang5fbe25c2010-10-18 17:17:23 -07002880/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002881 * Firmware statistics
2882 */
2883bfa_status_t
2884bfa_ioc_fw_stats_get(struct bfa_ioc_s *ioc, void *stats)
2885{
2886 u32 loff = BFI_IOC_FWSTATS_OFF + \
2887 BFI_IOC_FWSTATS_SZ * (bfa_ioc_portid(ioc));
2888 int tlen;
2889 bfa_status_t status;
2890
2891 if (ioc->stats_busy) {
2892 bfa_trc(ioc, ioc->stats_busy);
2893 return BFA_STATUS_DEVBUSY;
2894 }
2895 ioc->stats_busy = BFA_TRUE;
2896
2897 tlen = sizeof(struct bfa_fw_stats_s);
2898 status = bfa_ioc_smem_read(ioc, stats, loff, tlen);
2899
2900 ioc->stats_busy = BFA_FALSE;
2901 return status;
2902}
2903
2904bfa_status_t
2905bfa_ioc_fw_stats_clear(struct bfa_ioc_s *ioc)
2906{
2907 u32 loff = BFI_IOC_FWSTATS_OFF + \
2908 BFI_IOC_FWSTATS_SZ * (bfa_ioc_portid(ioc));
2909 int tlen;
2910 bfa_status_t status;
2911
2912 if (ioc->stats_busy) {
2913 bfa_trc(ioc, ioc->stats_busy);
2914 return BFA_STATUS_DEVBUSY;
2915 }
2916 ioc->stats_busy = BFA_TRUE;
2917
2918 tlen = sizeof(struct bfa_fw_stats_s);
2919 status = bfa_ioc_smem_clr(ioc, loff, tlen);
2920
2921 ioc->stats_busy = BFA_FALSE;
2922 return status;
Jing Huang7725ccf2009-09-23 17:46:15 -07002923}
2924
Jing Huang5fbe25c2010-10-18 17:17:23 -07002925/*
Jing Huang7725ccf2009-09-23 17:46:15 -07002926 * Save firmware trace if configured.
2927 */
2928static void
Krishna Gudipati4e78efe2010-12-13 16:16:09 -08002929bfa_ioc_debug_save_ftrc(struct bfa_ioc_s *ioc)
Jing Huang7725ccf2009-09-23 17:46:15 -07002930{
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002931 int tlen;
Jing Huang7725ccf2009-09-23 17:46:15 -07002932
Krishna Gudipati4e78efe2010-12-13 16:16:09 -08002933 if (ioc->dbg_fwsave_once) {
2934 ioc->dbg_fwsave_once = BFA_FALSE;
2935 if (ioc->dbg_fwsave_len) {
2936 tlen = ioc->dbg_fwsave_len;
2937 bfa_ioc_debug_fwtrc(ioc, ioc->dbg_fwsave, &tlen);
2938 }
Jing Huang7725ccf2009-09-23 17:46:15 -07002939 }
2940}
2941
Jing Huang5fbe25c2010-10-18 17:17:23 -07002942/*
Jing Huang7725ccf2009-09-23 17:46:15 -07002943 * Firmware failure detected. Start recovery actions.
2944 */
2945static void
2946bfa_ioc_recover(struct bfa_ioc_s *ioc)
2947{
Jing Huang7725ccf2009-09-23 17:46:15 -07002948 bfa_ioc_stats(ioc, ioc_hbfails);
Krishna Gudipati5a0adae2011-06-24 20:22:56 -07002949 ioc->stats.hb_count = ioc->hb_count;
Jing Huang7725ccf2009-09-23 17:46:15 -07002950 bfa_fsm_send_event(ioc, IOC_E_HBFAIL);
2951}
2952
Jing Huang7725ccf2009-09-23 17:46:15 -07002953static void
Jing Huang07b28382010-07-08 19:59:24 -07002954bfa_ioc_check_attr_wwns(struct bfa_ioc_s *ioc)
Jing Huang7725ccf2009-09-23 17:46:15 -07002955{
Jing Huang07b28382010-07-08 19:59:24 -07002956 if (bfa_ioc_get_type(ioc) == BFA_IOC_TYPE_LL)
2957 return;
Krishna Gudipati7826f302011-07-20 16:59:13 -07002958 if (ioc->attr->nwwn == 0)
2959 bfa_ioc_aen_post(ioc, BFA_IOC_AEN_INVALID_NWWN);
2960 if (ioc->attr->pwwn == 0)
2961 bfa_ioc_aen_post(ioc, BFA_IOC_AEN_INVALID_PWWN);
Jing Huang7725ccf2009-09-23 17:46:15 -07002962}
2963
Jing Huang5fbe25c2010-10-18 17:17:23 -07002964/*
Maggie Zhangdf0f1932010-12-09 19:07:46 -08002965 * BFA IOC PF private functions
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002966 */
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002967static void
2968bfa_iocpf_timeout(void *ioc_arg)
2969{
2970 struct bfa_ioc_s *ioc = (struct bfa_ioc_s *) ioc_arg;
2971
2972 bfa_trc(ioc, 0);
2973 bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_TIMEOUT);
2974}
2975
2976static void
2977bfa_iocpf_sem_timeout(void *ioc_arg)
2978{
2979 struct bfa_ioc_s *ioc = (struct bfa_ioc_s *) ioc_arg;
2980
2981 bfa_ioc_hw_sem_get(ioc);
2982}
2983
Krishna Gudipati775c7742011-06-13 15:52:12 -07002984static void
2985bfa_ioc_poll_fwinit(struct bfa_ioc_s *ioc)
2986{
2987 u32 fwstate = readl(ioc->ioc_regs.ioc_fwstate);
2988
2989 bfa_trc(ioc, fwstate);
2990
2991 if (fwstate == BFI_IOC_DISABLED) {
2992 bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_FWREADY);
2993 return;
2994 }
2995
2996 if (ioc->iocpf.poll_time >= BFA_IOC_TOV)
2997 bfa_iocpf_timeout(ioc);
2998 else {
2999 ioc->iocpf.poll_time += BFA_IOC_POLL_TOV;
3000 bfa_iocpf_poll_timer_start(ioc);
3001 }
3002}
3003
3004static void
3005bfa_iocpf_poll_timeout(void *ioc_arg)
3006{
3007 struct bfa_ioc_s *ioc = (struct bfa_ioc_s *) ioc_arg;
3008
3009 bfa_ioc_poll_fwinit(ioc);
3010}
3011
Jing Huang5fbe25c2010-10-18 17:17:23 -07003012/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07003013 * bfa timer function
3014 */
3015void
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07003016bfa_timer_beat(struct bfa_timer_mod_s *mod)
3017{
3018 struct list_head *qh = &mod->timer_q;
3019 struct list_head *qe, *qe_next;
3020 struct bfa_timer_s *elem;
3021 struct list_head timedout_q;
3022
3023 INIT_LIST_HEAD(&timedout_q);
3024
3025 qe = bfa_q_next(qh);
3026
3027 while (qe != qh) {
3028 qe_next = bfa_q_next(qe);
3029
3030 elem = (struct bfa_timer_s *) qe;
3031 if (elem->timeout <= BFA_TIMER_FREQ) {
3032 elem->timeout = 0;
3033 list_del(&elem->qe);
3034 list_add_tail(&elem->qe, &timedout_q);
3035 } else {
3036 elem->timeout -= BFA_TIMER_FREQ;
3037 }
3038
3039 qe = qe_next; /* go to next elem */
3040 }
3041
3042 /*
3043 * Pop all the timeout entries
3044 */
3045 while (!list_empty(&timedout_q)) {
3046 bfa_q_deq(&timedout_q, &elem);
3047 elem->timercb(elem->arg);
3048 }
3049}
3050
Jing Huang5fbe25c2010-10-18 17:17:23 -07003051/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07003052 * Should be called with lock protection
3053 */
3054void
3055bfa_timer_begin(struct bfa_timer_mod_s *mod, struct bfa_timer_s *timer,
3056 void (*timercb) (void *), void *arg, unsigned int timeout)
3057{
3058
Jing Huangd4b671c2010-12-26 21:46:35 -08003059 WARN_ON(timercb == NULL);
3060 WARN_ON(bfa_q_is_on_q(&mod->timer_q, timer));
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07003061
3062 timer->timeout = timeout;
3063 timer->timercb = timercb;
3064 timer->arg = arg;
3065
3066 list_add_tail(&timer->qe, &mod->timer_q);
3067}
3068
Jing Huang5fbe25c2010-10-18 17:17:23 -07003069/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07003070 * Should be called with lock protection
3071 */
3072void
3073bfa_timer_stop(struct bfa_timer_s *timer)
3074{
Jing Huangd4b671c2010-12-26 21:46:35 -08003075 WARN_ON(list_empty(&timer->qe));
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07003076
3077 list_del(&timer->qe);
3078}
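/*
 * Illustrative usage sketch (not part of the driver flow): a client arms a
 * one-shot timer and cancels it if the awaited event arrives first.
 *
 *	bfa_timer_begin(timer_mod, &my_timer, my_timeout_cb, my_arg, tov_ms);
 *	...
 *	bfa_timer_stop(&my_timer);	/+ event arrived before the timeout +/
 *
 * bfa_timer_beat() is expected to be driven periodically, once per
 * BFA_TIMER_FREQ tick, and fires expired callbacks only after the queue walk.
 */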
Krishna Gudipati1a4d8e12011-06-24 20:22:28 -07003079
3080/*
3081 * ASIC block related
3082 */
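/*
 * The ASIC block configuration is DMAed from the firmware with its multi-byte
 * fields in big-endian order; swap them to host order in place before the
 * structure is handed back to the caller.
 */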
3083static void
3084bfa_ablk_config_swap(struct bfa_ablk_cfg_s *cfg)
3085{
3086 struct bfa_ablk_cfg_inst_s *cfg_inst;
3087 int i, j;
3088 u16 be16;
3089 u32 be32;
3090
3091 for (i = 0; i < BFA_ABLK_MAX; i++) {
3092 cfg_inst = &cfg->inst[i];
3093 for (j = 0; j < BFA_ABLK_MAX_PFS; j++) {
3094 be16 = cfg_inst->pf_cfg[j].pers;
3095 cfg_inst->pf_cfg[j].pers = be16_to_cpu(be16);
3096 be16 = cfg_inst->pf_cfg[j].num_qpairs;
3097 cfg_inst->pf_cfg[j].num_qpairs = be16_to_cpu(be16);
3098 be16 = cfg_inst->pf_cfg[j].num_vectors;
3099 cfg_inst->pf_cfg[j].num_vectors = be16_to_cpu(be16);
3100 be32 = cfg_inst->pf_cfg[j].bw;
 3101			cfg_inst->pf_cfg[j].bw = be32_to_cpu(be32);
3102 }
3103 }
3104}
3105
3106static void
3107bfa_ablk_isr(void *cbarg, struct bfi_mbmsg_s *msg)
3108{
3109 struct bfa_ablk_s *ablk = (struct bfa_ablk_s *)cbarg;
3110 struct bfi_ablk_i2h_rsp_s *rsp = (struct bfi_ablk_i2h_rsp_s *)msg;
3111 bfa_ablk_cbfn_t cbfn;
3112
3113 WARN_ON(msg->mh.msg_class != BFI_MC_ABLK);
3114 bfa_trc(ablk->ioc, msg->mh.msg_id);
3115
3116 switch (msg->mh.msg_id) {
3117 case BFI_ABLK_I2H_QUERY:
3118 if (rsp->status == BFA_STATUS_OK) {
3119 memcpy(ablk->cfg, ablk->dma_addr.kva,
3120 sizeof(struct bfa_ablk_cfg_s));
3121 bfa_ablk_config_swap(ablk->cfg);
3122 ablk->cfg = NULL;
3123 }
3124 break;
3125
3126 case BFI_ABLK_I2H_ADPT_CONFIG:
3127 case BFI_ABLK_I2H_PORT_CONFIG:
3128 /* update config port mode */
3129 ablk->ioc->port_mode_cfg = rsp->port_mode;
3130
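		/* fall through */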
3131 case BFI_ABLK_I2H_PF_DELETE:
3132 case BFI_ABLK_I2H_PF_UPDATE:
3133 case BFI_ABLK_I2H_OPTROM_ENABLE:
3134 case BFI_ABLK_I2H_OPTROM_DISABLE:
3135 /* No-op */
3136 break;
3137
3138 case BFI_ABLK_I2H_PF_CREATE:
3139 *(ablk->pcifn) = rsp->pcifn;
3140 ablk->pcifn = NULL;
3141 break;
3142
3143 default:
3144 WARN_ON(1);
3145 }
3146
3147 ablk->busy = BFA_FALSE;
3148 if (ablk->cbfn) {
3149 cbfn = ablk->cbfn;
3150 ablk->cbfn = NULL;
3151 cbfn(ablk->cbarg, rsp->status);
3152 }
3153}
3154
3155static void
3156bfa_ablk_notify(void *cbarg, enum bfa_ioc_event_e event)
3157{
3158 struct bfa_ablk_s *ablk = (struct bfa_ablk_s *)cbarg;
3159
3160 bfa_trc(ablk->ioc, event);
3161
3162 switch (event) {
3163 case BFA_IOC_E_ENABLED:
3164 WARN_ON(ablk->busy != BFA_FALSE);
3165 break;
3166
3167 case BFA_IOC_E_DISABLED:
3168 case BFA_IOC_E_FAILED:
3169 /* Fail any pending requests */
3170 ablk->pcifn = NULL;
3171 if (ablk->busy) {
3172 if (ablk->cbfn)
3173 ablk->cbfn(ablk->cbarg, BFA_STATUS_FAILED);
3174 ablk->cbfn = NULL;
3175 ablk->busy = BFA_FALSE;
3176 }
3177 break;
3178
3179 default:
3180 WARN_ON(1);
3181 break;
3182 }
3183}
3184
3185u32
3186bfa_ablk_meminfo(void)
3187{
3188 return BFA_ROUNDUP(sizeof(struct bfa_ablk_cfg_s), BFA_DMA_ALIGN_SZ);
3189}
3190
3191void
3192bfa_ablk_memclaim(struct bfa_ablk_s *ablk, u8 *dma_kva, u64 dma_pa)
3193{
3194 ablk->dma_addr.kva = dma_kva;
3195 ablk->dma_addr.pa = dma_pa;
3196}
3197
3198void
3199bfa_ablk_attach(struct bfa_ablk_s *ablk, struct bfa_ioc_s *ioc)
3200{
3201 ablk->ioc = ioc;
3202
3203 bfa_ioc_mbox_regisr(ablk->ioc, BFI_MC_ABLK, bfa_ablk_isr, ablk);
Krishna Gudipati3350d982011-06-24 20:28:37 -07003204 bfa_q_qe_init(&ablk->ioc_notify);
Krishna Gudipati1a4d8e12011-06-24 20:22:28 -07003205 bfa_ioc_notify_init(&ablk->ioc_notify, bfa_ablk_notify, ablk);
3206 list_add_tail(&ablk->ioc_notify.qe, &ablk->ioc->notify_q);
3207}
3208
3209bfa_status_t
3210bfa_ablk_query(struct bfa_ablk_s *ablk, struct bfa_ablk_cfg_s *ablk_cfg,
3211 bfa_ablk_cbfn_t cbfn, void *cbarg)
3212{
3213 struct bfi_ablk_h2i_query_s *m;
3214
3215 WARN_ON(!ablk_cfg);
3216
3217 if (!bfa_ioc_is_operational(ablk->ioc)) {
3218 bfa_trc(ablk->ioc, BFA_STATUS_IOC_FAILURE);
3219 return BFA_STATUS_IOC_FAILURE;
3220 }
3221
3222 if (ablk->busy) {
3223 bfa_trc(ablk->ioc, BFA_STATUS_DEVBUSY);
3224 return BFA_STATUS_DEVBUSY;
3225 }
3226
3227 ablk->cfg = ablk_cfg;
3228 ablk->cbfn = cbfn;
3229 ablk->cbarg = cbarg;
3230 ablk->busy = BFA_TRUE;
3231
3232 m = (struct bfi_ablk_h2i_query_s *)ablk->mb.msg;
3233 bfi_h2i_set(m->mh, BFI_MC_ABLK, BFI_ABLK_H2I_QUERY,
3234 bfa_ioc_portid(ablk->ioc));
3235 bfa_dma_be_addr_set(m->addr, ablk->dma_addr.pa);
3236 bfa_ioc_mbox_queue(ablk->ioc, &ablk->mb);
3237
3238 return BFA_STATUS_OK;
3239}
3240
3241bfa_status_t
3242bfa_ablk_pf_create(struct bfa_ablk_s *ablk, u16 *pcifn,
3243 u8 port, enum bfi_pcifn_class personality, int bw,
3244 bfa_ablk_cbfn_t cbfn, void *cbarg)
3245{
3246 struct bfi_ablk_h2i_pf_req_s *m;
3247
3248 if (!bfa_ioc_is_operational(ablk->ioc)) {
3249 bfa_trc(ablk->ioc, BFA_STATUS_IOC_FAILURE);
3250 return BFA_STATUS_IOC_FAILURE;
3251 }
3252
3253 if (ablk->busy) {
3254 bfa_trc(ablk->ioc, BFA_STATUS_DEVBUSY);
3255 return BFA_STATUS_DEVBUSY;
3256 }
3257
3258 ablk->pcifn = pcifn;
3259 ablk->cbfn = cbfn;
3260 ablk->cbarg = cbarg;
3261 ablk->busy = BFA_TRUE;
3262
3263 m = (struct bfi_ablk_h2i_pf_req_s *)ablk->mb.msg;
3264 bfi_h2i_set(m->mh, BFI_MC_ABLK, BFI_ABLK_H2I_PF_CREATE,
3265 bfa_ioc_portid(ablk->ioc));
3266 m->pers = cpu_to_be16((u16)personality);
3267 m->bw = cpu_to_be32(bw);
3268 m->port = port;
3269 bfa_ioc_mbox_queue(ablk->ioc, &ablk->mb);
3270
3271 return BFA_STATUS_OK;
3272}
3273
3274bfa_status_t
3275bfa_ablk_pf_delete(struct bfa_ablk_s *ablk, int pcifn,
3276 bfa_ablk_cbfn_t cbfn, void *cbarg)
3277{
3278 struct bfi_ablk_h2i_pf_req_s *m;
3279
3280 if (!bfa_ioc_is_operational(ablk->ioc)) {
3281 bfa_trc(ablk->ioc, BFA_STATUS_IOC_FAILURE);
3282 return BFA_STATUS_IOC_FAILURE;
3283 }
3284
3285 if (ablk->busy) {
3286 bfa_trc(ablk->ioc, BFA_STATUS_DEVBUSY);
3287 return BFA_STATUS_DEVBUSY;
3288 }
3289
3290 ablk->cbfn = cbfn;
3291 ablk->cbarg = cbarg;
3292 ablk->busy = BFA_TRUE;
3293
3294 m = (struct bfi_ablk_h2i_pf_req_s *)ablk->mb.msg;
3295 bfi_h2i_set(m->mh, BFI_MC_ABLK, BFI_ABLK_H2I_PF_DELETE,
3296 bfa_ioc_portid(ablk->ioc));
3297 m->pcifn = (u8)pcifn;
3298 bfa_ioc_mbox_queue(ablk->ioc, &ablk->mb);
3299
3300 return BFA_STATUS_OK;
3301}
3302
3303bfa_status_t
3304bfa_ablk_adapter_config(struct bfa_ablk_s *ablk, enum bfa_mode_s mode,
3305 int max_pf, int max_vf, bfa_ablk_cbfn_t cbfn, void *cbarg)
3306{
3307 struct bfi_ablk_h2i_cfg_req_s *m;
3308
3309 if (!bfa_ioc_is_operational(ablk->ioc)) {
3310 bfa_trc(ablk->ioc, BFA_STATUS_IOC_FAILURE);
3311 return BFA_STATUS_IOC_FAILURE;
3312 }
3313
3314 if (ablk->busy) {
3315 bfa_trc(ablk->ioc, BFA_STATUS_DEVBUSY);
3316 return BFA_STATUS_DEVBUSY;
3317 }
3318
3319 ablk->cbfn = cbfn;
3320 ablk->cbarg = cbarg;
3321 ablk->busy = BFA_TRUE;
3322
3323 m = (struct bfi_ablk_h2i_cfg_req_s *)ablk->mb.msg;
3324 bfi_h2i_set(m->mh, BFI_MC_ABLK, BFI_ABLK_H2I_ADPT_CONFIG,
3325 bfa_ioc_portid(ablk->ioc));
3326 m->mode = (u8)mode;
3327 m->max_pf = (u8)max_pf;
3328 m->max_vf = (u8)max_vf;
3329 bfa_ioc_mbox_queue(ablk->ioc, &ablk->mb);
3330
3331 return BFA_STATUS_OK;
3332}
3333
3334bfa_status_t
3335bfa_ablk_port_config(struct bfa_ablk_s *ablk, int port, enum bfa_mode_s mode,
3336 int max_pf, int max_vf, bfa_ablk_cbfn_t cbfn, void *cbarg)
3337{
3338 struct bfi_ablk_h2i_cfg_req_s *m;
3339
3340 if (!bfa_ioc_is_operational(ablk->ioc)) {
3341 bfa_trc(ablk->ioc, BFA_STATUS_IOC_FAILURE);
3342 return BFA_STATUS_IOC_FAILURE;
3343 }
3344
3345 if (ablk->busy) {
3346 bfa_trc(ablk->ioc, BFA_STATUS_DEVBUSY);
3347 return BFA_STATUS_DEVBUSY;
3348 }
3349
3350 ablk->cbfn = cbfn;
3351 ablk->cbarg = cbarg;
3352 ablk->busy = BFA_TRUE;
3353
3354 m = (struct bfi_ablk_h2i_cfg_req_s *)ablk->mb.msg;
3355 bfi_h2i_set(m->mh, BFI_MC_ABLK, BFI_ABLK_H2I_PORT_CONFIG,
3356 bfa_ioc_portid(ablk->ioc));
3357 m->port = (u8)port;
3358 m->mode = (u8)mode;
3359 m->max_pf = (u8)max_pf;
3360 m->max_vf = (u8)max_vf;
3361 bfa_ioc_mbox_queue(ablk->ioc, &ablk->mb);
3362
3363 return BFA_STATUS_OK;
3364}
3365
3366bfa_status_t
3367bfa_ablk_pf_update(struct bfa_ablk_s *ablk, int pcifn, int bw,
3368 bfa_ablk_cbfn_t cbfn, void *cbarg)
3369{
3370 struct bfi_ablk_h2i_pf_req_s *m;
3371
3372 if (!bfa_ioc_is_operational(ablk->ioc)) {
3373 bfa_trc(ablk->ioc, BFA_STATUS_IOC_FAILURE);
3374 return BFA_STATUS_IOC_FAILURE;
3375 }
3376
3377 if (ablk->busy) {
3378 bfa_trc(ablk->ioc, BFA_STATUS_DEVBUSY);
3379 return BFA_STATUS_DEVBUSY;
3380 }
3381
3382 ablk->cbfn = cbfn;
3383 ablk->cbarg = cbarg;
3384 ablk->busy = BFA_TRUE;
3385
3386 m = (struct bfi_ablk_h2i_pf_req_s *)ablk->mb.msg;
3387 bfi_h2i_set(m->mh, BFI_MC_ABLK, BFI_ABLK_H2I_PF_UPDATE,
3388 bfa_ioc_portid(ablk->ioc));
3389 m->pcifn = (u8)pcifn;
3390 m->bw = cpu_to_be32(bw);
3391 bfa_ioc_mbox_queue(ablk->ioc, &ablk->mb);
3392
3393 return BFA_STATUS_OK;
3394}
3395
3396bfa_status_t
3397bfa_ablk_optrom_en(struct bfa_ablk_s *ablk, bfa_ablk_cbfn_t cbfn, void *cbarg)
3398{
3399 struct bfi_ablk_h2i_optrom_s *m;
3400
3401 if (!bfa_ioc_is_operational(ablk->ioc)) {
3402 bfa_trc(ablk->ioc, BFA_STATUS_IOC_FAILURE);
3403 return BFA_STATUS_IOC_FAILURE;
3404 }
3405
3406 if (ablk->busy) {
3407 bfa_trc(ablk->ioc, BFA_STATUS_DEVBUSY);
3408 return BFA_STATUS_DEVBUSY;
3409 }
3410
3411 ablk->cbfn = cbfn;
3412 ablk->cbarg = cbarg;
3413 ablk->busy = BFA_TRUE;
3414
3415 m = (struct bfi_ablk_h2i_optrom_s *)ablk->mb.msg;
3416 bfi_h2i_set(m->mh, BFI_MC_ABLK, BFI_ABLK_H2I_OPTROM_ENABLE,
3417 bfa_ioc_portid(ablk->ioc));
3418 bfa_ioc_mbox_queue(ablk->ioc, &ablk->mb);
3419
3420 return BFA_STATUS_OK;
3421}
3422
3423bfa_status_t
3424bfa_ablk_optrom_dis(struct bfa_ablk_s *ablk, bfa_ablk_cbfn_t cbfn, void *cbarg)
3425{
3426 struct bfi_ablk_h2i_optrom_s *m;
3427
3428 if (!bfa_ioc_is_operational(ablk->ioc)) {
3429 bfa_trc(ablk->ioc, BFA_STATUS_IOC_FAILURE);
3430 return BFA_STATUS_IOC_FAILURE;
3431 }
3432
3433 if (ablk->busy) {
3434 bfa_trc(ablk->ioc, BFA_STATUS_DEVBUSY);
3435 return BFA_STATUS_DEVBUSY;
3436 }
3437
3438 ablk->cbfn = cbfn;
3439 ablk->cbarg = cbarg;
3440 ablk->busy = BFA_TRUE;
3441
3442 m = (struct bfi_ablk_h2i_optrom_s *)ablk->mb.msg;
3443 bfi_h2i_set(m->mh, BFI_MC_ABLK, BFI_ABLK_H2I_OPTROM_DISABLE,
3444 bfa_ioc_portid(ablk->ioc));
3445 bfa_ioc_mbox_queue(ablk->ioc, &ablk->mb);
3446
3447 return BFA_STATUS_OK;
3448}
Krishna Gudipati51e569a2011-06-24 20:26:25 -07003449
3450/*
3451 * SFP module specific
3452 */
3453
3454/* forward declarations */
3455static void bfa_sfp_getdata_send(struct bfa_sfp_s *sfp);
3456static void bfa_sfp_media_get(struct bfa_sfp_s *sfp);
3457static bfa_status_t bfa_sfp_speed_valid(struct bfa_sfp_s *sfp,
3458 enum bfa_port_speed portspeed);
3459
3460static void
3461bfa_cb_sfp_show(struct bfa_sfp_s *sfp)
3462{
3463 bfa_trc(sfp, sfp->lock);
3464 if (sfp->cbfn)
3465 sfp->cbfn(sfp->cbarg, sfp->status);
3466 sfp->lock = 0;
3467 sfp->cbfn = NULL;
3468}
3469
3470static void
3471bfa_cb_sfp_state_query(struct bfa_sfp_s *sfp)
3472{
3473 bfa_trc(sfp, sfp->portspeed);
3474 if (sfp->media) {
3475 bfa_sfp_media_get(sfp);
3476 if (sfp->state_query_cbfn)
3477 sfp->state_query_cbfn(sfp->state_query_cbarg,
3478 sfp->status);
3479 sfp->media = NULL;
3480 }
3481
3482 if (sfp->portspeed) {
3483 sfp->status = bfa_sfp_speed_valid(sfp, sfp->portspeed);
3484 if (sfp->state_query_cbfn)
3485 sfp->state_query_cbfn(sfp->state_query_cbarg,
3486 sfp->status);
3487 sfp->portspeed = BFA_PORT_SPEED_UNKNOWN;
3488 }
3489
3490 sfp->state_query_lock = 0;
3491 sfp->state_query_cbfn = NULL;
3492}
3493
3494/*
3495 * IOC event handler.
3496 */
3497static void
3498bfa_sfp_notify(void *sfp_arg, enum bfa_ioc_event_e event)
3499{
3500 struct bfa_sfp_s *sfp = sfp_arg;
3501
3502 bfa_trc(sfp, event);
3503 bfa_trc(sfp, sfp->lock);
3504 bfa_trc(sfp, sfp->state_query_lock);
3505
3506 switch (event) {
3507 case BFA_IOC_E_DISABLED:
3508 case BFA_IOC_E_FAILED:
3509 if (sfp->lock) {
3510 sfp->status = BFA_STATUS_IOC_FAILURE;
3511 bfa_cb_sfp_show(sfp);
3512 }
3513
3514 if (sfp->state_query_lock) {
3515 sfp->status = BFA_STATUS_IOC_FAILURE;
3516 bfa_cb_sfp_state_query(sfp);
3517 }
3518 break;
3519
3520 default:
3521 break;
3522 }
3523}
3524
3525/*
Krishna Gudipati7826f302011-07-20 16:59:13 -07003526 * Post an SFP State Change Notification (SCN) to the AEN
3527 */
3528static void
3529bfa_sfp_scn_aen_post(struct bfa_sfp_s *sfp, struct bfi_sfp_scn_s *rsp)
3530{
3531 struct bfad_s *bfad = (struct bfad_s *)sfp->ioc->bfa->bfad;
3532 struct bfa_aen_entry_s *aen_entry;
3533 enum bfa_port_aen_event aen_evt = 0;
3534
3535 bfa_trc(sfp, (((u64)rsp->pomlvl) << 16) | (((u64)rsp->sfpid) << 8) |
3536 ((u64)rsp->event));
3537
3538 bfad_get_aen_entry(bfad, aen_entry);
3539 if (!aen_entry)
3540 return;
3541
3542 aen_entry->aen_data.port.ioc_type = bfa_ioc_get_type(sfp->ioc);
3543 aen_entry->aen_data.port.pwwn = sfp->ioc->attr->pwwn;
3544 aen_entry->aen_data.port.mac = bfa_ioc_get_mac(sfp->ioc);
3545
3546 switch (rsp->event) {
3547 case BFA_SFP_SCN_INSERTED:
3548 aen_evt = BFA_PORT_AEN_SFP_INSERT;
3549 break;
3550 case BFA_SFP_SCN_REMOVED:
3551 aen_evt = BFA_PORT_AEN_SFP_REMOVE;
3552 break;
3553 case BFA_SFP_SCN_FAILED:
3554 aen_evt = BFA_PORT_AEN_SFP_ACCESS_ERROR;
3555 break;
3556 case BFA_SFP_SCN_UNSUPPORT:
3557 aen_evt = BFA_PORT_AEN_SFP_UNSUPPORT;
3558 break;
3559 case BFA_SFP_SCN_POM:
3560 aen_evt = BFA_PORT_AEN_SFP_POM;
3561 aen_entry->aen_data.port.level = rsp->pomlvl;
3562 break;
3563 default:
3564 bfa_trc(sfp, rsp->event);
3565 WARN_ON(1);
3566 }
3567
3568 /* Send the AEN notification */
3569 bfad_im_post_vendor_event(aen_entry, bfad, ++sfp->ioc->ioc_aen_seq,
3570 BFA_AEN_CAT_PORT, aen_evt);
3571}
3572
3573/*
Krishna Gudipati51e569a2011-06-24 20:26:25 -07003574 * SFP get data send
3575 */
3576static void
3577bfa_sfp_getdata_send(struct bfa_sfp_s *sfp)
3578{
3579 struct bfi_sfp_req_s *req = (struct bfi_sfp_req_s *)sfp->mbcmd.msg;
3580
3581 bfa_trc(sfp, req->memtype);
3582
3583 /* build host command */
3584 bfi_h2i_set(req->mh, BFI_MC_SFP, BFI_SFP_H2I_SHOW,
3585 bfa_ioc_portid(sfp->ioc));
3586
3587 /* send mbox cmd */
3588 bfa_ioc_mbox_queue(sfp->ioc, &sfp->mbcmd);
3589}
3590
3591/*
3592 * SFP is valid, read sfp data
3593 */
3594static void
3595bfa_sfp_getdata(struct bfa_sfp_s *sfp, enum bfi_sfp_mem_e memtype)
3596{
3597 struct bfi_sfp_req_s *req = (struct bfi_sfp_req_s *)sfp->mbcmd.msg;
3598
3599 WARN_ON(sfp->lock != 0);
3600 bfa_trc(sfp, sfp->state);
3601
3602 sfp->lock = 1;
3603 sfp->memtype = memtype;
3604 req->memtype = memtype;
3605
3606 /* Setup SG list */
3607 bfa_alen_set(&req->alen, sizeof(struct sfp_mem_s), sfp->dbuf_pa);
3608
3609 bfa_sfp_getdata_send(sfp);
3610}
3611
3612/*
Krishna Gudipati7826f302011-07-20 16:59:13 -07003613 * SFP scn handler
3614 */
3615static void
3616bfa_sfp_scn(struct bfa_sfp_s *sfp, struct bfi_mbmsg_s *msg)
3617{
3618 struct bfi_sfp_scn_s *rsp = (struct bfi_sfp_scn_s *) msg;
3619
3620 switch (rsp->event) {
3621 case BFA_SFP_SCN_INSERTED:
3622 sfp->state = BFA_SFP_STATE_INSERTED;
3623 sfp->data_valid = 0;
3624 bfa_sfp_scn_aen_post(sfp, rsp);
3625 break;
3626 case BFA_SFP_SCN_REMOVED:
3627 sfp->state = BFA_SFP_STATE_REMOVED;
3628 sfp->data_valid = 0;
3629 bfa_sfp_scn_aen_post(sfp, rsp);
3630 break;
3631 case BFA_SFP_SCN_FAILED:
3632 sfp->state = BFA_SFP_STATE_FAILED;
3633 sfp->data_valid = 0;
3634 bfa_sfp_scn_aen_post(sfp, rsp);
3635 break;
3636 case BFA_SFP_SCN_UNSUPPORT:
3637 sfp->state = BFA_SFP_STATE_UNSUPPORT;
3638 bfa_sfp_scn_aen_post(sfp, rsp);
3639 if (!sfp->lock)
3640 bfa_sfp_getdata(sfp, BFI_SFP_MEM_ALL);
3641 break;
3642 case BFA_SFP_SCN_POM:
3643 bfa_sfp_scn_aen_post(sfp, rsp);
3644 break;
3645 case BFA_SFP_SCN_VALID:
3646 sfp->state = BFA_SFP_STATE_VALID;
3647 if (!sfp->lock)
3648 bfa_sfp_getdata(sfp, BFI_SFP_MEM_ALL);
3649 break;
3650 default:
3651 bfa_trc(sfp, rsp->event);
3652 WARN_ON(1);
3653 }
3654}
3655
3656/*
Krishna Gudipati51e569a2011-06-24 20:26:25 -07003657 * SFP show complete
3658 */
3659static void
3660bfa_sfp_show_comp(struct bfa_sfp_s *sfp, struct bfi_mbmsg_s *msg)
3661{
3662 struct bfi_sfp_rsp_s *rsp = (struct bfi_sfp_rsp_s *) msg;
3663
3664 if (!sfp->lock) {
3665 /*
3666 * receiving response after ioc failure
3667 */
3668 bfa_trc(sfp, sfp->lock);
3669 return;
3670 }
3671
3672 bfa_trc(sfp, rsp->status);
3673 if (rsp->status == BFA_STATUS_OK) {
3674 sfp->data_valid = 1;
3675 if (sfp->state == BFA_SFP_STATE_VALID)
3676 sfp->status = BFA_STATUS_OK;
3677 else if (sfp->state == BFA_SFP_STATE_UNSUPPORT)
3678 sfp->status = BFA_STATUS_SFP_UNSUPP;
3679 else
3680 bfa_trc(sfp, sfp->state);
3681 } else {
3682 sfp->data_valid = 0;
3683 sfp->status = rsp->status;
3684 /* sfpshow shouldn't change sfp state */
3685 }
3686
3687 bfa_trc(sfp, sfp->memtype);
3688 if (sfp->memtype == BFI_SFP_MEM_DIAGEXT) {
3689 bfa_trc(sfp, sfp->data_valid);
3690 if (sfp->data_valid) {
3691 u32 size = sizeof(struct sfp_mem_s);
3692 u8 *des = (u8 *) &(sfp->sfpmem->srlid_base);
3693 memcpy(des, sfp->dbuf_kva, size);
3694 }
3695 /*
3696 * Queue completion callback.
3697 */
3698 bfa_cb_sfp_show(sfp);
3699 } else
3700 sfp->lock = 0;
3701
3702 bfa_trc(sfp, sfp->state_query_lock);
3703 if (sfp->state_query_lock) {
3704 sfp->state = rsp->state;
3705 /* Complete callback */
3706 bfa_cb_sfp_state_query(sfp);
3707 }
3708}
3709
3710/*
3711 * SFP query fw sfp state
3712 */
3713static void
3714bfa_sfp_state_query(struct bfa_sfp_s *sfp)
3715{
3716 struct bfi_sfp_req_s *req = (struct bfi_sfp_req_s *)sfp->mbcmd.msg;
3717
3718 /* Should not be doing query if not in _INIT state */
3719 WARN_ON(sfp->state != BFA_SFP_STATE_INIT);
3720 WARN_ON(sfp->state_query_lock != 0);
3721 bfa_trc(sfp, sfp->state);
3722
3723 sfp->state_query_lock = 1;
3724 req->memtype = 0;
3725
3726 if (!sfp->lock)
3727 bfa_sfp_getdata(sfp, BFI_SFP_MEM_ALL);
3728}
3729
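/*
 * Classify the inserted module as copper, electrical, long-wave or short-wave
 * optics from the transceiver compliance code bytes in the serial ID base
 * data (presumably laid out per SFF-8472), falling back to the 10G Ethernet
 * compliance bits when no FC transmitter technology bit is set.
 */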
3730static void
3731bfa_sfp_media_get(struct bfa_sfp_s *sfp)
3732{
3733 enum bfa_defs_sfp_media_e *media = sfp->media;
3734
3735 *media = BFA_SFP_MEDIA_UNKNOWN;
3736
3737 if (sfp->state == BFA_SFP_STATE_UNSUPPORT)
3738 *media = BFA_SFP_MEDIA_UNSUPPORT;
3739 else if (sfp->state == BFA_SFP_STATE_VALID) {
3740 union sfp_xcvr_e10g_code_u e10g;
3741 struct sfp_mem_s *sfpmem = (struct sfp_mem_s *)sfp->dbuf_kva;
3742 u16 xmtr_tech = (sfpmem->srlid_base.xcvr[4] & 0x3) << 7 |
3743 (sfpmem->srlid_base.xcvr[5] >> 1);
3744
3745 e10g.b = sfpmem->srlid_base.xcvr[0];
3746 bfa_trc(sfp, e10g.b);
3747 bfa_trc(sfp, xmtr_tech);
3748 /* check fc transmitter tech */
3749 if ((xmtr_tech & SFP_XMTR_TECH_CU) ||
3750 (xmtr_tech & SFP_XMTR_TECH_CP) ||
3751 (xmtr_tech & SFP_XMTR_TECH_CA))
3752 *media = BFA_SFP_MEDIA_CU;
3753 else if ((xmtr_tech & SFP_XMTR_TECH_EL_INTRA) ||
3754 (xmtr_tech & SFP_XMTR_TECH_EL_INTER))
3755 *media = BFA_SFP_MEDIA_EL;
3756 else if ((xmtr_tech & SFP_XMTR_TECH_LL) ||
3757 (xmtr_tech & SFP_XMTR_TECH_LC))
3758 *media = BFA_SFP_MEDIA_LW;
3759 else if ((xmtr_tech & SFP_XMTR_TECH_SL) ||
3760 (xmtr_tech & SFP_XMTR_TECH_SN) ||
3761 (xmtr_tech & SFP_XMTR_TECH_SA))
3762 *media = BFA_SFP_MEDIA_SW;
 3763		/* Check 10G Ethernet Compliance code */
Jing Huang98cdfb42011-11-16 12:29:26 -08003764 else if (e10g.r.e10g_sr)
Krishna Gudipati51e569a2011-06-24 20:26:25 -07003765 *media = BFA_SFP_MEDIA_SW;
Jing Huang98cdfb42011-11-16 12:29:26 -08003766 else if (e10g.r.e10g_lrm && e10g.r.e10g_lr)
Krishna Gudipati51e569a2011-06-24 20:26:25 -07003767 *media = BFA_SFP_MEDIA_LW;
Jing Huang98cdfb42011-11-16 12:29:26 -08003768 else if (e10g.r.e10g_unall)
Krishna Gudipati51e569a2011-06-24 20:26:25 -07003769 *media = BFA_SFP_MEDIA_UNKNOWN;
3770 else
3771 bfa_trc(sfp, 0);
3772 } else
3773 bfa_trc(sfp, sfp->state);
3774}
3775
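/*
 * Validate the user-requested speed against the transceiver capability bits:
 * a 10G request needs one of the 10GbE compliance codes (SR/LR), while
 * 1/2/4/8/16G FC requests are checked against the corresponding FC3 speed
 * capability bits (mb100..mb1600).
 */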
3776static bfa_status_t
3777bfa_sfp_speed_valid(struct bfa_sfp_s *sfp, enum bfa_port_speed portspeed)
3778{
3779 struct sfp_mem_s *sfpmem = (struct sfp_mem_s *)sfp->dbuf_kva;
3780 struct sfp_xcvr_s *xcvr = (struct sfp_xcvr_s *) sfpmem->srlid_base.xcvr;
3781 union sfp_xcvr_fc3_code_u fc3 = xcvr->fc3;
3782 union sfp_xcvr_e10g_code_u e10g = xcvr->e10g;
3783
3784 if (portspeed == BFA_PORT_SPEED_10GBPS) {
3785 if (e10g.r.e10g_sr || e10g.r.e10g_lr)
3786 return BFA_STATUS_OK;
3787 else {
3788 bfa_trc(sfp, e10g.b);
3789 return BFA_STATUS_UNSUPP_SPEED;
3790 }
3791 }
3792 if (((portspeed & BFA_PORT_SPEED_16GBPS) && fc3.r.mb1600) ||
3793 ((portspeed & BFA_PORT_SPEED_8GBPS) && fc3.r.mb800) ||
3794 ((portspeed & BFA_PORT_SPEED_4GBPS) && fc3.r.mb400) ||
3795 ((portspeed & BFA_PORT_SPEED_2GBPS) && fc3.r.mb200) ||
3796 ((portspeed & BFA_PORT_SPEED_1GBPS) && fc3.r.mb100))
3797 return BFA_STATUS_OK;
3798 else {
3799 bfa_trc(sfp, portspeed);
3800 bfa_trc(sfp, fc3.b);
3801 bfa_trc(sfp, e10g.b);
3802 return BFA_STATUS_UNSUPP_SPEED;
3803 }
3804}
3805
3806/*
3807 * SFP hmbox handler
3808 */
3809void
3810bfa_sfp_intr(void *sfparg, struct bfi_mbmsg_s *msg)
3811{
3812 struct bfa_sfp_s *sfp = sfparg;
3813
3814 switch (msg->mh.msg_id) {
3815 case BFI_SFP_I2H_SHOW:
3816 bfa_sfp_show_comp(sfp, msg);
3817 break;
3818
3819 case BFI_SFP_I2H_SCN:
Krishna Gudipati7826f302011-07-20 16:59:13 -07003820 bfa_sfp_scn(sfp, msg);
Krishna Gudipati51e569a2011-06-24 20:26:25 -07003821 break;
3822
3823 default:
3824 bfa_trc(sfp, msg->mh.msg_id);
3825 WARN_ON(1);
3826 }
3827}
3828
3829/*
3830 * Return DMA memory needed by sfp module.
3831 */
3832u32
3833bfa_sfp_meminfo(void)
3834{
3835 return BFA_ROUNDUP(sizeof(struct sfp_mem_s), BFA_DMA_ALIGN_SZ);
3836}
3837
3838/*
3839 * Attach virtual and physical memory for SFP.
3840 */
3841void
3842bfa_sfp_attach(struct bfa_sfp_s *sfp, struct bfa_ioc_s *ioc, void *dev,
3843 struct bfa_trc_mod_s *trcmod)
3844{
3845 sfp->dev = dev;
3846 sfp->ioc = ioc;
3847 sfp->trcmod = trcmod;
3848
3849 sfp->cbfn = NULL;
3850 sfp->cbarg = NULL;
3851 sfp->sfpmem = NULL;
3852 sfp->lock = 0;
3853 sfp->data_valid = 0;
3854 sfp->state = BFA_SFP_STATE_INIT;
3855 sfp->state_query_lock = 0;
3856 sfp->state_query_cbfn = NULL;
3857 sfp->state_query_cbarg = NULL;
3858 sfp->media = NULL;
3859 sfp->portspeed = BFA_PORT_SPEED_UNKNOWN;
3860 sfp->is_elb = BFA_FALSE;
3861
3862 bfa_ioc_mbox_regisr(sfp->ioc, BFI_MC_SFP, bfa_sfp_intr, sfp);
3863 bfa_q_qe_init(&sfp->ioc_notify);
3864 bfa_ioc_notify_init(&sfp->ioc_notify, bfa_sfp_notify, sfp);
3865 list_add_tail(&sfp->ioc_notify.qe, &sfp->ioc->notify_q);
3866}
3867
3868/*
3869 * Claim Memory for SFP
3870 */
3871void
3872bfa_sfp_memclaim(struct bfa_sfp_s *sfp, u8 *dm_kva, u64 dm_pa)
3873{
3874 sfp->dbuf_kva = dm_kva;
3875 sfp->dbuf_pa = dm_pa;
3876 memset(sfp->dbuf_kva, 0, sizeof(struct sfp_mem_s));
3877
3878 dm_kva += BFA_ROUNDUP(sizeof(struct sfp_mem_s), BFA_DMA_ALIGN_SZ);
3879 dm_pa += BFA_ROUNDUP(sizeof(struct sfp_mem_s), BFA_DMA_ALIGN_SZ);
3880}
3881
3882/*
3883 * Show SFP eeprom content
3884 *
3885 * @param[in] sfp - bfa sfp module
3886 *
3887 * @param[out] sfpmem - sfp eeprom data
3888 *
3889 */
3890bfa_status_t
3891bfa_sfp_show(struct bfa_sfp_s *sfp, struct sfp_mem_s *sfpmem,
3892 bfa_cb_sfp_t cbfn, void *cbarg)
3893{
3894
3895 if (!bfa_ioc_is_operational(sfp->ioc)) {
3896 bfa_trc(sfp, 0);
3897 return BFA_STATUS_IOC_NON_OP;
3898 }
3899
3900 if (sfp->lock) {
3901 bfa_trc(sfp, 0);
3902 return BFA_STATUS_DEVBUSY;
3903 }
3904
3905 sfp->cbfn = cbfn;
3906 sfp->cbarg = cbarg;
3907 sfp->sfpmem = sfpmem;
3908
3909 bfa_sfp_getdata(sfp, BFI_SFP_MEM_DIAGEXT);
3910 return BFA_STATUS_OK;
3911}
3912
3913/*
3914 * Return SFP Media type
3915 *
3916 * @param[in] sfp - bfa sfp module
3917 *
 3918 * @param[out]	media - media type of the inserted SFP
3919 *
3920 */
3921bfa_status_t
3922bfa_sfp_media(struct bfa_sfp_s *sfp, enum bfa_defs_sfp_media_e *media,
3923 bfa_cb_sfp_t cbfn, void *cbarg)
3924{
3925 if (!bfa_ioc_is_operational(sfp->ioc)) {
3926 bfa_trc(sfp, 0);
3927 return BFA_STATUS_IOC_NON_OP;
3928 }
3929
3930 sfp->media = media;
3931 if (sfp->state == BFA_SFP_STATE_INIT) {
3932 if (sfp->state_query_lock) {
3933 bfa_trc(sfp, 0);
3934 return BFA_STATUS_DEVBUSY;
3935 } else {
3936 sfp->state_query_cbfn = cbfn;
3937 sfp->state_query_cbarg = cbarg;
3938 bfa_sfp_state_query(sfp);
3939 return BFA_STATUS_SFP_NOT_READY;
3940 }
3941 }
3942
3943 bfa_sfp_media_get(sfp);
3944 return BFA_STATUS_OK;
3945}
3946
3947/*
3948 * Check if user set port speed is allowed by the SFP
3949 *
3950 * @param[in] sfp - bfa sfp module
3951 * @param[in] portspeed - port speed from user
3952 *
3953 */
3954bfa_status_t
3955bfa_sfp_speed(struct bfa_sfp_s *sfp, enum bfa_port_speed portspeed,
3956 bfa_cb_sfp_t cbfn, void *cbarg)
3957{
3958 WARN_ON(portspeed == BFA_PORT_SPEED_UNKNOWN);
3959
3960 if (!bfa_ioc_is_operational(sfp->ioc))
3961 return BFA_STATUS_IOC_NON_OP;
3962
 3963	/* For Mezz card, all speeds are allowed */
3964 if (bfa_mfg_is_mezz(sfp->ioc->attr->card_type))
3965 return BFA_STATUS_OK;
3966
3967 /* Check SFP state */
3968 sfp->portspeed = portspeed;
3969 if (sfp->state == BFA_SFP_STATE_INIT) {
3970 if (sfp->state_query_lock) {
3971 bfa_trc(sfp, 0);
3972 return BFA_STATUS_DEVBUSY;
3973 } else {
3974 sfp->state_query_cbfn = cbfn;
3975 sfp->state_query_cbarg = cbarg;
3976 bfa_sfp_state_query(sfp);
3977 return BFA_STATUS_SFP_NOT_READY;
3978 }
3979 }
3980
3981 if (sfp->state == BFA_SFP_STATE_REMOVED ||
3982 sfp->state == BFA_SFP_STATE_FAILED) {
3983 bfa_trc(sfp, sfp->state);
3984 return BFA_STATUS_NO_SFP_DEV;
3985 }
3986
3987 if (sfp->state == BFA_SFP_STATE_INSERTED) {
3988 bfa_trc(sfp, sfp->state);
3989 return BFA_STATUS_DEVBUSY; /* sfp is reading data */
3990 }
3991
 3992	/* For eloopback, all speeds are allowed */
3993 if (sfp->is_elb)
3994 return BFA_STATUS_OK;
3995
3996 return bfa_sfp_speed_valid(sfp, portspeed);
3997}
Krishna Gudipati5a54b1d2011-06-24 20:27:13 -07003998
3999/*
4000 * Flash module specific
4001 */
4002
4003/*
 4004 * The FLASH DMA buffer should be big enough to hold both the MFG block and
 4005 * the ASIC block (64k) at the same time, and should also be 2k aligned so
 4006 * that a write segment does not cross a sector boundary.
4007 */
4008#define BFA_FLASH_SEG_SZ 2048
4009#define BFA_FLASH_DMA_BUF_SZ \
4010 BFA_ROUNDUP(0x010000 + sizeof(struct bfa_mfg_block_s), BFA_FLASH_SEG_SZ)
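/*
 * Worked example (assuming a 4 KB manufacturing block): the buffer size is
 * BFA_ROUNDUP(0x10000 + 0x1000, 2048) = 0x11000 bytes (68 KB); the exact
 * value depends on sizeof(struct bfa_mfg_block_s).
 */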
4011
4012static void
Krishna Gudipati7826f302011-07-20 16:59:13 -07004013bfa_flash_aen_audit_post(struct bfa_ioc_s *ioc, enum bfa_audit_aen_event event,
4014 int inst, int type)
4015{
4016 struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad;
4017 struct bfa_aen_entry_s *aen_entry;
4018
4019 bfad_get_aen_entry(bfad, aen_entry);
4020 if (!aen_entry)
4021 return;
4022
4023 aen_entry->aen_data.audit.pwwn = ioc->attr->pwwn;
4024 aen_entry->aen_data.audit.partition_inst = inst;
4025 aen_entry->aen_data.audit.partition_type = type;
4026
4027 /* Send the AEN notification */
4028 bfad_im_post_vendor_event(aen_entry, bfad, ++ioc->ioc_aen_seq,
4029 BFA_AEN_CAT_AUDIT, event);
4030}
4031
4032static void
Krishna Gudipati5a54b1d2011-06-24 20:27:13 -07004033bfa_flash_cb(struct bfa_flash_s *flash)
4034{
4035 flash->op_busy = 0;
4036 if (flash->cbfn)
4037 flash->cbfn(flash->cbarg, flash->status);
4038}
4039
4040static void
4041bfa_flash_notify(void *cbarg, enum bfa_ioc_event_e event)
4042{
4043 struct bfa_flash_s *flash = cbarg;
4044
4045 bfa_trc(flash, event);
4046 switch (event) {
4047 case BFA_IOC_E_DISABLED:
4048 case BFA_IOC_E_FAILED:
4049 if (flash->op_busy) {
4050 flash->status = BFA_STATUS_IOC_FAILURE;
4051 flash->cbfn(flash->cbarg, flash->status);
4052 flash->op_busy = 0;
4053 }
4054 break;
4055
4056 default:
4057 break;
4058 }
4059}
4060
4061/*
4062 * Send flash attribute query request.
4063 *
4064 * @param[in] cbarg - callback argument
4065 */
4066static void
4067bfa_flash_query_send(void *cbarg)
4068{
4069 struct bfa_flash_s *flash = cbarg;
4070 struct bfi_flash_query_req_s *msg =
4071 (struct bfi_flash_query_req_s *) flash->mb.msg;
4072
4073 bfi_h2i_set(msg->mh, BFI_MC_FLASH, BFI_FLASH_H2I_QUERY_REQ,
4074 bfa_ioc_portid(flash->ioc));
4075 bfa_alen_set(&msg->alen, sizeof(struct bfa_flash_attr_s),
4076 flash->dbuf_pa);
4077 bfa_ioc_mbox_queue(flash->ioc, &flash->mb);
4078}
4079
4080/*
4081 * Send flash write request.
4082 *
 4083 * @param[in] flash - flash structure
4084 */
4085static void
4086bfa_flash_write_send(struct bfa_flash_s *flash)
4087{
4088 struct bfi_flash_write_req_s *msg =
4089 (struct bfi_flash_write_req_s *) flash->mb.msg;
4090 u32 len;
4091
4092 msg->type = be32_to_cpu(flash->type);
4093 msg->instance = flash->instance;
4094 msg->offset = be32_to_cpu(flash->addr_off + flash->offset);
4095 len = (flash->residue < BFA_FLASH_DMA_BUF_SZ) ?
4096 flash->residue : BFA_FLASH_DMA_BUF_SZ;
4097 msg->length = be32_to_cpu(len);
4098
4099 /* indicate if it's the last msg of the whole write operation */
4100 msg->last = (len == flash->residue) ? 1 : 0;
4101
4102 bfi_h2i_set(msg->mh, BFI_MC_FLASH, BFI_FLASH_H2I_WRITE_REQ,
4103 bfa_ioc_portid(flash->ioc));
4104 bfa_alen_set(&msg->alen, len, flash->dbuf_pa);
4105 memcpy(flash->dbuf_kva, flash->ubuf + flash->offset, len);
4106 bfa_ioc_mbox_queue(flash->ioc, &flash->mb);
4107
4108 flash->residue -= len;
4109 flash->offset += len;
4110}
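/*
 * Note: large writes are split into BFA_FLASH_DMA_BUF_SZ chunks. The first
 * chunk is sent from bfa_flash_update_part(); each WRITE_RSP then triggers
 * the next chunk from bfa_flash_intr() until flash->residue reaches zero,
 * with msg->last flagging the final fragment.
 */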
4111
4112/*
4113 * Send flash read request.
4114 *
4115 * @param[in] cbarg - callback argument
4116 */
4117static void
4118bfa_flash_read_send(void *cbarg)
4119{
4120 struct bfa_flash_s *flash = cbarg;
4121 struct bfi_flash_read_req_s *msg =
4122 (struct bfi_flash_read_req_s *) flash->mb.msg;
4123 u32 len;
4124
4125 msg->type = be32_to_cpu(flash->type);
4126 msg->instance = flash->instance;
4127 msg->offset = be32_to_cpu(flash->addr_off + flash->offset);
4128 len = (flash->residue < BFA_FLASH_DMA_BUF_SZ) ?
4129 flash->residue : BFA_FLASH_DMA_BUF_SZ;
4130 msg->length = be32_to_cpu(len);
4131 bfi_h2i_set(msg->mh, BFI_MC_FLASH, BFI_FLASH_H2I_READ_REQ,
4132 bfa_ioc_portid(flash->ioc));
4133 bfa_alen_set(&msg->alen, len, flash->dbuf_pa);
4134 bfa_ioc_mbox_queue(flash->ioc, &flash->mb);
4135}
4136
4137/*
4138 * Send flash erase request.
4139 *
4140 * @param[in] cbarg - callback argument
4141 */
4142static void
4143bfa_flash_erase_send(void *cbarg)
4144{
4145 struct bfa_flash_s *flash = cbarg;
4146 struct bfi_flash_erase_req_s *msg =
4147 (struct bfi_flash_erase_req_s *) flash->mb.msg;
4148
4149 msg->type = be32_to_cpu(flash->type);
4150 msg->instance = flash->instance;
4151 bfi_h2i_set(msg->mh, BFI_MC_FLASH, BFI_FLASH_H2I_ERASE_REQ,
4152 bfa_ioc_portid(flash->ioc));
4153 bfa_ioc_mbox_queue(flash->ioc, &flash->mb);
4154}
4155
4156/*
4157 * Process flash response messages upon receiving interrupts.
4158 *
4159 * @param[in] flasharg - flash structure
4160 * @param[in] msg - message structure
4161 */
4162static void
4163bfa_flash_intr(void *flasharg, struct bfi_mbmsg_s *msg)
4164{
4165 struct bfa_flash_s *flash = flasharg;
4166 u32 status;
4167
4168 union {
4169 struct bfi_flash_query_rsp_s *query;
4170 struct bfi_flash_erase_rsp_s *erase;
4171 struct bfi_flash_write_rsp_s *write;
4172 struct bfi_flash_read_rsp_s *read;
Krishna Gudipati7826f302011-07-20 16:59:13 -07004173 struct bfi_flash_event_s *event;
Krishna Gudipati5a54b1d2011-06-24 20:27:13 -07004174 struct bfi_mbmsg_s *msg;
4175 } m;
4176
4177 m.msg = msg;
4178 bfa_trc(flash, msg->mh.msg_id);
4179
4180 if (!flash->op_busy && msg->mh.msg_id != BFI_FLASH_I2H_EVENT) {
4181 /* receiving response after ioc failure */
4182 bfa_trc(flash, 0x9999);
4183 return;
4184 }
4185
4186 switch (msg->mh.msg_id) {
4187 case BFI_FLASH_I2H_QUERY_RSP:
4188 status = be32_to_cpu(m.query->status);
4189 bfa_trc(flash, status);
4190 if (status == BFA_STATUS_OK) {
4191 u32 i;
4192 struct bfa_flash_attr_s *attr, *f;
4193
4194 attr = (struct bfa_flash_attr_s *) flash->ubuf;
4195 f = (struct bfa_flash_attr_s *) flash->dbuf_kva;
4196 attr->status = be32_to_cpu(f->status);
4197 attr->npart = be32_to_cpu(f->npart);
4198 bfa_trc(flash, attr->status);
4199 bfa_trc(flash, attr->npart);
4200 for (i = 0; i < attr->npart; i++) {
4201 attr->part[i].part_type =
4202 be32_to_cpu(f->part[i].part_type);
4203 attr->part[i].part_instance =
4204 be32_to_cpu(f->part[i].part_instance);
4205 attr->part[i].part_off =
4206 be32_to_cpu(f->part[i].part_off);
4207 attr->part[i].part_size =
4208 be32_to_cpu(f->part[i].part_size);
4209 attr->part[i].part_len =
4210 be32_to_cpu(f->part[i].part_len);
4211 attr->part[i].part_status =
4212 be32_to_cpu(f->part[i].part_status);
4213 }
4214 }
4215 flash->status = status;
4216 bfa_flash_cb(flash);
4217 break;
4218 case BFI_FLASH_I2H_ERASE_RSP:
4219 status = be32_to_cpu(m.erase->status);
4220 bfa_trc(flash, status);
4221 flash->status = status;
4222 bfa_flash_cb(flash);
4223 break;
4224 case BFI_FLASH_I2H_WRITE_RSP:
4225 status = be32_to_cpu(m.write->status);
4226 bfa_trc(flash, status);
4227 if (status != BFA_STATUS_OK || flash->residue == 0) {
4228 flash->status = status;
4229 bfa_flash_cb(flash);
4230 } else {
4231 bfa_trc(flash, flash->offset);
4232 bfa_flash_write_send(flash);
4233 }
4234 break;
4235 case BFI_FLASH_I2H_READ_RSP:
4236 status = be32_to_cpu(m.read->status);
4237 bfa_trc(flash, status);
4238 if (status != BFA_STATUS_OK) {
4239 flash->status = status;
4240 bfa_flash_cb(flash);
4241 } else {
4242 u32 len = be32_to_cpu(m.read->length);
4243 bfa_trc(flash, flash->offset);
4244 bfa_trc(flash, len);
4245 memcpy(flash->ubuf + flash->offset,
4246 flash->dbuf_kva, len);
4247 flash->residue -= len;
4248 flash->offset += len;
4249 if (flash->residue == 0) {
4250 flash->status = status;
4251 bfa_flash_cb(flash);
4252 } else
4253 bfa_flash_read_send(flash);
4254 }
4255 break;
4256 case BFI_FLASH_I2H_BOOT_VER_RSP:
Krishna Gudipati7826f302011-07-20 16:59:13 -07004257 break;
Krishna Gudipati5a54b1d2011-06-24 20:27:13 -07004258 case BFI_FLASH_I2H_EVENT:
Krishna Gudipati7826f302011-07-20 16:59:13 -07004259 status = be32_to_cpu(m.event->status);
4260 bfa_trc(flash, status);
4261 if (status == BFA_STATUS_BAD_FWCFG)
4262 bfa_ioc_aen_post(flash->ioc, BFA_IOC_AEN_FWCFG_ERROR);
4263 else if (status == BFA_STATUS_INVALID_VENDOR) {
4264 u32 param;
4265 param = be32_to_cpu(m.event->param);
4266 bfa_trc(flash, param);
4267 bfa_ioc_aen_post(flash->ioc,
4268 BFA_IOC_AEN_INVALID_VENDOR);
4269 }
Krishna Gudipati5a54b1d2011-06-24 20:27:13 -07004270 break;
4271
4272 default:
4273 WARN_ON(1);
4274 }
4275}
4276
4277/*
4278 * Flash memory info API.
4279 *
4280 * @param[in] mincfg - minimal cfg variable
4281 */
4282u32
4283bfa_flash_meminfo(bfa_boolean_t mincfg)
4284{
4285 /* min driver doesn't need flash */
4286 if (mincfg)
4287 return 0;
4288 return BFA_ROUNDUP(BFA_FLASH_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
4289}
4290
4291/*
4292 * Flash attach API.
4293 *
4294 * @param[in] flash - flash structure
4295 * @param[in] ioc - ioc structure
4296 * @param[in] dev - device structure
4297 * @param[in] trcmod - trace module
4298 * @param[in] logmod - log module
4299 */
4300void
4301bfa_flash_attach(struct bfa_flash_s *flash, struct bfa_ioc_s *ioc, void *dev,
4302 struct bfa_trc_mod_s *trcmod, bfa_boolean_t mincfg)
4303{
4304 flash->ioc = ioc;
4305 flash->trcmod = trcmod;
4306 flash->cbfn = NULL;
4307 flash->cbarg = NULL;
4308 flash->op_busy = 0;
4309
4310 bfa_ioc_mbox_regisr(flash->ioc, BFI_MC_FLASH, bfa_flash_intr, flash);
4311 bfa_q_qe_init(&flash->ioc_notify);
4312 bfa_ioc_notify_init(&flash->ioc_notify, bfa_flash_notify, flash);
4313 list_add_tail(&flash->ioc_notify.qe, &flash->ioc->notify_q);
4314
4315 /* min driver doesn't need flash */
4316 if (mincfg) {
4317 flash->dbuf_kva = NULL;
4318 flash->dbuf_pa = 0;
4319 }
4320}
4321
4322/*
4323 * Claim memory for flash
4324 *
4325 * @param[in] flash - flash structure
4326 * @param[in] dm_kva - pointer to virtual memory address
4327 * @param[in] dm_pa - physical memory address
4328 * @param[in] mincfg - minimal cfg variable
4329 */
4330void
4331bfa_flash_memclaim(struct bfa_flash_s *flash, u8 *dm_kva, u64 dm_pa,
4332 bfa_boolean_t mincfg)
4333{
4334 if (mincfg)
4335 return;
4336
4337 flash->dbuf_kva = dm_kva;
4338 flash->dbuf_pa = dm_pa;
4339 memset(flash->dbuf_kva, 0, BFA_FLASH_DMA_BUF_SZ);
4340 dm_kva += BFA_ROUNDUP(BFA_FLASH_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
4341 dm_pa += BFA_ROUNDUP(BFA_FLASH_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
4342}
4343
4344/*
4345 * Get flash attribute.
4346 *
4347 * @param[in] flash - flash structure
4348 * @param[in] attr - flash attribute structure
4349 * @param[in] cbfn - callback function
4350 * @param[in] cbarg - callback argument
4351 *
4352 * Return status.
4353 */
4354bfa_status_t
4355bfa_flash_get_attr(struct bfa_flash_s *flash, struct bfa_flash_attr_s *attr,
4356 bfa_cb_flash_t cbfn, void *cbarg)
4357{
4358 bfa_trc(flash, BFI_FLASH_H2I_QUERY_REQ);
4359
4360 if (!bfa_ioc_is_operational(flash->ioc))
4361 return BFA_STATUS_IOC_NON_OP;
4362
4363 if (flash->op_busy) {
4364 bfa_trc(flash, flash->op_busy);
4365 return BFA_STATUS_DEVBUSY;
4366 }
4367
4368 flash->op_busy = 1;
4369 flash->cbfn = cbfn;
4370 flash->cbarg = cbarg;
4371 flash->ubuf = (u8 *) attr;
4372 bfa_flash_query_send(flash);
4373
4374 return BFA_STATUS_OK;
4375}
4376
4377/*
4378 * Erase flash partition.
4379 *
4380 * @param[in] flash - flash structure
4381 * @param[in] type - flash partition type
4382 * @param[in] instance - flash partition instance
4383 * @param[in] cbfn - callback function
4384 * @param[in] cbarg - callback argument
4385 *
4386 * Return status.
4387 */
4388bfa_status_t
4389bfa_flash_erase_part(struct bfa_flash_s *flash, enum bfa_flash_part_type type,
4390 u8 instance, bfa_cb_flash_t cbfn, void *cbarg)
4391{
4392 bfa_trc(flash, BFI_FLASH_H2I_ERASE_REQ);
4393 bfa_trc(flash, type);
4394 bfa_trc(flash, instance);
4395
4396 if (!bfa_ioc_is_operational(flash->ioc))
4397 return BFA_STATUS_IOC_NON_OP;
4398
4399 if (flash->op_busy) {
4400 bfa_trc(flash, flash->op_busy);
4401 return BFA_STATUS_DEVBUSY;
4402 }
4403
4404 flash->op_busy = 1;
4405 flash->cbfn = cbfn;
4406 flash->cbarg = cbarg;
4407 flash->type = type;
4408 flash->instance = instance;
4409
4410 bfa_flash_erase_send(flash);
Krishna Gudipati7826f302011-07-20 16:59:13 -07004411 bfa_flash_aen_audit_post(flash->ioc, BFA_AUDIT_AEN_FLASH_ERASE,
4412 instance, type);
Krishna Gudipati5a54b1d2011-06-24 20:27:13 -07004413 return BFA_STATUS_OK;
4414}
4415
4416/*
4417 * Update flash partition.
4418 *
4419 * @param[in] flash - flash structure
4420 * @param[in] type - flash partition type
4421 * @param[in] instance - flash partition instance
4422 * @param[in] buf - update data buffer
4423 * @param[in] len - data buffer length
4424 * @param[in] offset - offset relative to the partition starting address
4425 * @param[in] cbfn - callback function
4426 * @param[in] cbarg - callback argument
4427 *
4428 * Return status.
4429 */
4430bfa_status_t
4431bfa_flash_update_part(struct bfa_flash_s *flash, enum bfa_flash_part_type type,
4432 u8 instance, void *buf, u32 len, u32 offset,
4433 bfa_cb_flash_t cbfn, void *cbarg)
4434{
4435 bfa_trc(flash, BFI_FLASH_H2I_WRITE_REQ);
4436 bfa_trc(flash, type);
4437 bfa_trc(flash, instance);
4438 bfa_trc(flash, len);
4439 bfa_trc(flash, offset);
4440
4441 if (!bfa_ioc_is_operational(flash->ioc))
4442 return BFA_STATUS_IOC_NON_OP;
4443
4444 /*
 4445	 * 'len' must be on a word (4-byte) boundary
 4446	 * 'offset' must be on a sector (16kb) boundary
4447 */
4448 if (!len || (len & 0x03) || (offset & 0x00003FFF))
4449 return BFA_STATUS_FLASH_BAD_LEN;
4450
4451 if (type == BFA_FLASH_PART_MFG)
4452 return BFA_STATUS_EINVAL;
4453
4454 if (flash->op_busy) {
4455 bfa_trc(flash, flash->op_busy);
4456 return BFA_STATUS_DEVBUSY;
4457 }
4458
4459 flash->op_busy = 1;
4460 flash->cbfn = cbfn;
4461 flash->cbarg = cbarg;
4462 flash->type = type;
4463 flash->instance = instance;
4464 flash->residue = len;
4465 flash->offset = 0;
4466 flash->addr_off = offset;
4467 flash->ubuf = buf;
4468
4469 bfa_flash_write_send(flash);
4470 return BFA_STATUS_OK;
4471}
4472
4473/*
4474 * Read flash partition.
4475 *
4476 * @param[in] flash - flash structure
4477 * @param[in] type - flash partition type
4478 * @param[in] instance - flash partition instance
4479 * @param[in] buf - read data buffer
4480 * @param[in] len - data buffer length
4481 * @param[in] offset - offset relative to the partition starting address
4482 * @param[in] cbfn - callback function
4483 * @param[in] cbarg - callback argument
4484 *
4485 * Return status.
4486 */
4487bfa_status_t
4488bfa_flash_read_part(struct bfa_flash_s *flash, enum bfa_flash_part_type type,
4489 u8 instance, void *buf, u32 len, u32 offset,
4490 bfa_cb_flash_t cbfn, void *cbarg)
4491{
4492 bfa_trc(flash, BFI_FLASH_H2I_READ_REQ);
4493 bfa_trc(flash, type);
4494 bfa_trc(flash, instance);
4495 bfa_trc(flash, len);
4496 bfa_trc(flash, offset);
4497
4498 if (!bfa_ioc_is_operational(flash->ioc))
4499 return BFA_STATUS_IOC_NON_OP;
4500
4501 /*
 4502	 * 'len' must be on a word (4-byte) boundary
 4503	 * 'offset' must be on a sector (16kb) boundary
4504 */
4505 if (!len || (len & 0x03) || (offset & 0x00003FFF))
4506 return BFA_STATUS_FLASH_BAD_LEN;
4507
4508 if (flash->op_busy) {
4509 bfa_trc(flash, flash->op_busy);
4510 return BFA_STATUS_DEVBUSY;
4511 }
4512
4513 flash->op_busy = 1;
4514 flash->cbfn = cbfn;
4515 flash->cbarg = cbarg;
4516 flash->type = type;
4517 flash->instance = instance;
4518 flash->residue = len;
4519 flash->offset = 0;
4520 flash->addr_off = offset;
4521 flash->ubuf = buf;
4522 bfa_flash_read_send(flash);
4523
4524 return BFA_STATUS_OK;
4525}
Krishna Gudipati3d7fc662011-06-24 20:28:17 -07004526
4527/*
4528 * DIAG module specific
4529 */
4530
4531#define BFA_DIAG_MEMTEST_TOV 50000 /* memtest timeout in msec */
4532#define BFA_DIAG_FWPING_TOV 1000 /* msec */
4533
4534/* IOC event handler */
4535static void
4536bfa_diag_notify(void *diag_arg, enum bfa_ioc_event_e event)
4537{
4538 struct bfa_diag_s *diag = diag_arg;
4539
4540 bfa_trc(diag, event);
4541 bfa_trc(diag, diag->block);
4542 bfa_trc(diag, diag->fwping.lock);
4543 bfa_trc(diag, diag->tsensor.lock);
4544
4545 switch (event) {
4546 case BFA_IOC_E_DISABLED:
4547 case BFA_IOC_E_FAILED:
4548 if (diag->fwping.lock) {
4549 diag->fwping.status = BFA_STATUS_IOC_FAILURE;
4550 diag->fwping.cbfn(diag->fwping.cbarg,
4551 diag->fwping.status);
4552 diag->fwping.lock = 0;
4553 }
4554
4555 if (diag->tsensor.lock) {
4556 diag->tsensor.status = BFA_STATUS_IOC_FAILURE;
4557 diag->tsensor.cbfn(diag->tsensor.cbarg,
4558 diag->tsensor.status);
4559 diag->tsensor.lock = 0;
4560 }
4561
4562 if (diag->block) {
4563 if (diag->timer_active) {
4564 bfa_timer_stop(&diag->timer);
4565 diag->timer_active = 0;
4566 }
4567
4568 diag->status = BFA_STATUS_IOC_FAILURE;
4569 diag->cbfn(diag->cbarg, diag->status);
4570 diag->block = 0;
4571 }
4572 break;
4573
4574 default:
4575 break;
4576 }
4577}
4578
4579static void
4580bfa_diag_memtest_done(void *cbarg)
4581{
4582 struct bfa_diag_s *diag = cbarg;
4583 struct bfa_ioc_s *ioc = diag->ioc;
4584 struct bfa_diag_memtest_result *res = diag->result;
4585 u32 loff = BFI_BOOT_MEMTEST_RES_ADDR;
4586 u32 pgnum, pgoff, i;
4587
4588 pgnum = PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, loff);
4589 pgoff = PSS_SMEM_PGOFF(loff);
4590
4591 writel(pgnum, ioc->ioc_regs.host_page_num_fn);
4592
4593 for (i = 0; i < (sizeof(struct bfa_diag_memtest_result) /
4594 sizeof(u32)); i++) {
4595 /* read test result from smem */
4596 *((u32 *) res + i) =
4597 bfa_mem_read(ioc->ioc_regs.smem_page_start, loff);
4598 loff += sizeof(u32);
4599 }
4600
4601 /* Reset IOC fwstates to BFI_IOC_UNINIT */
4602 bfa_ioc_reset_fwstate(ioc);
4603
4604 res->status = swab32(res->status);
4605 bfa_trc(diag, res->status);
4606
4607 if (res->status == BFI_BOOT_MEMTEST_RES_SIG)
4608 diag->status = BFA_STATUS_OK;
4609 else {
4610 diag->status = BFA_STATUS_MEMTEST_FAILED;
4611 res->addr = swab32(res->addr);
4612 res->exp = swab32(res->exp);
4613 res->act = swab32(res->act);
4614 res->err_status = swab32(res->err_status);
4615 res->err_status1 = swab32(res->err_status1);
4616 res->err_addr = swab32(res->err_addr);
4617 bfa_trc(diag, res->addr);
4618 bfa_trc(diag, res->exp);
4619 bfa_trc(diag, res->act);
4620 bfa_trc(diag, res->err_status);
4621 bfa_trc(diag, res->err_status1);
4622 bfa_trc(diag, res->err_addr);
4623 }
4624 diag->timer_active = 0;
4625 diag->cbfn(diag->cbarg, diag->status);
4626 diag->block = 0;
4627}
4628
4629/*
4630 * Firmware ping
4631 */
4632
4633/*
4634 * Perform DMA test directly
4635 */
4636static void
4637diag_fwping_send(struct bfa_diag_s *diag)
4638{
4639 struct bfi_diag_fwping_req_s *fwping_req;
4640 u32 i;
4641
4642 bfa_trc(diag, diag->fwping.dbuf_pa);
4643
4644 /* fill DMA area with pattern */
4645 for (i = 0; i < (BFI_DIAG_DMA_BUF_SZ >> 2); i++)
4646 *((u32 *)diag->fwping.dbuf_kva + i) = diag->fwping.data;
4647
4648 /* Fill mbox msg */
4649 fwping_req = (struct bfi_diag_fwping_req_s *)diag->fwping.mbcmd.msg;
4650
4651 /* Setup SG list */
4652 bfa_alen_set(&fwping_req->alen, BFI_DIAG_DMA_BUF_SZ,
4653 diag->fwping.dbuf_pa);
4654 /* Set up dma count */
4655 fwping_req->count = cpu_to_be32(diag->fwping.count);
4656 /* Set up data pattern */
4657 fwping_req->data = diag->fwping.data;
4658
4659 /* build host command */
4660 bfi_h2i_set(fwping_req->mh, BFI_MC_DIAG, BFI_DIAG_H2I_FWPING,
4661 bfa_ioc_portid(diag->ioc));
4662
4663 /* send mbox cmd */
4664 bfa_ioc_mbox_queue(diag->ioc, &diag->fwping.mbcmd);
4665}
4666
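/*
 * Verify the ping result. As the pattern check below implies, the firmware
 * is expected to leave the inverted pattern in the DMA buffer when the
 * configured ping count is odd, and the original pattern when it is even.
 */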
4667static void
4668diag_fwping_comp(struct bfa_diag_s *diag,
4669 struct bfi_diag_fwping_rsp_s *diag_rsp)
4670{
4671 u32 rsp_data = diag_rsp->data;
4672 u8 rsp_dma_status = diag_rsp->dma_status;
4673
4674 bfa_trc(diag, rsp_data);
4675 bfa_trc(diag, rsp_dma_status);
4676
4677 if (rsp_dma_status == BFA_STATUS_OK) {
4678 u32 i, pat;
4679 pat = (diag->fwping.count & 0x1) ? ~(diag->fwping.data) :
4680 diag->fwping.data;
4681 /* Check mbox data */
4682 if (diag->fwping.data != rsp_data) {
4683 bfa_trc(diag, rsp_data);
4684 diag->fwping.result->dmastatus =
4685 BFA_STATUS_DATACORRUPTED;
4686 diag->fwping.status = BFA_STATUS_DATACORRUPTED;
4687 diag->fwping.cbfn(diag->fwping.cbarg,
4688 diag->fwping.status);
4689 diag->fwping.lock = 0;
4690 return;
4691 }
4692 /* Check dma pattern */
4693 for (i = 0; i < (BFI_DIAG_DMA_BUF_SZ >> 2); i++) {
4694 if (*((u32 *)diag->fwping.dbuf_kva + i) != pat) {
4695 bfa_trc(diag, i);
4696 bfa_trc(diag, pat);
4697 bfa_trc(diag,
4698 *((u32 *)diag->fwping.dbuf_kva + i));
4699 diag->fwping.result->dmastatus =
4700 BFA_STATUS_DATACORRUPTED;
4701 diag->fwping.status = BFA_STATUS_DATACORRUPTED;
4702 diag->fwping.cbfn(diag->fwping.cbarg,
4703 diag->fwping.status);
4704 diag->fwping.lock = 0;
4705 return;
4706 }
4707 }
4708 diag->fwping.result->dmastatus = BFA_STATUS_OK;
4709 diag->fwping.status = BFA_STATUS_OK;
4710 diag->fwping.cbfn(diag->fwping.cbarg, diag->fwping.status);
4711 diag->fwping.lock = 0;
4712 } else {
4713 diag->fwping.status = BFA_STATUS_HDMA_FAILED;
4714 diag->fwping.cbfn(diag->fwping.cbarg, diag->fwping.status);
4715 diag->fwping.lock = 0;
4716 }
4717}
4718
4719/*
4720 * Temperature Sensor
4721 */
4722
4723static void
4724diag_tempsensor_send(struct bfa_diag_s *diag)
4725{
4726 struct bfi_diag_ts_req_s *msg;
4727
4728 msg = (struct bfi_diag_ts_req_s *)diag->tsensor.mbcmd.msg;
4729 bfa_trc(diag, msg->temp);
4730 /* build host command */
4731 bfi_h2i_set(msg->mh, BFI_MC_DIAG, BFI_DIAG_H2I_TEMPSENSOR,
4732 bfa_ioc_portid(diag->ioc));
4733 /* send mbox cmd */
4734 bfa_ioc_mbox_queue(diag->ioc, &diag->tsensor.mbcmd);
4735}
4736
4737static void
4738diag_tempsensor_comp(struct bfa_diag_s *diag, bfi_diag_ts_rsp_t *rsp)
4739{
4740 if (!diag->tsensor.lock) {
4741 /* receiving response after ioc failure */
4742 bfa_trc(diag, diag->tsensor.lock);
4743 return;
4744 }
4745
4746 /*
4747	 * The ASIC junction temperature sensor is read via a register
4748	 * read operation, so it always returns OK.
4749 */
4750 diag->tsensor.temp->temp = be16_to_cpu(rsp->temp);
4751 diag->tsensor.temp->ts_junc = rsp->ts_junc;
4752 diag->tsensor.temp->ts_brd = rsp->ts_brd;
4753 diag->tsensor.temp->status = BFA_STATUS_OK;
4754
4755 if (rsp->ts_brd) {
4756 if (rsp->status == BFA_STATUS_OK) {
4757 diag->tsensor.temp->brd_temp =
4758 be16_to_cpu(rsp->brd_temp);
4759 } else {
4760 bfa_trc(diag, rsp->status);
4761 diag->tsensor.temp->brd_temp = 0;
4762 diag->tsensor.temp->status = BFA_STATUS_DEVBUSY;
4763 }
4764 }
4765 bfa_trc(diag, rsp->ts_junc);
4766 bfa_trc(diag, rsp->temp);
4767 bfa_trc(diag, rsp->ts_brd);
4768 bfa_trc(diag, rsp->brd_temp);
4769 diag->tsensor.cbfn(diag->tsensor.cbarg, diag->tsensor.status);
4770 diag->tsensor.lock = 0;
4771}
4772
4773/*
4774 * LED Test command
4775 */
4776static void
4777diag_ledtest_send(struct bfa_diag_s *diag, struct bfa_diag_ledtest_s *ledtest)
4778{
4779 struct bfi_diag_ledtest_req_s *msg;
4780
4781 msg = (struct bfi_diag_ledtest_req_s *)diag->ledtest.mbcmd.msg;
4782 /* build host command */
4783 bfi_h2i_set(msg->mh, BFI_MC_DIAG, BFI_DIAG_H2I_LEDTEST,
4784 bfa_ioc_portid(diag->ioc));
4785
4786 /*
4787	 * Convert the frequency from N blinks per 10 sec to the crossbow
4788	 * on-time value; we need a division, so it is done here (example below).
4789 */
4790 if (ledtest->freq)
4791 ledtest->freq = 500 / ledtest->freq;
4792
4793 if (ledtest->freq == 0)
4794 ledtest->freq = 1;
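	/*
	 * Worked example (illustrative): a request of 10 blinks per 10 sec
	 * becomes 500 / 10 = 50; a request above 500 divides down to 0 and
	 * is clamped to the minimum on-time of 1.
	 */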
4795
4796 bfa_trc(diag, ledtest->freq);
4797 /* mcpy(&ledtest_req->req, ledtest, sizeof(bfa_diag_ledtest_t)); */
4798 msg->cmd = (u8) ledtest->cmd;
4799 msg->color = (u8) ledtest->color;
4800 msg->portid = bfa_ioc_portid(diag->ioc);
4801 msg->led = ledtest->led;
4802 msg->freq = cpu_to_be16(ledtest->freq);
4803
4804 /* send mbox cmd */
4805 bfa_ioc_mbox_queue(diag->ioc, &diag->ledtest.mbcmd);
4806}
4807
4808static void
4809diag_ledtest_comp(struct bfa_diag_s *diag, struct bfi_diag_ledtest_rsp_s *msg)
4810{
4811 bfa_trc(diag, diag->ledtest.lock);
4812 diag->ledtest.lock = BFA_FALSE;
4813 /* no bfa_cb_queue is needed because driver is not waiting */
4814}
4815
4816/*
4817 * Port beaconing
4818 */
4819static void
4820diag_portbeacon_send(struct bfa_diag_s *diag, bfa_boolean_t beacon, u32 sec)
4821{
4822 struct bfi_diag_portbeacon_req_s *msg;
4823
4824 msg = (struct bfi_diag_portbeacon_req_s *)diag->beacon.mbcmd.msg;
4825 /* build host command */
4826 bfi_h2i_set(msg->mh, BFI_MC_DIAG, BFI_DIAG_H2I_PORTBEACON,
4827 bfa_ioc_portid(diag->ioc));
4828 msg->beacon = beacon;
4829 msg->period = cpu_to_be32(sec);
4830 /* send mbox cmd */
4831 bfa_ioc_mbox_queue(diag->ioc, &diag->beacon.mbcmd);
4832}
4833
4834static void
4835diag_portbeacon_comp(struct bfa_diag_s *diag)
4836{
4837 bfa_trc(diag, diag->beacon.state);
4838 diag->beacon.state = BFA_FALSE;
4839 if (diag->cbfn_beacon)
4840 diag->cbfn_beacon(diag->dev, BFA_FALSE, diag->beacon.link_e2e);
4841}
4842
4843/*
4844 * Diag hmbox handler
4845 */
4846void
4847bfa_diag_intr(void *diagarg, struct bfi_mbmsg_s *msg)
4848{
4849 struct bfa_diag_s *diag = diagarg;
4850
4851 switch (msg->mh.msg_id) {
4852 case BFI_DIAG_I2H_PORTBEACON:
4853 diag_portbeacon_comp(diag);
4854 break;
4855 case BFI_DIAG_I2H_FWPING:
4856 diag_fwping_comp(diag, (struct bfi_diag_fwping_rsp_s *) msg);
4857 break;
4858 case BFI_DIAG_I2H_TEMPSENSOR:
4859 diag_tempsensor_comp(diag, (bfi_diag_ts_rsp_t *) msg);
4860 break;
4861 case BFI_DIAG_I2H_LEDTEST:
4862 diag_ledtest_comp(diag, (struct bfi_diag_ledtest_rsp_s *) msg);
4863 break;
4864 default:
4865 bfa_trc(diag, msg->mh.msg_id);
4866 WARN_ON(1);
4867 }
4868}
4869
4870/*
4871 * Gen RAM Test
4872 *
4873 * @param[in] *diag - diag data struct
4874 * @param[in]	*memtest - mem test params input from upper layer
4875 * @param[in]	pattern - mem test pattern
4876 * @param[in]	*result - mem test result
4877 * @param[in]	cbfn - mem test callback function
4878 * @param[in]	cbarg - callback function arg
4879 *
4880 * @param[out]
4881 */
4882bfa_status_t
4883bfa_diag_memtest(struct bfa_diag_s *diag, struct bfa_diag_memtest_s *memtest,
4884 u32 pattern, struct bfa_diag_memtest_result *result,
4885 bfa_cb_diag_t cbfn, void *cbarg)
4886{
4887 bfa_trc(diag, pattern);
4888
4889 if (!bfa_ioc_adapter_is_disabled(diag->ioc))
4890 return BFA_STATUS_ADAPTER_ENABLED;
4891
4892 /* check to see if there is another destructive diag cmd running */
4893 if (diag->block) {
4894 bfa_trc(diag, diag->block);
4895 return BFA_STATUS_DEVBUSY;
4896 } else
4897 diag->block = 1;
4898
4899 diag->result = result;
4900 diag->cbfn = cbfn;
4901 diag->cbarg = cbarg;
4902
4903 /* download memtest code and take LPU0 out of reset */
4904 bfa_ioc_boot(diag->ioc, BFI_FWBOOT_TYPE_MEMTEST, BFI_FWBOOT_ENV_OS);
4905
4906 bfa_timer_begin(diag->ioc->timer_mod, &diag->timer,
4907 bfa_diag_memtest_done, diag, BFA_DIAG_MEMTEST_TOV);
4908 diag->timer_active = 1;
4909 return BFA_STATUS_OK;
4910}
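
/*
 * Example (illustrative sketch, not part of the driver): a hypothetical
 * caller disables the adapter first, then starts the memory test and waits
 * for the completion callback.  my_memtest_done(), memtest and result are
 * assumed names used only for this example.
 *
 *	static void my_memtest_done(void *cbarg, bfa_status_t status)
 *	{
 *		struct bfa_diag_memtest_result *res = cbarg;
 *
 *		if (status != BFA_STATUS_OK)
 *			pr_err("memtest failed at 0x%x exp 0x%x act 0x%x\n",
 *			       res->addr, res->exp, res->act);
 *	}
 *
 *	status = bfa_diag_memtest(diag, &memtest, 0xa5a5a5a5, &result,
 *				  my_memtest_done, &result);
 */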
4911
4912/*
4913 * DIAG firmware ping command
4914 *
4915 * @param[in] *diag - diag data struct
4916 * @param[in] cnt - dma loop count for testing PCIE
4917 * @param[in] data - data pattern to pass in fw
4918 * @param[in]	*result - pointer to struct bfa_diag_results_fwping
4919 * @param[in]	cbfn - callback function
4920 * @param[in]	*cbarg - callback function arg
4921 *
4922 * @param[out]
4923 */
4924bfa_status_t
4925bfa_diag_fwping(struct bfa_diag_s *diag, u32 cnt, u32 data,
4926 struct bfa_diag_results_fwping *result, bfa_cb_diag_t cbfn,
4927 void *cbarg)
4928{
4929 bfa_trc(diag, cnt);
4930 bfa_trc(diag, data);
4931
4932 if (!bfa_ioc_is_operational(diag->ioc))
4933 return BFA_STATUS_IOC_NON_OP;
4934
4935 if (bfa_asic_id_ct2(bfa_ioc_devid((diag->ioc))) &&
4936 ((diag->ioc)->clscode == BFI_PCIFN_CLASS_ETH))
4937 return BFA_STATUS_CMD_NOTSUPP;
4938
4939 /* check to see if there is another destructive diag cmd running */
4940 if (diag->block || diag->fwping.lock) {
4941 bfa_trc(diag, diag->block);
4942 bfa_trc(diag, diag->fwping.lock);
4943 return BFA_STATUS_DEVBUSY;
4944 }
4945
4946 /* Initialization */
4947 diag->fwping.lock = 1;
4948 diag->fwping.cbfn = cbfn;
4949 diag->fwping.cbarg = cbarg;
4950 diag->fwping.result = result;
4951 diag->fwping.data = data;
4952 diag->fwping.count = cnt;
4953
4954 /* Init test results */
4955 diag->fwping.result->data = 0;
4956 diag->fwping.result->status = BFA_STATUS_OK;
4957
4958 /* kick off the first ping */
4959 diag_fwping_send(diag);
4960 return BFA_STATUS_OK;
4961}
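
/*
 * Example (illustrative sketch, not part of the driver): a hypothetical
 * caller kicks off 100 DMA loops with a test pattern; fwping_done() and
 * fwping_res are assumed names used only for this example.
 *
 *	status = bfa_diag_fwping(diag, 100, 0x5a5a5a5a, &fwping_res,
 *				 fwping_done, &fwping_res);
 *
 * In fwping_done(), fwping_res.dmastatus is BFA_STATUS_OK on success or
 * BFA_STATUS_DATACORRUPTED if the pattern check failed.
 */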
4962
4963/*
4964 * Read Temperature Sensor
4965 *
4966 * @param[in] *diag - diag data struct
4967 * @param[in]	*result - pointer to struct bfa_diag_results_tempsensor_s
4968 * @param[in]	cbfn - callback function
4969 * @param[in]	*cbarg - callback function arg
4970 *
4971 * @param[out]
4972 */
4973bfa_status_t
4974bfa_diag_tsensor_query(struct bfa_diag_s *diag,
4975 struct bfa_diag_results_tempsensor_s *result,
4976 bfa_cb_diag_t cbfn, void *cbarg)
4977{
4978 /* check to see if there is a destructive diag cmd running */
4979 if (diag->block || diag->tsensor.lock) {
4980 bfa_trc(diag, diag->block);
4981 bfa_trc(diag, diag->tsensor.lock);
4982 return BFA_STATUS_DEVBUSY;
4983 }
4984
4985 if (!bfa_ioc_is_operational(diag->ioc))
4986 return BFA_STATUS_IOC_NON_OP;
4987
4988 /* Init diag mod params */
4989 diag->tsensor.lock = 1;
4990 diag->tsensor.temp = result;
4991 diag->tsensor.cbfn = cbfn;
4992 diag->tsensor.cbarg = cbarg;
4993
4994 /* Send msg to fw */
4995 diag_tempsensor_send(diag);
4996
4997 return BFA_STATUS_OK;
4998}
4999
5000/*
5001 * LED Test command
5002 *
5003 * @param[in] *diag - diag data struct
5004 * @param[in]	*ledtest - pointer to ledtest data structure
5005 *
5006 * @param[out]
5007 */
5008bfa_status_t
5009bfa_diag_ledtest(struct bfa_diag_s *diag, struct bfa_diag_ledtest_s *ledtest)
5010{
5011 bfa_trc(diag, ledtest->cmd);
5012
5013 if (!bfa_ioc_is_operational(diag->ioc))
5014 return BFA_STATUS_IOC_NON_OP;
5015
5016 if (diag->beacon.state)
5017 return BFA_STATUS_BEACON_ON;
5018
5019 if (diag->ledtest.lock)
5020 return BFA_STATUS_LEDTEST_OP;
5021
5022 /* Send msg to fw */
5023 diag->ledtest.lock = BFA_TRUE;
5024 diag_ledtest_send(diag, ledtest);
5025
5026 return BFA_STATUS_OK;
5027}
5028
5029/*
5030 * Port beaconing command
5031 *
5032 * @param[in] *diag - diag data struct
5033 * @param[in] beacon - port beaconing 1:ON 0:OFF
5034 * @param[in] link_e2e_beacon - link beaconing 1:ON 0:OFF
5035 * @param[in] sec - beaconing duration in seconds
5036 *
5037 * @param[out]
5038 */
5039bfa_status_t
5040bfa_diag_beacon_port(struct bfa_diag_s *diag, bfa_boolean_t beacon,
5041 bfa_boolean_t link_e2e_beacon, uint32_t sec)
5042{
5043 bfa_trc(diag, beacon);
5044 bfa_trc(diag, link_e2e_beacon);
5045 bfa_trc(diag, sec);
5046
5047 if (!bfa_ioc_is_operational(diag->ioc))
5048 return BFA_STATUS_IOC_NON_OP;
5049
5050 if (diag->ledtest.lock)
5051 return BFA_STATUS_LEDTEST_OP;
5052
5053	if (diag->beacon.state && beacon)	/* beacon already on */
5054 return BFA_STATUS_BEACON_ON;
5055
5056 diag->beacon.state = beacon;
5057 diag->beacon.link_e2e = link_e2e_beacon;
5058 if (diag->cbfn_beacon)
5059 diag->cbfn_beacon(diag->dev, beacon, link_e2e_beacon);
5060
5061 /* Send msg to fw */
5062 diag_portbeacon_send(diag, beacon, sec);
5063
5064 return BFA_STATUS_OK;
5065}
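
/*
 * Note: LED test and port beaconing are mutually exclusive; each entry
 * point above rejects the request (BFA_STATUS_BEACON_ON or
 * BFA_STATUS_LEDTEST_OP) while the other operation is in progress.
 */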
5066
5067/*
5068 * Return DMA memory needed by diag module.
5069 */
5070u32
5071bfa_diag_meminfo(void)
5072{
5073 return BFA_ROUNDUP(BFI_DIAG_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
5074}
5075
5076/*
5077 * Attach virtual and physical memory for Diag.
5078 */
5079void
5080bfa_diag_attach(struct bfa_diag_s *diag, struct bfa_ioc_s *ioc, void *dev,
5081 bfa_cb_diag_beacon_t cbfn_beacon, struct bfa_trc_mod_s *trcmod)
5082{
5083 diag->dev = dev;
5084 diag->ioc = ioc;
5085 diag->trcmod = trcmod;
5086
5087 diag->block = 0;
5088 diag->cbfn = NULL;
5089 diag->cbarg = NULL;
5090 diag->result = NULL;
5091 diag->cbfn_beacon = cbfn_beacon;
5092
5093 bfa_ioc_mbox_regisr(diag->ioc, BFI_MC_DIAG, bfa_diag_intr, diag);
5094 bfa_q_qe_init(&diag->ioc_notify);
5095 bfa_ioc_notify_init(&diag->ioc_notify, bfa_diag_notify, diag);
5096 list_add_tail(&diag->ioc_notify.qe, &diag->ioc->notify_q);
5097}
5098
5099void
5100bfa_diag_memclaim(struct bfa_diag_s *diag, u8 *dm_kva, u64 dm_pa)
5101{
5102 diag->fwping.dbuf_kva = dm_kva;
5103 diag->fwping.dbuf_pa = dm_pa;
5104 memset(diag->fwping.dbuf_kva, 0, BFI_DIAG_DMA_BUF_SZ);
5105}
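
/*
 * Example (illustrative sketch): the owning driver sizes the diag DMA
 * buffer with bfa_diag_meminfo() and hands it back via bfa_diag_memclaim();
 * dm_kva/dm_pa are assumed to come from the driver's DMA memory setup.
 *
 *	u32 len = bfa_diag_meminfo();
 *	... reserve len bytes of DMA-able memory at dm_kva / dm_pa ...
 *	bfa_diag_memclaim(diag, dm_kva, dm_pa);
 */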
5106
5107/*
5108 * PHY module specific
5109 */
5110#define BFA_PHY_DMA_BUF_SZ 0x02000 /* 8k dma buffer */
5111#define BFA_PHY_LOCK_STATUS 0x018878 /* phy semaphore status reg */
5112
5113static void
5114bfa_phy_ntoh32(u32 *obuf, u32 *ibuf, int sz)
5115{
5116 int i, m = sz >> 2;
5117
5118 for (i = 0; i < m; i++)
5119 obuf[i] = be32_to_cpu(ibuf[i]);
5120}
5121
5122static bfa_boolean_t
5123bfa_phy_present(struct bfa_phy_s *phy)
5124{
5125 return (phy->ioc->attr->card_type == BFA_MFG_TYPE_LIGHTNING);
5126}
5127
5128static void
5129bfa_phy_notify(void *cbarg, enum bfa_ioc_event_e event)
5130{
5131 struct bfa_phy_s *phy = cbarg;
5132
5133 bfa_trc(phy, event);
5134
5135 switch (event) {
5136 case BFA_IOC_E_DISABLED:
5137 case BFA_IOC_E_FAILED:
5138 if (phy->op_busy) {
5139 phy->status = BFA_STATUS_IOC_FAILURE;
5140 phy->cbfn(phy->cbarg, phy->status);
5141 phy->op_busy = 0;
5142 }
5143 break;
5144
5145 default:
5146 break;
5147 }
5148}
5149
5150/*
5151 * Send phy attribute query request.
5152 *
5153 * @param[in] cbarg - callback argument
5154 */
5155static void
5156bfa_phy_query_send(void *cbarg)
5157{
5158 struct bfa_phy_s *phy = cbarg;
5159 struct bfi_phy_query_req_s *msg =
5160 (struct bfi_phy_query_req_s *) phy->mb.msg;
5161
5162 msg->instance = phy->instance;
5163 bfi_h2i_set(msg->mh, BFI_MC_PHY, BFI_PHY_H2I_QUERY_REQ,
5164 bfa_ioc_portid(phy->ioc));
5165 bfa_alen_set(&msg->alen, sizeof(struct bfa_phy_attr_s), phy->dbuf_pa);
5166 bfa_ioc_mbox_queue(phy->ioc, &phy->mb);
5167}
5168
5169/*
5170 * Send phy write request.
5171 *
5172 * @param[in] cbarg - callback argument
5173 */
5174static void
5175bfa_phy_write_send(void *cbarg)
5176{
5177 struct bfa_phy_s *phy = cbarg;
5178 struct bfi_phy_write_req_s *msg =
5179 (struct bfi_phy_write_req_s *) phy->mb.msg;
5180 u32 len;
5181 u16 *buf, *dbuf;
5182 int i, sz;
5183
5184 msg->instance = phy->instance;
5185 msg->offset = cpu_to_be32(phy->addr_off + phy->offset);
5186 len = (phy->residue < BFA_PHY_DMA_BUF_SZ) ?
5187 phy->residue : BFA_PHY_DMA_BUF_SZ;
5188 msg->length = cpu_to_be32(len);
5189
5190 /* indicate if it's the last msg of the whole write operation */
5191 msg->last = (len == phy->residue) ? 1 : 0;
5192
5193 bfi_h2i_set(msg->mh, BFI_MC_PHY, BFI_PHY_H2I_WRITE_REQ,
5194 bfa_ioc_portid(phy->ioc));
5195 bfa_alen_set(&msg->alen, len, phy->dbuf_pa);
5196
5197 buf = (u16 *) (phy->ubuf + phy->offset);
5198 dbuf = (u16 *)phy->dbuf_kva;
5199 sz = len >> 1;
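	/*
	 * Copy this chunk of the caller's buffer into the DMA buffer,
	 * converting each 16-bit word to big-endian for the firmware.
	 */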
5200 for (i = 0; i < sz; i++)
5201		dbuf[i] = cpu_to_be16(buf[i]);
5202
5203 bfa_ioc_mbox_queue(phy->ioc, &phy->mb);
5204
5205 phy->residue -= len;
5206 phy->offset += len;
5207}
5208
5209/*
5210 * Send phy read request.
5211 *
5212 * @param[in] cbarg - callback argument
5213 */
5214static void
5215bfa_phy_read_send(void *cbarg)
5216{
5217 struct bfa_phy_s *phy = cbarg;
5218 struct bfi_phy_read_req_s *msg =
5219 (struct bfi_phy_read_req_s *) phy->mb.msg;
5220 u32 len;
5221
5222 msg->instance = phy->instance;
5223 msg->offset = cpu_to_be32(phy->addr_off + phy->offset);
5224 len = (phy->residue < BFA_PHY_DMA_BUF_SZ) ?
5225 phy->residue : BFA_PHY_DMA_BUF_SZ;
5226 msg->length = cpu_to_be32(len);
5227 bfi_h2i_set(msg->mh, BFI_MC_PHY, BFI_PHY_H2I_READ_REQ,
5228 bfa_ioc_portid(phy->ioc));
5229 bfa_alen_set(&msg->alen, len, phy->dbuf_pa);
5230 bfa_ioc_mbox_queue(phy->ioc, &phy->mb);
5231}
5232
5233/*
5234 * Send phy stats request.
5235 *
5236 * @param[in] cbarg - callback argument
5237 */
5238static void
5239bfa_phy_stats_send(void *cbarg)
5240{
5241 struct bfa_phy_s *phy = cbarg;
5242 struct bfi_phy_stats_req_s *msg =
5243 (struct bfi_phy_stats_req_s *) phy->mb.msg;
5244
5245 msg->instance = phy->instance;
5246 bfi_h2i_set(msg->mh, BFI_MC_PHY, BFI_PHY_H2I_STATS_REQ,
5247 bfa_ioc_portid(phy->ioc));
5248 bfa_alen_set(&msg->alen, sizeof(struct bfa_phy_stats_s), phy->dbuf_pa);
5249 bfa_ioc_mbox_queue(phy->ioc, &phy->mb);
5250}
5251
5252/*
5253 * Phy memory info API.
5254 *
5255 * @param[in] mincfg - minimal cfg variable
5256 */
5257u32
5258bfa_phy_meminfo(bfa_boolean_t mincfg)
5259{
5260 /* min driver doesn't need phy */
5261 if (mincfg)
5262 return 0;
5263
5264 return BFA_ROUNDUP(BFA_PHY_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
5265}
5266
5267/*
5268 * Phy attach API.
5269 *
5270 * @param[in] phy - phy structure
5271 * @param[in] ioc - ioc structure
5272 * @param[in] dev - device structure
5273 * @param[in] trcmod - trace module
5274 * @param[in]	mincfg - minimal cfg variable
5275 */
5276void
5277bfa_phy_attach(struct bfa_phy_s *phy, struct bfa_ioc_s *ioc, void *dev,
5278 struct bfa_trc_mod_s *trcmod, bfa_boolean_t mincfg)
5279{
5280 phy->ioc = ioc;
5281 phy->trcmod = trcmod;
5282 phy->cbfn = NULL;
5283 phy->cbarg = NULL;
5284 phy->op_busy = 0;
5285
5286 bfa_ioc_mbox_regisr(phy->ioc, BFI_MC_PHY, bfa_phy_intr, phy);
5287 bfa_q_qe_init(&phy->ioc_notify);
5288 bfa_ioc_notify_init(&phy->ioc_notify, bfa_phy_notify, phy);
5289 list_add_tail(&phy->ioc_notify.qe, &phy->ioc->notify_q);
5290
5291 /* min driver doesn't need phy */
5292 if (mincfg) {
5293 phy->dbuf_kva = NULL;
5294 phy->dbuf_pa = 0;
5295 }
5296}
5297
5298/*
5299 * Claim memory for phy
5300 *
5301 * @param[in] phy - phy structure
5302 * @param[in] dm_kva - pointer to virtual memory address
5303 * @param[in] dm_pa - physical memory address
5304 * @param[in] mincfg - minimal cfg variable
5305 */
5306void
5307bfa_phy_memclaim(struct bfa_phy_s *phy, u8 *dm_kva, u64 dm_pa,
5308 bfa_boolean_t mincfg)
5309{
5310 if (mincfg)
5311 return;
5312
5313 phy->dbuf_kva = dm_kva;
5314 phy->dbuf_pa = dm_pa;
5315 memset(phy->dbuf_kva, 0, BFA_PHY_DMA_BUF_SZ);
5316 dm_kva += BFA_ROUNDUP(BFA_PHY_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
5317 dm_pa += BFA_ROUNDUP(BFA_PHY_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
5318}
5319
5320bfa_boolean_t
5321bfa_phy_busy(struct bfa_ioc_s *ioc)
5322{
5323 void __iomem *rb;
5324
5325 rb = bfa_ioc_bar0(ioc);
5326 return readl(rb + BFA_PHY_LOCK_STATUS);
5327}
5328
5329/*
5330 * Get phy attribute.
5331 *
5332 * @param[in] phy - phy structure
5333 * @param[in]	instance - phy image instance
 * @param[in]	attr - phy attribute structure
5334 * @param[in] cbfn - callback function
5335 * @param[in] cbarg - callback argument
5336 *
5337 * Return status.
5338 */
5339bfa_status_t
5340bfa_phy_get_attr(struct bfa_phy_s *phy, u8 instance,
5341 struct bfa_phy_attr_s *attr, bfa_cb_phy_t cbfn, void *cbarg)
5342{
5343 bfa_trc(phy, BFI_PHY_H2I_QUERY_REQ);
5344 bfa_trc(phy, instance);
5345
5346 if (!bfa_phy_present(phy))
5347 return BFA_STATUS_PHY_NOT_PRESENT;
5348
5349 if (!bfa_ioc_is_operational(phy->ioc))
5350 return BFA_STATUS_IOC_NON_OP;
5351
5352 if (phy->op_busy || bfa_phy_busy(phy->ioc)) {
5353 bfa_trc(phy, phy->op_busy);
5354 return BFA_STATUS_DEVBUSY;
5355 }
5356
5357 phy->op_busy = 1;
5358 phy->cbfn = cbfn;
5359 phy->cbarg = cbarg;
5360 phy->instance = instance;
5361 phy->ubuf = (uint8_t *) attr;
5362 bfa_phy_query_send(phy);
5363
5364 return BFA_STATUS_OK;
5365}
5366
5367/*
5368 * Get phy stats.
5369 *
5370 * @param[in] phy - phy structure
5371 * @param[in] instance - phy image instance
5372 * @param[in] stats - pointer to phy stats
5373 * @param[in] cbfn - callback function
5374 * @param[in] cbarg - callback argument
5375 *
5376 * Return status.
5377 */
5378bfa_status_t
5379bfa_phy_get_stats(struct bfa_phy_s *phy, u8 instance,
5380 struct bfa_phy_stats_s *stats,
5381 bfa_cb_phy_t cbfn, void *cbarg)
5382{
5383 bfa_trc(phy, BFI_PHY_H2I_STATS_REQ);
5384 bfa_trc(phy, instance);
5385
5386 if (!bfa_phy_present(phy))
5387 return BFA_STATUS_PHY_NOT_PRESENT;
5388
5389 if (!bfa_ioc_is_operational(phy->ioc))
5390 return BFA_STATUS_IOC_NON_OP;
5391
5392 if (phy->op_busy || bfa_phy_busy(phy->ioc)) {
5393 bfa_trc(phy, phy->op_busy);
5394 return BFA_STATUS_DEVBUSY;
5395 }
5396
5397 phy->op_busy = 1;
5398 phy->cbfn = cbfn;
5399 phy->cbarg = cbarg;
5400 phy->instance = instance;
5401 phy->ubuf = (u8 *) stats;
5402 bfa_phy_stats_send(phy);
5403
5404 return BFA_STATUS_OK;
5405}
5406
5407/*
5408 * Update phy image.
5409 *
5410 * @param[in] phy - phy structure
5411 * @param[in] instance - phy image instance
5412 * @param[in] buf - update data buffer
5413 * @param[in] len - data buffer length
5414 * @param[in] offset - offset relative to starting address
5415 * @param[in] cbfn - callback function
5416 * @param[in] cbarg - callback argument
5417 *
5418 * Return status.
5419 */
5420bfa_status_t
5421bfa_phy_update(struct bfa_phy_s *phy, u8 instance,
5422 void *buf, u32 len, u32 offset,
5423 bfa_cb_phy_t cbfn, void *cbarg)
5424{
5425 bfa_trc(phy, BFI_PHY_H2I_WRITE_REQ);
5426 bfa_trc(phy, instance);
5427 bfa_trc(phy, len);
5428 bfa_trc(phy, offset);
5429
5430 if (!bfa_phy_present(phy))
5431 return BFA_STATUS_PHY_NOT_PRESENT;
5432
5433 if (!bfa_ioc_is_operational(phy->ioc))
5434 return BFA_STATUS_IOC_NON_OP;
5435
5436 /* 'len' must be in word (4-byte) boundary */
5437	/* 'len' must be on a word (4-byte) boundary */
5438 return BFA_STATUS_FAILED;
5439
5440 if (phy->op_busy || bfa_phy_busy(phy->ioc)) {
5441 bfa_trc(phy, phy->op_busy);
5442 return BFA_STATUS_DEVBUSY;
5443 }
5444
5445 phy->op_busy = 1;
5446 phy->cbfn = cbfn;
5447 phy->cbarg = cbarg;
5448 phy->instance = instance;
5449 phy->residue = len;
5450 phy->offset = 0;
5451 phy->addr_off = offset;
5452 phy->ubuf = buf;
5453
5454 bfa_phy_write_send(phy);
5455 return BFA_STATUS_OK;
5456}
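
/*
 * Example (illustrative sketch, not part of the driver): a phy image
 * update is asynchronous and internally chunked into BFA_PHY_DMA_BUF_SZ
 * transfers; phy_update_done(), fw_img and fw_len are assumed names.
 *
 *	status = bfa_phy_update(phy, 0, fw_img, fw_len, 0,
 *				phy_update_done, phy);
 *	if (status != BFA_STATUS_OK)
 *		... rejected (busy, IOC not operational, bad length, ...) ...
 *
 * Otherwise phy_update_done() reports the final status once the last
 * chunk has been written.
 */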
5457
5458/*
5459 * Read phy image.
5460 *
5461 * @param[in] phy - phy structure
5462 * @param[in] instance - phy image instance
5463 * @param[in] buf - read data buffer
5464 * @param[in] len - data buffer length
5465 * @param[in] offset - offset relative to starting address
5466 * @param[in] cbfn - callback function
5467 * @param[in] cbarg - callback argument
5468 *
5469 * Return status.
5470 */
5471bfa_status_t
5472bfa_phy_read(struct bfa_phy_s *phy, u8 instance,
5473 void *buf, u32 len, u32 offset,
5474 bfa_cb_phy_t cbfn, void *cbarg)
5475{
5476 bfa_trc(phy, BFI_PHY_H2I_READ_REQ);
5477 bfa_trc(phy, instance);
5478 bfa_trc(phy, len);
5479 bfa_trc(phy, offset);
5480
5481 if (!bfa_phy_present(phy))
5482 return BFA_STATUS_PHY_NOT_PRESENT;
5483
5484 if (!bfa_ioc_is_operational(phy->ioc))
5485 return BFA_STATUS_IOC_NON_OP;
5486
5487 /* 'len' must be in word (4-byte) boundary */
5488	/* 'len' must be on a word (4-byte) boundary */
5489 return BFA_STATUS_FAILED;
5490
5491 if (phy->op_busy || bfa_phy_busy(phy->ioc)) {
5492 bfa_trc(phy, phy->op_busy);
5493 return BFA_STATUS_DEVBUSY;
5494 }
5495
5496 phy->op_busy = 1;
5497 phy->cbfn = cbfn;
5498 phy->cbarg = cbarg;
5499 phy->instance = instance;
5500 phy->residue = len;
5501 phy->offset = 0;
5502 phy->addr_off = offset;
5503 phy->ubuf = buf;
5504 bfa_phy_read_send(phy);
5505
5506 return BFA_STATUS_OK;
5507}
5508
5509/*
5510 * Process phy response messages upon receiving interrupts.
5511 *
5512 * @param[in] phyarg - phy structure
5513 * @param[in] msg - message structure
5514 */
5515void
5516bfa_phy_intr(void *phyarg, struct bfi_mbmsg_s *msg)
5517{
5518 struct bfa_phy_s *phy = phyarg;
5519 u32 status;
5520
5521 union {
5522 struct bfi_phy_query_rsp_s *query;
5523 struct bfi_phy_stats_rsp_s *stats;
5524 struct bfi_phy_write_rsp_s *write;
5525 struct bfi_phy_read_rsp_s *read;
5526 struct bfi_mbmsg_s *msg;
5527 } m;
5528
5529 m.msg = msg;
5530 bfa_trc(phy, msg->mh.msg_id);
5531
5532 if (!phy->op_busy) {
5533 /* receiving response after ioc failure */
5534 bfa_trc(phy, 0x9999);
5535 return;
5536 }
5537
5538 switch (msg->mh.msg_id) {
5539 case BFI_PHY_I2H_QUERY_RSP:
5540 status = be32_to_cpu(m.query->status);
5541 bfa_trc(phy, status);
5542
5543 if (status == BFA_STATUS_OK) {
5544 struct bfa_phy_attr_s *attr =
5545 (struct bfa_phy_attr_s *) phy->ubuf;
5546 bfa_phy_ntoh32((u32 *)attr, (u32 *)phy->dbuf_kva,
5547 sizeof(struct bfa_phy_attr_s));
5548 bfa_trc(phy, attr->status);
5549 bfa_trc(phy, attr->length);
5550 }
5551
5552 phy->status = status;
5553 phy->op_busy = 0;
5554 if (phy->cbfn)
5555 phy->cbfn(phy->cbarg, phy->status);
5556 break;
5557 case BFI_PHY_I2H_STATS_RSP:
5558 status = be32_to_cpu(m.stats->status);
5559 bfa_trc(phy, status);
5560
5561 if (status == BFA_STATUS_OK) {
5562 struct bfa_phy_stats_s *stats =
5563 (struct bfa_phy_stats_s *) phy->ubuf;
5564 bfa_phy_ntoh32((u32 *)stats, (u32 *)phy->dbuf_kva,
5565 sizeof(struct bfa_phy_stats_s));
5566 bfa_trc(phy, stats->status);
5567 }
5568
5569 phy->status = status;
5570 phy->op_busy = 0;
5571 if (phy->cbfn)
5572 phy->cbfn(phy->cbarg, phy->status);
5573 break;
5574 case BFI_PHY_I2H_WRITE_RSP:
5575 status = be32_to_cpu(m.write->status);
5576 bfa_trc(phy, status);
5577
5578 if (status != BFA_STATUS_OK || phy->residue == 0) {
5579 phy->status = status;
5580 phy->op_busy = 0;
5581 if (phy->cbfn)
5582 phy->cbfn(phy->cbarg, phy->status);
5583 } else {
5584 bfa_trc(phy, phy->offset);
5585 bfa_phy_write_send(phy);
5586 }
5587 break;
5588 case BFI_PHY_I2H_READ_RSP:
5589 status = be32_to_cpu(m.read->status);
5590 bfa_trc(phy, status);
5591
5592 if (status != BFA_STATUS_OK) {
5593 phy->status = status;
5594 phy->op_busy = 0;
5595 if (phy->cbfn)
5596 phy->cbfn(phy->cbarg, phy->status);
5597 } else {
5598 u32 len = be32_to_cpu(m.read->length);
5599 u16 *buf = (u16 *)(phy->ubuf + phy->offset);
5600 u16 *dbuf = (u16 *)phy->dbuf_kva;
5601 int i, sz = len >> 1;
5602
5603 bfa_trc(phy, phy->offset);
5604 bfa_trc(phy, len);
5605
5606 for (i = 0; i < sz; i++)
5607 buf[i] = be16_to_cpu(dbuf[i]);
5608
5609 phy->residue -= len;
5610 phy->offset += len;
5611
5612 if (phy->residue == 0) {
5613 phy->status = status;
5614 phy->op_busy = 0;
5615 if (phy->cbfn)
5616 phy->cbfn(phy->cbarg, phy->status);
5617 } else
5618 bfa_phy_read_send(phy);
5619 }
5620 break;
5621 default:
5622 WARN_ON(1);
5623 }
5624}
5625
5626/*
5627 * DCONF module specific
5628 */
5629
5630BFA_MODULE(dconf);
5631
5632/*
5633 * DCONF state machine events
5634 */
5635enum bfa_dconf_event {
5636 BFA_DCONF_SM_INIT = 1, /* dconf Init */
5637 BFA_DCONF_SM_FLASH_COMP = 2, /* read/write to flash */
5638 BFA_DCONF_SM_WR = 3, /* binding change, map */
5639 BFA_DCONF_SM_TIMEOUT = 4, /* Start timer */
5640 BFA_DCONF_SM_EXIT = 5, /* exit dconf module */
5641 BFA_DCONF_SM_IOCDISABLE = 6, /* IOC disable event */
5642};
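
/*
 * Typical flow (see the handlers below): uninit --INIT--> flash_read
 * --FLASH_COMP--> ready --WR--> dirty --TIMEOUT--> sync --FLASH_COMP-->
 * ready, with final_sync and iocdown_dirty covering module exit and IOC
 * disable respectively.
 */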
5643
5644/* forward declaration of DCONF state machine */
5645static void bfa_dconf_sm_uninit(struct bfa_dconf_mod_s *dconf,
5646 enum bfa_dconf_event event);
5647static void bfa_dconf_sm_flash_read(struct bfa_dconf_mod_s *dconf,
5648 enum bfa_dconf_event event);
5649static void bfa_dconf_sm_ready(struct bfa_dconf_mod_s *dconf,
5650 enum bfa_dconf_event event);
5651static void bfa_dconf_sm_dirty(struct bfa_dconf_mod_s *dconf,
5652 enum bfa_dconf_event event);
5653static void bfa_dconf_sm_sync(struct bfa_dconf_mod_s *dconf,
5654 enum bfa_dconf_event event);
5655static void bfa_dconf_sm_final_sync(struct bfa_dconf_mod_s *dconf,
5656 enum bfa_dconf_event event);
5657static void bfa_dconf_sm_iocdown_dirty(struct bfa_dconf_mod_s *dconf,
5658 enum bfa_dconf_event event);
5659
5660static void bfa_dconf_cbfn(void *dconf, bfa_status_t status);
5661static void bfa_dconf_timer(void *cbarg);
5662static bfa_status_t bfa_dconf_flash_write(struct bfa_dconf_mod_s *dconf);
5663static void bfa_dconf_init_cb(void *arg, bfa_status_t status);
5664
5665/*
5666 * Beginning state of the dconf module. Waiting for an event to start.
5667 */
5668static void
5669bfa_dconf_sm_uninit(struct bfa_dconf_mod_s *dconf, enum bfa_dconf_event event)
5670{
5671 bfa_status_t bfa_status;
5672 bfa_trc(dconf->bfa, event);
5673
5674 switch (event) {
5675 case BFA_DCONF_SM_INIT:
5676 if (dconf->min_cfg) {
5677 bfa_trc(dconf->bfa, dconf->min_cfg);
5678 return;
5679 }
5680 bfa_sm_set_state(dconf, bfa_dconf_sm_flash_read);
5681 dconf->flashdone = BFA_FALSE;
5682 bfa_trc(dconf->bfa, dconf->flashdone);
5683 bfa_status = bfa_flash_read_part(BFA_FLASH(dconf->bfa),
5684 BFA_FLASH_PART_DRV, dconf->instance,
5685 dconf->dconf,
5686 sizeof(struct bfa_dconf_s), 0,
5687 bfa_dconf_init_cb, dconf->bfa);
5688 if (bfa_status != BFA_STATUS_OK) {
5689 bfa_dconf_init_cb(dconf->bfa, BFA_STATUS_FAILED);
5690 bfa_sm_set_state(dconf, bfa_dconf_sm_uninit);
5691 return;
5692 }
5693 break;
5694 case BFA_DCONF_SM_EXIT:
5695 dconf->flashdone = BFA_TRUE;
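		/* fall through */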
5696 case BFA_DCONF_SM_IOCDISABLE:
5697 case BFA_DCONF_SM_WR:
5698 case BFA_DCONF_SM_FLASH_COMP:
5699 break;
5700 default:
5701 bfa_sm_fault(dconf->bfa, event);
5702 }
5703}
5704
5705/*
5706 * Read flash for dconf entries and make a call back to the driver once done.
5707 */
5708static void
5709bfa_dconf_sm_flash_read(struct bfa_dconf_mod_s *dconf,
5710 enum bfa_dconf_event event)
5711{
5712 bfa_trc(dconf->bfa, event);
5713
5714 switch (event) {
5715 case BFA_DCONF_SM_FLASH_COMP:
5716 bfa_sm_set_state(dconf, bfa_dconf_sm_ready);
5717 break;
5718 case BFA_DCONF_SM_TIMEOUT:
5719 bfa_sm_set_state(dconf, bfa_dconf_sm_ready);
5720 break;
5721 case BFA_DCONF_SM_EXIT:
5722 dconf->flashdone = BFA_TRUE;
5723 bfa_trc(dconf->bfa, dconf->flashdone);
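		/* fall through */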
5724 case BFA_DCONF_SM_IOCDISABLE:
5725 bfa_sm_set_state(dconf, bfa_dconf_sm_uninit);
5726 break;
5727 default:
5728 bfa_sm_fault(dconf->bfa, event);
5729 }
5730}
5731
5732/*
5733 * DCONF Module is in ready state. Has completed the initialization.
5734 */
5735static void
5736bfa_dconf_sm_ready(struct bfa_dconf_mod_s *dconf, enum bfa_dconf_event event)
5737{
5738 bfa_trc(dconf->bfa, event);
5739
5740 switch (event) {
5741 case BFA_DCONF_SM_WR:
5742 bfa_timer_start(dconf->bfa, &dconf->timer,
5743 bfa_dconf_timer, dconf, BFA_DCONF_UPDATE_TOV);
5744 bfa_sm_set_state(dconf, bfa_dconf_sm_dirty);
5745 break;
5746 case BFA_DCONF_SM_EXIT:
5747 dconf->flashdone = BFA_TRUE;
5748 bfa_trc(dconf->bfa, dconf->flashdone);
5749 bfa_sm_set_state(dconf, bfa_dconf_sm_uninit);
5750 break;
5751 case BFA_DCONF_SM_INIT:
5752 case BFA_DCONF_SM_IOCDISABLE:
5753 break;
5754 default:
5755 bfa_sm_fault(dconf->bfa, event);
5756 }
5757}
5758
5759/*
5760 * Entries are dirty; write them back to the flash.
5761 */
5762
5763static void
5764bfa_dconf_sm_dirty(struct bfa_dconf_mod_s *dconf, enum bfa_dconf_event event)
5765{
5766 bfa_trc(dconf->bfa, event);
5767
5768 switch (event) {
5769 case BFA_DCONF_SM_TIMEOUT:
5770 bfa_sm_set_state(dconf, bfa_dconf_sm_sync);
5771 bfa_dconf_flash_write(dconf);
5772 break;
5773 case BFA_DCONF_SM_WR:
5774 bfa_timer_stop(&dconf->timer);
5775 bfa_timer_start(dconf->bfa, &dconf->timer,
5776 bfa_dconf_timer, dconf, BFA_DCONF_UPDATE_TOV);
5777 break;
5778 case BFA_DCONF_SM_EXIT:
5779 bfa_timer_stop(&dconf->timer);
5780 bfa_timer_start(dconf->bfa, &dconf->timer,
5781 bfa_dconf_timer, dconf, BFA_DCONF_UPDATE_TOV);
5782 bfa_sm_set_state(dconf, bfa_dconf_sm_final_sync);
5783 bfa_dconf_flash_write(dconf);
5784 break;
5785 case BFA_DCONF_SM_FLASH_COMP:
5786 break;
5787 case BFA_DCONF_SM_IOCDISABLE:
5788 bfa_timer_stop(&dconf->timer);
5789 bfa_sm_set_state(dconf, bfa_dconf_sm_iocdown_dirty);
5790 break;
5791 default:
5792 bfa_sm_fault(dconf->bfa, event);
5793 }
5794}
5795
5796/*
5797 * Sync the dconf entries to the flash.
5798 */
5799static void
5800bfa_dconf_sm_final_sync(struct bfa_dconf_mod_s *dconf,
5801 enum bfa_dconf_event event)
5802{
5803 bfa_trc(dconf->bfa, event);
5804
5805 switch (event) {
5806 case BFA_DCONF_SM_IOCDISABLE:
5807 case BFA_DCONF_SM_FLASH_COMP:
5808 bfa_timer_stop(&dconf->timer);
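		/* fall through */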
5809 case BFA_DCONF_SM_TIMEOUT:
5810 bfa_sm_set_state(dconf, bfa_dconf_sm_uninit);
5811 dconf->flashdone = BFA_TRUE;
5812 bfa_trc(dconf->bfa, dconf->flashdone);
5813 bfa_ioc_disable(&dconf->bfa->ioc);
5814 break;
5815 default:
5816 bfa_sm_fault(dconf->bfa, event);
5817 }
5818}
5819
5820static void
5821bfa_dconf_sm_sync(struct bfa_dconf_mod_s *dconf, enum bfa_dconf_event event)
5822{
5823 bfa_trc(dconf->bfa, event);
5824
5825 switch (event) {
5826 case BFA_DCONF_SM_FLASH_COMP:
5827 bfa_sm_set_state(dconf, bfa_dconf_sm_ready);
5828 break;
5829 case BFA_DCONF_SM_WR:
5830 bfa_timer_start(dconf->bfa, &dconf->timer,
5831 bfa_dconf_timer, dconf, BFA_DCONF_UPDATE_TOV);
5832 bfa_sm_set_state(dconf, bfa_dconf_sm_dirty);
5833 break;
5834 case BFA_DCONF_SM_EXIT:
5835 bfa_timer_start(dconf->bfa, &dconf->timer,
5836 bfa_dconf_timer, dconf, BFA_DCONF_UPDATE_TOV);
5837 bfa_sm_set_state(dconf, bfa_dconf_sm_final_sync);
5838 break;
5839 case BFA_DCONF_SM_IOCDISABLE:
5840 bfa_sm_set_state(dconf, bfa_dconf_sm_iocdown_dirty);
5841 break;
5842 default:
5843 bfa_sm_fault(dconf->bfa, event);
5844 }
5845}
5846
5847static void
5848bfa_dconf_sm_iocdown_dirty(struct bfa_dconf_mod_s *dconf,
5849 enum bfa_dconf_event event)
5850{
5851 bfa_trc(dconf->bfa, event);
5852
5853 switch (event) {
5854 case BFA_DCONF_SM_INIT:
5855 bfa_timer_start(dconf->bfa, &dconf->timer,
5856 bfa_dconf_timer, dconf, BFA_DCONF_UPDATE_TOV);
5857 bfa_sm_set_state(dconf, bfa_dconf_sm_dirty);
5858 break;
5859 case BFA_DCONF_SM_EXIT:
5860 dconf->flashdone = BFA_TRUE;
5861 bfa_sm_set_state(dconf, bfa_dconf_sm_uninit);
5862 break;
5863 case BFA_DCONF_SM_IOCDISABLE:
5864 break;
5865 default:
5866 bfa_sm_fault(dconf->bfa, event);
5867 }
5868}
5869
5870/*
5871 * Compute and return memory needed by DRV_CFG module.
5872 */
5873static void
5874bfa_dconf_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *meminfo,
5875 struct bfa_s *bfa)
5876{
5877 struct bfa_mem_kva_s *dconf_kva = BFA_MEM_DCONF_KVA(bfa);
5878
5879 if (cfg->drvcfg.min_cfg)
5880 bfa_mem_kva_setup(meminfo, dconf_kva,
5881 sizeof(struct bfa_dconf_hdr_s));
5882 else
5883 bfa_mem_kva_setup(meminfo, dconf_kva,
5884 sizeof(struct bfa_dconf_s));
5885}
5886
5887static void
5888bfa_dconf_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
5889 struct bfa_pcidev_s *pcidev)
5890{
5891 struct bfa_dconf_mod_s *dconf = BFA_DCONF_MOD(bfa);
5892
5893 dconf->bfad = bfad;
5894 dconf->bfa = bfa;
5895 dconf->instance = bfa->ioc.port_id;
5896 bfa_trc(bfa, dconf->instance);
5897
5898 dconf->dconf = (struct bfa_dconf_s *) bfa_mem_kva_curp(dconf);
5899 if (cfg->drvcfg.min_cfg) {
5900 bfa_mem_kva_curp(dconf) += sizeof(struct bfa_dconf_hdr_s);
5901 dconf->min_cfg = BFA_TRUE;
5902 /*
5903 * Set the flashdone flag to TRUE explicitly as no flash
5904 * write will happen in min_cfg mode.
5905 */
5906 dconf->flashdone = BFA_TRUE;
5907 } else {
5908 dconf->min_cfg = BFA_FALSE;
5909 bfa_mem_kva_curp(dconf) += sizeof(struct bfa_dconf_s);
5910 }
5911
5912 bfa_dconf_read_data_valid(bfa) = BFA_FALSE;
5913 bfa_sm_set_state(dconf, bfa_dconf_sm_uninit);
5914}
5915
5916static void
5917bfa_dconf_init_cb(void *arg, bfa_status_t status)
5918{
5919 struct bfa_s *bfa = arg;
5920 struct bfa_dconf_mod_s *dconf = BFA_DCONF_MOD(bfa);
5921
5922 dconf->flashdone = BFA_TRUE;
5923 bfa_trc(bfa, dconf->flashdone);
5924 bfa_iocfc_cb_dconf_modinit(bfa, status);
5925 if (status == BFA_STATUS_OK) {
5926 bfa_dconf_read_data_valid(bfa) = BFA_TRUE;
5927 if (dconf->dconf->hdr.signature != BFI_DCONF_SIGNATURE)
5928 dconf->dconf->hdr.signature = BFI_DCONF_SIGNATURE;
5929 if (dconf->dconf->hdr.version != BFI_DCONF_VERSION)
5930 dconf->dconf->hdr.version = BFI_DCONF_VERSION;
5931 }
5932 bfa_sm_send_event(dconf, BFA_DCONF_SM_FLASH_COMP);
5933}
5934
5935void
5936bfa_dconf_modinit(struct bfa_s *bfa)
5937{
5938 struct bfa_dconf_mod_s *dconf = BFA_DCONF_MOD(bfa);
5939 bfa_sm_send_event(dconf, BFA_DCONF_SM_INIT);
5940}

5941static void
5942bfa_dconf_start(struct bfa_s *bfa)
5943{
5944}
5945
5946static void
5947bfa_dconf_stop(struct bfa_s *bfa)
5948{
5949}
5950
5951static void
bfa_dconf_timer(void *cbarg)
5952{
5953 struct bfa_dconf_mod_s *dconf = cbarg;
5954 bfa_sm_send_event(dconf, BFA_DCONF_SM_TIMEOUT);
5955}

5956static void
5957bfa_dconf_iocdisable(struct bfa_s *bfa)
5958{
5959 struct bfa_dconf_mod_s *dconf = BFA_DCONF_MOD(bfa);
5960 bfa_sm_send_event(dconf, BFA_DCONF_SM_IOCDISABLE);
5961}
5962
5963static void
5964bfa_dconf_detach(struct bfa_s *bfa)
5965{
5966}
5967
5968static bfa_status_t
5969bfa_dconf_flash_write(struct bfa_dconf_mod_s *dconf)
5970{
5971 bfa_status_t bfa_status;
5972 bfa_trc(dconf->bfa, 0);
5973
5974 bfa_status = bfa_flash_update_part(BFA_FLASH(dconf->bfa),
5975 BFA_FLASH_PART_DRV, dconf->instance,
5976 dconf->dconf, sizeof(struct bfa_dconf_s), 0,
5977 bfa_dconf_cbfn, dconf);
5978 if (bfa_status != BFA_STATUS_OK)
5979 WARN_ON(bfa_status);
5980 bfa_trc(dconf->bfa, bfa_status);
5981
5982 return bfa_status;
5983}
5984
5985bfa_status_t
5986bfa_dconf_update(struct bfa_s *bfa)
5987{
5988 struct bfa_dconf_mod_s *dconf = BFA_DCONF_MOD(bfa);
5989 bfa_trc(dconf->bfa, 0);
5990 if (bfa_sm_cmp_state(dconf, bfa_dconf_sm_iocdown_dirty))
5991 return BFA_STATUS_FAILED;
5992
5993 if (dconf->min_cfg) {
5994 bfa_trc(dconf->bfa, dconf->min_cfg);
5995 return BFA_STATUS_FAILED;
5996 }
5997
5998 bfa_sm_send_event(dconf, BFA_DCONF_SM_WR);
5999 return BFA_STATUS_OK;
6000}
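
/*
 * Note: callers update the cached bfa_dconf_s contents and then call
 * bfa_dconf_update(); the dirty-state timer batches repeated updates into
 * a single flash write after BFA_DCONF_UPDATE_TOV.
 */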
6001
6002static void
6003bfa_dconf_cbfn(void *arg, bfa_status_t status)
6004{
6005 struct bfa_dconf_mod_s *dconf = arg;
6006 WARN_ON(status);
6007 bfa_sm_send_event(dconf, BFA_DCONF_SM_FLASH_COMP);
6008}
6009
6010void
6011bfa_dconf_modexit(struct bfa_s *bfa)
6012{
6013 struct bfa_dconf_mod_s *dconf = BFA_DCONF_MOD(bfa);
6014 BFA_DCONF_MOD(bfa)->flashdone = BFA_FALSE;
6015 bfa_trc(bfa, BFA_DCONF_MOD(bfa)->flashdone);
6016 bfa_sm_send_event(dconf, BFA_DCONF_SM_EXIT);
6017}