blob: c07ee42345bcbdb261a4c1da74cc55f664e3d3b4 [file] [log] [blame]
Jing Huang7725ccf2009-09-23 17:46:15 -07001/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
Jing Huang7725ccf2009-09-23 17:46:15 -07003 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
Krishna Gudipatia36c61f2010-09-15 11:50:55 -070018#include "bfa_modules.h"
19#include "bfa_cb_ioim.h"
Jing Huang7725ccf2009-09-23 17:46:15 -070020
21BFA_TRC_FILE(HAL, FCPIM);
22BFA_MODULE(fcpim);
23
Krishna Gudipatia36c61f2010-09-15 11:50:55 -070024
/*
 * Accumulate one IO-statistics field from the right/source stats block
 * (__r) into the left/destination block (__l).  Both arguments are
 * pointers to struct bfa_itnim_iostats_s; __stats names the member.
 * Arguments are fully parenthesized so the expansion stays correct when
 * callers pass non-trivial pointer expressions (CERT PRE01-C).
 */
#define bfa_fcpim_add_iostats(__l, __r, __stats)	\
	((__l)->__stats += (__r)->__stats)
27
28
Jing Huang5fbe25c2010-10-18 17:17:23 -070029/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -070030 * BFA ITNIM Related definitions
31 */
32static void bfa_itnim_update_del_itn_stats(struct bfa_itnim_s *itnim);
33
/*
 * Map an itnim tag to its entry in the fcpim module's itnim array.
 * The mask (num_itnims - 1) assumes num_itnims is a power of two —
 * TODO(review): confirm against the itnim array sizing in attach.
 */
#define BFA_ITNIM_FROM_TAG(_fcpim, _tag)                                \
	(((_fcpim)->itnim_arr + ((_tag) & ((_fcpim)->num_itnims - 1))))

/* Append an itnim to the module-wide active itnim queue. */
#define bfa_fcpim_additn(__itnim)					\
	list_add_tail(&(__itnim)->qe, &(__itnim)->fcpim->itnim_q)

/*
 * Unlink an itnim from the active queue.  Asserts that it really is
 * queued and that all of its IO queues (active, cleanup, pending) have
 * drained; its stats are folded into the deleted-itnim totals first so
 * they are not lost with the object.
 */
#define bfa_fcpim_delitn(__itnim)	do {				\
	bfa_assert(bfa_q_is_on_q(&(__itnim)->fcpim->itnim_q, __itnim));      \
	bfa_itnim_update_del_itn_stats(__itnim);      \
	list_del(&(__itnim)->qe);         \
	bfa_assert(list_empty(&(__itnim)->io_q));      \
	bfa_assert(list_empty(&(__itnim)->io_cleanup_q)); \
	bfa_assert(list_empty(&(__itnim)->pending_q)); \
} while (0)
47
/*
 * Deliver the itnim online/offline/sler notification to the driver.
 * When (__itnim)->bfa->fcs is set the callback runs synchronously in
 * the current context; otherwise it is deferred through the bfa
 * callback queue (bfa_cb_queue) and runs later from the __bfa_cb_*
 * completion handlers.
 */
#define bfa_itnim_online_cb(__itnim) do {				\
	if ((__itnim)->bfa->fcs)					\
		bfa_cb_itnim_online((__itnim)->ditn);      \
	else {								\
		bfa_cb_queue((__itnim)->bfa, &(__itnim)->hcb_qe,	\
		__bfa_cb_itnim_online, (__itnim));      \
	}								\
} while (0)

/* Same deferral pattern as bfa_itnim_online_cb, for the offline event. */
#define bfa_itnim_offline_cb(__itnim) do {				\
	if ((__itnim)->bfa->fcs)					\
		bfa_cb_itnim_offline((__itnim)->ditn);      \
	else {								\
		bfa_cb_queue((__itnim)->bfa, &(__itnim)->hcb_qe,	\
		__bfa_cb_itnim_offline, (__itnim));      \
	}								\
} while (0)

/* Same deferral pattern, for second-level error recovery (SLER). */
#define bfa_itnim_sler_cb(__itnim) do {					\
	if ((__itnim)->bfa->fcs)					\
		bfa_cb_itnim_sler((__itnim)->ditn);      \
	else {								\
		bfa_cb_queue((__itnim)->bfa, &(__itnim)->hcb_qe,	\
		__bfa_cb_itnim_sler, (__itnim));      \
	}								\
} while (0)
74
Jing Huang5fbe25c2010-10-18 17:17:23 -070075/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -070076 * bfa_itnim_sm BFA itnim state machine
77 */
78
79
/* Events fed into the itnim state machine (bfa_itnim_sm_* handlers). */
enum bfa_itnim_event {
	BFA_ITNIM_SM_CREATE = 1,	/* itnim is created */
	BFA_ITNIM_SM_ONLINE = 2,	/* itnim is online */
	BFA_ITNIM_SM_OFFLINE = 3,	/* itnim is offline */
	BFA_ITNIM_SM_FWRSP = 4,		/* firmware response */
	BFA_ITNIM_SM_DELETE = 5,	/* deleting an existing itnim */
	BFA_ITNIM_SM_CLEANUP = 6,	/* IO cleanup completion */
	BFA_ITNIM_SM_SLER = 7,		/* second level error recovery */
	BFA_ITNIM_SM_HWFAIL = 8,	/* IOC h/w failure event */
	BFA_ITNIM_SM_QRESUME = 9,	/* queue space available */
};
91
Jing Huang5fbe25c2010-10-18 17:17:23 -070092/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -070093 * BFA IOIM related definitions
94 */
/* Move an IO from whatever queue it is on to the completion queue. */
#define bfa_ioim_move_to_comp_q(__ioim) do {		 \
	list_del(&(__ioim)->qe);					\
	list_add_tail(&(__ioim)->qe, &(__ioim)->fcpim->ioim_comp_q); \
} while (0)


/* Run the IO-profiling completion hook, if profiling is enabled. */
#define bfa_ioim_cb_profile_comp(__fcpim, __ioim) do {		 \
	if ((__fcpim)->profile_comp)				 \
		(__fcpim)->profile_comp(__ioim);		 \
} while (0)

/* Run the IO-profiling start hook, if profiling is enabled. */
#define bfa_ioim_cb_profile_start(__fcpim, __ioim) do {		 \
	if ((__fcpim)->profile_start)				 \
		(__fcpim)->profile_start(__ioim);		 \
} while (0)
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700110
Jing Huang5fbe25c2010-10-18 17:17:23 -0700111/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700112 * IO state machine events
113 */
/* Events fed into the IO state machine (bfa_ioim_sm_* handlers). */
enum bfa_ioim_event {
	BFA_IOIM_SM_START = 1,		/* io start request from host */
	BFA_IOIM_SM_COMP_GOOD = 2,	/* io good comp, resource free */
	BFA_IOIM_SM_COMP = 3,		/* io comp, resource is free */
	BFA_IOIM_SM_COMP_UTAG = 4,	/* io comp, resource is free */
	BFA_IOIM_SM_DONE = 5,		/* io comp, resource not free */
	BFA_IOIM_SM_FREE = 6,		/* io resource is freed */
	BFA_IOIM_SM_ABORT = 7,		/* abort request from scsi stack */
	BFA_IOIM_SM_ABORT_COMP = 8,	/* abort from f/w */
	BFA_IOIM_SM_ABORT_DONE = 9,	/* abort completion from f/w */
	BFA_IOIM_SM_QRESUME = 10,	/* CQ space available to queue IO */
	BFA_IOIM_SM_SGALLOCED = 11,	/* SG page allocation successful */
	BFA_IOIM_SM_SQRETRY = 12,	/* sequence recovery retry */
	BFA_IOIM_SM_HCB = 13,		/* bfa callback complete */
	BFA_IOIM_SM_CLEANUP = 14,	/* IO cleanup from itnim */
	BFA_IOIM_SM_TMSTART = 15,	/* IO cleanup from tskim */
	BFA_IOIM_SM_TMDONE = 16,	/* IO cleanup from tskim */
	BFA_IOIM_SM_HWFAIL = 17,	/* IOC h/w failure event */
	BFA_IOIM_SM_IOTOV = 18,		/* ITN offline TOV */
};
134
135
Jing Huang5fbe25c2010-10-18 17:17:23 -0700136/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700137 * BFA TSKIM related definitions
138 */
139
Jing Huang5fbe25c2010-10-18 17:17:23 -0700140/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700141 * task management completion handling
142 */
/*
 * Queue the TM completion callback (__cbfn) to run from the bfa
 * callback queue, then notify the owning itnim that the TM finished.
 */
#define bfa_tskim_qcomp(__tskim, __cbfn) do {	 \
	bfa_cb_queue((__tskim)->bfa, &(__tskim)->hcb_qe, __cbfn, (__tskim));\
	bfa_tskim_notify_comp(__tskim);      \
} while (0)

/* Tell the owning itnim a TM completed — only when notification is armed. */
#define bfa_tskim_notify_comp(__tskim) do {			 \
	if ((__tskim)->notify)					 \
		bfa_itnim_tskdone((__tskim)->itnim);      \
} while (0)
152
153
/* Events fed into the task-management (tskim) state machine. */
enum bfa_tskim_event {
	BFA_TSKIM_SM_START = 1,		/* TM command start */
	BFA_TSKIM_SM_DONE = 2,		/* TM completion */
	BFA_TSKIM_SM_QRESUME = 3,	/* resume after qfull */
	/* NOTE(review): value 4 is unused — presumably a retired event;
	 * numbering kept stable, confirm before reusing the slot. */
	BFA_TSKIM_SM_HWFAIL = 5,	/* IOC h/w failure event */
	BFA_TSKIM_SM_HCB = 6,		/* BFA callback completion */
	BFA_TSKIM_SM_IOS_DONE = 7,	/* IO and sub TM completions */
	BFA_TSKIM_SM_CLEANUP = 8,	/* TM cleanup on ITN offline */
	BFA_TSKIM_SM_CLEANUP_DONE = 9,	/* TM abort completion */
};
164
Jing Huang5fbe25c2010-10-18 17:17:23 -0700165/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700166 * forward declaration for BFA ITNIM functions
167 */
168static void bfa_itnim_iocdisable_cleanup(struct bfa_itnim_s *itnim);
169static bfa_boolean_t bfa_itnim_send_fwcreate(struct bfa_itnim_s *itnim);
170static bfa_boolean_t bfa_itnim_send_fwdelete(struct bfa_itnim_s *itnim);
171static void bfa_itnim_cleanp_comp(void *itnim_cbarg);
172static void bfa_itnim_cleanup(struct bfa_itnim_s *itnim);
173static void __bfa_cb_itnim_online(void *cbarg, bfa_boolean_t complete);
174static void __bfa_cb_itnim_offline(void *cbarg, bfa_boolean_t complete);
175static void __bfa_cb_itnim_sler(void *cbarg, bfa_boolean_t complete);
176static void bfa_itnim_iotov_online(struct bfa_itnim_s *itnim);
177static void bfa_itnim_iotov_cleanup(struct bfa_itnim_s *itnim);
178static void bfa_itnim_iotov(void *itnim_arg);
179static void bfa_itnim_iotov_start(struct bfa_itnim_s *itnim);
180static void bfa_itnim_iotov_stop(struct bfa_itnim_s *itnim);
181static void bfa_itnim_iotov_delete(struct bfa_itnim_s *itnim);
182
Jing Huang5fbe25c2010-10-18 17:17:23 -0700183/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700184 * forward declaration of ITNIM state machine
185 */
186static void bfa_itnim_sm_uninit(struct bfa_itnim_s *itnim,
187 enum bfa_itnim_event event);
188static void bfa_itnim_sm_created(struct bfa_itnim_s *itnim,
189 enum bfa_itnim_event event);
190static void bfa_itnim_sm_fwcreate(struct bfa_itnim_s *itnim,
191 enum bfa_itnim_event event);
192static void bfa_itnim_sm_delete_pending(struct bfa_itnim_s *itnim,
193 enum bfa_itnim_event event);
194static void bfa_itnim_sm_online(struct bfa_itnim_s *itnim,
195 enum bfa_itnim_event event);
196static void bfa_itnim_sm_sler(struct bfa_itnim_s *itnim,
197 enum bfa_itnim_event event);
198static void bfa_itnim_sm_cleanup_offline(struct bfa_itnim_s *itnim,
199 enum bfa_itnim_event event);
200static void bfa_itnim_sm_cleanup_delete(struct bfa_itnim_s *itnim,
201 enum bfa_itnim_event event);
202static void bfa_itnim_sm_fwdelete(struct bfa_itnim_s *itnim,
203 enum bfa_itnim_event event);
204static void bfa_itnim_sm_offline(struct bfa_itnim_s *itnim,
205 enum bfa_itnim_event event);
206static void bfa_itnim_sm_iocdisable(struct bfa_itnim_s *itnim,
207 enum bfa_itnim_event event);
208static void bfa_itnim_sm_deleting(struct bfa_itnim_s *itnim,
209 enum bfa_itnim_event event);
210static void bfa_itnim_sm_fwcreate_qfull(struct bfa_itnim_s *itnim,
211 enum bfa_itnim_event event);
212static void bfa_itnim_sm_fwdelete_qfull(struct bfa_itnim_s *itnim,
213 enum bfa_itnim_event event);
214static void bfa_itnim_sm_deleting_qfull(struct bfa_itnim_s *itnim,
215 enum bfa_itnim_event event);
216
Jing Huang5fbe25c2010-10-18 17:17:23 -0700217/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700218 * forward declaration for BFA IOIM functions
219 */
220static bfa_boolean_t bfa_ioim_send_ioreq(struct bfa_ioim_s *ioim);
Maggie Zhange3e7d3e2010-12-09 19:10:27 -0800221static bfa_boolean_t bfa_ioim_sgpg_alloc(struct bfa_ioim_s *ioim);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700222static bfa_boolean_t bfa_ioim_send_abort(struct bfa_ioim_s *ioim);
223static void bfa_ioim_notify_cleanup(struct bfa_ioim_s *ioim);
224static void __bfa_cb_ioim_good_comp(void *cbarg, bfa_boolean_t complete);
225static void __bfa_cb_ioim_comp(void *cbarg, bfa_boolean_t complete);
226static void __bfa_cb_ioim_abort(void *cbarg, bfa_boolean_t complete);
227static void __bfa_cb_ioim_failed(void *cbarg, bfa_boolean_t complete);
228static void __bfa_cb_ioim_pathtov(void *cbarg, bfa_boolean_t complete);
229static bfa_boolean_t bfa_ioim_is_abortable(struct bfa_ioim_s *ioim);
230
231
Jing Huang5fbe25c2010-10-18 17:17:23 -0700232/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700233 * forward declaration of BFA IO state machine
234 */
235static void bfa_ioim_sm_uninit(struct bfa_ioim_s *ioim,
236 enum bfa_ioim_event event);
237static void bfa_ioim_sm_sgalloc(struct bfa_ioim_s *ioim,
238 enum bfa_ioim_event event);
239static void bfa_ioim_sm_active(struct bfa_ioim_s *ioim,
240 enum bfa_ioim_event event);
241static void bfa_ioim_sm_abort(struct bfa_ioim_s *ioim,
242 enum bfa_ioim_event event);
243static void bfa_ioim_sm_cleanup(struct bfa_ioim_s *ioim,
244 enum bfa_ioim_event event);
245static void bfa_ioim_sm_qfull(struct bfa_ioim_s *ioim,
246 enum bfa_ioim_event event);
247static void bfa_ioim_sm_abort_qfull(struct bfa_ioim_s *ioim,
248 enum bfa_ioim_event event);
249static void bfa_ioim_sm_cleanup_qfull(struct bfa_ioim_s *ioim,
250 enum bfa_ioim_event event);
251static void bfa_ioim_sm_hcb(struct bfa_ioim_s *ioim,
252 enum bfa_ioim_event event);
253static void bfa_ioim_sm_hcb_free(struct bfa_ioim_s *ioim,
254 enum bfa_ioim_event event);
255static void bfa_ioim_sm_resfree(struct bfa_ioim_s *ioim,
256 enum bfa_ioim_event event);
257static void bfa_ioim_sm_cmnd_retry(struct bfa_ioim_s *ioim,
258 enum bfa_ioim_event event);
259
Jing Huang5fbe25c2010-10-18 17:17:23 -0700260/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700261 * forward declaration for BFA TSKIM functions
262 */
263static void __bfa_cb_tskim_done(void *cbarg, bfa_boolean_t complete);
264static void __bfa_cb_tskim_failed(void *cbarg, bfa_boolean_t complete);
265static bfa_boolean_t bfa_tskim_match_scope(struct bfa_tskim_s *tskim,
266 lun_t lun);
267static void bfa_tskim_gather_ios(struct bfa_tskim_s *tskim);
268static void bfa_tskim_cleanp_comp(void *tskim_cbarg);
269static void bfa_tskim_cleanup_ios(struct bfa_tskim_s *tskim);
270static bfa_boolean_t bfa_tskim_send(struct bfa_tskim_s *tskim);
271static bfa_boolean_t bfa_tskim_send_abort(struct bfa_tskim_s *tskim);
272static void bfa_tskim_iocdisable_ios(struct bfa_tskim_s *tskim);
273
274
Jing Huang5fbe25c2010-10-18 17:17:23 -0700275/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700276 * forward declaration of BFA TSKIM state machine
277 */
278static void bfa_tskim_sm_uninit(struct bfa_tskim_s *tskim,
279 enum bfa_tskim_event event);
280static void bfa_tskim_sm_active(struct bfa_tskim_s *tskim,
281 enum bfa_tskim_event event);
282static void bfa_tskim_sm_cleanup(struct bfa_tskim_s *tskim,
283 enum bfa_tskim_event event);
284static void bfa_tskim_sm_iocleanup(struct bfa_tskim_s *tskim,
285 enum bfa_tskim_event event);
286static void bfa_tskim_sm_qfull(struct bfa_tskim_s *tskim,
287 enum bfa_tskim_event event);
288static void bfa_tskim_sm_cleanup_qfull(struct bfa_tskim_s *tskim,
289 enum bfa_tskim_event event);
290static void bfa_tskim_sm_hcb(struct bfa_tskim_s *tskim,
291 enum bfa_tskim_event event);
292
Jing Huang5fbe25c2010-10-18 17:17:23 -0700293/*
Maggie Zhangdf0f1932010-12-09 19:07:46 -0800294 * BFA FCP Initiator Mode module
Jing Huang7725ccf2009-09-23 17:46:15 -0700295 */
296
Jing Huang5fbe25c2010-10-18 17:17:23 -0700297/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700298 * Compute and return memory needed by FCP(im) module.
Jing Huang7725ccf2009-09-23 17:46:15 -0700299 */
300static void
301bfa_fcpim_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *km_len,
302 u32 *dm_len)
303{
304 bfa_itnim_meminfo(cfg, km_len, dm_len);
305
Jing Huang5fbe25c2010-10-18 17:17:23 -0700306 /*
Jing Huang7725ccf2009-09-23 17:46:15 -0700307 * IO memory
308 */
309 if (cfg->fwcfg.num_ioim_reqs < BFA_IOIM_MIN)
310 cfg->fwcfg.num_ioim_reqs = BFA_IOIM_MIN;
311 else if (cfg->fwcfg.num_ioim_reqs > BFA_IOIM_MAX)
312 cfg->fwcfg.num_ioim_reqs = BFA_IOIM_MAX;
313
314 *km_len += cfg->fwcfg.num_ioim_reqs *
315 (sizeof(struct bfa_ioim_s) + sizeof(struct bfa_ioim_sp_s));
316
317 *dm_len += cfg->fwcfg.num_ioim_reqs * BFI_IOIM_SNSLEN;
318
Jing Huang5fbe25c2010-10-18 17:17:23 -0700319 /*
Jing Huang7725ccf2009-09-23 17:46:15 -0700320 * task management command memory
321 */
322 if (cfg->fwcfg.num_tskim_reqs < BFA_TSKIM_MIN)
323 cfg->fwcfg.num_tskim_reqs = BFA_TSKIM_MIN;
324 *km_len += cfg->fwcfg.num_tskim_reqs * sizeof(struct bfa_tskim_s);
325}
326
327
328static void
329bfa_fcpim_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700330 struct bfa_meminfo_s *meminfo, struct bfa_pcidev_s *pcidev)
Jing Huang7725ccf2009-09-23 17:46:15 -0700331{
332 struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
333
334 bfa_trc(bfa, cfg->drvcfg.path_tov);
335 bfa_trc(bfa, cfg->fwcfg.num_rports);
336 bfa_trc(bfa, cfg->fwcfg.num_ioim_reqs);
337 bfa_trc(bfa, cfg->fwcfg.num_tskim_reqs);
338
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700339 fcpim->bfa = bfa;
340 fcpim->num_itnims = cfg->fwcfg.num_rports;
Jing Huang7725ccf2009-09-23 17:46:15 -0700341 fcpim->num_ioim_reqs = cfg->fwcfg.num_ioim_reqs;
342 fcpim->num_tskim_reqs = cfg->fwcfg.num_tskim_reqs;
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700343 fcpim->path_tov = cfg->drvcfg.path_tov;
344 fcpim->delay_comp = cfg->drvcfg.delay_comp;
345 fcpim->profile_comp = NULL;
346 fcpim->profile_start = NULL;
Jing Huang7725ccf2009-09-23 17:46:15 -0700347
348 bfa_itnim_attach(fcpim, meminfo);
349 bfa_tskim_attach(fcpim, meminfo);
350 bfa_ioim_attach(fcpim, meminfo);
351}
352
/* Intentionally empty — fcpim registers via BFA_MODULE() and must
 * provide this hook; there is nothing to tear down here. */
static void
bfa_fcpim_detach(struct bfa_s *bfa)
{
}
357
/* Intentionally empty BFA_MODULE() start hook. */
static void
bfa_fcpim_start(struct bfa_s *bfa)
{
}
362
/* Intentionally empty BFA_MODULE() stop hook. */
static void
bfa_fcpim_stop(struct bfa_s *bfa)
{
}
367
/*
 * IOC hardware-failure handling: walk every active itnim and run its
 * iocdisable path.  The _safe iterator tolerates the current entry
 * being unlinked from the queue during the walk.
 */
static void
bfa_fcpim_iocdisable(struct bfa_s *bfa)
{
	struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
	struct bfa_itnim_s *itnim;
	struct list_head *qe, *qen;

	list_for_each_safe(qe, qen, &fcpim->itnim_q) {
		itnim = (struct bfa_itnim_s *) qe;
		bfa_itnim_iocdisable(itnim);
	}
}
380
/*
 * Accumulate every field of the source stats block (rstats) into the
 * destination block (lstats), field by field.  Pure addition — neither
 * block is zeroed here; callers memset the destination when they want
 * a fresh total.
 */
void
bfa_fcpim_add_stats(struct bfa_itnim_iostats_s *lstats,
		struct bfa_itnim_iostats_s *rstats)
{
	bfa_fcpim_add_iostats(lstats, rstats, total_ios);
	bfa_fcpim_add_iostats(lstats, rstats, qresumes);
	bfa_fcpim_add_iostats(lstats, rstats, no_iotags);
	bfa_fcpim_add_iostats(lstats, rstats, io_aborts);
	bfa_fcpim_add_iostats(lstats, rstats, no_tskims);
	bfa_fcpim_add_iostats(lstats, rstats, iocomp_ok);
	bfa_fcpim_add_iostats(lstats, rstats, iocomp_underrun);
	bfa_fcpim_add_iostats(lstats, rstats, iocomp_overrun);
	bfa_fcpim_add_iostats(lstats, rstats, iocomp_aborted);
	bfa_fcpim_add_iostats(lstats, rstats, iocomp_timedout);
	bfa_fcpim_add_iostats(lstats, rstats, iocom_nexus_abort);
	bfa_fcpim_add_iostats(lstats, rstats, iocom_proto_err);
	bfa_fcpim_add_iostats(lstats, rstats, iocom_dif_err);
	bfa_fcpim_add_iostats(lstats, rstats, iocom_sqer_needed);
	bfa_fcpim_add_iostats(lstats, rstats, iocom_res_free);
	bfa_fcpim_add_iostats(lstats, rstats, iocom_hostabrts);
	bfa_fcpim_add_iostats(lstats, rstats, iocom_utags);
	bfa_fcpim_add_iostats(lstats, rstats, io_cleanups);
	bfa_fcpim_add_iostats(lstats, rstats, io_tmaborts);
	bfa_fcpim_add_iostats(lstats, rstats, onlines);
	bfa_fcpim_add_iostats(lstats, rstats, offlines);
	bfa_fcpim_add_iostats(lstats, rstats, creates);
	bfa_fcpim_add_iostats(lstats, rstats, deletes);
	bfa_fcpim_add_iostats(lstats, rstats, create_comps);
	bfa_fcpim_add_iostats(lstats, rstats, delete_comps);
	bfa_fcpim_add_iostats(lstats, rstats, sler_events);
	bfa_fcpim_add_iostats(lstats, rstats, fw_create);
	bfa_fcpim_add_iostats(lstats, rstats, fw_delete);
	bfa_fcpim_add_iostats(lstats, rstats, ioc_disabled);
	bfa_fcpim_add_iostats(lstats, rstats, cleanup_comps);
	bfa_fcpim_add_iostats(lstats, rstats, tm_cmnds);
	bfa_fcpim_add_iostats(lstats, rstats, tm_fw_rsps);
	bfa_fcpim_add_iostats(lstats, rstats, tm_success);
	bfa_fcpim_add_iostats(lstats, rstats, tm_failures);
	bfa_fcpim_add_iostats(lstats, rstats, tm_io_comps);
	bfa_fcpim_add_iostats(lstats, rstats, tm_qresumes);
	bfa_fcpim_add_iostats(lstats, rstats, tm_iocdowns);
	bfa_fcpim_add_iostats(lstats, rstats, tm_cleanups);
	bfa_fcpim_add_iostats(lstats, rstats, tm_cleanup_comps);
	bfa_fcpim_add_iostats(lstats, rstats, io_comps);
	bfa_fcpim_add_iostats(lstats, rstats, input_reqs);
	bfa_fcpim_add_iostats(lstats, rstats, output_reqs);
	bfa_fcpim_add_iostats(lstats, rstats, rd_throughput);
	bfa_fcpim_add_iostats(lstats, rstats, wr_throughput);
}
430
/*
 * Set the path timeout.  Caller supplies the value that the getter
 * later returns unscaled; it is stored ×1000 internally (presumably
 * seconds in, milliseconds stored) and capped at BFA_FCPIM_PATHTOV_MAX.
 */
void
bfa_fcpim_path_tov_set(struct bfa_s *bfa, u16 path_tov)
{
	struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);

	fcpim->path_tov = path_tov * 1000;
	if (fcpim->path_tov > BFA_FCPIM_PATHTOV_MAX)
		fcpim->path_tov = BFA_FCPIM_PATHTOV_MAX;
}
440
/* Return the path timeout in the caller's units (stored value / 1000). */
u16
bfa_fcpim_path_tov_get(struct bfa_s *bfa)
{
	struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);

	return fcpim->path_tov / 1000;
}
448
/*
 * Zero *stats, then sum the IO stats of every itnim belonging to the
 * logical port identified by lp_tag.  Always returns BFA_STATUS_OK.
 */
bfa_status_t
bfa_fcpim_port_iostats(struct bfa_s *bfa, struct bfa_itnim_iostats_s *stats,
	u8 lp_tag)
{
	struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
	struct list_head *qe, *qen;
	struct bfa_itnim_s *itnim;

	/* accumulate IO stats from itnim */
	memset(stats, 0, sizeof(struct bfa_itnim_iostats_s));
	list_for_each_safe(qe, qen, &fcpim->itnim_q) {
		itnim = (struct bfa_itnim_s *) qe;
		/* skip itnims that belong to other logical ports */
		if (itnim->rport->rport_info.lp_tag != lp_tag)
			continue;
		bfa_fcpim_add_stats(stats, &(itnim->stats));
	}
	return BFA_STATUS_OK;
}
/*
 * Zero *modstats, then sum the IO stats of every active itnim,
 * regardless of logical port.  Always returns BFA_STATUS_OK.
 */
bfa_status_t
bfa_fcpim_get_modstats(struct bfa_s *bfa, struct bfa_itnim_iostats_s *modstats)
{
	struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
	struct list_head *qe, *qen;
	struct bfa_itnim_s *itnim;

	/* accumulate IO stats from itnim */
	memset(modstats, 0, sizeof(struct bfa_itnim_iostats_s));
	list_for_each_safe(qe, qen, &fcpim->itnim_q) {
		itnim = (struct bfa_itnim_s *) qe;
		bfa_fcpim_add_stats(modstats, &(itnim->stats));
	}
	return BFA_STATUS_OK;
}
482
/*
 * Copy out the accumulated stats of deleted itnims (maintained by
 * bfa_itnim_update_del_itn_stats on delete).  Always returns
 * BFA_STATUS_OK.
 */
bfa_status_t
bfa_fcpim_get_del_itn_stats(struct bfa_s *bfa,
	struct bfa_fcpim_del_itn_stats_s *modstats)
{
	struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);

	/* struct copy of the cached totals */
	*modstats = fcpim->del_itn_stats;

	return BFA_STATUS_OK;
}
493
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700494
/*
 * Enable IO profiling: clear all itnim stats so measurements start
 * from a clean slate, record the caller-supplied start time, and
 * install the ioim profiling hooks.  Always returns BFA_STATUS_OK.
 */
bfa_status_t
bfa_fcpim_profile_on(struct bfa_s *bfa, u32 time)
{
	struct bfa_itnim_s *itnim;
	struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
	struct list_head *qe, *qen;

	/* accumulate IO stats from itnim */
	list_for_each_safe(qe, qen, &fcpim->itnim_q) {
		itnim = (struct bfa_itnim_s *) qe;
		bfa_itnim_clear_stats(itnim);
	}
	fcpim->io_profile = BFA_TRUE;
	fcpim->io_profile_start_time = time;
	fcpim->profile_comp = bfa_ioim_profile_comp;
	fcpim->profile_start = bfa_ioim_profile_start;

	return BFA_STATUS_OK;
}
/*
 * Disable IO profiling: reset the start time and remove the ioim
 * profiling hooks installed by bfa_fcpim_profile_on().  Always
 * returns BFA_STATUS_OK.
 */
bfa_status_t
bfa_fcpim_profile_off(struct bfa_s *bfa)
{
	struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
	fcpim->io_profile = BFA_FALSE;
	fcpim->io_profile_start_time = 0;
	fcpim->profile_comp = NULL;
	fcpim->profile_start = NULL;
	return BFA_STATUS_OK;
}
524
/*
 * Clear the IO stats of every active itnim on the logical port
 * identified by lp_tag.  Always returns BFA_STATUS_OK.
 */
bfa_status_t
bfa_fcpim_port_clear_iostats(struct bfa_s *bfa, u8 lp_tag)
{
	struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
	struct list_head *qe, *qen;
	struct bfa_itnim_s *itnim;

	/* clear IO stats from all active itnims */
	list_for_each_safe(qe, qen, &fcpim->itnim_q) {
		itnim = (struct bfa_itnim_s *) qe;
		if (itnim->rport->rport_info.lp_tag != lp_tag)
			continue;
		bfa_itnim_clear_stats(itnim);
	}
	return BFA_STATUS_OK;

}
542
/*
 * Clear module-wide IO stats: every active itnim's stats plus the
 * cached deleted-itnim totals.  Always returns BFA_STATUS_OK.
 */
bfa_status_t
bfa_fcpim_clr_modstats(struct bfa_s *bfa)
{
	struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
	struct list_head *qe, *qen;
	struct bfa_itnim_s *itnim;

	/* clear IO stats from all active itnims */
	list_for_each_safe(qe, qen, &fcpim->itnim_q) {
		itnim = (struct bfa_itnim_s *) qe;
		bfa_itnim_clear_stats(itnim);
	}
	memset(&fcpim->del_itn_stats, 0,
		sizeof(struct bfa_fcpim_del_itn_stats_s));

	return BFA_STATUS_OK;
}
560
/*
 * Set the queue depth.  Asserts (debug builds) that the requested
 * depth does not exceed BFA_IOCFC_QDEPTH_MAX.
 */
void
bfa_fcpim_qdepth_set(struct bfa_s *bfa, u16 q_depth)
{
	struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);

	bfa_assert(q_depth <= BFA_IOCFC_QDEPTH_MAX);

	fcpim->q_depth = q_depth;
}
570
/* Return the currently configured queue depth. */
u16
bfa_fcpim_qdepth_get(struct bfa_s *bfa)
{
	struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);

	return fcpim->q_depth;
}
578
void
bfa_fcpim_update_ioredirect(struct bfa_s *bfa)
{
	bfa_boolean_t ioredirect;

	/*
	 * IO redirection is turned off when QoS is enabled and vice versa
	 */
	ioredirect = bfa_fcport_is_qos_enabled(bfa) ? BFA_FALSE : BFA_TRUE;
	/*
	 * NOTE(review): the computed value is never consumed — neither
	 * fcpim->ioredirect nor bfa_fcpim_set_ioredirect() is updated, so
	 * this function is effectively a no-op beyond the QoS query.
	 * Confirm whether bfa_fcpim_set_ioredirect(bfa, ioredirect) was
	 * intended here.
	 */
}
589
/* Directly set the module's IO-redirection flag. */
void
bfa_fcpim_set_ioredirect(struct bfa_s *bfa, bfa_boolean_t state)
{
	struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
	fcpim->ioredirect = state;
}
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700596
597
598
Jing Huang5fbe25c2010-10-18 17:17:23 -0700599/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700600 * BFA ITNIM module state machine functions
601 */
602
Jing Huang5fbe25c2010-10-18 17:17:23 -0700603/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700604 * Beginning/unallocated state - no events expected.
605 */
/* Uninitialized itnim: only CREATE is legal; anything else faults. */
static void
bfa_itnim_sm_uninit(struct bfa_itnim_s *itnim, enum bfa_itnim_event event)
{
	bfa_trc(itnim->bfa, itnim->rport->rport_tag);
	bfa_trc(itnim->bfa, event);

	switch (event) {
	case BFA_ITNIM_SM_CREATE:
		bfa_sm_set_state(itnim, bfa_itnim_sm_created);
		itnim->is_online = BFA_FALSE;
		/* track the new itnim on the module-wide queue */
		bfa_fcpim_additn(itnim);
		break;

	default:
		bfa_sm_fault(itnim->bfa, event);
	}
}
623
Jing Huang5fbe25c2010-10-18 17:17:23 -0700624/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700625 * Beginning state, only online event expected.
626 */
/* Created state: waiting to be brought online (or deleted). */
static void
bfa_itnim_sm_created(struct bfa_itnim_s *itnim, enum bfa_itnim_event event)
{
	bfa_trc(itnim->bfa, itnim->rport->rport_tag);
	bfa_trc(itnim->bfa, event);

	switch (event) {
	case BFA_ITNIM_SM_ONLINE:
		/* qfull variant is entered when no request-queue space */
		if (bfa_itnim_send_fwcreate(itnim))
			bfa_sm_set_state(itnim, bfa_itnim_sm_fwcreate);
		else
			bfa_sm_set_state(itnim, bfa_itnim_sm_fwcreate_qfull);
		break;

	case BFA_ITNIM_SM_DELETE:
		/* never reached firmware — free immediately */
		bfa_sm_set_state(itnim, bfa_itnim_sm_uninit);
		bfa_fcpim_delitn(itnim);
		break;

	case BFA_ITNIM_SM_HWFAIL:
		bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
		break;

	default:
		bfa_sm_fault(itnim->bfa, event);
	}
}
654
Jing Huang5fbe25c2010-10-18 17:17:23 -0700655/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700656 * Waiting for itnim create response from firmware.
657 */
/* Awaiting firmware itnim-create response. */
static void
bfa_itnim_sm_fwcreate(struct bfa_itnim_s *itnim, enum bfa_itnim_event event)
{
	bfa_trc(itnim->bfa, itnim->rport->rport_tag);
	bfa_trc(itnim->bfa, event);

	switch (event) {
	case BFA_ITNIM_SM_FWRSP:
		/* firmware ack — itnim is now usable; notify the driver */
		bfa_sm_set_state(itnim, bfa_itnim_sm_online);
		itnim->is_online = BFA_TRUE;
		bfa_itnim_iotov_online(itnim);
		bfa_itnim_online_cb(itnim);
		break;

	case BFA_ITNIM_SM_DELETE:
		/* defer delete until the create response arrives */
		bfa_sm_set_state(itnim, bfa_itnim_sm_delete_pending);
		break;

	case BFA_ITNIM_SM_OFFLINE:
		if (bfa_itnim_send_fwdelete(itnim))
			bfa_sm_set_state(itnim, bfa_itnim_sm_fwdelete);
		else
			bfa_sm_set_state(itnim, bfa_itnim_sm_fwdelete_qfull);
		break;

	case BFA_ITNIM_SM_HWFAIL:
		bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
		break;

	default:
		bfa_sm_fault(itnim->bfa, event);
	}
}
691
/*
 * Firmware-create deferred on a full request queue; waiting for
 * QRESUME (queue space) while parked on the reqq wait list.
 */
static void
bfa_itnim_sm_fwcreate_qfull(struct bfa_itnim_s *itnim,
		enum bfa_itnim_event event)
{
	bfa_trc(itnim->bfa, itnim->rport->rport_tag);
	bfa_trc(itnim->bfa, event);

	switch (event) {
	case BFA_ITNIM_SM_QRESUME:
		/* queue space available — retry the create */
		bfa_sm_set_state(itnim, bfa_itnim_sm_fwcreate);
		bfa_itnim_send_fwcreate(itnim);
		break;

	case BFA_ITNIM_SM_DELETE:
		/* cancel the queued wait before freeing */
		bfa_sm_set_state(itnim, bfa_itnim_sm_uninit);
		bfa_reqq_wcancel(&itnim->reqq_wait);
		bfa_fcpim_delitn(itnim);
		break;

	case BFA_ITNIM_SM_OFFLINE:
		bfa_sm_set_state(itnim, bfa_itnim_sm_offline);
		bfa_reqq_wcancel(&itnim->reqq_wait);
		bfa_itnim_offline_cb(itnim);
		break;

	case BFA_ITNIM_SM_HWFAIL:
		bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
		bfa_reqq_wcancel(&itnim->reqq_wait);
		break;

	default:
		bfa_sm_fault(itnim->bfa, event);
	}
}
726
Jing Huang5fbe25c2010-10-18 17:17:23 -0700727/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700728 * Waiting for itnim create response from firmware, a delete is pending.
729 */
/*
 * A delete arrived while the firmware create was outstanding; wait
 * for the create response, then immediately issue the delete.
 */
static void
bfa_itnim_sm_delete_pending(struct bfa_itnim_s *itnim,
		enum bfa_itnim_event event)
{
	bfa_trc(itnim->bfa, itnim->rport->rport_tag);
	bfa_trc(itnim->bfa, event);

	switch (event) {
	case BFA_ITNIM_SM_FWRSP:
		if (bfa_itnim_send_fwdelete(itnim))
			bfa_sm_set_state(itnim, bfa_itnim_sm_deleting);
		else
			bfa_sm_set_state(itnim, bfa_itnim_sm_deleting_qfull);
		break;

	case BFA_ITNIM_SM_HWFAIL:
		/* IOC is gone — no firmware delete needed; free now */
		bfa_sm_set_state(itnim, bfa_itnim_sm_uninit);
		bfa_fcpim_delitn(itnim);
		break;

	default:
		bfa_sm_fault(itnim->bfa, event);
	}
}
754
Jing Huang5fbe25c2010-10-18 17:17:23 -0700755/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700756 * Online state - normal parking state.
757 */
/* Online — the normal parking state while IOs flow. */
static void
bfa_itnim_sm_online(struct bfa_itnim_s *itnim, enum bfa_itnim_event event)
{
	bfa_trc(itnim->bfa, itnim->rport->rport_tag);
	bfa_trc(itnim->bfa, event);

	switch (event) {
	case BFA_ITNIM_SM_OFFLINE:
		bfa_sm_set_state(itnim, bfa_itnim_sm_cleanup_offline);
		itnim->is_online = BFA_FALSE;
		/* start the offline timeout, then drain active IOs */
		bfa_itnim_iotov_start(itnim);
		bfa_itnim_cleanup(itnim);
		break;

	case BFA_ITNIM_SM_DELETE:
		bfa_sm_set_state(itnim, bfa_itnim_sm_cleanup_delete);
		itnim->is_online = BFA_FALSE;
		bfa_itnim_cleanup(itnim);
		break;

	case BFA_ITNIM_SM_SLER:
		/* second-level error recovery requested by firmware */
		bfa_sm_set_state(itnim, bfa_itnim_sm_sler);
		itnim->is_online = BFA_FALSE;
		bfa_itnim_iotov_start(itnim);
		bfa_itnim_sler_cb(itnim);
		break;

	case BFA_ITNIM_SM_HWFAIL:
		bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
		itnim->is_online = BFA_FALSE;
		bfa_itnim_iotov_start(itnim);
		bfa_itnim_iocdisable_cleanup(itnim);
		break;

	default:
		bfa_sm_fault(itnim->bfa, event);
	}
}
796
Jing Huang5fbe25c2010-10-18 17:17:23 -0700797/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700798 * Second level error recovery need.
799 */
/* Second-level error recovery in progress; awaiting offline/delete. */
static void
bfa_itnim_sm_sler(struct bfa_itnim_s *itnim, enum bfa_itnim_event event)
{
	bfa_trc(itnim->bfa, itnim->rport->rport_tag);
	bfa_trc(itnim->bfa, event);

	switch (event) {
	case BFA_ITNIM_SM_OFFLINE:
		bfa_sm_set_state(itnim, bfa_itnim_sm_cleanup_offline);
		bfa_itnim_cleanup(itnim);
		break;

	case BFA_ITNIM_SM_DELETE:
		bfa_sm_set_state(itnim, bfa_itnim_sm_cleanup_delete);
		bfa_itnim_cleanup(itnim);
		/* delete cancels the pending IO timeout machinery */
		bfa_itnim_iotov_delete(itnim);
		break;

	case BFA_ITNIM_SM_HWFAIL:
		bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
		bfa_itnim_iocdisable_cleanup(itnim);
		break;

	default:
		bfa_sm_fault(itnim->bfa, event);
	}
}
827
Jing Huang5fbe25c2010-10-18 17:17:23 -0700828/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700829 * Going offline. Waiting for active IO cleanup.
830 */
/* Going offline; waiting for active IO cleanup to complete. */
static void
bfa_itnim_sm_cleanup_offline(struct bfa_itnim_s *itnim,
		enum bfa_itnim_event event)
{
	bfa_trc(itnim->bfa, itnim->rport->rport_tag);
	bfa_trc(itnim->bfa, event);

	switch (event) {
	case BFA_ITNIM_SM_CLEANUP:
		/* IOs drained — tear down the firmware itnim */
		if (bfa_itnim_send_fwdelete(itnim))
			bfa_sm_set_state(itnim, bfa_itnim_sm_fwdelete);
		else
			bfa_sm_set_state(itnim, bfa_itnim_sm_fwdelete_qfull);
		break;

	case BFA_ITNIM_SM_DELETE:
		bfa_sm_set_state(itnim, bfa_itnim_sm_cleanup_delete);
		bfa_itnim_iotov_delete(itnim);
		break;

	case BFA_ITNIM_SM_HWFAIL:
		bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
		bfa_itnim_iocdisable_cleanup(itnim);
		bfa_itnim_offline_cb(itnim);
		break;

	case BFA_ITNIM_SM_SLER:
		/* already draining — SLER is deliberately ignored here */
		break;

	default:
		bfa_sm_fault(itnim->bfa, event);
	}
}
864
Jing Huang5fbe25c2010-10-18 17:17:23 -0700865/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700866 * Deleting itnim. Waiting for active IO cleanup.
867 */
/* Deleting; waiting for active IO cleanup to complete first. */
static void
bfa_itnim_sm_cleanup_delete(struct bfa_itnim_s *itnim,
		enum bfa_itnim_event event)
{
	bfa_trc(itnim->bfa, itnim->rport->rport_tag);
	bfa_trc(itnim->bfa, event);

	switch (event) {
	case BFA_ITNIM_SM_CLEANUP:
		/* IOs drained — proceed to the firmware delete */
		if (bfa_itnim_send_fwdelete(itnim))
			bfa_sm_set_state(itnim, bfa_itnim_sm_deleting);
		else
			bfa_sm_set_state(itnim, bfa_itnim_sm_deleting_qfull);
		break;

	case BFA_ITNIM_SM_HWFAIL:
		bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
		bfa_itnim_iocdisable_cleanup(itnim);
		break;

	default:
		bfa_sm_fault(itnim->bfa, event);
	}
}
892
Jing Huang5fbe25c2010-10-18 17:17:23 -0700893/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700894 * Rport offline. Fimrware itnim is being deleted - awaiting f/w response.
895 */
896static void
897bfa_itnim_sm_fwdelete(struct bfa_itnim_s *itnim, enum bfa_itnim_event event)
898{
899 bfa_trc(itnim->bfa, itnim->rport->rport_tag);
900 bfa_trc(itnim->bfa, event);
901
902 switch (event) {
903 case BFA_ITNIM_SM_FWRSP:
904 bfa_sm_set_state(itnim, bfa_itnim_sm_offline);
905 bfa_itnim_offline_cb(itnim);
906 break;
907
908 case BFA_ITNIM_SM_DELETE:
909 bfa_sm_set_state(itnim, bfa_itnim_sm_deleting);
910 break;
911
912 case BFA_ITNIM_SM_HWFAIL:
913 bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
914 bfa_itnim_offline_cb(itnim);
915 break;
916
917 default:
918 bfa_sm_fault(itnim->bfa, event);
919 }
920}
921
922static void
923bfa_itnim_sm_fwdelete_qfull(struct bfa_itnim_s *itnim,
924 enum bfa_itnim_event event)
925{
926 bfa_trc(itnim->bfa, itnim->rport->rport_tag);
927 bfa_trc(itnim->bfa, event);
928
929 switch (event) {
930 case BFA_ITNIM_SM_QRESUME:
931 bfa_sm_set_state(itnim, bfa_itnim_sm_fwdelete);
932 bfa_itnim_send_fwdelete(itnim);
933 break;
934
935 case BFA_ITNIM_SM_DELETE:
936 bfa_sm_set_state(itnim, bfa_itnim_sm_deleting_qfull);
937 break;
938
939 case BFA_ITNIM_SM_HWFAIL:
940 bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
941 bfa_reqq_wcancel(&itnim->reqq_wait);
942 bfa_itnim_offline_cb(itnim);
943 break;
944
945 default:
946 bfa_sm_fault(itnim->bfa, event);
947 }
948}
949
Jing Huang5fbe25c2010-10-18 17:17:23 -0700950/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700951 * Offline state.
952 */
953static void
954bfa_itnim_sm_offline(struct bfa_itnim_s *itnim, enum bfa_itnim_event event)
955{
956 bfa_trc(itnim->bfa, itnim->rport->rport_tag);
957 bfa_trc(itnim->bfa, event);
958
959 switch (event) {
960 case BFA_ITNIM_SM_DELETE:
961 bfa_sm_set_state(itnim, bfa_itnim_sm_uninit);
962 bfa_itnim_iotov_delete(itnim);
963 bfa_fcpim_delitn(itnim);
964 break;
965
966 case BFA_ITNIM_SM_ONLINE:
967 if (bfa_itnim_send_fwcreate(itnim))
968 bfa_sm_set_state(itnim, bfa_itnim_sm_fwcreate);
969 else
970 bfa_sm_set_state(itnim, bfa_itnim_sm_fwcreate_qfull);
971 break;
972
973 case BFA_ITNIM_SM_HWFAIL:
974 bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
975 break;
976
977 default:
978 bfa_sm_fault(itnim->bfa, event);
979 }
980}
981
Jing Huang5fbe25c2010-10-18 17:17:23 -0700982/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700983 * IOC h/w failed state.
984 */
985static void
986bfa_itnim_sm_iocdisable(struct bfa_itnim_s *itnim,
987 enum bfa_itnim_event event)
988{
989 bfa_trc(itnim->bfa, itnim->rport->rport_tag);
990 bfa_trc(itnim->bfa, event);
991
992 switch (event) {
993 case BFA_ITNIM_SM_DELETE:
994 bfa_sm_set_state(itnim, bfa_itnim_sm_uninit);
995 bfa_itnim_iotov_delete(itnim);
996 bfa_fcpim_delitn(itnim);
997 break;
998
999 case BFA_ITNIM_SM_OFFLINE:
1000 bfa_itnim_offline_cb(itnim);
1001 break;
1002
1003 case BFA_ITNIM_SM_ONLINE:
1004 if (bfa_itnim_send_fwcreate(itnim))
1005 bfa_sm_set_state(itnim, bfa_itnim_sm_fwcreate);
1006 else
1007 bfa_sm_set_state(itnim, bfa_itnim_sm_fwcreate_qfull);
1008 break;
1009
1010 case BFA_ITNIM_SM_HWFAIL:
1011 break;
1012
1013 default:
1014 bfa_sm_fault(itnim->bfa, event);
1015 }
1016}
1017
Jing Huang5fbe25c2010-10-18 17:17:23 -07001018/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001019 * Itnim is deleted, waiting for firmware response to delete.
1020 */
1021static void
1022bfa_itnim_sm_deleting(struct bfa_itnim_s *itnim, enum bfa_itnim_event event)
1023{
1024 bfa_trc(itnim->bfa, itnim->rport->rport_tag);
1025 bfa_trc(itnim->bfa, event);
1026
1027 switch (event) {
1028 case BFA_ITNIM_SM_FWRSP:
1029 case BFA_ITNIM_SM_HWFAIL:
1030 bfa_sm_set_state(itnim, bfa_itnim_sm_uninit);
1031 bfa_fcpim_delitn(itnim);
1032 break;
1033
1034 default:
1035 bfa_sm_fault(itnim->bfa, event);
1036 }
1037}
1038
1039static void
1040bfa_itnim_sm_deleting_qfull(struct bfa_itnim_s *itnim,
1041 enum bfa_itnim_event event)
1042{
1043 bfa_trc(itnim->bfa, itnim->rport->rport_tag);
1044 bfa_trc(itnim->bfa, event);
1045
1046 switch (event) {
1047 case BFA_ITNIM_SM_QRESUME:
1048 bfa_sm_set_state(itnim, bfa_itnim_sm_deleting);
1049 bfa_itnim_send_fwdelete(itnim);
1050 break;
1051
1052 case BFA_ITNIM_SM_HWFAIL:
1053 bfa_sm_set_state(itnim, bfa_itnim_sm_uninit);
1054 bfa_reqq_wcancel(&itnim->reqq_wait);
1055 bfa_fcpim_delitn(itnim);
1056 break;
1057
1058 default:
1059 bfa_sm_fault(itnim->bfa, event);
1060 }
1061}
1062
Jing Huang5fbe25c2010-10-18 17:17:23 -07001063/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001064 * Initiate cleanup of all IOs on an IOC failure.
1065 */
1066static void
1067bfa_itnim_iocdisable_cleanup(struct bfa_itnim_s *itnim)
1068{
1069 struct bfa_tskim_s *tskim;
1070 struct bfa_ioim_s *ioim;
1071 struct list_head *qe, *qen;
1072
1073 list_for_each_safe(qe, qen, &itnim->tsk_q) {
1074 tskim = (struct bfa_tskim_s *) qe;
1075 bfa_tskim_iocdisable(tskim);
1076 }
1077
1078 list_for_each_safe(qe, qen, &itnim->io_q) {
1079 ioim = (struct bfa_ioim_s *) qe;
1080 bfa_ioim_iocdisable(ioim);
1081 }
1082
Jing Huang5fbe25c2010-10-18 17:17:23 -07001083 /*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001084 * For IO request in pending queue, we pretend an early timeout.
1085 */
1086 list_for_each_safe(qe, qen, &itnim->pending_q) {
1087 ioim = (struct bfa_ioim_s *) qe;
1088 bfa_ioim_tov(ioim);
1089 }
1090
1091 list_for_each_safe(qe, qen, &itnim->io_cleanup_q) {
1092 ioim = (struct bfa_ioim_s *) qe;
1093 bfa_ioim_iocdisable(ioim);
1094 }
1095}
1096
Jing Huang5fbe25c2010-10-18 17:17:23 -07001097/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001098 * IO cleanup completion
1099 */
1100static void
1101bfa_itnim_cleanp_comp(void *itnim_cbarg)
1102{
1103 struct bfa_itnim_s *itnim = itnim_cbarg;
1104
1105 bfa_stats(itnim, cleanup_comps);
1106 bfa_sm_send_event(itnim, BFA_ITNIM_SM_CLEANUP);
1107}
1108
Jing Huang5fbe25c2010-10-18 17:17:23 -07001109/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001110 * Initiate cleanup of all IOs.
1111 */
1112static void
1113bfa_itnim_cleanup(struct bfa_itnim_s *itnim)
1114{
1115 struct bfa_ioim_s *ioim;
1116 struct bfa_tskim_s *tskim;
1117 struct list_head *qe, *qen;
1118
1119 bfa_wc_init(&itnim->wc, bfa_itnim_cleanp_comp, itnim);
1120
1121 list_for_each_safe(qe, qen, &itnim->io_q) {
1122 ioim = (struct bfa_ioim_s *) qe;
1123
Jing Huang5fbe25c2010-10-18 17:17:23 -07001124 /*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001125 * Move IO to a cleanup queue from active queue so that a later
1126 * TM will not pickup this IO.
1127 */
1128 list_del(&ioim->qe);
1129 list_add_tail(&ioim->qe, &itnim->io_cleanup_q);
1130
1131 bfa_wc_up(&itnim->wc);
1132 bfa_ioim_cleanup(ioim);
1133 }
1134
1135 list_for_each_safe(qe, qen, &itnim->tsk_q) {
1136 tskim = (struct bfa_tskim_s *) qe;
1137 bfa_wc_up(&itnim->wc);
1138 bfa_tskim_cleanup(tskim);
1139 }
1140
1141 bfa_wc_wait(&itnim->wc);
1142}
1143
1144static void
1145__bfa_cb_itnim_online(void *cbarg, bfa_boolean_t complete)
1146{
1147 struct bfa_itnim_s *itnim = cbarg;
1148
1149 if (complete)
1150 bfa_cb_itnim_online(itnim->ditn);
1151}
1152
1153static void
1154__bfa_cb_itnim_offline(void *cbarg, bfa_boolean_t complete)
1155{
1156 struct bfa_itnim_s *itnim = cbarg;
1157
1158 if (complete)
1159 bfa_cb_itnim_offline(itnim->ditn);
1160}
1161
1162static void
1163__bfa_cb_itnim_sler(void *cbarg, bfa_boolean_t complete)
1164{
1165 struct bfa_itnim_s *itnim = cbarg;
1166
1167 if (complete)
1168 bfa_cb_itnim_sler(itnim->ditn);
1169}
1170
Jing Huang5fbe25c2010-10-18 17:17:23 -07001171/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001172 * Call to resume any I/O requests waiting for room in request queue.
1173 */
1174static void
1175bfa_itnim_qresume(void *cbarg)
1176{
1177 struct bfa_itnim_s *itnim = cbarg;
1178
1179 bfa_sm_send_event(itnim, BFA_ITNIM_SM_QRESUME);
1180}
1181
1182
1183
1184
/*
 * bfa_itnim_public
 */
1188
1189void
1190bfa_itnim_iodone(struct bfa_itnim_s *itnim)
1191{
1192 bfa_wc_down(&itnim->wc);
1193}
1194
1195void
1196bfa_itnim_tskdone(struct bfa_itnim_s *itnim)
1197{
1198 bfa_wc_down(&itnim->wc);
1199}
1200
1201void
1202bfa_itnim_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *km_len,
1203 u32 *dm_len)
1204{
Jing Huang5fbe25c2010-10-18 17:17:23 -07001205 /*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001206 * ITN memory
1207 */
1208 *km_len += cfg->fwcfg.num_rports * sizeof(struct bfa_itnim_s);
1209}
1210
1211void
1212bfa_itnim_attach(struct bfa_fcpim_mod_s *fcpim, struct bfa_meminfo_s *minfo)
1213{
1214 struct bfa_s *bfa = fcpim->bfa;
1215 struct bfa_itnim_s *itnim;
1216 int i, j;
1217
1218 INIT_LIST_HEAD(&fcpim->itnim_q);
1219
1220 itnim = (struct bfa_itnim_s *) bfa_meminfo_kva(minfo);
1221 fcpim->itnim_arr = itnim;
1222
1223 for (i = 0; i < fcpim->num_itnims; i++, itnim++) {
Jing Huang6a18b162010-10-18 17:08:54 -07001224 memset(itnim, 0, sizeof(struct bfa_itnim_s));
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001225 itnim->bfa = bfa;
1226 itnim->fcpim = fcpim;
1227 itnim->reqq = BFA_REQQ_QOS_LO;
1228 itnim->rport = BFA_RPORT_FROM_TAG(bfa, i);
1229 itnim->iotov_active = BFA_FALSE;
1230 bfa_reqq_winit(&itnim->reqq_wait, bfa_itnim_qresume, itnim);
1231
1232 INIT_LIST_HEAD(&itnim->io_q);
1233 INIT_LIST_HEAD(&itnim->io_cleanup_q);
1234 INIT_LIST_HEAD(&itnim->pending_q);
1235 INIT_LIST_HEAD(&itnim->tsk_q);
1236 INIT_LIST_HEAD(&itnim->delay_comp_q);
1237 for (j = 0; j < BFA_IOBUCKET_MAX; j++)
1238 itnim->ioprofile.io_latency.min[j] = ~0;
1239 bfa_sm_set_state(itnim, bfa_itnim_sm_uninit);
1240 }
1241
1242 bfa_meminfo_kva(minfo) = (u8 *) itnim;
1243}
1244
1245void
1246bfa_itnim_iocdisable(struct bfa_itnim_s *itnim)
1247{
1248 bfa_stats(itnim, ioc_disabled);
1249 bfa_sm_send_event(itnim, BFA_ITNIM_SM_HWFAIL);
1250}
1251
1252static bfa_boolean_t
1253bfa_itnim_send_fwcreate(struct bfa_itnim_s *itnim)
1254{
1255 struct bfi_itnim_create_req_s *m;
1256
1257 itnim->msg_no++;
1258
Jing Huang5fbe25c2010-10-18 17:17:23 -07001259 /*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001260 * check for room in queue to send request now
1261 */
1262 m = bfa_reqq_next(itnim->bfa, itnim->reqq);
1263 if (!m) {
1264 bfa_reqq_wait(itnim->bfa, itnim->reqq, &itnim->reqq_wait);
1265 return BFA_FALSE;
1266 }
1267
1268 bfi_h2i_set(m->mh, BFI_MC_ITNIM, BFI_ITNIM_H2I_CREATE_REQ,
1269 bfa_lpuid(itnim->bfa));
1270 m->fw_handle = itnim->rport->fw_handle;
1271 m->class = FC_CLASS_3;
1272 m->seq_rec = itnim->seq_rec;
1273 m->msg_no = itnim->msg_no;
1274 bfa_stats(itnim, fw_create);
1275
Jing Huang5fbe25c2010-10-18 17:17:23 -07001276 /*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001277 * queue I/O message to firmware
1278 */
1279 bfa_reqq_produce(itnim->bfa, itnim->reqq);
1280 return BFA_TRUE;
1281}
1282
1283static bfa_boolean_t
1284bfa_itnim_send_fwdelete(struct bfa_itnim_s *itnim)
1285{
1286 struct bfi_itnim_delete_req_s *m;
1287
Jing Huang5fbe25c2010-10-18 17:17:23 -07001288 /*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001289 * check for room in queue to send request now
1290 */
1291 m = bfa_reqq_next(itnim->bfa, itnim->reqq);
1292 if (!m) {
1293 bfa_reqq_wait(itnim->bfa, itnim->reqq, &itnim->reqq_wait);
1294 return BFA_FALSE;
1295 }
1296
1297 bfi_h2i_set(m->mh, BFI_MC_ITNIM, BFI_ITNIM_H2I_DELETE_REQ,
1298 bfa_lpuid(itnim->bfa));
1299 m->fw_handle = itnim->rport->fw_handle;
1300 bfa_stats(itnim, fw_delete);
1301
Jing Huang5fbe25c2010-10-18 17:17:23 -07001302 /*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001303 * queue I/O message to firmware
1304 */
1305 bfa_reqq_produce(itnim->bfa, itnim->reqq);
1306 return BFA_TRUE;
1307}
1308
Jing Huang5fbe25c2010-10-18 17:17:23 -07001309/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001310 * Cleanup all pending failed inflight requests.
1311 */
1312static void
1313bfa_itnim_delayed_comp(struct bfa_itnim_s *itnim, bfa_boolean_t iotov)
1314{
1315 struct bfa_ioim_s *ioim;
1316 struct list_head *qe, *qen;
1317
1318 list_for_each_safe(qe, qen, &itnim->delay_comp_q) {
1319 ioim = (struct bfa_ioim_s *)qe;
1320 bfa_ioim_delayed_comp(ioim, iotov);
1321 }
1322}
1323
Jing Huang5fbe25c2010-10-18 17:17:23 -07001324/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001325 * Start all pending IO requests.
1326 */
1327static void
1328bfa_itnim_iotov_online(struct bfa_itnim_s *itnim)
1329{
1330 struct bfa_ioim_s *ioim;
1331
1332 bfa_itnim_iotov_stop(itnim);
1333
Jing Huang5fbe25c2010-10-18 17:17:23 -07001334 /*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001335 * Abort all inflight IO requests in the queue
1336 */
1337 bfa_itnim_delayed_comp(itnim, BFA_FALSE);
1338
Jing Huang5fbe25c2010-10-18 17:17:23 -07001339 /*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001340 * Start all pending IO requests.
1341 */
1342 while (!list_empty(&itnim->pending_q)) {
1343 bfa_q_deq(&itnim->pending_q, &ioim);
1344 list_add_tail(&ioim->qe, &itnim->io_q);
1345 bfa_ioim_start(ioim);
1346 }
1347}
1348
Jing Huang5fbe25c2010-10-18 17:17:23 -07001349/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001350 * Fail all pending IO requests
1351 */
1352static void
1353bfa_itnim_iotov_cleanup(struct bfa_itnim_s *itnim)
1354{
1355 struct bfa_ioim_s *ioim;
1356
Jing Huang5fbe25c2010-10-18 17:17:23 -07001357 /*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001358 * Fail all inflight IO requests in the queue
1359 */
1360 bfa_itnim_delayed_comp(itnim, BFA_TRUE);
1361
Jing Huang5fbe25c2010-10-18 17:17:23 -07001362 /*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001363 * Fail any pending IO requests.
1364 */
1365 while (!list_empty(&itnim->pending_q)) {
1366 bfa_q_deq(&itnim->pending_q, &ioim);
1367 list_add_tail(&ioim->qe, &ioim->fcpim->ioim_comp_q);
1368 bfa_ioim_tov(ioim);
1369 }
1370}
1371
Jing Huang5fbe25c2010-10-18 17:17:23 -07001372/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001373 * IO TOV timer callback. Fail any pending IO requests.
1374 */
1375static void
1376bfa_itnim_iotov(void *itnim_arg)
1377{
1378 struct bfa_itnim_s *itnim = itnim_arg;
1379
1380 itnim->iotov_active = BFA_FALSE;
1381
1382 bfa_cb_itnim_tov_begin(itnim->ditn);
1383 bfa_itnim_iotov_cleanup(itnim);
1384 bfa_cb_itnim_tov(itnim->ditn);
1385}
1386
Jing Huang5fbe25c2010-10-18 17:17:23 -07001387/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001388 * Start IO TOV timer for failing back pending IO requests in offline state.
1389 */
1390static void
1391bfa_itnim_iotov_start(struct bfa_itnim_s *itnim)
1392{
1393 if (itnim->fcpim->path_tov > 0) {
1394
1395 itnim->iotov_active = BFA_TRUE;
1396 bfa_assert(bfa_itnim_hold_io(itnim));
1397 bfa_timer_start(itnim->bfa, &itnim->timer,
1398 bfa_itnim_iotov, itnim, itnim->fcpim->path_tov);
1399 }
1400}
1401
Jing Huang5fbe25c2010-10-18 17:17:23 -07001402/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001403 * Stop IO TOV timer.
1404 */
1405static void
1406bfa_itnim_iotov_stop(struct bfa_itnim_s *itnim)
1407{
1408 if (itnim->iotov_active) {
1409 itnim->iotov_active = BFA_FALSE;
1410 bfa_timer_stop(&itnim->timer);
1411 }
1412}
1413
Jing Huang5fbe25c2010-10-18 17:17:23 -07001414/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001415 * Stop IO TOV timer.
1416 */
1417static void
1418bfa_itnim_iotov_delete(struct bfa_itnim_s *itnim)
1419{
1420 bfa_boolean_t pathtov_active = BFA_FALSE;
1421
1422 if (itnim->iotov_active)
1423 pathtov_active = BFA_TRUE;
1424
1425 bfa_itnim_iotov_stop(itnim);
1426 if (pathtov_active)
1427 bfa_cb_itnim_tov_begin(itnim->ditn);
1428 bfa_itnim_iotov_cleanup(itnim);
1429 if (pathtov_active)
1430 bfa_cb_itnim_tov(itnim->ditn);
1431}
1432
1433static void
1434bfa_itnim_update_del_itn_stats(struct bfa_itnim_s *itnim)
1435{
1436 struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(itnim->bfa);
1437 fcpim->del_itn_stats.del_itn_iocomp_aborted +=
1438 itnim->stats.iocomp_aborted;
1439 fcpim->del_itn_stats.del_itn_iocomp_timedout +=
1440 itnim->stats.iocomp_timedout;
1441 fcpim->del_itn_stats.del_itn_iocom_sqer_needed +=
1442 itnim->stats.iocom_sqer_needed;
1443 fcpim->del_itn_stats.del_itn_iocom_res_free +=
1444 itnim->stats.iocom_res_free;
1445 fcpim->del_itn_stats.del_itn_iocom_hostabrts +=
1446 itnim->stats.iocom_hostabrts;
1447 fcpim->del_itn_stats.del_itn_total_ios += itnim->stats.total_ios;
1448 fcpim->del_itn_stats.del_io_iocdowns += itnim->stats.io_iocdowns;
1449 fcpim->del_itn_stats.del_tm_iocdowns += itnim->stats.tm_iocdowns;
1450}
1451
1452
1453
/*
 * bfa_itnim_public
 */
1457
Jing Huang5fbe25c2010-10-18 17:17:23 -07001458/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001459 * Itnim interrupt processing.
1460 */
1461void
1462bfa_itnim_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
1463{
1464 struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
1465 union bfi_itnim_i2h_msg_u msg;
1466 struct bfa_itnim_s *itnim;
1467
1468 bfa_trc(bfa, m->mhdr.msg_id);
1469
1470 msg.msg = m;
1471
1472 switch (m->mhdr.msg_id) {
1473 case BFI_ITNIM_I2H_CREATE_RSP:
1474 itnim = BFA_ITNIM_FROM_TAG(fcpim,
1475 msg.create_rsp->bfa_handle);
1476 bfa_assert(msg.create_rsp->status == BFA_STATUS_OK);
1477 bfa_stats(itnim, create_comps);
1478 bfa_sm_send_event(itnim, BFA_ITNIM_SM_FWRSP);
1479 break;
1480
1481 case BFI_ITNIM_I2H_DELETE_RSP:
1482 itnim = BFA_ITNIM_FROM_TAG(fcpim,
1483 msg.delete_rsp->bfa_handle);
1484 bfa_assert(msg.delete_rsp->status == BFA_STATUS_OK);
1485 bfa_stats(itnim, delete_comps);
1486 bfa_sm_send_event(itnim, BFA_ITNIM_SM_FWRSP);
1487 break;
1488
1489 case BFI_ITNIM_I2H_SLER_EVENT:
1490 itnim = BFA_ITNIM_FROM_TAG(fcpim,
1491 msg.sler_event->bfa_handle);
1492 bfa_stats(itnim, sler_events);
1493 bfa_sm_send_event(itnim, BFA_ITNIM_SM_SLER);
1494 break;
1495
1496 default:
1497 bfa_trc(bfa, m->mhdr.msg_id);
1498 bfa_assert(0);
1499 }
1500}
1501
1502
1503
/*
 * bfa_itnim_api
 */
1507
1508struct bfa_itnim_s *
1509bfa_itnim_create(struct bfa_s *bfa, struct bfa_rport_s *rport, void *ditn)
1510{
1511 struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
1512 struct bfa_itnim_s *itnim;
1513
1514 itnim = BFA_ITNIM_FROM_TAG(fcpim, rport->rport_tag);
1515 bfa_assert(itnim->rport == rport);
1516
1517 itnim->ditn = ditn;
1518
1519 bfa_stats(itnim, creates);
1520 bfa_sm_send_event(itnim, BFA_ITNIM_SM_CREATE);
1521
1522 return itnim;
1523}
1524
1525void
1526bfa_itnim_delete(struct bfa_itnim_s *itnim)
1527{
1528 bfa_stats(itnim, deletes);
1529 bfa_sm_send_event(itnim, BFA_ITNIM_SM_DELETE);
1530}
1531
1532void
1533bfa_itnim_online(struct bfa_itnim_s *itnim, bfa_boolean_t seq_rec)
1534{
1535 itnim->seq_rec = seq_rec;
1536 bfa_stats(itnim, onlines);
1537 bfa_sm_send_event(itnim, BFA_ITNIM_SM_ONLINE);
1538}
1539
1540void
1541bfa_itnim_offline(struct bfa_itnim_s *itnim)
1542{
1543 bfa_stats(itnim, offlines);
1544 bfa_sm_send_event(itnim, BFA_ITNIM_SM_OFFLINE);
1545}
1546
Jing Huang5fbe25c2010-10-18 17:17:23 -07001547/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001548 * Return true if itnim is considered offline for holding off IO request.
1549 * IO is not held if itnim is being deleted.
1550 */
1551bfa_boolean_t
1552bfa_itnim_hold_io(struct bfa_itnim_s *itnim)
1553{
1554 return itnim->fcpim->path_tov && itnim->iotov_active &&
1555 (bfa_sm_cmp_state(itnim, bfa_itnim_sm_fwcreate) ||
1556 bfa_sm_cmp_state(itnim, bfa_itnim_sm_sler) ||
1557 bfa_sm_cmp_state(itnim, bfa_itnim_sm_cleanup_offline) ||
1558 bfa_sm_cmp_state(itnim, bfa_itnim_sm_fwdelete) ||
1559 bfa_sm_cmp_state(itnim, bfa_itnim_sm_offline) ||
1560 bfa_sm_cmp_state(itnim, bfa_itnim_sm_iocdisable));
1561}
1562
1563bfa_status_t
1564bfa_itnim_get_ioprofile(struct bfa_itnim_s *itnim,
1565 struct bfa_itnim_ioprofile_s *ioprofile)
1566{
1567 struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(itnim->bfa);
1568 if (!fcpim->io_profile)
1569 return BFA_STATUS_IOPROFILE_OFF;
1570
1571 itnim->ioprofile.index = BFA_IOBUCKET_MAX;
1572 itnim->ioprofile.io_profile_start_time =
1573 bfa_io_profile_start_time(itnim->bfa);
1574 itnim->ioprofile.clock_res_mul = bfa_io_lat_clock_res_mul;
1575 itnim->ioprofile.clock_res_div = bfa_io_lat_clock_res_div;
1576 *ioprofile = itnim->ioprofile;
1577
1578 return BFA_STATUS_OK;
1579}
1580
1581void
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001582bfa_itnim_clear_stats(struct bfa_itnim_s *itnim)
1583{
1584 int j;
Jing Huang6a18b162010-10-18 17:08:54 -07001585 memset(&itnim->stats, 0, sizeof(itnim->stats));
1586 memset(&itnim->ioprofile, 0, sizeof(itnim->ioprofile));
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001587 for (j = 0; j < BFA_IOBUCKET_MAX; j++)
1588 itnim->ioprofile.io_latency.min[j] = ~0;
1589}
1590
/*
 * BFA IO module state machine functions
 */
1594
Jing Huang5fbe25c2010-10-18 17:17:23 -07001595/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001596 * IO is not started (unallocated).
1597 */
1598static void
1599bfa_ioim_sm_uninit(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
1600{
1601 bfa_trc_fp(ioim->bfa, ioim->iotag);
1602 bfa_trc_fp(ioim->bfa, event);
1603
1604 switch (event) {
1605 case BFA_IOIM_SM_START:
1606 if (!bfa_itnim_is_online(ioim->itnim)) {
1607 if (!bfa_itnim_hold_io(ioim->itnim)) {
1608 bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
1609 list_del(&ioim->qe);
1610 list_add_tail(&ioim->qe,
1611 &ioim->fcpim->ioim_comp_q);
1612 bfa_cb_queue(ioim->bfa, &ioim->hcb_qe,
1613 __bfa_cb_ioim_pathtov, ioim);
1614 } else {
1615 list_del(&ioim->qe);
1616 list_add_tail(&ioim->qe,
1617 &ioim->itnim->pending_q);
1618 }
1619 break;
1620 }
1621
1622 if (ioim->nsges > BFI_SGE_INLINE) {
Maggie Zhange3e7d3e2010-12-09 19:10:27 -08001623 if (!bfa_ioim_sgpg_alloc(ioim)) {
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001624 bfa_sm_set_state(ioim, bfa_ioim_sm_sgalloc);
1625 return;
1626 }
1627 }
1628
1629 if (!bfa_ioim_send_ioreq(ioim)) {
1630 bfa_sm_set_state(ioim, bfa_ioim_sm_qfull);
1631 break;
1632 }
1633
1634 bfa_sm_set_state(ioim, bfa_ioim_sm_active);
1635 break;
1636
1637 case BFA_IOIM_SM_IOTOV:
1638 bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
1639 bfa_ioim_move_to_comp_q(ioim);
1640 bfa_cb_queue(ioim->bfa, &ioim->hcb_qe,
1641 __bfa_cb_ioim_pathtov, ioim);
1642 break;
1643
1644 case BFA_IOIM_SM_ABORT:
Jing Huang5fbe25c2010-10-18 17:17:23 -07001645 /*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001646 * IO in pending queue can get abort requests. Complete abort
1647 * requests immediately.
1648 */
1649 bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
1650 bfa_assert(bfa_q_is_on_q(&ioim->itnim->pending_q, ioim));
1651 bfa_cb_queue(ioim->bfa, &ioim->hcb_qe,
1652 __bfa_cb_ioim_abort, ioim);
1653 break;
1654
1655 default:
1656 bfa_sm_fault(ioim->bfa, event);
1657 }
1658}
1659
Jing Huang5fbe25c2010-10-18 17:17:23 -07001660/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001661 * IO is waiting for SG pages.
1662 */
1663static void
1664bfa_ioim_sm_sgalloc(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
1665{
1666 bfa_trc(ioim->bfa, ioim->iotag);
1667 bfa_trc(ioim->bfa, event);
1668
1669 switch (event) {
1670 case BFA_IOIM_SM_SGALLOCED:
1671 if (!bfa_ioim_send_ioreq(ioim)) {
1672 bfa_sm_set_state(ioim, bfa_ioim_sm_qfull);
1673 break;
1674 }
1675 bfa_sm_set_state(ioim, bfa_ioim_sm_active);
1676 break;
1677
1678 case BFA_IOIM_SM_CLEANUP:
1679 bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
1680 bfa_sgpg_wcancel(ioim->bfa, &ioim->iosp->sgpg_wqe);
1681 bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
1682 ioim);
1683 bfa_ioim_notify_cleanup(ioim);
1684 break;
1685
1686 case BFA_IOIM_SM_ABORT:
1687 bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
1688 bfa_sgpg_wcancel(ioim->bfa, &ioim->iosp->sgpg_wqe);
1689 bfa_ioim_move_to_comp_q(ioim);
1690 bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
1691 ioim);
1692 break;
1693
1694 case BFA_IOIM_SM_HWFAIL:
1695 bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
1696 bfa_sgpg_wcancel(ioim->bfa, &ioim->iosp->sgpg_wqe);
1697 bfa_ioim_move_to_comp_q(ioim);
1698 bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
1699 ioim);
1700 break;
1701
1702 default:
1703 bfa_sm_fault(ioim->bfa, event);
1704 }
1705}
1706
Jing Huang5fbe25c2010-10-18 17:17:23 -07001707/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001708 * IO is active.
1709 */
1710static void
1711bfa_ioim_sm_active(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
1712{
1713 bfa_trc_fp(ioim->bfa, ioim->iotag);
1714 bfa_trc_fp(ioim->bfa, event);
1715
1716 switch (event) {
1717 case BFA_IOIM_SM_COMP_GOOD:
1718 bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
1719 bfa_ioim_move_to_comp_q(ioim);
1720 bfa_cb_queue(ioim->bfa, &ioim->hcb_qe,
1721 __bfa_cb_ioim_good_comp, ioim);
1722 break;
1723
1724 case BFA_IOIM_SM_COMP:
1725 bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
1726 bfa_ioim_move_to_comp_q(ioim);
1727 bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_comp,
1728 ioim);
1729 break;
1730
1731 case BFA_IOIM_SM_DONE:
1732 bfa_sm_set_state(ioim, bfa_ioim_sm_hcb_free);
1733 bfa_ioim_move_to_comp_q(ioim);
1734 bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_comp,
1735 ioim);
1736 break;
1737
1738 case BFA_IOIM_SM_ABORT:
1739 ioim->iosp->abort_explicit = BFA_TRUE;
1740 ioim->io_cbfn = __bfa_cb_ioim_abort;
1741
1742 if (bfa_ioim_send_abort(ioim))
1743 bfa_sm_set_state(ioim, bfa_ioim_sm_abort);
1744 else {
1745 bfa_sm_set_state(ioim, bfa_ioim_sm_abort_qfull);
1746 bfa_stats(ioim->itnim, qwait);
1747 bfa_reqq_wait(ioim->bfa, ioim->reqq,
1748 &ioim->iosp->reqq_wait);
1749 }
1750 break;
1751
1752 case BFA_IOIM_SM_CLEANUP:
1753 ioim->iosp->abort_explicit = BFA_FALSE;
1754 ioim->io_cbfn = __bfa_cb_ioim_failed;
1755
1756 if (bfa_ioim_send_abort(ioim))
1757 bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup);
1758 else {
1759 bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup_qfull);
1760 bfa_stats(ioim->itnim, qwait);
1761 bfa_reqq_wait(ioim->bfa, ioim->reqq,
1762 &ioim->iosp->reqq_wait);
1763 }
1764 break;
1765
1766 case BFA_IOIM_SM_HWFAIL:
1767 bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
1768 bfa_ioim_move_to_comp_q(ioim);
1769 bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
1770 ioim);
1771 break;
1772
1773 case BFA_IOIM_SM_SQRETRY:
1774 if (bfa_ioim_get_iotag(ioim) != BFA_TRUE) {
1775 /* max retry completed free IO */
1776 bfa_sm_set_state(ioim, bfa_ioim_sm_hcb_free);
1777 bfa_ioim_move_to_comp_q(ioim);
1778 bfa_cb_queue(ioim->bfa, &ioim->hcb_qe,
1779 __bfa_cb_ioim_failed, ioim);
1780 break;
1781 }
1782 /* waiting for IO tag resource free */
1783 bfa_sm_set_state(ioim, bfa_ioim_sm_cmnd_retry);
1784 break;
1785
1786 default:
1787 bfa_sm_fault(ioim->bfa, event);
1788 }
1789}
1790
Jing Huang5fbe25c2010-10-18 17:17:23 -07001791/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001792* IO is retried with new tag.
1793*/
1794static void
1795bfa_ioim_sm_cmnd_retry(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
1796{
1797 bfa_trc_fp(ioim->bfa, ioim->iotag);
1798 bfa_trc_fp(ioim->bfa, event);
1799
1800 switch (event) {
1801 case BFA_IOIM_SM_FREE:
1802 /* abts and rrq done. Now retry the IO with new tag */
1803 if (!bfa_ioim_send_ioreq(ioim)) {
1804 bfa_sm_set_state(ioim, bfa_ioim_sm_qfull);
1805 break;
1806 }
1807 bfa_sm_set_state(ioim, bfa_ioim_sm_active);
1808 break;
1809
1810 case BFA_IOIM_SM_CLEANUP:
1811 ioim->iosp->abort_explicit = BFA_FALSE;
1812 ioim->io_cbfn = __bfa_cb_ioim_failed;
1813
1814 if (bfa_ioim_send_abort(ioim))
1815 bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup);
1816 else {
1817 bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup_qfull);
1818 bfa_stats(ioim->itnim, qwait);
1819 bfa_reqq_wait(ioim->bfa, ioim->reqq,
1820 &ioim->iosp->reqq_wait);
1821 }
1822 break;
1823
1824 case BFA_IOIM_SM_HWFAIL:
1825 bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
1826 bfa_ioim_move_to_comp_q(ioim);
1827 bfa_cb_queue(ioim->bfa, &ioim->hcb_qe,
1828 __bfa_cb_ioim_failed, ioim);
1829 break;
1830
1831 case BFA_IOIM_SM_ABORT:
Jing Huang5fbe25c2010-10-18 17:17:23 -07001832 /* in this state IO abort is done.
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001833 * Waiting for IO tag resource free.
1834 */
1835 bfa_sm_set_state(ioim, bfa_ioim_sm_hcb_free);
1836 bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
1837 ioim);
1838 break;
1839
1840 default:
1841 bfa_sm_fault(ioim->bfa, event);
1842 }
1843}
1844
Jing Huang5fbe25c2010-10-18 17:17:23 -07001845/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001846 * IO is being aborted, waiting for completion from firmware.
1847 */
1848static void
1849bfa_ioim_sm_abort(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
1850{
1851 bfa_trc(ioim->bfa, ioim->iotag);
1852 bfa_trc(ioim->bfa, event);
1853
1854 switch (event) {
1855 case BFA_IOIM_SM_COMP_GOOD:
1856 case BFA_IOIM_SM_COMP:
1857 case BFA_IOIM_SM_DONE:
1858 case BFA_IOIM_SM_FREE:
1859 break;
1860
1861 case BFA_IOIM_SM_ABORT_DONE:
1862 bfa_sm_set_state(ioim, bfa_ioim_sm_hcb_free);
1863 bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
1864 ioim);
1865 break;
1866
1867 case BFA_IOIM_SM_ABORT_COMP:
1868 bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
1869 bfa_ioim_move_to_comp_q(ioim);
1870 bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
1871 ioim);
1872 break;
1873
1874 case BFA_IOIM_SM_COMP_UTAG:
1875 bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
1876 bfa_ioim_move_to_comp_q(ioim);
1877 bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
1878 ioim);
1879 break;
1880
1881 case BFA_IOIM_SM_CLEANUP:
1882 bfa_assert(ioim->iosp->abort_explicit == BFA_TRUE);
1883 ioim->iosp->abort_explicit = BFA_FALSE;
1884
1885 if (bfa_ioim_send_abort(ioim))
1886 bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup);
1887 else {
1888 bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup_qfull);
1889 bfa_stats(ioim->itnim, qwait);
1890 bfa_reqq_wait(ioim->bfa, ioim->reqq,
1891 &ioim->iosp->reqq_wait);
1892 }
1893 break;
1894
1895 case BFA_IOIM_SM_HWFAIL:
1896 bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
1897 bfa_ioim_move_to_comp_q(ioim);
1898 bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
1899 ioim);
1900 break;
1901
1902 default:
1903 bfa_sm_fault(ioim->bfa, event);
1904 }
1905}
1906
Jing Huang5fbe25c2010-10-18 17:17:23 -07001907/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001908 * IO is being cleaned up (implicit abort), waiting for completion from
1909 * firmware.
1910 */
1911static void
1912bfa_ioim_sm_cleanup(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
1913{
1914 bfa_trc(ioim->bfa, ioim->iotag);
1915 bfa_trc(ioim->bfa, event);
1916
1917 switch (event) {
1918 case BFA_IOIM_SM_COMP_GOOD:
1919 case BFA_IOIM_SM_COMP:
1920 case BFA_IOIM_SM_DONE:
1921 case BFA_IOIM_SM_FREE:
1922 break;
1923
1924 case BFA_IOIM_SM_ABORT:
Jing Huang5fbe25c2010-10-18 17:17:23 -07001925 /*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001926 * IO is already being aborted implicitly
1927 */
1928 ioim->io_cbfn = __bfa_cb_ioim_abort;
1929 break;
1930
1931 case BFA_IOIM_SM_ABORT_DONE:
1932 bfa_sm_set_state(ioim, bfa_ioim_sm_hcb_free);
1933 bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, ioim->io_cbfn, ioim);
1934 bfa_ioim_notify_cleanup(ioim);
1935 break;
1936
1937 case BFA_IOIM_SM_ABORT_COMP:
1938 bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
1939 bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, ioim->io_cbfn, ioim);
1940 bfa_ioim_notify_cleanup(ioim);
1941 break;
1942
1943 case BFA_IOIM_SM_COMP_UTAG:
1944 bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
1945 bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, ioim->io_cbfn, ioim);
1946 bfa_ioim_notify_cleanup(ioim);
1947 break;
1948
1949 case BFA_IOIM_SM_HWFAIL:
1950 bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
1951 bfa_ioim_move_to_comp_q(ioim);
1952 bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
1953 ioim);
1954 break;
1955
1956 case BFA_IOIM_SM_CLEANUP:
Jing Huang5fbe25c2010-10-18 17:17:23 -07001957 /*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001958 * IO can be in cleanup state already due to TM command.
1959 * 2nd cleanup request comes from ITN offline event.
1960 */
1961 break;
1962
1963 default:
1964 bfa_sm_fault(ioim->bfa, event);
1965 }
1966}
1967
/*
 * IO is waiting for room in request CQ
 */
static void
bfa_ioim_sm_qfull(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
{
	bfa_trc(ioim->bfa, ioim->iotag);
	bfa_trc(ioim->bfa, event);

	switch (event) {
	case BFA_IOIM_SM_QRESUME:
		/* Queue space freed up - send the deferred IO request now. */
		bfa_sm_set_state(ioim, bfa_ioim_sm_active);
		bfa_ioim_send_ioreq(ioim);
		break;

	case BFA_IOIM_SM_ABORT:
		/*
		 * IO was never sent to firmware: cancel the queue wait and
		 * complete the abort back to the host directly.
		 */
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
		bfa_ioim_move_to_comp_q(ioim);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
			      ioim);
		break;

	case BFA_IOIM_SM_CLEANUP:
		/* ITN going offline; fail the IO and account for cleanup. */
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
			      ioim);
		bfa_ioim_notify_cleanup(ioim);
		break;

	case BFA_IOIM_SM_HWFAIL:
		/* IOC failure - fail the IO to the host. */
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
		bfa_ioim_move_to_comp_q(ioim);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
			      ioim);
		break;

	default:
		bfa_sm_fault(ioim->bfa, event);
	}
}
2011
/*
 * Active IO is being aborted, waiting for room in request CQ.
 */
static void
bfa_ioim_sm_abort_qfull(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
{
	bfa_trc(ioim->bfa, ioim->iotag);
	bfa_trc(ioim->bfa, event);

	switch (event) {
	case BFA_IOIM_SM_QRESUME:
		/* Queue space available - send the deferred abort request. */
		bfa_sm_set_state(ioim, bfa_ioim_sm_abort);
		bfa_ioim_send_abort(ioim);
		break;

	case BFA_IOIM_SM_CLEANUP:
		/*
		 * Downgrade the pending explicit abort to an implicit cleanup;
		 * the already-queued reqq wait is reused for the cleanup.
		 */
		bfa_assert(ioim->iosp->abort_explicit == BFA_TRUE);
		ioim->iosp->abort_explicit = BFA_FALSE;
		bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup_qfull);
		break;

	case BFA_IOIM_SM_COMP_GOOD:
	case BFA_IOIM_SM_COMP:
		/* IO completed before abort went out; report as aborted. */
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
		bfa_ioim_move_to_comp_q(ioim);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
			      ioim);
		break;

	case BFA_IOIM_SM_DONE:
		/* Completed and tag not reusable yet; resource free pending. */
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb_free);
		bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
		bfa_ioim_move_to_comp_q(ioim);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
			      ioim);
		break;

	case BFA_IOIM_SM_HWFAIL:
		/* IOC failure - fail the IO to the host. */
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
		bfa_ioim_move_to_comp_q(ioim);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
			      ioim);
		break;

	default:
		bfa_sm_fault(ioim->bfa, event);
	}
}
2062
/*
 * Active IO is being cleaned up, waiting for room in request CQ.
 */
static void
bfa_ioim_sm_cleanup_qfull(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
{
	bfa_trc(ioim->bfa, ioim->iotag);
	bfa_trc(ioim->bfa, event);

	switch (event) {
	case BFA_IOIM_SM_QRESUME:
		/* Queue space available - send the deferred cleanup request. */
		bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup);
		bfa_ioim_send_abort(ioim);
		break;

	case BFA_IOIM_SM_ABORT:
		/*
		 * IO is already being cleaned up implicitly; remember the host
		 * abort so the abort callback is delivered instead.
		 */
		ioim->io_cbfn = __bfa_cb_ioim_abort;
		break;

	case BFA_IOIM_SM_COMP_GOOD:
	case BFA_IOIM_SM_COMP:
		/* IO completed before cleanup went out on the wire. */
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, ioim->io_cbfn, ioim);
		bfa_ioim_notify_cleanup(ioim);
		break;

	case BFA_IOIM_SM_DONE:
		/* Completed, firmware resource free still pending. */
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb_free);
		bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, ioim->io_cbfn, ioim);
		bfa_ioim_notify_cleanup(ioim);
		break;

	case BFA_IOIM_SM_HWFAIL:
		/* IOC failure - fail the IO to the host. */
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
		bfa_ioim_move_to_comp_q(ioim);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
			      ioim);
		break;

	default:
		bfa_sm_fault(ioim->bfa, event);
	}
}
2112
/*
 * IO bfa callback is pending.
 */
static void
bfa_ioim_sm_hcb(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
{
	bfa_trc_fp(ioim->bfa, ioim->iotag);
	bfa_trc_fp(ioim->bfa, event);

	switch (event) {
	case BFA_IOIM_SM_HCB:
		/* Host callback delivered - the IO can be recycled. */
		bfa_sm_set_state(ioim, bfa_ioim_sm_uninit);
		bfa_ioim_free(ioim);
		break;

	case BFA_IOIM_SM_CLEANUP:
		/* Completion already queued; just account for the cleanup. */
		bfa_ioim_notify_cleanup(ioim);
		break;

	case BFA_IOIM_SM_HWFAIL:
		/* Callback is already pending; nothing further to do. */
		break;

	default:
		bfa_sm_fault(ioim->bfa, event);
	}
}
2139
Jing Huang5fbe25c2010-10-18 17:17:23 -07002140/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002141 * IO bfa callback is pending. IO resource cannot be freed.
2142 */
2143static void
2144bfa_ioim_sm_hcb_free(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
2145{
2146 bfa_trc(ioim->bfa, ioim->iotag);
2147 bfa_trc(ioim->bfa, event);
2148
2149 switch (event) {
2150 case BFA_IOIM_SM_HCB:
2151 bfa_sm_set_state(ioim, bfa_ioim_sm_resfree);
2152 list_del(&ioim->qe);
2153 list_add_tail(&ioim->qe, &ioim->fcpim->ioim_resfree_q);
2154 break;
2155
2156 case BFA_IOIM_SM_FREE:
2157 bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
2158 break;
2159
2160 case BFA_IOIM_SM_CLEANUP:
2161 bfa_ioim_notify_cleanup(ioim);
2162 break;
2163
2164 case BFA_IOIM_SM_HWFAIL:
2165 bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
2166 break;
2167
2168 default:
2169 bfa_sm_fault(ioim->bfa, event);
2170 }
2171}
2172
/*
 * IO is completed, waiting resource free from firmware.
 */
static void
bfa_ioim_sm_resfree(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
{
	bfa_trc(ioim->bfa, ioim->iotag);
	bfa_trc(ioim->bfa, event);

	switch (event) {
	case BFA_IOIM_SM_FREE:
		/* Firmware released the tag - recycle the IO. */
		bfa_sm_set_state(ioim, bfa_ioim_sm_uninit);
		bfa_ioim_free(ioim);
		break;

	case BFA_IOIM_SM_CLEANUP:
		/* Account for cleanup; completion already delivered. */
		bfa_ioim_notify_cleanup(ioim);
		break;

	case BFA_IOIM_SM_HWFAIL:
		/* IOC failure; tag will be reclaimed on reinit. */
		break;

	default:
		bfa_sm_fault(ioim->bfa, event);
	}
}
2199
2200
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002201static void
2202__bfa_cb_ioim_good_comp(void *cbarg, bfa_boolean_t complete)
2203{
2204 struct bfa_ioim_s *ioim = cbarg;
2205
2206 if (!complete) {
2207 bfa_sm_send_event(ioim, BFA_IOIM_SM_HCB);
2208 return;
2209 }
2210
2211 bfa_cb_ioim_good_comp(ioim->bfa->bfad, ioim->dio);
2212}
2213
/*
 * Host callback for a normally completed IO: unpack the saved firmware
 * response (sense data and data residue) and complete to the SCSI stack.
 */
static void
__bfa_cb_ioim_comp(void *cbarg, bfa_boolean_t complete)
{
	struct bfa_ioim_s *ioim = cbarg;
	struct bfi_ioim_rsp_s *m;
	u8 *snsinfo = NULL;
	u8 sns_len = 0;
	s32 residue = 0;

	if (!complete) {
		/* Callback canceled - drive the state machine instead. */
		bfa_sm_send_event(ioim, BFA_IOIM_SM_HCB);
		return;
	}

	/* Response message was saved by bfa_ioim_isr() while SM was active. */
	m = (struct bfi_ioim_rsp_s *) &ioim->iosp->comp_rspmsg;
	if (m->io_status == BFI_IOIM_STS_OK) {
		/*
		 * setup sense information, if present
		 */
		if ((m->scsi_status == SCSI_STATUS_CHECK_CONDITION) &&
			m->sns_len) {
			sns_len = m->sns_len;
			snsinfo = ioim->iosp->snsinfo;
		}

		/*
		 * setup residue value correctly for normal completions
		 */
		if (m->resid_flags == FCP_RESID_UNDER) {
			residue = be32_to_cpu(m->residue);
			bfa_stats(ioim->itnim, iocomp_underrun);
		}
		if (m->resid_flags == FCP_RESID_OVER) {
			/* overrun is reported as a negative residue */
			residue = be32_to_cpu(m->residue);
			residue = -residue;
			bfa_stats(ioim->itnim, iocomp_overrun);
		}
	}

	bfa_cb_ioim_done(ioim->bfa->bfad, ioim->dio, m->io_status,
			  m->scsi_status, sns_len, snsinfo, residue);
}
2256
2257static void
2258__bfa_cb_ioim_failed(void *cbarg, bfa_boolean_t complete)
2259{
2260 struct bfa_ioim_s *ioim = cbarg;
2261
2262 if (!complete) {
2263 bfa_sm_send_event(ioim, BFA_IOIM_SM_HCB);
2264 return;
2265 }
2266
2267 bfa_cb_ioim_done(ioim->bfa->bfad, ioim->dio, BFI_IOIM_STS_ABORTED,
2268 0, 0, NULL, 0);
2269}
2270
2271static void
2272__bfa_cb_ioim_pathtov(void *cbarg, bfa_boolean_t complete)
2273{
2274 struct bfa_ioim_s *ioim = cbarg;
2275
2276 bfa_stats(ioim->itnim, path_tov_expired);
2277 if (!complete) {
2278 bfa_sm_send_event(ioim, BFA_IOIM_SM_HCB);
2279 return;
2280 }
2281
2282 bfa_cb_ioim_done(ioim->bfa->bfad, ioim->dio, BFI_IOIM_STS_PATHTOV,
2283 0, 0, NULL, 0);
2284}
2285
2286static void
2287__bfa_cb_ioim_abort(void *cbarg, bfa_boolean_t complete)
2288{
2289 struct bfa_ioim_s *ioim = cbarg;
2290
2291 if (!complete) {
2292 bfa_sm_send_event(ioim, BFA_IOIM_SM_HCB);
2293 return;
2294 }
2295
2296 bfa_cb_ioim_abort(ioim->bfa->bfad, ioim->dio);
2297}
2298
/*
 * Wait-queue callback: the SG pages this IO was waiting for are now
 * available.  Record them on the IO and resume the state machine.
 */
static void
bfa_ioim_sgpg_alloced(void *cbarg)
{
	struct bfa_ioim_s *ioim = cbarg;

	ioim->nsgpgs = BFA_SGPG_NPAGE(ioim->nsges);
	/* move allocated pages from the wait-queue element onto the IO */
	list_splice_tail_init(&ioim->iosp->sgpg_wqe.sgpg_q, &ioim->sgpg_q);
	ioim->sgpg = bfa_q_first(&ioim->sgpg_q);
	bfa_sm_send_event(ioim, BFA_IOIM_SM_SGALLOCED);
}
2309
/*
 * Send I/O request to firmware.
 *
 * Builds the inline SG element plus any chained SG pages, fills in the
 * FCP command, and selects the message class from the IO direction.
 * Returns BFA_FALSE (and queues a reqq wait) if the request CQ is full.
 */
static bfa_boolean_t
bfa_ioim_send_ioreq(struct bfa_ioim_s *ioim)
{
	struct bfa_itnim_s *itnim = ioim->itnim;
	struct bfi_ioim_req_s *m;
	static struct fcp_cmnd_s cmnd_z0 = { 0 };
	struct bfi_sge_s *sge, *sgpge;
	u32 pgdlen = 0;
	u32 fcp_dl;
	u64 addr;
	struct scatterlist *sg;
	struct bfa_sgpg_s *sgpg;
	struct scsi_cmnd *cmnd = (struct scsi_cmnd *) ioim->dio;
	u32 i, sge_id, pgcumsz;

	/*
	 * check for room in queue to send request now
	 */
	m = bfa_reqq_next(ioim->bfa, ioim->reqq);
	if (!m) {
		bfa_stats(ioim->itnim, qwait);
		bfa_reqq_wait(ioim->bfa, ioim->reqq,
				&ioim->iosp->reqq_wait);
		return BFA_FALSE;
	}

	/*
	 * build i/o request message next
	 */
	m->io_tag = cpu_to_be16(ioim->iotag);
	m->rport_hdl = ioim->itnim->rport->fw_handle;
	m->io_timeout = bfa_cb_ioim_get_timeout(ioim->dio);

	sge = &m->sges[0];
	sgpg = ioim->sgpg;
	sge_id = 0;
	sgpge = NULL;
	pgcumsz = 0;
	scsi_for_each_sg(cmnd, sg, ioim->nsges, i) {
		if (i == 0) {
			/* build inline IO SG element */
			addr = bfa_os_sgaddr(sg_dma_address(sg));
			sge->sga = *(union bfi_addr_u *) &addr;
			pgdlen = sg_dma_len(sg);
			sge->sg_len = pgdlen;
			/* more SGEs follow? then this is continued-by-page */
			sge->flags = (ioim->nsges > BFI_SGE_INLINE) ?
					BFI_SGE_DATA_CPL : BFI_SGE_DATA_LAST;
			bfa_sge_to_be(sge);
			sge++;
		} else {
			/* sge_id == 0 means a fresh SG page is starting */
			if (sge_id == 0)
				sgpge = sgpg->sgpg->sges;

			addr = bfa_os_sgaddr(sg_dma_address(sg));
			sgpge->sga = *(union bfi_addr_u *) &addr;
			sgpge->sg_len = sg_dma_len(sg);
			pgcumsz += sgpge->sg_len;

			/* set flags */
			if (i < (ioim->nsges - 1) &&
					sge_id < (BFI_SGPG_DATA_SGES - 1))
				sgpge->flags = BFI_SGE_DATA;
			else if (i < (ioim->nsges - 1))
				sgpge->flags = BFI_SGE_DATA_CPL;
			else
				sgpge->flags = BFI_SGE_DATA_LAST;

			bfa_sge_to_le(sgpge);

			sgpge++;
			if (i == (ioim->nsges - 1)) {
				/* terminate the page with a PGDLEN element */
				sgpge->flags = BFI_SGE_PGDLEN;
				sgpge->sga.a32.addr_lo = 0;
				sgpge->sga.a32.addr_hi = 0;
				sgpge->sg_len = pgcumsz;
				bfa_sge_to_le(sgpge);
			} else if (++sge_id == BFI_SGPG_DATA_SGES) {
				/* page full - link to the next SG page */
				sgpg = (struct bfa_sgpg_s *) bfa_q_next(sgpg);
				sgpge->flags = BFI_SGE_LINK;
				sgpge->sga = sgpg->sgpg_pa;
				sgpge->sg_len = pgcumsz;
				bfa_sge_to_le(sgpge);
				sge_id = 0;
				pgcumsz = 0;
			}
		}
	}

	/* second inline SGE: either points at the SG page chain or is empty */
	if (ioim->nsges > BFI_SGE_INLINE) {
		sge->sga = ioim->sgpg->sgpg_pa;
	} else {
		sge->sga.a32.addr_lo = 0;
		sge->sga.a32.addr_hi = 0;
	}
	sge->sg_len = pgdlen;
	sge->flags = BFI_SGE_PGDLEN;
	bfa_sge_to_be(sge);

	/*
	 * set up I/O command parameters
	 */
	m->cmnd = cmnd_z0;
	m->cmnd.lun = bfa_cb_ioim_get_lun(ioim->dio);
	m->cmnd.iodir = bfa_cb_ioim_get_iodir(ioim->dio);
	m->cmnd.cdb = *(scsi_cdb_t *)bfa_cb_ioim_get_cdb(ioim->dio);
	fcp_dl = bfa_cb_ioim_get_size(ioim->dio);
	m->cmnd.fcp_dl = cpu_to_be32(fcp_dl);

	/*
	 * set up I/O message header
	 */
	switch (m->cmnd.iodir) {
	case FCP_IODIR_READ:
		bfi_h2i_set(m->mh, BFI_MC_IOIM_READ, 0, bfa_lpuid(ioim->bfa));
		bfa_stats(itnim, input_reqs);
		ioim->itnim->stats.rd_throughput += fcp_dl;
		break;
	case FCP_IODIR_WRITE:
		bfi_h2i_set(m->mh, BFI_MC_IOIM_WRITE, 0, bfa_lpuid(ioim->bfa));
		bfa_stats(itnim, output_reqs);
		ioim->itnim->stats.wr_throughput += fcp_dl;
		break;
	case FCP_IODIR_RW:
		bfa_stats(itnim, input_reqs);
		bfa_stats(itnim, output_reqs);
		/* fallthrough - RW (and NONE) use the generic IO class */
	default:
		bfi_h2i_set(m->mh, BFI_MC_IOIM_IO, 0, bfa_lpuid(ioim->bfa));
	}
	/* sequence-level recovery or unaligned size forces the generic class */
	if (itnim->seq_rec ||
	    (bfa_cb_ioim_get_size(ioim->dio) & (sizeof(u32) - 1)))
		bfi_h2i_set(m->mh, BFI_MC_IOIM_IO, 0, bfa_lpuid(ioim->bfa));

	/*
	 * queue I/O message to firmware
	 */
	bfa_reqq_produce(ioim->bfa, ioim->reqq);
	return BFA_TRUE;
}
2451
Jing Huang5fbe25c2010-10-18 17:17:23 -07002452/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002453 * Setup any additional SG pages needed.Inline SG element is setup
2454 * at queuing time.
2455 */
2456static bfa_boolean_t
Maggie Zhange3e7d3e2010-12-09 19:10:27 -08002457bfa_ioim_sgpg_alloc(struct bfa_ioim_s *ioim)
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002458{
2459 u16 nsgpgs;
2460
2461 bfa_assert(ioim->nsges > BFI_SGE_INLINE);
2462
Jing Huang5fbe25c2010-10-18 17:17:23 -07002463 /*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002464 * allocate SG pages needed
2465 */
2466 nsgpgs = BFA_SGPG_NPAGE(ioim->nsges);
2467 if (!nsgpgs)
2468 return BFA_TRUE;
2469
2470 if (bfa_sgpg_malloc(ioim->bfa, &ioim->sgpg_q, nsgpgs)
2471 != BFA_STATUS_OK) {
2472 bfa_sgpg_wait(ioim->bfa, &ioim->iosp->sgpg_wqe, nsgpgs);
2473 return BFA_FALSE;
2474 }
2475
2476 ioim->nsgpgs = nsgpgs;
Maggie Zhange3e7d3e2010-12-09 19:10:27 -08002477 ioim->sgpg = bfa_q_first(&ioim->sgpg_q);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002478
2479 return BFA_TRUE;
2480}
2481
Jing Huang5fbe25c2010-10-18 17:17:23 -07002482/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002483 * Send I/O abort request to firmware.
2484 */
2485static bfa_boolean_t
2486bfa_ioim_send_abort(struct bfa_ioim_s *ioim)
2487{
2488 struct bfi_ioim_abort_req_s *m;
2489 enum bfi_ioim_h2i msgop;
2490
Jing Huang5fbe25c2010-10-18 17:17:23 -07002491 /*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002492 * check for room in queue to send request now
2493 */
2494 m = bfa_reqq_next(ioim->bfa, ioim->reqq);
2495 if (!m)
2496 return BFA_FALSE;
2497
Jing Huang5fbe25c2010-10-18 17:17:23 -07002498 /*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002499 * build i/o request message next
2500 */
2501 if (ioim->iosp->abort_explicit)
2502 msgop = BFI_IOIM_H2I_IOABORT_REQ;
2503 else
2504 msgop = BFI_IOIM_H2I_IOCLEANUP_REQ;
2505
2506 bfi_h2i_set(m->mh, BFI_MC_IOIM, msgop, bfa_lpuid(ioim->bfa));
Jing Huangba816ea2010-10-18 17:10:50 -07002507 m->io_tag = cpu_to_be16(ioim->iotag);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002508 m->abort_tag = ++ioim->abort_tag;
2509
Jing Huang5fbe25c2010-10-18 17:17:23 -07002510 /*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002511 * queue I/O message to firmware
2512 */
2513 bfa_reqq_produce(ioim->bfa, ioim->reqq);
2514 return BFA_TRUE;
2515}
2516
Jing Huang5fbe25c2010-10-18 17:17:23 -07002517/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002518 * Call to resume any I/O requests waiting for room in request queue.
2519 */
2520static void
2521bfa_ioim_qresume(void *cbarg)
2522{
2523 struct bfa_ioim_s *ioim = cbarg;
2524
2525 bfa_stats(ioim->itnim, qresumes);
2526 bfa_sm_send_event(ioim, BFA_IOIM_SM_QRESUME);
2527}
2528
2529
2530static void
2531bfa_ioim_notify_cleanup(struct bfa_ioim_s *ioim)
2532{
Jing Huang5fbe25c2010-10-18 17:17:23 -07002533 /*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002534 * Move IO from itnim queue to fcpim global queue since itnim will be
2535 * freed.
2536 */
2537 list_del(&ioim->qe);
2538 list_add_tail(&ioim->qe, &ioim->fcpim->ioim_comp_q);
2539
2540 if (!ioim->iosp->tskim) {
2541 if (ioim->fcpim->delay_comp && ioim->itnim->iotov_active) {
2542 bfa_cb_dequeue(&ioim->hcb_qe);
2543 list_del(&ioim->qe);
2544 list_add_tail(&ioim->qe, &ioim->itnim->delay_comp_q);
2545 }
2546 bfa_itnim_iodone(ioim->itnim);
2547 } else
Maggie Zhangf7f73812010-12-09 19:08:43 -08002548 bfa_wc_down(&ioim->iosp->tskim->wc);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002549}
2550
2551static bfa_boolean_t
2552bfa_ioim_is_abortable(struct bfa_ioim_s *ioim)
2553{
2554 if ((bfa_sm_cmp_state(ioim, bfa_ioim_sm_uninit) &&
2555 (!bfa_q_is_on_q(&ioim->itnim->pending_q, ioim))) ||
2556 (bfa_sm_cmp_state(ioim, bfa_ioim_sm_abort)) ||
2557 (bfa_sm_cmp_state(ioim, bfa_ioim_sm_abort_qfull)) ||
2558 (bfa_sm_cmp_state(ioim, bfa_ioim_sm_hcb)) ||
2559 (bfa_sm_cmp_state(ioim, bfa_ioim_sm_hcb_free)) ||
2560 (bfa_sm_cmp_state(ioim, bfa_ioim_sm_resfree)))
2561 return BFA_FALSE;
2562
2563 return BFA_TRUE;
2564}
2565
Jing Huang5fbe25c2010-10-18 17:17:23 -07002566/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002567 * or after the link comes back.
2568 */
2569void
2570bfa_ioim_delayed_comp(struct bfa_ioim_s *ioim, bfa_boolean_t iotov)
2571{
Jing Huang5fbe25c2010-10-18 17:17:23 -07002572 /*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002573 * If path tov timer expired, failback with PATHTOV status - these
2574 * IO requests are not normally retried by IO stack.
2575 *
2576 * Otherwise device cameback online and fail it with normal failed
2577 * status so that IO stack retries these failed IO requests.
2578 */
2579 if (iotov)
2580 ioim->io_cbfn = __bfa_cb_ioim_pathtov;
2581 else {
2582 ioim->io_cbfn = __bfa_cb_ioim_failed;
2583 bfa_stats(ioim->itnim, iocom_nexus_abort);
2584 }
2585 bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, ioim->io_cbfn, ioim);
2586
Jing Huang5fbe25c2010-10-18 17:17:23 -07002587 /*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002588 * Move IO to fcpim global queue since itnim will be
2589 * freed.
2590 */
2591 list_del(&ioim->qe);
2592 list_add_tail(&ioim->qe, &ioim->fcpim->ioim_comp_q);
2593}
2594
2595
/*
 * Memory allocation and initialization.
 *
 * Carves the IOIM array, the per-IO shadow (sp) array and the per-IO DMA
 * sense buffers out of the pre-sized meminfo regions, then initializes
 * every IOIM and places it on the free queue.
 */
void
bfa_ioim_attach(struct bfa_fcpim_mod_s *fcpim, struct bfa_meminfo_s *minfo)
{
	struct bfa_ioim_s *ioim;
	struct bfa_ioim_sp_s *iosp;
	u16 i;
	u8 *snsinfo;
	u32 snsbufsz;

	/*
	 * claim memory first
	 */
	ioim = (struct bfa_ioim_s *) bfa_meminfo_kva(minfo);
	fcpim->ioim_arr = ioim;
	bfa_meminfo_kva(minfo) = (u8 *) (ioim + fcpim->num_ioim_reqs);

	iosp = (struct bfa_ioim_sp_s *) bfa_meminfo_kva(minfo);
	fcpim->ioim_sp_arr = iosp;
	bfa_meminfo_kva(minfo) = (u8 *) (iosp + fcpim->num_ioim_reqs);

	/*
	 * Claim DMA memory for per IO sense data.
	 */
	snsbufsz = fcpim->num_ioim_reqs * BFI_IOIM_SNSLEN;
	fcpim->snsbase.pa = bfa_meminfo_dma_phys(minfo);
	bfa_meminfo_dma_phys(minfo) += snsbufsz;

	fcpim->snsbase.kva = bfa_meminfo_dma_virt(minfo);
	bfa_meminfo_dma_virt(minfo) += snsbufsz;
	snsinfo = fcpim->snsbase.kva;
	/* tell the IOC where the sense buffer region starts */
	bfa_iocfc_set_snsbase(fcpim->bfa, fcpim->snsbase.pa);

	/*
	 * Initialize ioim free queues
	 */
	INIT_LIST_HEAD(&fcpim->ioim_free_q);
	INIT_LIST_HEAD(&fcpim->ioim_resfree_q);
	INIT_LIST_HEAD(&fcpim->ioim_comp_q);

	/* walk all three arrays in lock-step; iotag == array index */
	for (i = 0; i < fcpim->num_ioim_reqs;
	     i++, ioim++, iosp++, snsinfo += BFI_IOIM_SNSLEN) {
		/*
		 * initialize IOIM
		 */
		memset(ioim, 0, sizeof(struct bfa_ioim_s));
		ioim->iotag = i;
		ioim->bfa = fcpim->bfa;
		ioim->fcpim = fcpim;
		ioim->iosp = iosp;
		iosp->snsinfo = snsinfo;
		INIT_LIST_HEAD(&ioim->sgpg_q);
		bfa_reqq_winit(&ioim->iosp->reqq_wait,
			       bfa_ioim_qresume, ioim);
		bfa_sgpg_winit(&ioim->iosp->sgpg_wqe,
			       bfa_ioim_sgpg_alloced, ioim);
		bfa_sm_set_state(ioim, bfa_ioim_sm_uninit);

		list_add_tail(&ioim->qe, &fcpim->ioim_free_q);
	}
}
2659
/*
 * IO completion interrupt handler: map the firmware completion status to
 * a state-machine event and deliver it to the owning IOIM.
 */
void
bfa_ioim_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
{
	struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
	struct bfi_ioim_rsp_s *rsp = (struct bfi_ioim_rsp_s *) m;
	struct bfa_ioim_s *ioim;
	u16 iotag;
	enum bfa_ioim_event evt = BFA_IOIM_SM_COMP;

	iotag = be16_to_cpu(rsp->io_tag);

	ioim = BFA_IOIM_FROM_TAG(fcpim, iotag);
	bfa_assert(ioim->iotag == iotag);

	bfa_trc(ioim->bfa, ioim->iotag);
	bfa_trc(ioim->bfa, rsp->io_status);
	bfa_trc(ioim->bfa, rsp->reuse_io_tag);

	/* keep the response for __bfa_cb_ioim_comp() to unpack later */
	if (bfa_sm_cmp_state(ioim, bfa_ioim_sm_active))
		ioim->iosp->comp_rspmsg = *m;

	switch (rsp->io_status) {
	case BFI_IOIM_STS_OK:
		bfa_stats(ioim->itnim, iocomp_ok);
		/* reuse_io_tag == 0 means firmware still holds the tag */
		if (rsp->reuse_io_tag == 0)
			evt = BFA_IOIM_SM_DONE;
		else
			evt = BFA_IOIM_SM_COMP;
		break;

	case BFI_IOIM_STS_TIMEDOUT:
		bfa_stats(ioim->itnim, iocomp_timedout);
		/* fallthrough - a timed-out IO is completed as aborted */
	case BFI_IOIM_STS_ABORTED:
		rsp->io_status = BFI_IOIM_STS_ABORTED;
		bfa_stats(ioim->itnim, iocomp_aborted);
		if (rsp->reuse_io_tag == 0)
			evt = BFA_IOIM_SM_DONE;
		else
			evt = BFA_IOIM_SM_COMP;
		break;

	case BFI_IOIM_STS_PROTO_ERR:
		bfa_stats(ioim->itnim, iocom_proto_err);
		bfa_assert(rsp->reuse_io_tag);
		evt = BFA_IOIM_SM_COMP;
		break;

	case BFI_IOIM_STS_SQER_NEEDED:
		/* sequence-level error recovery requested by firmware */
		bfa_stats(ioim->itnim, iocom_sqer_needed);
		bfa_assert(rsp->reuse_io_tag == 0);
		evt = BFA_IOIM_SM_SQRETRY;
		break;

	case BFI_IOIM_STS_RES_FREE:
		bfa_stats(ioim->itnim, iocom_res_free);
		evt = BFA_IOIM_SM_FREE;
		break;

	case BFI_IOIM_STS_HOST_ABORTED:
		bfa_stats(ioim->itnim, iocom_hostabrts);
		/* stale abort completion for an earlier abort attempt */
		if (rsp->abort_tag != ioim->abort_tag) {
			bfa_trc(ioim->bfa, rsp->abort_tag);
			bfa_trc(ioim->bfa, ioim->abort_tag);
			return;
		}

		if (rsp->reuse_io_tag)
			evt = BFA_IOIM_SM_ABORT_COMP;
		else
			evt = BFA_IOIM_SM_ABORT_DONE;
		break;

	case BFI_IOIM_STS_UTAG:
		/* firmware did not recognize the tag */
		bfa_stats(ioim->itnim, iocom_utags);
		evt = BFA_IOIM_SM_COMP_UTAG;
		break;

	default:
		bfa_assert(0);
	}

	bfa_sm_send_event(ioim, evt);
}
2743
2744void
2745bfa_ioim_good_comp_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
2746{
2747 struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
2748 struct bfi_ioim_rsp_s *rsp = (struct bfi_ioim_rsp_s *) m;
2749 struct bfa_ioim_s *ioim;
2750 u16 iotag;
2751
Jing Huangba816ea2010-10-18 17:10:50 -07002752 iotag = be16_to_cpu(rsp->io_tag);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002753
2754 ioim = BFA_IOIM_FROM_TAG(fcpim, iotag);
2755 bfa_assert(ioim->iotag == iotag);
2756
2757 bfa_trc_fp(ioim->bfa, ioim->iotag);
2758 bfa_ioim_cb_profile_comp(fcpim, ioim);
2759
2760 bfa_sm_send_event(ioim, BFA_IOIM_SM_COMP_GOOD);
2761}
2762
/* Record the IO start timestamp (in jiffies) for latency profiling. */
void
bfa_ioim_profile_start(struct bfa_ioim_s *ioim)
{
	ioim->start_time = jiffies;
}
2768
2769void
2770bfa_ioim_profile_comp(struct bfa_ioim_s *ioim)
2771{
2772 u32 fcp_dl = bfa_cb_ioim_get_size(ioim->dio);
2773 u32 index = bfa_ioim_get_index(fcp_dl);
Jing Huang6a18b162010-10-18 17:08:54 -07002774 u64 end_time = jiffies;
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002775 struct bfa_itnim_latency_s *io_lat =
2776 &(ioim->itnim->ioprofile.io_latency);
2777 u32 val = (u32)(end_time - ioim->start_time);
2778
2779 bfa_itnim_ioprofile_update(ioim->itnim, index);
2780
2781 io_lat->count[index]++;
2782 io_lat->min[index] = (io_lat->min[index] < val) ?
2783 io_lat->min[index] : val;
2784 io_lat->max[index] = (io_lat->max[index] > val) ?
2785 io_lat->max[index] : val;
2786 io_lat->avg[index] += val;
2787}
/*
 * Called by itnim to clean up IO while going offline.
 * Clears any TM association before raising the CLEANUP event.
 */
void
bfa_ioim_cleanup(struct bfa_ioim_s *ioim)
{
	bfa_trc(ioim->bfa, ioim->iotag);
	bfa_stats(ioim->itnim, io_cleanups);

	/* not a TM-driven cleanup */
	ioim->iosp->tskim = NULL;
	bfa_sm_send_event(ioim, BFA_IOIM_SM_CLEANUP);
}
2800
/*
 * Clean up an IO on behalf of a task-management command; the tskim is
 * remembered so its worker count is decremented when cleanup finishes.
 */
void
bfa_ioim_cleanup_tm(struct bfa_ioim_s *ioim, struct bfa_tskim_s *tskim)
{
	bfa_trc(ioim->bfa, ioim->iotag);
	bfa_stats(ioim->itnim, io_tmaborts);

	ioim->iosp->tskim = tskim;
	bfa_sm_send_event(ioim, BFA_IOIM_SM_CLEANUP);
}
2810
/*
 * IOC failure handling: fail this IO with a hardware-failure event.
 */
void
bfa_ioim_iocdisable(struct bfa_ioim_s *ioim)
{
	bfa_trc(ioim->bfa, ioim->iotag);
	bfa_stats(ioim->itnim, io_iocdowns);
	bfa_sm_send_event(ioim, BFA_IOIM_SM_HWFAIL);
}
2821
/*
 * IO offline TOV popped. Fail the pending IO.
 */
void
bfa_ioim_tov(struct bfa_ioim_s *ioim)
{
	bfa_trc(ioim->bfa, ioim->iotag);
	bfa_sm_send_event(ioim, BFA_IOIM_SM_IOTOV);
}
2831
2832
/*
 * Allocate IOIM resource for initiator mode I/O request.
 * Returns NULL when all IO tags are in use.
 */
struct bfa_ioim_s *
bfa_ioim_alloc(struct bfa_s *bfa, struct bfad_ioim_s *dio,
		struct bfa_itnim_s *itnim, u16 nsges)
{
	struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
	struct bfa_ioim_s *ioim;

	/*
	 * allocate IOIM resource
	 */
	bfa_q_deq(&fcpim->ioim_free_q, &ioim);
	if (!ioim) {
		bfa_stats(itnim, no_iotags);
		return NULL;
	}

	/* bind the IO to its driver handle and target */
	ioim->dio = dio;
	ioim->itnim = itnim;
	ioim->nsges = nsges;
	ioim->nsgpgs = 0;

	bfa_stats(itnim, total_ios);
	fcpim->ios_active++;

	list_add_tail(&ioim->qe, &itnim->io_q);
	bfa_trc_fp(ioim->bfa, ioim->iotag);

	return ioim;
}
2865
2866void
2867bfa_ioim_free(struct bfa_ioim_s *ioim)
2868{
2869 struct bfa_fcpim_mod_s *fcpim = ioim->fcpim;
2870
2871 bfa_trc_fp(ioim->bfa, ioim->iotag);
2872 bfa_assert_fp(bfa_sm_cmp_state(ioim, bfa_ioim_sm_uninit));
2873
2874 bfa_assert_fp(list_empty(&ioim->sgpg_q) ||
2875 (ioim->nsges > BFI_SGE_INLINE));
2876
2877 if (ioim->nsgpgs > 0)
2878 bfa_sgpg_mfree(ioim->bfa, &ioim->sgpg_q, ioim->nsgpgs);
2879
2880 bfa_stats(ioim->itnim, io_comps);
2881 fcpim->ios_active--;
2882
2883 list_del(&ioim->qe);
2884 list_add_tail(&ioim->qe, &fcpim->ioim_free_q);
2885}
2886
/*
 * Start an allocated IO: pick the request queue and kick the state
 * machine with the START event.
 */
void
bfa_ioim_start(struct bfa_ioim_s *ioim)
{
	bfa_trc_fp(ioim->bfa, ioim->iotag);

	bfa_ioim_cb_profile_start(ioim->fcpim, ioim);

	/*
	 * Obtain the queue over which this request has to be issued
	 */
	ioim->reqq = bfa_fcpim_ioredirect_enabled(ioim->bfa) ?
			bfa_cb_ioim_get_reqq(ioim->dio) :
			bfa_itnim_get_reqq(ioim);

	bfa_sm_send_event(ioim, BFA_IOIM_SM_START);
}
2903
/*
 * Driver I/O abort request.
 * Returns BFA_STATUS_FAILED if the IO is not in an abortable state.
 */
bfa_status_t
bfa_ioim_abort(struct bfa_ioim_s *ioim)
{

	bfa_trc(ioim->bfa, ioim->iotag);

	if (!bfa_ioim_is_abortable(ioim))
		return BFA_STATUS_FAILED;

	bfa_stats(ioim->itnim, io_aborts);
	bfa_sm_send_event(ioim, BFA_IOIM_SM_ABORT);

	return BFA_STATUS_OK;
}
2921
2922
/*
 * BFA TSKIM state machine functions
 */

/*
 * Task management command beginning state.
 */
static void
bfa_tskim_sm_uninit(struct bfa_tskim_s *tskim, enum bfa_tskim_event event)
{
	bfa_trc(tskim->bfa, event);

	switch (event) {
	case BFA_TSKIM_SM_START:
		bfa_sm_set_state(tskim, bfa_tskim_sm_active);
		/* capture the IO requests that fall in this TM's scope */
		bfa_tskim_gather_ios(tskim);

		/*
		 * If device is offline, do not send TM on wire. Just cleanup
		 * any pending IO requests and complete TM request.
		 */
		if (!bfa_itnim_is_online(tskim->itnim)) {
			bfa_sm_set_state(tskim, bfa_tskim_sm_iocleanup);
			tskim->tsk_status = BFI_TSKIM_STS_OK;
			bfa_tskim_cleanup_ios(tskim);
			return;
		}

		/* request CQ full - wait for space before sending the TM */
		if (!bfa_tskim_send(tskim)) {
			bfa_sm_set_state(tskim, bfa_tskim_sm_qfull);
			bfa_stats(tskim->itnim, tm_qwait);
			bfa_reqq_wait(tskim->bfa, tskim->itnim->reqq,
					&tskim->reqq_wait);
		}
		break;

	default:
		bfa_sm_fault(tskim->bfa, event);
	}
}
2963
/*
 * TM command is active, awaiting completion from firmware to
 * cleanup IO requests in TM scope.
 */
static void
bfa_tskim_sm_active(struct bfa_tskim_s *tskim, enum bfa_tskim_event event)
{
	bfa_trc(tskim->bfa, event);

	switch (event) {
	case BFA_TSKIM_SM_DONE:
		/* Firmware completed the TM; clean up I/Os in its scope */
		bfa_sm_set_state(tskim, bfa_tskim_sm_iocleanup);
		bfa_tskim_cleanup_ios(tskim);
		break;

	case BFA_TSKIM_SM_CLEANUP:
		/*
		 * ITN going offline: abort the on-wire TM. If the abort
		 * message cannot be queued now, wait for request CQ space.
		 */
		bfa_sm_set_state(tskim, bfa_tskim_sm_cleanup);
		if (!bfa_tskim_send_abort(tskim)) {
			bfa_sm_set_state(tskim, bfa_tskim_sm_cleanup_qfull);
			bfa_stats(tskim->itnim, tm_qwait);
			bfa_reqq_wait(tskim->bfa, tskim->itnim->reqq,
				&tskim->reqq_wait);
		}
		break;

	case BFA_TSKIM_SM_HWFAIL:
		/* IOC failure: fail scoped I/Os locally, report failure */
		bfa_sm_set_state(tskim, bfa_tskim_sm_hcb);
		bfa_tskim_iocdisable_ios(tskim);
		bfa_tskim_qcomp(tskim, __bfa_cb_tskim_failed);
		break;

	default:
		bfa_sm_fault(tskim->bfa, event);
	}
}
3000
Jing Huang5fbe25c2010-10-18 17:17:23 -07003001/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07003002 * An active TM is being cleaned up since ITN is offline. Awaiting cleanup
3003 * completion event from firmware.
3004 */
static void
bfa_tskim_sm_cleanup(struct bfa_tskim_s *tskim, enum bfa_tskim_event event)
{
	bfa_trc(tskim->bfa, event);

	switch (event) {
	case BFA_TSKIM_SM_DONE:
		/*
		 * Ignore and wait for ABORT completion from firmware.
		 */
		break;

	case BFA_TSKIM_SM_CLEANUP_DONE:
		/* Abort acknowledged by firmware; clean up scoped I/Os */
		bfa_sm_set_state(tskim, bfa_tskim_sm_iocleanup);
		bfa_tskim_cleanup_ios(tskim);
		break;

	case BFA_TSKIM_SM_HWFAIL:
		/* IOC failure: fail scoped I/Os locally, report failure */
		bfa_sm_set_state(tskim, bfa_tskim_sm_hcb);
		bfa_tskim_iocdisable_ios(tskim);
		bfa_tskim_qcomp(tskim, __bfa_cb_tskim_failed);
		break;

	default:
		bfa_sm_fault(tskim->bfa, event);
	}
}
3032
/*
 * Waiting for all I/Os within the TM scope to finish cleanup.
 */
static void
bfa_tskim_sm_iocleanup(struct bfa_tskim_s *tskim, enum bfa_tskim_event event)
{
	bfa_trc(tskim->bfa, event);

	switch (event) {
	case BFA_TSKIM_SM_IOS_DONE:
		/* All scoped I/Os are done; schedule the done callback */
		bfa_sm_set_state(tskim, bfa_tskim_sm_hcb);
		bfa_tskim_qcomp(tskim, __bfa_cb_tskim_done);
		break;

	case BFA_TSKIM_SM_CLEANUP:
		/*
		 * Ignore, TM command completed on wire.
		 * Notify TM completion on IO cleanup completion.
		 */
		break;

	case BFA_TSKIM_SM_HWFAIL:
		/* IOC failure: fail scoped I/Os locally, report failure */
		bfa_sm_set_state(tskim, bfa_tskim_sm_hcb);
		bfa_tskim_iocdisable_ios(tskim);
		bfa_tskim_qcomp(tskim, __bfa_cb_tskim_failed);
		break;

	default:
		bfa_sm_fault(tskim->bfa, event);
	}
}
3061
Jing Huang5fbe25c2010-10-18 17:17:23 -07003062/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07003063 * Task management command is waiting for room in request CQ
3064 */
static void
bfa_tskim_sm_qfull(struct bfa_tskim_s *tskim, enum bfa_tskim_event event)
{
	bfa_trc(tskim->bfa, event);

	switch (event) {
	case BFA_TSKIM_SM_QRESUME:
		/* Request CQ has room again; send the TM now */
		bfa_sm_set_state(tskim, bfa_tskim_sm_active);
		bfa_tskim_send(tskim);
		break;

	case BFA_TSKIM_SM_CLEANUP:
		/*
		 * No need to send TM on wire since ITN is offline.
		 */
		bfa_sm_set_state(tskim, bfa_tskim_sm_iocleanup);
		bfa_reqq_wcancel(&tskim->reqq_wait);
		bfa_tskim_cleanup_ios(tskim);
		break;

	case BFA_TSKIM_SM_HWFAIL:
		/* IOC failure: cancel the CQ wait and fail I/Os locally */
		bfa_sm_set_state(tskim, bfa_tskim_sm_hcb);
		bfa_reqq_wcancel(&tskim->reqq_wait);
		bfa_tskim_iocdisable_ios(tskim);
		bfa_tskim_qcomp(tskim, __bfa_cb_tskim_failed);
		break;

	default:
		bfa_sm_fault(tskim->bfa, event);
	}
}
3096
Jing Huang5fbe25c2010-10-18 17:17:23 -07003097/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07003098 * Task management command is active, awaiting for room in request CQ
3099 * to send clean up request.
3100 */
static void
bfa_tskim_sm_cleanup_qfull(struct bfa_tskim_s *tskim,
		enum bfa_tskim_event event)
{
	bfa_trc(tskim->bfa, event);

	switch (event) {
	case BFA_TSKIM_SM_DONE:
		/* TM completed while waiting to queue its abort */
		bfa_reqq_wcancel(&tskim->reqq_wait);
		/*
		 *
		 * Fall through !!!
		 */

	case BFA_TSKIM_SM_QRESUME:
		/* CQ has room: issue the abort for the active TM */
		bfa_sm_set_state(tskim, bfa_tskim_sm_cleanup);
		bfa_tskim_send_abort(tskim);
		break;

	case BFA_TSKIM_SM_HWFAIL:
		/* IOC failure: cancel the CQ wait and fail I/Os locally */
		bfa_sm_set_state(tskim, bfa_tskim_sm_hcb);
		bfa_reqq_wcancel(&tskim->reqq_wait);
		bfa_tskim_iocdisable_ios(tskim);
		bfa_tskim_qcomp(tskim, __bfa_cb_tskim_failed);
		break;

	default:
		bfa_sm_fault(tskim->bfa, event);
	}
}
3131
Jing Huang5fbe25c2010-10-18 17:17:23 -07003132/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07003133 * BFA callback is pending
3134 */
static void
bfa_tskim_sm_hcb(struct bfa_tskim_s *tskim, enum bfa_tskim_event event)
{
	bfa_trc(tskim->bfa, event);

	switch (event) {
	case BFA_TSKIM_SM_HCB:
		/* Callback delivered; recycle the TM instance */
		bfa_sm_set_state(tskim, bfa_tskim_sm_uninit);
		bfa_tskim_free(tskim);
		break;

	case BFA_TSKIM_SM_CLEANUP:
		/* Already completing; just notify cleanup requester */
		bfa_tskim_notify_comp(tskim);
		break;

	case BFA_TSKIM_SM_HWFAIL:
		/* Nothing more to do; callback already scheduled */
		break;

	default:
		bfa_sm_fault(tskim->bfa, event);
	}
}
3157
3158
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07003159static void
3160__bfa_cb_tskim_done(void *cbarg, bfa_boolean_t complete)
3161{
3162 struct bfa_tskim_s *tskim = cbarg;
3163
3164 if (!complete) {
3165 bfa_sm_send_event(tskim, BFA_TSKIM_SM_HCB);
3166 return;
3167 }
3168
3169 bfa_stats(tskim->itnim, tm_success);
3170 bfa_cb_tskim_done(tskim->bfa->bfad, tskim->dtsk, tskim->tsk_status);
3171}
3172
3173static void
3174__bfa_cb_tskim_failed(void *cbarg, bfa_boolean_t complete)
3175{
3176 struct bfa_tskim_s *tskim = cbarg;
3177
3178 if (!complete) {
3179 bfa_sm_send_event(tskim, BFA_TSKIM_SM_HCB);
3180 return;
3181 }
3182
3183 bfa_stats(tskim->itnim, tm_failures);
3184 bfa_cb_tskim_done(tskim->bfa->bfad, tskim->dtsk,
3185 BFI_TSKIM_STS_FAILED);
3186}
3187
3188static bfa_boolean_t
3189bfa_tskim_match_scope(struct bfa_tskim_s *tskim, lun_t lun)
3190{
3191 switch (tskim->tm_cmnd) {
3192 case FCP_TM_TARGET_RESET:
3193 return BFA_TRUE;
3194
3195 case FCP_TM_ABORT_TASK_SET:
3196 case FCP_TM_CLEAR_TASK_SET:
3197 case FCP_TM_LUN_RESET:
3198 case FCP_TM_CLEAR_ACA:
3199 return (tskim->lun == lun);
3200
3201 default:
3202 bfa_assert(0);
3203 }
3204
3205 return BFA_FALSE;
3206}
3207
Jing Huang5fbe25c2010-10-18 17:17:23 -07003208/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07003209 * Gather affected IO requests and task management commands.
3210 */
3211static void
3212bfa_tskim_gather_ios(struct bfa_tskim_s *tskim)
3213{
3214 struct bfa_itnim_s *itnim = tskim->itnim;
3215 struct bfa_ioim_s *ioim;
3216 struct list_head *qe, *qen;
3217
3218 INIT_LIST_HEAD(&tskim->io_q);
3219
Jing Huang5fbe25c2010-10-18 17:17:23 -07003220 /*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07003221 * Gather any active IO requests first.
3222 */
3223 list_for_each_safe(qe, qen, &itnim->io_q) {
3224 ioim = (struct bfa_ioim_s *) qe;
3225 if (bfa_tskim_match_scope
3226 (tskim, bfa_cb_ioim_get_lun(ioim->dio))) {
3227 list_del(&ioim->qe);
3228 list_add_tail(&ioim->qe, &tskim->io_q);
3229 }
3230 }
3231
Jing Huang5fbe25c2010-10-18 17:17:23 -07003232 /*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07003233 * Failback any pending IO requests immediately.
3234 */
3235 list_for_each_safe(qe, qen, &itnim->pending_q) {
3236 ioim = (struct bfa_ioim_s *) qe;
3237 if (bfa_tskim_match_scope
3238 (tskim, bfa_cb_ioim_get_lun(ioim->dio))) {
3239 list_del(&ioim->qe);
3240 list_add_tail(&ioim->qe, &ioim->fcpim->ioim_comp_q);
3241 bfa_ioim_tov(ioim);
3242 }
3243 }
3244}
3245
Jing Huang5fbe25c2010-10-18 17:17:23 -07003246/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07003247 * IO cleanup completion
3248 */
static void
bfa_tskim_cleanp_comp(void *tskim_cbarg)
{
	struct bfa_tskim_s *tskim = tskim_cbarg;

	/* All I/Os in TM scope finished cleanup; advance the TM SM */
	bfa_stats(tskim->itnim, tm_io_comps);
	bfa_sm_send_event(tskim, BFA_TSKIM_SM_IOS_DONE);
}
3257
Jing Huang5fbe25c2010-10-18 17:17:23 -07003258/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07003259 * Gather affected IO requests and task management commands.
3260 */
3261static void
3262bfa_tskim_cleanup_ios(struct bfa_tskim_s *tskim)
3263{
3264 struct bfa_ioim_s *ioim;
3265 struct list_head *qe, *qen;
3266
3267 bfa_wc_init(&tskim->wc, bfa_tskim_cleanp_comp, tskim);
3268
3269 list_for_each_safe(qe, qen, &tskim->io_q) {
3270 ioim = (struct bfa_ioim_s *) qe;
3271 bfa_wc_up(&tskim->wc);
3272 bfa_ioim_cleanup_tm(ioim, tskim);
3273 }
3274
3275 bfa_wc_wait(&tskim->wc);
3276}
3277
Jing Huang5fbe25c2010-10-18 17:17:23 -07003278/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07003279 * Send task management request to firmware.
3280 */
3281static bfa_boolean_t
3282bfa_tskim_send(struct bfa_tskim_s *tskim)
3283{
3284 struct bfa_itnim_s *itnim = tskim->itnim;
3285 struct bfi_tskim_req_s *m;
3286
Jing Huang5fbe25c2010-10-18 17:17:23 -07003287 /*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07003288 * check for room in queue to send request now
3289 */
3290 m = bfa_reqq_next(tskim->bfa, itnim->reqq);
3291 if (!m)
3292 return BFA_FALSE;
3293
Jing Huang5fbe25c2010-10-18 17:17:23 -07003294 /*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07003295 * build i/o request message next
3296 */
3297 bfi_h2i_set(m->mh, BFI_MC_TSKIM, BFI_TSKIM_H2I_TM_REQ,
3298 bfa_lpuid(tskim->bfa));
3299
Jing Huangba816ea2010-10-18 17:10:50 -07003300 m->tsk_tag = cpu_to_be16(tskim->tsk_tag);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07003301 m->itn_fhdl = tskim->itnim->rport->fw_handle;
3302 m->t_secs = tskim->tsecs;
3303 m->lun = tskim->lun;
3304 m->tm_flags = tskim->tm_cmnd;
3305
Jing Huang5fbe25c2010-10-18 17:17:23 -07003306 /*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07003307 * queue I/O message to firmware
3308 */
3309 bfa_reqq_produce(tskim->bfa, itnim->reqq);
3310 return BFA_TRUE;
3311}
3312
Jing Huang5fbe25c2010-10-18 17:17:23 -07003313/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07003314 * Send abort request to cleanup an active TM to firmware.
3315 */
3316static bfa_boolean_t
3317bfa_tskim_send_abort(struct bfa_tskim_s *tskim)
3318{
3319 struct bfa_itnim_s *itnim = tskim->itnim;
3320 struct bfi_tskim_abortreq_s *m;
3321
Jing Huang5fbe25c2010-10-18 17:17:23 -07003322 /*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07003323 * check for room in queue to send request now
3324 */
3325 m = bfa_reqq_next(tskim->bfa, itnim->reqq);
3326 if (!m)
3327 return BFA_FALSE;
3328
Jing Huang5fbe25c2010-10-18 17:17:23 -07003329 /*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07003330 * build i/o request message next
3331 */
3332 bfi_h2i_set(m->mh, BFI_MC_TSKIM, BFI_TSKIM_H2I_ABORT_REQ,
3333 bfa_lpuid(tskim->bfa));
3334
Jing Huangba816ea2010-10-18 17:10:50 -07003335 m->tsk_tag = cpu_to_be16(tskim->tsk_tag);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07003336
Jing Huang5fbe25c2010-10-18 17:17:23 -07003337 /*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07003338 * queue I/O message to firmware
3339 */
3340 bfa_reqq_produce(tskim->bfa, itnim->reqq);
3341 return BFA_TRUE;
3342}
3343
Jing Huang5fbe25c2010-10-18 17:17:23 -07003344/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07003345 * Call to resume task management cmnd waiting for room in request queue.
3346 */
static void
bfa_tskim_qresume(void *cbarg)
{
	struct bfa_tskim_s *tskim = cbarg;

	/* Request CQ space is available again; resume the TM command */
	bfa_stats(tskim->itnim, tm_qresumes);
	bfa_sm_send_event(tskim, BFA_TSKIM_SM_QRESUME);
}
3355
/*
 * Cleanup IOs associated with a task management command on IOC failures.
 */
3359static void
3360bfa_tskim_iocdisable_ios(struct bfa_tskim_s *tskim)
3361{
3362 struct bfa_ioim_s *ioim;
3363 struct list_head *qe, *qen;
3364
3365 list_for_each_safe(qe, qen, &tskim->io_q) {
3366 ioim = (struct bfa_ioim_s *) qe;
3367 bfa_ioim_iocdisable(ioim);
3368 }
3369}
3370
3371
Jing Huang5fbe25c2010-10-18 17:17:23 -07003372/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07003373 * Notification on completions from related ioim.
3374 */
void
bfa_tskim_iodone(struct bfa_tskim_s *tskim)
{
	/* One scoped I/O finished cleanup; drop the wait count */
	bfa_wc_down(&tskim->wc);
}
3380
Jing Huang5fbe25c2010-10-18 17:17:23 -07003381/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07003382 * Handle IOC h/w failure notification from itnim.
3383 */
void
bfa_tskim_iocdisable(struct bfa_tskim_s *tskim)
{
	/* IOC h/w failure: suppress cleanup notification, fail the TM */
	tskim->notify = BFA_FALSE;
	bfa_stats(tskim->itnim, tm_iocdowns);
	bfa_sm_send_event(tskim, BFA_TSKIM_SM_HWFAIL);
}
3391
Jing Huang5fbe25c2010-10-18 17:17:23 -07003392/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07003393 * Cleanup TM command and associated IOs as part of ITNIM offline.
3394 */
void
bfa_tskim_cleanup(struct bfa_tskim_s *tskim)
{
	/* ITNIM offline: request cleanup and ask for completion notify */
	tskim->notify = BFA_TRUE;
	bfa_stats(tskim->itnim, tm_cleanups);
	bfa_sm_send_event(tskim, BFA_TSKIM_SM_CLEANUP);
}
3402
Jing Huang5fbe25c2010-10-18 17:17:23 -07003403/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07003404 * Memory allocation and initialization.
3405 */
3406void
3407bfa_tskim_attach(struct bfa_fcpim_mod_s *fcpim, struct bfa_meminfo_s *minfo)
3408{
3409 struct bfa_tskim_s *tskim;
3410 u16 i;
3411
3412 INIT_LIST_HEAD(&fcpim->tskim_free_q);
3413
3414 tskim = (struct bfa_tskim_s *) bfa_meminfo_kva(minfo);
3415 fcpim->tskim_arr = tskim;
3416
3417 for (i = 0; i < fcpim->num_tskim_reqs; i++, tskim++) {
3418 /*
3419 * initialize TSKIM
3420 */
Jing Huang6a18b162010-10-18 17:08:54 -07003421 memset(tskim, 0, sizeof(struct bfa_tskim_s));
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07003422 tskim->tsk_tag = i;
3423 tskim->bfa = fcpim->bfa;
3424 tskim->fcpim = fcpim;
3425 tskim->notify = BFA_FALSE;
3426 bfa_reqq_winit(&tskim->reqq_wait, bfa_tskim_qresume,
3427 tskim);
3428 bfa_sm_set_state(tskim, bfa_tskim_sm_uninit);
3429
3430 list_add_tail(&tskim->qe, &fcpim->tskim_free_q);
3431 }
3432
3433 bfa_meminfo_kva(minfo) = (u8 *) tskim;
3434}
3435
3436void
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07003437bfa_tskim_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
3438{
3439 struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
3440 struct bfi_tskim_rsp_s *rsp = (struct bfi_tskim_rsp_s *) m;
3441 struct bfa_tskim_s *tskim;
Jing Huangba816ea2010-10-18 17:10:50 -07003442 u16 tsk_tag = be16_to_cpu(rsp->tsk_tag);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07003443
3444 tskim = BFA_TSKIM_FROM_TAG(fcpim, tsk_tag);
3445 bfa_assert(tskim->tsk_tag == tsk_tag);
3446
3447 tskim->tsk_status = rsp->tsk_status;
3448
Jing Huang5fbe25c2010-10-18 17:17:23 -07003449 /*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07003450 * Firmware sends BFI_TSKIM_STS_ABORTED status for abort
3451 * requests. All other statuses are for normal completions.
3452 */
3453 if (rsp->tsk_status == BFI_TSKIM_STS_ABORTED) {
3454 bfa_stats(tskim->itnim, tm_cleanup_comps);
3455 bfa_sm_send_event(tskim, BFA_TSKIM_SM_CLEANUP_DONE);
3456 } else {
3457 bfa_stats(tskim->itnim, tm_fw_rsps);
3458 bfa_sm_send_event(tskim, BFA_TSKIM_SM_DONE);
3459 }
3460}
3461
3462
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07003463struct bfa_tskim_s *
3464bfa_tskim_alloc(struct bfa_s *bfa, struct bfad_tskim_s *dtsk)
3465{
3466 struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
3467 struct bfa_tskim_s *tskim;
3468
3469 bfa_q_deq(&fcpim->tskim_free_q, &tskim);
3470
3471 if (tskim)
3472 tskim->dtsk = dtsk;
3473
3474 return tskim;
3475}
3476
void
bfa_tskim_free(struct bfa_tskim_s *tskim)
{
	/* Must still be linked on its itnim's task queue */
	bfa_assert(bfa_q_is_on_q_func(&tskim->itnim->tsk_q, &tskim->qe));
	list_del(&tskim->qe);
	list_add_tail(&tskim->qe, &tskim->fcpim->tskim_free_q);
}
3484
Jing Huang5fbe25c2010-10-18 17:17:23 -07003485/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07003486 * Start a task management command.
3487 *
3488 * @param[in] tskim BFA task management command instance
3489 * @param[in] itnim i-t nexus for the task management command
3490 * @param[in] lun lun, if applicable
3491 * @param[in] tm_cmnd Task management command code.
3492 * @param[in] t_secs Timeout in seconds
3493 *
3494 * @return None.
3495 */
void
bfa_tskim_start(struct bfa_tskim_s *tskim, struct bfa_itnim_s *itnim, lun_t lun,
		enum fcp_tm_cmnd tm_cmnd, u8 tsecs)
{
	/* Record the TM parameters on the instance before starting */
	tskim->itnim = itnim;
	tskim->lun = lun;
	tskim->tm_cmnd = tm_cmnd;
	tskim->tsecs = tsecs;
	tskim->notify = BFA_FALSE;
	bfa_stats(itnim, tm_cmnds);

	/* Track the TM on the i-t nexus and kick its state machine */
	list_add_tail(&tskim->qe, &itnim->tsk_q);
	bfa_sm_send_event(tskim, BFA_TSKIM_SM_START);
}