blob: eb14fd6193a01511026e529a823aaeb94e4766d3 [file] [log] [blame]
Jing Huang7725ccf2009-09-23 17:46:15 -07001/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
Jing Huang7725ccf2009-09-23 17:46:15 -07003 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
Maggie Zhangf16a1752010-12-09 19:12:32 -080018#include "bfad_drv.h"
Krishna Gudipatia36c61f2010-09-15 11:50:55 -070019#include "bfa_modules.h"
Jing Huang7725ccf2009-09-23 17:46:15 -070020
21BFA_TRC_FILE(HAL, FCPIM);
Jing Huang7725ccf2009-09-23 17:46:15 -070022
/*
 * BFA ITNIM Related definitions
 */
static void bfa_itnim_update_del_itn_stats(struct bfa_itnim_s *itnim);

/* Map an itnim tag to its slot in the fcpim itnim array. */
#define BFA_ITNIM_FROM_TAG(_fcpim, _tag)				\
	(((_fcpim)->itnim_arr + ((_tag) & ((_fcpim)->num_itnims - 1))))

/* Link an itnim onto the fcpim active list. */
#define bfa_fcpim_additn(__itnim)					\
	list_add_tail(&(__itnim)->qe, &(__itnim)->fcpim->itnim_q)

/*
 * Unlink an itnim from the fcpim active list after folding its stats
 * into the module-level deleted-itnim counters; all of its IO queues
 * must already be empty.
 */
#define bfa_fcpim_delitn(__itnim)	do {				\
	WARN_ON(!bfa_q_is_on_q(&(__itnim)->fcpim->itnim_q, __itnim));	\
	bfa_itnim_update_del_itn_stats(__itnim);			\
	list_del(&(__itnim)->qe);					\
	WARN_ON(!list_empty(&(__itnim)->io_q));				\
	WARN_ON(!list_empty(&(__itnim)->io_cleanup_q));			\
	WARN_ON(!list_empty(&(__itnim)->pending_q));			\
} while (0)
41
/*
 * Notify itnim online: call the FCS handler directly when FCS drives
 * this bfa instance, otherwise defer through the hcb callback queue.
 */
#define bfa_itnim_online_cb(__itnim) do {				\
	if ((__itnim)->bfa->fcs)					\
		bfa_cb_itnim_online((__itnim)->ditn);			\
	else {								\
		bfa_cb_queue((__itnim)->bfa, &(__itnim)->hcb_qe,	\
		__bfa_cb_itnim_online, (__itnim));			\
	}								\
} while (0)

/* Notify itnim offline: direct FCS call or deferred hcb callback. */
#define bfa_itnim_offline_cb(__itnim) do {				\
	if ((__itnim)->bfa->fcs)					\
		bfa_cb_itnim_offline((__itnim)->ditn);			\
	else {								\
		bfa_cb_queue((__itnim)->bfa, &(__itnim)->hcb_qe,	\
		__bfa_cb_itnim_offline, (__itnim));			\
	}								\
} while (0)

/* Notify second-level error recovery: direct FCS call or deferred hcb. */
#define bfa_itnim_sler_cb(__itnim) do {					\
	if ((__itnim)->bfa->fcs)					\
		bfa_cb_itnim_sler((__itnim)->ditn);			\
	else {								\
		bfa_cb_queue((__itnim)->bfa, &(__itnim)->hcb_qe,	\
		__bfa_cb_itnim_sler, (__itnim));			\
	}								\
} while (0)
68
/*
 * itnim state machine events
 */
enum bfa_itnim_event {
	BFA_ITNIM_SM_CREATE = 1,	/* itnim is created */
	BFA_ITNIM_SM_ONLINE = 2,	/* itnim is online */
	BFA_ITNIM_SM_OFFLINE = 3,	/* itnim is offline */
	BFA_ITNIM_SM_FWRSP = 4,		/* firmware response */
	BFA_ITNIM_SM_DELETE = 5,	/* deleting an existing itnim */
	BFA_ITNIM_SM_CLEANUP = 6,	/* IO cleanup completion */
	BFA_ITNIM_SM_SLER = 7,		/* second level error recovery */
	BFA_ITNIM_SM_HWFAIL = 8,	/* IOC h/w failure event */
	BFA_ITNIM_SM_QRESUME = 9,	/* queue space available */
};
83
/*
 * BFA IOIM related definitions
 */

/* Move an IO from whatever queue it is on to the fcpim completion queue. */
#define bfa_ioim_move_to_comp_q(__ioim) do {				\
	list_del(&(__ioim)->qe);					\
	list_add_tail(&(__ioim)->qe, &(__ioim)->fcpim->ioim_comp_q);	\
} while (0)


/* Invoke the IO-profiling completion hook, when one is registered. */
#define bfa_ioim_cb_profile_comp(__fcpim, __ioim) do {			\
	if ((__fcpim)->profile_comp)					\
		(__fcpim)->profile_comp(__ioim);			\
} while (0)

/* Invoke the IO-profiling start hook, when one is registered. */
#define bfa_ioim_cb_profile_start(__fcpim, __ioim) do {			\
	if ((__fcpim)->profile_start)					\
		(__fcpim)->profile_start(__ioim);			\
} while (0)
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700102
/*
 * IO state machine events
 */
enum bfa_ioim_event {
	BFA_IOIM_SM_START = 1,		/* io start request from host */
	BFA_IOIM_SM_COMP_GOOD = 2,	/* io good comp, resource free */
	BFA_IOIM_SM_COMP = 3,		/* io comp, resource is free */
	BFA_IOIM_SM_COMP_UTAG = 4,	/* io comp, resource is free */
	BFA_IOIM_SM_DONE = 5,		/* io comp, resource not free */
	BFA_IOIM_SM_FREE = 6,		/* io resource is freed */
	BFA_IOIM_SM_ABORT = 7,		/* abort request from scsi stack */
	BFA_IOIM_SM_ABORT_COMP = 8,	/* abort from f/w */
	BFA_IOIM_SM_ABORT_DONE = 9,	/* abort completion from f/w */
	BFA_IOIM_SM_QRESUME = 10,	/* CQ space available to queue IO */
	BFA_IOIM_SM_SGALLOCED = 11,	/* SG page allocation successful */
	BFA_IOIM_SM_SQRETRY = 12,	/* sequence recovery retry */
	BFA_IOIM_SM_HCB = 13,		/* bfa callback complete */
	BFA_IOIM_SM_CLEANUP = 14,	/* IO cleanup from itnim */
	BFA_IOIM_SM_TMSTART = 15,	/* IO cleanup from tskim */
	BFA_IOIM_SM_TMDONE = 16,	/* IO cleanup from tskim */
	BFA_IOIM_SM_HWFAIL = 17,	/* IOC h/w failure event */
	BFA_IOIM_SM_IOTOV = 18,		/* ITN offline TOV */
};
126
127
/*
 * BFA TSKIM related definitions
 */

/*
 * task management completion handling: queue the completion callback
 * and notify the owning itnim that this TM command is done.
 */
#define bfa_tskim_qcomp(__tskim, __cbfn) do {				\
	bfa_cb_queue((__tskim)->bfa, &(__tskim)->hcb_qe, __cbfn, (__tskim));\
	bfa_tskim_notify_comp(__tskim);					\
} while (0)

/* Tell the itnim a TM finished, but only when notification was requested. */
#define bfa_tskim_notify_comp(__tskim) do {				\
	if ((__tskim)->notify)						\
		bfa_itnim_tskdone((__tskim)->itnim);			\
} while (0)


/* TSKIM state machine events (note: value 4 is intentionally unused). */
enum bfa_tskim_event {
	BFA_TSKIM_SM_START = 1,		/* TM command start */
	BFA_TSKIM_SM_DONE = 2,		/* TM completion */
	BFA_TSKIM_SM_QRESUME = 3,	/* resume after qfull */
	BFA_TSKIM_SM_HWFAIL = 5,	/* IOC h/w failure event */
	BFA_TSKIM_SM_HCB = 6,		/* BFA callback completion */
	BFA_TSKIM_SM_IOS_DONE = 7,	/* IO and sub TM completions */
	BFA_TSKIM_SM_CLEANUP = 8,	/* TM cleanup on ITN offline */
	BFA_TSKIM_SM_CLEANUP_DONE = 9,	/* TM abort completion */
};
156
Jing Huang5fbe25c2010-10-18 17:17:23 -0700157/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700158 * forward declaration for BFA ITNIM functions
159 */
160static void bfa_itnim_iocdisable_cleanup(struct bfa_itnim_s *itnim);
161static bfa_boolean_t bfa_itnim_send_fwcreate(struct bfa_itnim_s *itnim);
162static bfa_boolean_t bfa_itnim_send_fwdelete(struct bfa_itnim_s *itnim);
163static void bfa_itnim_cleanp_comp(void *itnim_cbarg);
164static void bfa_itnim_cleanup(struct bfa_itnim_s *itnim);
165static void __bfa_cb_itnim_online(void *cbarg, bfa_boolean_t complete);
166static void __bfa_cb_itnim_offline(void *cbarg, bfa_boolean_t complete);
167static void __bfa_cb_itnim_sler(void *cbarg, bfa_boolean_t complete);
168static void bfa_itnim_iotov_online(struct bfa_itnim_s *itnim);
169static void bfa_itnim_iotov_cleanup(struct bfa_itnim_s *itnim);
170static void bfa_itnim_iotov(void *itnim_arg);
171static void bfa_itnim_iotov_start(struct bfa_itnim_s *itnim);
172static void bfa_itnim_iotov_stop(struct bfa_itnim_s *itnim);
173static void bfa_itnim_iotov_delete(struct bfa_itnim_s *itnim);
174
Jing Huang5fbe25c2010-10-18 17:17:23 -0700175/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700176 * forward declaration of ITNIM state machine
177 */
178static void bfa_itnim_sm_uninit(struct bfa_itnim_s *itnim,
179 enum bfa_itnim_event event);
180static void bfa_itnim_sm_created(struct bfa_itnim_s *itnim,
181 enum bfa_itnim_event event);
182static void bfa_itnim_sm_fwcreate(struct bfa_itnim_s *itnim,
183 enum bfa_itnim_event event);
184static void bfa_itnim_sm_delete_pending(struct bfa_itnim_s *itnim,
185 enum bfa_itnim_event event);
186static void bfa_itnim_sm_online(struct bfa_itnim_s *itnim,
187 enum bfa_itnim_event event);
188static void bfa_itnim_sm_sler(struct bfa_itnim_s *itnim,
189 enum bfa_itnim_event event);
190static void bfa_itnim_sm_cleanup_offline(struct bfa_itnim_s *itnim,
191 enum bfa_itnim_event event);
192static void bfa_itnim_sm_cleanup_delete(struct bfa_itnim_s *itnim,
193 enum bfa_itnim_event event);
194static void bfa_itnim_sm_fwdelete(struct bfa_itnim_s *itnim,
195 enum bfa_itnim_event event);
196static void bfa_itnim_sm_offline(struct bfa_itnim_s *itnim,
197 enum bfa_itnim_event event);
198static void bfa_itnim_sm_iocdisable(struct bfa_itnim_s *itnim,
199 enum bfa_itnim_event event);
200static void bfa_itnim_sm_deleting(struct bfa_itnim_s *itnim,
201 enum bfa_itnim_event event);
202static void bfa_itnim_sm_fwcreate_qfull(struct bfa_itnim_s *itnim,
203 enum bfa_itnim_event event);
204static void bfa_itnim_sm_fwdelete_qfull(struct bfa_itnim_s *itnim,
205 enum bfa_itnim_event event);
206static void bfa_itnim_sm_deleting_qfull(struct bfa_itnim_s *itnim,
207 enum bfa_itnim_event event);
208
Jing Huang5fbe25c2010-10-18 17:17:23 -0700209/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700210 * forward declaration for BFA IOIM functions
211 */
212static bfa_boolean_t bfa_ioim_send_ioreq(struct bfa_ioim_s *ioim);
Maggie Zhange3e7d3e2010-12-09 19:10:27 -0800213static bfa_boolean_t bfa_ioim_sgpg_alloc(struct bfa_ioim_s *ioim);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700214static bfa_boolean_t bfa_ioim_send_abort(struct bfa_ioim_s *ioim);
215static void bfa_ioim_notify_cleanup(struct bfa_ioim_s *ioim);
216static void __bfa_cb_ioim_good_comp(void *cbarg, bfa_boolean_t complete);
217static void __bfa_cb_ioim_comp(void *cbarg, bfa_boolean_t complete);
218static void __bfa_cb_ioim_abort(void *cbarg, bfa_boolean_t complete);
219static void __bfa_cb_ioim_failed(void *cbarg, bfa_boolean_t complete);
220static void __bfa_cb_ioim_pathtov(void *cbarg, bfa_boolean_t complete);
221static bfa_boolean_t bfa_ioim_is_abortable(struct bfa_ioim_s *ioim);
222
Jing Huang5fbe25c2010-10-18 17:17:23 -0700223/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700224 * forward declaration of BFA IO state machine
225 */
226static void bfa_ioim_sm_uninit(struct bfa_ioim_s *ioim,
227 enum bfa_ioim_event event);
228static void bfa_ioim_sm_sgalloc(struct bfa_ioim_s *ioim,
229 enum bfa_ioim_event event);
230static void bfa_ioim_sm_active(struct bfa_ioim_s *ioim,
231 enum bfa_ioim_event event);
232static void bfa_ioim_sm_abort(struct bfa_ioim_s *ioim,
233 enum bfa_ioim_event event);
234static void bfa_ioim_sm_cleanup(struct bfa_ioim_s *ioim,
235 enum bfa_ioim_event event);
236static void bfa_ioim_sm_qfull(struct bfa_ioim_s *ioim,
237 enum bfa_ioim_event event);
238static void bfa_ioim_sm_abort_qfull(struct bfa_ioim_s *ioim,
239 enum bfa_ioim_event event);
240static void bfa_ioim_sm_cleanup_qfull(struct bfa_ioim_s *ioim,
241 enum bfa_ioim_event event);
242static void bfa_ioim_sm_hcb(struct bfa_ioim_s *ioim,
243 enum bfa_ioim_event event);
244static void bfa_ioim_sm_hcb_free(struct bfa_ioim_s *ioim,
245 enum bfa_ioim_event event);
246static void bfa_ioim_sm_resfree(struct bfa_ioim_s *ioim,
247 enum bfa_ioim_event event);
248static void bfa_ioim_sm_cmnd_retry(struct bfa_ioim_s *ioim,
249 enum bfa_ioim_event event);
Jing Huang5fbe25c2010-10-18 17:17:23 -0700250/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700251 * forward declaration for BFA TSKIM functions
252 */
253static void __bfa_cb_tskim_done(void *cbarg, bfa_boolean_t complete);
254static void __bfa_cb_tskim_failed(void *cbarg, bfa_boolean_t complete);
255static bfa_boolean_t bfa_tskim_match_scope(struct bfa_tskim_s *tskim,
Maggie Zhangf3148782010-12-09 19:11:39 -0800256 struct scsi_lun lun);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700257static void bfa_tskim_gather_ios(struct bfa_tskim_s *tskim);
258static void bfa_tskim_cleanp_comp(void *tskim_cbarg);
259static void bfa_tskim_cleanup_ios(struct bfa_tskim_s *tskim);
260static bfa_boolean_t bfa_tskim_send(struct bfa_tskim_s *tskim);
261static bfa_boolean_t bfa_tskim_send_abort(struct bfa_tskim_s *tskim);
262static void bfa_tskim_iocdisable_ios(struct bfa_tskim_s *tskim);
263
Jing Huang5fbe25c2010-10-18 17:17:23 -0700264/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700265 * forward declaration of BFA TSKIM state machine
266 */
267static void bfa_tskim_sm_uninit(struct bfa_tskim_s *tskim,
268 enum bfa_tskim_event event);
269static void bfa_tskim_sm_active(struct bfa_tskim_s *tskim,
270 enum bfa_tskim_event event);
271static void bfa_tskim_sm_cleanup(struct bfa_tskim_s *tskim,
272 enum bfa_tskim_event event);
273static void bfa_tskim_sm_iocleanup(struct bfa_tskim_s *tskim,
274 enum bfa_tskim_event event);
275static void bfa_tskim_sm_qfull(struct bfa_tskim_s *tskim,
276 enum bfa_tskim_event event);
277static void bfa_tskim_sm_cleanup_qfull(struct bfa_tskim_s *tskim,
278 enum bfa_tskim_event event);
279static void bfa_tskim_sm_hcb(struct bfa_tskim_s *tskim,
280 enum bfa_tskim_event event);
Jing Huang5fbe25c2010-10-18 17:17:23 -0700281/*
Maggie Zhangdf0f1932010-12-09 19:07:46 -0800282 * BFA FCP Initiator Mode module
Jing Huang7725ccf2009-09-23 17:46:15 -0700283 */
284
Jing Huang5fbe25c2010-10-18 17:17:23 -0700285/*
Maggie Zhangda99dcc2010-12-09 19:13:20 -0800286 * Compute and return memory needed by FCP(im) module.
Jing Huang7725ccf2009-09-23 17:46:15 -0700287 */
288static void
289bfa_fcpim_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *km_len,
290 u32 *dm_len)
291{
292 bfa_itnim_meminfo(cfg, km_len, dm_len);
293
Jing Huang5fbe25c2010-10-18 17:17:23 -0700294 /*
Jing Huang7725ccf2009-09-23 17:46:15 -0700295 * IO memory
296 */
Jing Huang7725ccf2009-09-23 17:46:15 -0700297 *km_len += cfg->fwcfg.num_ioim_reqs *
298 (sizeof(struct bfa_ioim_s) + sizeof(struct bfa_ioim_sp_s));
299
Jing Huang5fbe25c2010-10-18 17:17:23 -0700300 /*
Jing Huang7725ccf2009-09-23 17:46:15 -0700301 * task management command memory
302 */
303 if (cfg->fwcfg.num_tskim_reqs < BFA_TSKIM_MIN)
304 cfg->fwcfg.num_tskim_reqs = BFA_TSKIM_MIN;
305 *km_len += cfg->fwcfg.num_tskim_reqs * sizeof(struct bfa_tskim_s);
306}
307
308
309static void
Krishna Gudipatie2187d72011-06-13 15:53:58 -0700310bfa_fcpim_attach(struct bfa_fcp_mod_s *fcp, void *bfad,
311 struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *meminfo,
312 struct bfa_pcidev_s *pcidev)
Jing Huang7725ccf2009-09-23 17:46:15 -0700313{
Krishna Gudipatie2187d72011-06-13 15:53:58 -0700314 struct bfa_fcpim_s *fcpim = &fcp->fcpim;
315 struct bfa_s *bfa = fcp->bfa;
Jing Huang7725ccf2009-09-23 17:46:15 -0700316
317 bfa_trc(bfa, cfg->drvcfg.path_tov);
318 bfa_trc(bfa, cfg->fwcfg.num_rports);
319 bfa_trc(bfa, cfg->fwcfg.num_ioim_reqs);
320 bfa_trc(bfa, cfg->fwcfg.num_tskim_reqs);
321
Krishna Gudipatie2187d72011-06-13 15:53:58 -0700322 fcpim->fcp = fcp;
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700323 fcpim->bfa = bfa;
324 fcpim->num_itnims = cfg->fwcfg.num_rports;
Jing Huang7725ccf2009-09-23 17:46:15 -0700325 fcpim->num_tskim_reqs = cfg->fwcfg.num_tskim_reqs;
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700326 fcpim->path_tov = cfg->drvcfg.path_tov;
327 fcpim->delay_comp = cfg->drvcfg.delay_comp;
328 fcpim->profile_comp = NULL;
329 fcpim->profile_start = NULL;
Jing Huang7725ccf2009-09-23 17:46:15 -0700330
331 bfa_itnim_attach(fcpim, meminfo);
332 bfa_tskim_attach(fcpim, meminfo);
333 bfa_ioim_attach(fcpim, meminfo);
334}
335
336static void
Krishna Gudipatie2187d72011-06-13 15:53:58 -0700337bfa_fcpim_iocdisable(struct bfa_fcp_mod_s *fcp)
Jing Huang7725ccf2009-09-23 17:46:15 -0700338{
Krishna Gudipatie2187d72011-06-13 15:53:58 -0700339 struct bfa_fcpim_s *fcpim = &fcp->fcpim;
Jing Huang7725ccf2009-09-23 17:46:15 -0700340 struct bfa_itnim_s *itnim;
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700341 struct list_head *qe, *qen;
Jing Huang7725ccf2009-09-23 17:46:15 -0700342
Krishna Gudipati3fd45982011-06-24 20:24:08 -0700343 /* Enqueue unused ioim resources to free_q */
344 list_splice_tail_init(&fcpim->tskim_unused_q, &fcpim->tskim_free_q);
345
Jing Huang7725ccf2009-09-23 17:46:15 -0700346 list_for_each_safe(qe, qen, &fcpim->itnim_q) {
347 itnim = (struct bfa_itnim_s *) qe;
348 bfa_itnim_iocdisable(itnim);
349 }
350}
351
352void
353bfa_fcpim_path_tov_set(struct bfa_s *bfa, u16 path_tov)
354{
Krishna Gudipatie2187d72011-06-13 15:53:58 -0700355 struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa);
Jing Huang7725ccf2009-09-23 17:46:15 -0700356
357 fcpim->path_tov = path_tov * 1000;
358 if (fcpim->path_tov > BFA_FCPIM_PATHTOV_MAX)
359 fcpim->path_tov = BFA_FCPIM_PATHTOV_MAX;
360}
361
362u16
363bfa_fcpim_path_tov_get(struct bfa_s *bfa)
364{
Krishna Gudipatie2187d72011-06-13 15:53:58 -0700365 struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa);
Jing Huang7725ccf2009-09-23 17:46:15 -0700366
Jing Huangf8ceafd2009-09-25 12:29:54 -0700367 return fcpim->path_tov / 1000;
Jing Huang7725ccf2009-09-23 17:46:15 -0700368}
369
Jing Huang7725ccf2009-09-23 17:46:15 -0700370u16
371bfa_fcpim_qdepth_get(struct bfa_s *bfa)
372{
Krishna Gudipatie2187d72011-06-13 15:53:58 -0700373 struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa);
Jing Huang7725ccf2009-09-23 17:46:15 -0700374
Jing Huangf8ceafd2009-09-25 12:29:54 -0700375 return fcpim->q_depth;
Jing Huang7725ccf2009-09-23 17:46:15 -0700376}
377
Jing Huang5fbe25c2010-10-18 17:17:23 -0700378/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700379 * BFA ITNIM module state machine functions
380 */
381
Jing Huang5fbe25c2010-10-18 17:17:23 -0700382/*
Maggie Zhangda99dcc2010-12-09 19:13:20 -0800383 * Beginning/unallocated state - no events expected.
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700384 */
385static void
386bfa_itnim_sm_uninit(struct bfa_itnim_s *itnim, enum bfa_itnim_event event)
387{
388 bfa_trc(itnim->bfa, itnim->rport->rport_tag);
389 bfa_trc(itnim->bfa, event);
390
391 switch (event) {
392 case BFA_ITNIM_SM_CREATE:
393 bfa_sm_set_state(itnim, bfa_itnim_sm_created);
394 itnim->is_online = BFA_FALSE;
395 bfa_fcpim_additn(itnim);
396 break;
397
398 default:
399 bfa_sm_fault(itnim->bfa, event);
400 }
401}
402
Jing Huang5fbe25c2010-10-18 17:17:23 -0700403/*
Maggie Zhangda99dcc2010-12-09 19:13:20 -0800404 * Beginning state, only online event expected.
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700405 */
406static void
407bfa_itnim_sm_created(struct bfa_itnim_s *itnim, enum bfa_itnim_event event)
408{
409 bfa_trc(itnim->bfa, itnim->rport->rport_tag);
410 bfa_trc(itnim->bfa, event);
411
412 switch (event) {
413 case BFA_ITNIM_SM_ONLINE:
414 if (bfa_itnim_send_fwcreate(itnim))
415 bfa_sm_set_state(itnim, bfa_itnim_sm_fwcreate);
416 else
417 bfa_sm_set_state(itnim, bfa_itnim_sm_fwcreate_qfull);
418 break;
419
420 case BFA_ITNIM_SM_DELETE:
421 bfa_sm_set_state(itnim, bfa_itnim_sm_uninit);
422 bfa_fcpim_delitn(itnim);
423 break;
424
425 case BFA_ITNIM_SM_HWFAIL:
426 bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
427 break;
428
429 default:
430 bfa_sm_fault(itnim->bfa, event);
431 }
432}
433
Jing Huang5fbe25c2010-10-18 17:17:23 -0700434/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700435 * Waiting for itnim create response from firmware.
436 */
437static void
438bfa_itnim_sm_fwcreate(struct bfa_itnim_s *itnim, enum bfa_itnim_event event)
439{
440 bfa_trc(itnim->bfa, itnim->rport->rport_tag);
441 bfa_trc(itnim->bfa, event);
442
443 switch (event) {
444 case BFA_ITNIM_SM_FWRSP:
445 bfa_sm_set_state(itnim, bfa_itnim_sm_online);
446 itnim->is_online = BFA_TRUE;
447 bfa_itnim_iotov_online(itnim);
448 bfa_itnim_online_cb(itnim);
449 break;
450
451 case BFA_ITNIM_SM_DELETE:
452 bfa_sm_set_state(itnim, bfa_itnim_sm_delete_pending);
453 break;
454
455 case BFA_ITNIM_SM_OFFLINE:
456 if (bfa_itnim_send_fwdelete(itnim))
457 bfa_sm_set_state(itnim, bfa_itnim_sm_fwdelete);
458 else
459 bfa_sm_set_state(itnim, bfa_itnim_sm_fwdelete_qfull);
460 break;
461
462 case BFA_ITNIM_SM_HWFAIL:
463 bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
464 break;
465
466 default:
467 bfa_sm_fault(itnim->bfa, event);
468 }
469}
470
471static void
472bfa_itnim_sm_fwcreate_qfull(struct bfa_itnim_s *itnim,
473 enum bfa_itnim_event event)
474{
475 bfa_trc(itnim->bfa, itnim->rport->rport_tag);
476 bfa_trc(itnim->bfa, event);
477
478 switch (event) {
479 case BFA_ITNIM_SM_QRESUME:
480 bfa_sm_set_state(itnim, bfa_itnim_sm_fwcreate);
481 bfa_itnim_send_fwcreate(itnim);
482 break;
483
484 case BFA_ITNIM_SM_DELETE:
485 bfa_sm_set_state(itnim, bfa_itnim_sm_uninit);
486 bfa_reqq_wcancel(&itnim->reqq_wait);
487 bfa_fcpim_delitn(itnim);
488 break;
489
490 case BFA_ITNIM_SM_OFFLINE:
491 bfa_sm_set_state(itnim, bfa_itnim_sm_offline);
492 bfa_reqq_wcancel(&itnim->reqq_wait);
493 bfa_itnim_offline_cb(itnim);
494 break;
495
496 case BFA_ITNIM_SM_HWFAIL:
497 bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
498 bfa_reqq_wcancel(&itnim->reqq_wait);
499 break;
500
501 default:
502 bfa_sm_fault(itnim->bfa, event);
503 }
504}
505
Jing Huang5fbe25c2010-10-18 17:17:23 -0700506/*
Maggie Zhangda99dcc2010-12-09 19:13:20 -0800507 * Waiting for itnim create response from firmware, a delete is pending.
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700508 */
509static void
510bfa_itnim_sm_delete_pending(struct bfa_itnim_s *itnim,
511 enum bfa_itnim_event event)
512{
513 bfa_trc(itnim->bfa, itnim->rport->rport_tag);
514 bfa_trc(itnim->bfa, event);
515
516 switch (event) {
517 case BFA_ITNIM_SM_FWRSP:
518 if (bfa_itnim_send_fwdelete(itnim))
519 bfa_sm_set_state(itnim, bfa_itnim_sm_deleting);
520 else
521 bfa_sm_set_state(itnim, bfa_itnim_sm_deleting_qfull);
522 break;
523
524 case BFA_ITNIM_SM_HWFAIL:
525 bfa_sm_set_state(itnim, bfa_itnim_sm_uninit);
526 bfa_fcpim_delitn(itnim);
527 break;
528
529 default:
530 bfa_sm_fault(itnim->bfa, event);
531 }
532}
533
Jing Huang5fbe25c2010-10-18 17:17:23 -0700534/*
Maggie Zhangda99dcc2010-12-09 19:13:20 -0800535 * Online state - normal parking state.
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700536 */
537static void
538bfa_itnim_sm_online(struct bfa_itnim_s *itnim, enum bfa_itnim_event event)
539{
540 bfa_trc(itnim->bfa, itnim->rport->rport_tag);
541 bfa_trc(itnim->bfa, event);
542
543 switch (event) {
544 case BFA_ITNIM_SM_OFFLINE:
545 bfa_sm_set_state(itnim, bfa_itnim_sm_cleanup_offline);
546 itnim->is_online = BFA_FALSE;
547 bfa_itnim_iotov_start(itnim);
548 bfa_itnim_cleanup(itnim);
549 break;
550
551 case BFA_ITNIM_SM_DELETE:
552 bfa_sm_set_state(itnim, bfa_itnim_sm_cleanup_delete);
553 itnim->is_online = BFA_FALSE;
554 bfa_itnim_cleanup(itnim);
555 break;
556
557 case BFA_ITNIM_SM_SLER:
558 bfa_sm_set_state(itnim, bfa_itnim_sm_sler);
559 itnim->is_online = BFA_FALSE;
560 bfa_itnim_iotov_start(itnim);
561 bfa_itnim_sler_cb(itnim);
562 break;
563
564 case BFA_ITNIM_SM_HWFAIL:
565 bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
566 itnim->is_online = BFA_FALSE;
567 bfa_itnim_iotov_start(itnim);
568 bfa_itnim_iocdisable_cleanup(itnim);
569 break;
570
571 default:
572 bfa_sm_fault(itnim->bfa, event);
573 }
574}
575
Jing Huang5fbe25c2010-10-18 17:17:23 -0700576/*
Maggie Zhangda99dcc2010-12-09 19:13:20 -0800577 * Second level error recovery need.
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700578 */
579static void
580bfa_itnim_sm_sler(struct bfa_itnim_s *itnim, enum bfa_itnim_event event)
581{
582 bfa_trc(itnim->bfa, itnim->rport->rport_tag);
583 bfa_trc(itnim->bfa, event);
584
585 switch (event) {
586 case BFA_ITNIM_SM_OFFLINE:
587 bfa_sm_set_state(itnim, bfa_itnim_sm_cleanup_offline);
588 bfa_itnim_cleanup(itnim);
589 break;
590
591 case BFA_ITNIM_SM_DELETE:
592 bfa_sm_set_state(itnim, bfa_itnim_sm_cleanup_delete);
593 bfa_itnim_cleanup(itnim);
594 bfa_itnim_iotov_delete(itnim);
595 break;
596
597 case BFA_ITNIM_SM_HWFAIL:
598 bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
599 bfa_itnim_iocdisable_cleanup(itnim);
600 break;
601
602 default:
603 bfa_sm_fault(itnim->bfa, event);
604 }
605}
606
Jing Huang5fbe25c2010-10-18 17:17:23 -0700607/*
Maggie Zhangda99dcc2010-12-09 19:13:20 -0800608 * Going offline. Waiting for active IO cleanup.
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700609 */
610static void
611bfa_itnim_sm_cleanup_offline(struct bfa_itnim_s *itnim,
612 enum bfa_itnim_event event)
613{
614 bfa_trc(itnim->bfa, itnim->rport->rport_tag);
615 bfa_trc(itnim->bfa, event);
616
617 switch (event) {
618 case BFA_ITNIM_SM_CLEANUP:
619 if (bfa_itnim_send_fwdelete(itnim))
620 bfa_sm_set_state(itnim, bfa_itnim_sm_fwdelete);
621 else
622 bfa_sm_set_state(itnim, bfa_itnim_sm_fwdelete_qfull);
623 break;
624
625 case BFA_ITNIM_SM_DELETE:
626 bfa_sm_set_state(itnim, bfa_itnim_sm_cleanup_delete);
627 bfa_itnim_iotov_delete(itnim);
628 break;
629
630 case BFA_ITNIM_SM_HWFAIL:
631 bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
632 bfa_itnim_iocdisable_cleanup(itnim);
633 bfa_itnim_offline_cb(itnim);
634 break;
635
636 case BFA_ITNIM_SM_SLER:
637 break;
638
639 default:
640 bfa_sm_fault(itnim->bfa, event);
641 }
642}
643
Jing Huang5fbe25c2010-10-18 17:17:23 -0700644/*
Maggie Zhangda99dcc2010-12-09 19:13:20 -0800645 * Deleting itnim. Waiting for active IO cleanup.
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700646 */
647static void
648bfa_itnim_sm_cleanup_delete(struct bfa_itnim_s *itnim,
649 enum bfa_itnim_event event)
650{
651 bfa_trc(itnim->bfa, itnim->rport->rport_tag);
652 bfa_trc(itnim->bfa, event);
653
654 switch (event) {
655 case BFA_ITNIM_SM_CLEANUP:
656 if (bfa_itnim_send_fwdelete(itnim))
657 bfa_sm_set_state(itnim, bfa_itnim_sm_deleting);
658 else
659 bfa_sm_set_state(itnim, bfa_itnim_sm_deleting_qfull);
660 break;
661
662 case BFA_ITNIM_SM_HWFAIL:
663 bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
664 bfa_itnim_iocdisable_cleanup(itnim);
665 break;
666
667 default:
668 bfa_sm_fault(itnim->bfa, event);
669 }
670}
671
Jing Huang5fbe25c2010-10-18 17:17:23 -0700672/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700673 * Rport offline. Fimrware itnim is being deleted - awaiting f/w response.
674 */
675static void
676bfa_itnim_sm_fwdelete(struct bfa_itnim_s *itnim, enum bfa_itnim_event event)
677{
678 bfa_trc(itnim->bfa, itnim->rport->rport_tag);
679 bfa_trc(itnim->bfa, event);
680
681 switch (event) {
682 case BFA_ITNIM_SM_FWRSP:
683 bfa_sm_set_state(itnim, bfa_itnim_sm_offline);
684 bfa_itnim_offline_cb(itnim);
685 break;
686
687 case BFA_ITNIM_SM_DELETE:
688 bfa_sm_set_state(itnim, bfa_itnim_sm_deleting);
689 break;
690
691 case BFA_ITNIM_SM_HWFAIL:
692 bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
693 bfa_itnim_offline_cb(itnim);
694 break;
695
696 default:
697 bfa_sm_fault(itnim->bfa, event);
698 }
699}
700
701static void
702bfa_itnim_sm_fwdelete_qfull(struct bfa_itnim_s *itnim,
703 enum bfa_itnim_event event)
704{
705 bfa_trc(itnim->bfa, itnim->rport->rport_tag);
706 bfa_trc(itnim->bfa, event);
707
708 switch (event) {
709 case BFA_ITNIM_SM_QRESUME:
710 bfa_sm_set_state(itnim, bfa_itnim_sm_fwdelete);
711 bfa_itnim_send_fwdelete(itnim);
712 break;
713
714 case BFA_ITNIM_SM_DELETE:
715 bfa_sm_set_state(itnim, bfa_itnim_sm_deleting_qfull);
716 break;
717
718 case BFA_ITNIM_SM_HWFAIL:
719 bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
720 bfa_reqq_wcancel(&itnim->reqq_wait);
721 bfa_itnim_offline_cb(itnim);
722 break;
723
724 default:
725 bfa_sm_fault(itnim->bfa, event);
726 }
727}
728
Jing Huang5fbe25c2010-10-18 17:17:23 -0700729/*
Maggie Zhangda99dcc2010-12-09 19:13:20 -0800730 * Offline state.
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700731 */
732static void
733bfa_itnim_sm_offline(struct bfa_itnim_s *itnim, enum bfa_itnim_event event)
734{
735 bfa_trc(itnim->bfa, itnim->rport->rport_tag);
736 bfa_trc(itnim->bfa, event);
737
738 switch (event) {
739 case BFA_ITNIM_SM_DELETE:
740 bfa_sm_set_state(itnim, bfa_itnim_sm_uninit);
741 bfa_itnim_iotov_delete(itnim);
742 bfa_fcpim_delitn(itnim);
743 break;
744
745 case BFA_ITNIM_SM_ONLINE:
746 if (bfa_itnim_send_fwcreate(itnim))
747 bfa_sm_set_state(itnim, bfa_itnim_sm_fwcreate);
748 else
749 bfa_sm_set_state(itnim, bfa_itnim_sm_fwcreate_qfull);
750 break;
751
752 case BFA_ITNIM_SM_HWFAIL:
753 bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
754 break;
755
756 default:
757 bfa_sm_fault(itnim->bfa, event);
758 }
759}
760
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700761static void
762bfa_itnim_sm_iocdisable(struct bfa_itnim_s *itnim,
763 enum bfa_itnim_event event)
764{
765 bfa_trc(itnim->bfa, itnim->rport->rport_tag);
766 bfa_trc(itnim->bfa, event);
767
768 switch (event) {
769 case BFA_ITNIM_SM_DELETE:
770 bfa_sm_set_state(itnim, bfa_itnim_sm_uninit);
771 bfa_itnim_iotov_delete(itnim);
772 bfa_fcpim_delitn(itnim);
773 break;
774
775 case BFA_ITNIM_SM_OFFLINE:
776 bfa_itnim_offline_cb(itnim);
777 break;
778
779 case BFA_ITNIM_SM_ONLINE:
780 if (bfa_itnim_send_fwcreate(itnim))
781 bfa_sm_set_state(itnim, bfa_itnim_sm_fwcreate);
782 else
783 bfa_sm_set_state(itnim, bfa_itnim_sm_fwcreate_qfull);
784 break;
785
786 case BFA_ITNIM_SM_HWFAIL:
787 break;
788
789 default:
790 bfa_sm_fault(itnim->bfa, event);
791 }
792}
793
Jing Huang5fbe25c2010-10-18 17:17:23 -0700794/*
Maggie Zhangda99dcc2010-12-09 19:13:20 -0800795 * Itnim is deleted, waiting for firmware response to delete.
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700796 */
797static void
798bfa_itnim_sm_deleting(struct bfa_itnim_s *itnim, enum bfa_itnim_event event)
799{
800 bfa_trc(itnim->bfa, itnim->rport->rport_tag);
801 bfa_trc(itnim->bfa, event);
802
803 switch (event) {
804 case BFA_ITNIM_SM_FWRSP:
805 case BFA_ITNIM_SM_HWFAIL:
806 bfa_sm_set_state(itnim, bfa_itnim_sm_uninit);
807 bfa_fcpim_delitn(itnim);
808 break;
809
810 default:
811 bfa_sm_fault(itnim->bfa, event);
812 }
813}
814
815static void
816bfa_itnim_sm_deleting_qfull(struct bfa_itnim_s *itnim,
817 enum bfa_itnim_event event)
818{
819 bfa_trc(itnim->bfa, itnim->rport->rport_tag);
820 bfa_trc(itnim->bfa, event);
821
822 switch (event) {
823 case BFA_ITNIM_SM_QRESUME:
824 bfa_sm_set_state(itnim, bfa_itnim_sm_deleting);
825 bfa_itnim_send_fwdelete(itnim);
826 break;
827
828 case BFA_ITNIM_SM_HWFAIL:
829 bfa_sm_set_state(itnim, bfa_itnim_sm_uninit);
830 bfa_reqq_wcancel(&itnim->reqq_wait);
831 bfa_fcpim_delitn(itnim);
832 break;
833
834 default:
835 bfa_sm_fault(itnim->bfa, event);
836 }
837}
838
Jing Huang5fbe25c2010-10-18 17:17:23 -0700839/*
Maggie Zhangda99dcc2010-12-09 19:13:20 -0800840 * Initiate cleanup of all IOs on an IOC failure.
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700841 */
842static void
843bfa_itnim_iocdisable_cleanup(struct bfa_itnim_s *itnim)
844{
845 struct bfa_tskim_s *tskim;
846 struct bfa_ioim_s *ioim;
847 struct list_head *qe, *qen;
848
849 list_for_each_safe(qe, qen, &itnim->tsk_q) {
850 tskim = (struct bfa_tskim_s *) qe;
851 bfa_tskim_iocdisable(tskim);
852 }
853
854 list_for_each_safe(qe, qen, &itnim->io_q) {
855 ioim = (struct bfa_ioim_s *) qe;
856 bfa_ioim_iocdisable(ioim);
857 }
858
Jing Huang5fbe25c2010-10-18 17:17:23 -0700859 /*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700860 * For IO request in pending queue, we pretend an early timeout.
861 */
862 list_for_each_safe(qe, qen, &itnim->pending_q) {
863 ioim = (struct bfa_ioim_s *) qe;
864 bfa_ioim_tov(ioim);
865 }
866
867 list_for_each_safe(qe, qen, &itnim->io_cleanup_q) {
868 ioim = (struct bfa_ioim_s *) qe;
869 bfa_ioim_iocdisable(ioim);
870 }
871}
872
Jing Huang5fbe25c2010-10-18 17:17:23 -0700873/*
Maggie Zhangda99dcc2010-12-09 19:13:20 -0800874 * IO cleanup completion
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700875 */
876static void
877bfa_itnim_cleanp_comp(void *itnim_cbarg)
878{
879 struct bfa_itnim_s *itnim = itnim_cbarg;
880
881 bfa_stats(itnim, cleanup_comps);
882 bfa_sm_send_event(itnim, BFA_ITNIM_SM_CLEANUP);
883}
884
Jing Huang5fbe25c2010-10-18 17:17:23 -0700885/*
Maggie Zhangda99dcc2010-12-09 19:13:20 -0800886 * Initiate cleanup of all IOs.
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700887 */
static void
bfa_itnim_cleanup(struct bfa_itnim_s *itnim)
{
	struct bfa_ioim_s *ioim;
	struct bfa_tskim_s *tskim;
	struct list_head *qe, *qen;

	/*
	 * The worker count tracks each outstanding IO/TM cleanup; when all
	 * of them complete, bfa_itnim_cleanp_comp() fires the CLEANUP event.
	 */
	bfa_wc_init(&itnim->wc, bfa_itnim_cleanp_comp, itnim);

	list_for_each_safe(qe, qen, &itnim->io_q) {
		ioim = (struct bfa_ioim_s *) qe;

		/*
		 * Move IO to a cleanup queue from active queue so that a later
		 * TM will not pickup this IO.
		 */
		list_del(&ioim->qe);
		list_add_tail(&ioim->qe, &itnim->io_cleanup_q);

		bfa_wc_up(&itnim->wc);
		bfa_ioim_cleanup(ioim);
	}

	list_for_each_safe(qe, qen, &itnim->tsk_q) {
		tskim = (struct bfa_tskim_s *) qe;
		bfa_wc_up(&itnim->wc);
		bfa_tskim_cleanup(tskim);
	}

	/* Drop the initial reference; may complete immediately if no IOs. */
	bfa_wc_wait(&itnim->wc);
}
919
920static void
921__bfa_cb_itnim_online(void *cbarg, bfa_boolean_t complete)
922{
923 struct bfa_itnim_s *itnim = cbarg;
924
925 if (complete)
926 bfa_cb_itnim_online(itnim->ditn);
927}
928
929static void
930__bfa_cb_itnim_offline(void *cbarg, bfa_boolean_t complete)
931{
932 struct bfa_itnim_s *itnim = cbarg;
933
934 if (complete)
935 bfa_cb_itnim_offline(itnim->ditn);
936}
937
938static void
939__bfa_cb_itnim_sler(void *cbarg, bfa_boolean_t complete)
940{
941 struct bfa_itnim_s *itnim = cbarg;
942
943 if (complete)
944 bfa_cb_itnim_sler(itnim->ditn);
945}
946
Jing Huang5fbe25c2010-10-18 17:17:23 -0700947/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700948 * Call to resume any I/O requests waiting for room in request queue.
949 */
950static void
951bfa_itnim_qresume(void *cbarg)
952{
953 struct bfa_itnim_s *itnim = cbarg;
954
955 bfa_sm_send_event(itnim, BFA_ITNIM_SM_QRESUME);
956}
957
Jing Huang5fbe25c2010-10-18 17:17:23 -0700958/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700959 * bfa_itnim_public
960 */
961
/*
 * One IO cleanup finished; drop a reference on the itnim worker count.
 */
void
bfa_itnim_iodone(struct bfa_itnim_s *itnim)
{
	bfa_wc_down(&itnim->wc);
}
967
/*
 * One TM command cleanup finished; drop a reference on the itnim worker
 * count.
 */
void
bfa_itnim_tskdone(struct bfa_itnim_s *itnim)
{
	bfa_wc_down(&itnim->wc);
}
973
/*
 * Account for kernel (KVA) memory needed by the itnim module: one
 * bfa_itnim_s per configured rport. No DMA memory is needed, so dm_len
 * is deliberately untouched.
 */
void
bfa_itnim_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *km_len,
		u32 *dm_len)
{
	/*
	 * ITN memory
	 */
	*km_len += cfg->fwcfg.num_rports * sizeof(struct bfa_itnim_s);
}
983
/*
 * Carve the itnim array out of the pre-allocated KVA block (sized by
 * bfa_itnim_meminfo()) and initialize every itnim to the uninit state.
 */
void
bfa_itnim_attach(struct bfa_fcpim_s *fcpim, struct bfa_meminfo_s *minfo)
{
	struct bfa_s *bfa = fcpim->bfa;
	struct bfa_itnim_s *itnim;
	int i, j;

	INIT_LIST_HEAD(&fcpim->itnim_q);

	itnim = (struct bfa_itnim_s *) bfa_meminfo_kva(minfo);
	fcpim->itnim_arr = itnim;

	for (i = 0; i < fcpim->num_itnims; i++, itnim++) {
		memset(itnim, 0, sizeof(struct bfa_itnim_s));
		itnim->bfa = bfa;
		itnim->fcpim = fcpim;
		itnim->reqq = BFA_REQQ_QOS_LO;
		/* itnim tag i maps 1:1 onto the rport with the same tag */
		itnim->rport = BFA_RPORT_FROM_TAG(bfa, i);
		itnim->iotov_active = BFA_FALSE;
		bfa_reqq_winit(&itnim->reqq_wait, bfa_itnim_qresume, itnim);

		INIT_LIST_HEAD(&itnim->io_q);
		INIT_LIST_HEAD(&itnim->io_cleanup_q);
		INIT_LIST_HEAD(&itnim->pending_q);
		INIT_LIST_HEAD(&itnim->tsk_q);
		INIT_LIST_HEAD(&itnim->delay_comp_q);
		/* latency minimums start at all-ones so any sample is lower */
		for (j = 0; j < BFA_IOBUCKET_MAX; j++)
			itnim->ioprofile.io_latency.min[j] = ~0;
		bfa_sm_set_state(itnim, bfa_itnim_sm_uninit);
	}

	/* advance the KVA cursor past the array we just consumed */
	bfa_meminfo_kva(minfo) = (u8 *) itnim;
}
1017
/*
 * IOC failure notification: drive the itnim state machine through its
 * hardware-failure path.
 */
void
bfa_itnim_iocdisable(struct bfa_itnim_s *itnim)
{
	bfa_stats(itnim, ioc_disabled);
	bfa_sm_send_event(itnim, BFA_ITNIM_SM_HWFAIL);
}
1024
/*
 * Send an ITN create request to firmware. Returns BFA_TRUE if the message
 * was queued; BFA_FALSE if the request queue was full, in which case a
 * queue-resume callback (bfa_itnim_qresume) is armed to retry later.
 */
static bfa_boolean_t
bfa_itnim_send_fwcreate(struct bfa_itnim_s *itnim)
{
	struct bfi_itn_create_req_s *m;

	itnim->msg_no++;

	/*
	 * check for room in queue to send request now
	 */
	m = bfa_reqq_next(itnim->bfa, itnim->reqq);
	if (!m) {
		bfa_reqq_wait(itnim->bfa, itnim->reqq, &itnim->reqq_wait);
		return BFA_FALSE;
	}

	bfi_h2i_set(m->mh, BFI_MC_ITN, BFI_ITN_H2I_CREATE_REQ,
			bfa_fn_lpu(itnim->bfa));
	m->fw_handle = itnim->rport->fw_handle;
	m->class = FC_CLASS_3;
	m->seq_rec = itnim->seq_rec;
	m->msg_no = itnim->msg_no;
	bfa_stats(itnim, fw_create);

	/*
	 * queue I/O message to firmware
	 */
	bfa_reqq_produce(itnim->bfa, itnim->reqq, m->mh);
	return BFA_TRUE;
}
1055
/*
 * Send an ITN delete request to firmware. Returns BFA_TRUE if queued;
 * BFA_FALSE if the request queue was full (resume callback armed).
 */
static bfa_boolean_t
bfa_itnim_send_fwdelete(struct bfa_itnim_s *itnim)
{
	struct bfi_itn_delete_req_s *m;

	/*
	 * check for room in queue to send request now
	 */
	m = bfa_reqq_next(itnim->bfa, itnim->reqq);
	if (!m) {
		bfa_reqq_wait(itnim->bfa, itnim->reqq, &itnim->reqq_wait);
		return BFA_FALSE;
	}

	bfi_h2i_set(m->mh, BFI_MC_ITN, BFI_ITN_H2I_DELETE_REQ,
			bfa_fn_lpu(itnim->bfa));
	m->fw_handle = itnim->rport->fw_handle;
	bfa_stats(itnim, fw_delete);

	/*
	 * queue I/O message to firmware
	 */
	bfa_reqq_produce(itnim->bfa, itnim->reqq, m->mh);
	return BFA_TRUE;
}
1081
Jing Huang5fbe25c2010-10-18 17:17:23 -07001082/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001083 * Cleanup all pending failed inflight requests.
1084 */
/*
 * Complete all IOs parked on the delayed-completion queue. iotov selects
 * whether each IO is completed as a path timeout or resumed/failed back
 * normally (see bfa_ioim_delayed_comp()).
 */
static void
bfa_itnim_delayed_comp(struct bfa_itnim_s *itnim, bfa_boolean_t iotov)
{
	struct bfa_ioim_s *ioim;
	struct list_head *qe, *qen;

	list_for_each_safe(qe, qen, &itnim->delay_comp_q) {
		ioim = (struct bfa_ioim_s *)qe;
		bfa_ioim_delayed_comp(ioim, iotov);
	}
}
1096
Jing Huang5fbe25c2010-10-18 17:17:23 -07001097/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001098 * Start all pending IO requests.
1099 */
/*
 * Itnim came back online before the IO TOV expired: stop the timer,
 * flush delayed completions, and restart all held pending IOs.
 */
static void
bfa_itnim_iotov_online(struct bfa_itnim_s *itnim)
{
	struct bfa_ioim_s *ioim;

	bfa_itnim_iotov_stop(itnim);

	/*
	 * Abort all inflight IO requests in the queue
	 */
	bfa_itnim_delayed_comp(itnim, BFA_FALSE);

	/*
	 * Start all pending IO requests.
	 */
	while (!list_empty(&itnim->pending_q)) {
		bfa_q_deq(&itnim->pending_q, &ioim);
		/* move back to the active queue before (re)starting the IO */
		list_add_tail(&ioim->qe, &itnim->io_q);
		bfa_ioim_start(ioim);
	}
}
1121
Jing Huang5fbe25c2010-10-18 17:17:23 -07001122/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001123 * Fail all pending IO requests
1124 */
/*
 * IO TOV expired (or itnim is being torn down): fail back all delayed
 * and pending IOs as path timeouts.
 */
static void
bfa_itnim_iotov_cleanup(struct bfa_itnim_s *itnim)
{
	struct bfa_ioim_s *ioim;

	/*
	 * Fail all inflight IO requests in the queue
	 */
	bfa_itnim_delayed_comp(itnim, BFA_TRUE);

	/*
	 * Fail any pending IO requests.
	 */
	while (!list_empty(&itnim->pending_q)) {
		bfa_q_deq(&itnim->pending_q, &ioim);
		/* park on the completion queue; bfa_ioim_tov() completes it */
		list_add_tail(&ioim->qe, &ioim->fcpim->ioim_comp_q);
		bfa_ioim_tov(ioim);
	}
}
1144
Jing Huang5fbe25c2010-10-18 17:17:23 -07001145/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001146 * IO TOV timer callback. Fail any pending IO requests.
1147 */
/*
 * IO TOV timer expiry callback. Marks the timer inactive, then brackets
 * the pending-IO cleanup with the upper layer's tov_begin/tov callbacks.
 */
static void
bfa_itnim_iotov(void *itnim_arg)
{
	struct bfa_itnim_s *itnim = itnim_arg;

	/* timer has fired; clear before cleanup so hold_io() returns false */
	itnim->iotov_active = BFA_FALSE;

	bfa_cb_itnim_tov_begin(itnim->ditn);
	bfa_itnim_iotov_cleanup(itnim);
	bfa_cb_itnim_tov(itnim->ditn);
}
1159
Jing Huang5fbe25c2010-10-18 17:17:23 -07001160/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001161 * Start IO TOV timer for failing back pending IO requests in offline state.
1162 */
/*
 * Arm the IO TOV timer so pending IOs are failed back if the itnim does
 * not come online within path_tov. A zero path_tov disables IO holding.
 */
static void
bfa_itnim_iotov_start(struct bfa_itnim_s *itnim)
{
	if (itnim->fcpim->path_tov > 0) {

		itnim->iotov_active = BFA_TRUE;
		/* with iotov_active set, hold_io() must now report true */
		WARN_ON(!bfa_itnim_hold_io(itnim));
		bfa_timer_start(itnim->bfa, &itnim->timer,
			bfa_itnim_iotov, itnim, itnim->fcpim->path_tov);
	}
}
1174
Jing Huang5fbe25c2010-10-18 17:17:23 -07001175/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001176 * Stop IO TOV timer.
1177 */
1178static void
1179bfa_itnim_iotov_stop(struct bfa_itnim_s *itnim)
1180{
1181 if (itnim->iotov_active) {
1182 itnim->iotov_active = BFA_FALSE;
1183 bfa_timer_stop(&itnim->timer);
1184 }
1185}
1186
Jing Huang5fbe25c2010-10-18 17:17:23 -07001187/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001188 * Stop IO TOV timer.
1189 */
1190static void
1191bfa_itnim_iotov_delete(struct bfa_itnim_s *itnim)
1192{
1193 bfa_boolean_t pathtov_active = BFA_FALSE;
1194
1195 if (itnim->iotov_active)
1196 pathtov_active = BFA_TRUE;
1197
1198 bfa_itnim_iotov_stop(itnim);
1199 if (pathtov_active)
1200 bfa_cb_itnim_tov_begin(itnim->ditn);
1201 bfa_itnim_iotov_cleanup(itnim);
1202 if (pathtov_active)
1203 bfa_cb_itnim_tov(itnim->ditn);
1204}
1205
/*
 * Fold this itnim's per-session statistics into the fcpim-wide
 * "deleted itnim" aggregate before the itnim is removed, so the counts
 * survive the itnim's destruction.
 */
static void
bfa_itnim_update_del_itn_stats(struct bfa_itnim_s *itnim)
{
	struct bfa_fcpim_s *fcpim = BFA_FCPIM(itnim->bfa);
	fcpim->del_itn_stats.del_itn_iocomp_aborted +=
		itnim->stats.iocomp_aborted;
	fcpim->del_itn_stats.del_itn_iocomp_timedout +=
		itnim->stats.iocomp_timedout;
	fcpim->del_itn_stats.del_itn_iocom_sqer_needed +=
		itnim->stats.iocom_sqer_needed;
	fcpim->del_itn_stats.del_itn_iocom_res_free +=
		itnim->stats.iocom_res_free;
	fcpim->del_itn_stats.del_itn_iocom_hostabrts +=
		itnim->stats.iocom_hostabrts;
	fcpim->del_itn_stats.del_itn_total_ios += itnim->stats.total_ios;
	fcpim->del_itn_stats.del_io_iocdowns += itnim->stats.io_iocdowns;
	fcpim->del_itn_stats.del_tm_iocdowns += itnim->stats.tm_iocdowns;
}
1224
Jing Huang5fbe25c2010-10-18 17:17:23 -07001225/*
Maggie Zhangda99dcc2010-12-09 19:13:20 -08001226 * bfa_itnim_public
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001227 */
1228
Jing Huang5fbe25c2010-10-18 17:17:23 -07001229/*
Maggie Zhangda99dcc2010-12-09 19:13:20 -08001230 * Itnim interrupt processing.
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001231 */
/*
 * ITN firmware message handler: look up the itnim by the bfa_handle
 * carried in the response and feed the corresponding event into its
 * state machine.
 */
void
bfa_itnim_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
{
	struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa);
	union bfi_itn_i2h_msg_u msg;
	struct bfa_itnim_s *itnim;

	bfa_trc(bfa, m->mhdr.msg_id);

	msg.msg = m;

	switch (m->mhdr.msg_id) {
	case BFI_ITN_I2H_CREATE_RSP:
		itnim = BFA_ITNIM_FROM_TAG(fcpim,
						msg.create_rsp->bfa_handle);
		/* firmware is expected to always succeed the create */
		WARN_ON(msg.create_rsp->status != BFA_STATUS_OK);
		bfa_stats(itnim, create_comps);
		bfa_sm_send_event(itnim, BFA_ITNIM_SM_FWRSP);
		break;

	case BFI_ITN_I2H_DELETE_RSP:
		itnim = BFA_ITNIM_FROM_TAG(fcpim,
						msg.delete_rsp->bfa_handle);
		WARN_ON(msg.delete_rsp->status != BFA_STATUS_OK);
		bfa_stats(itnim, delete_comps);
		bfa_sm_send_event(itnim, BFA_ITNIM_SM_FWRSP);
		break;

	case BFI_ITN_I2H_SLER_EVENT:
		/* unsolicited second-level error recovery notification */
		itnim = BFA_ITNIM_FROM_TAG(fcpim,
						msg.sler_event->bfa_handle);
		bfa_stats(itnim, sler_events);
		bfa_sm_send_event(itnim, BFA_ITNIM_SM_SLER);
		break;

	default:
		bfa_trc(bfa, m->mhdr.msg_id);
		WARN_ON(1);
	}
}
1272
Jing Huang5fbe25c2010-10-18 17:17:23 -07001273/*
Maggie Zhangda99dcc2010-12-09 19:13:20 -08001274 * bfa_itnim_api
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001275 */
1276
/*
 * Create an itnim for the given rport. Registers bfa_itnim_isr for ITN
 * messages, binds the pre-allocated itnim (indexed by rport tag) to the
 * driver's ditn context, and kicks off the CREATE state transition.
 */
struct bfa_itnim_s *
bfa_itnim_create(struct bfa_s *bfa, struct bfa_rport_s *rport, void *ditn)
{
	struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa);
	struct bfa_itnim_s *itnim;

	bfa_itn_create(bfa, rport, bfa_itnim_isr);

	itnim = BFA_ITNIM_FROM_TAG(fcpim, rport->rport_tag);
	/* bfa_itnim_attach() established this tag -> rport mapping */
	WARN_ON(itnim->rport != rport);

	itnim->ditn = ditn;

	bfa_stats(itnim, creates);
	bfa_sm_send_event(itnim, BFA_ITNIM_SM_CREATE);

	return itnim;
}
1295
/*
 * Request itnim deletion; actual teardown is driven by the state machine.
 */
void
bfa_itnim_delete(struct bfa_itnim_s *itnim)
{
	bfa_stats(itnim, deletes);
	bfa_sm_send_event(itnim, BFA_ITNIM_SM_DELETE);
}
1302
/*
 * Bring the itnim online. seq_rec (sequence-level error recovery) is
 * latched here and carried into the firmware create request.
 */
void
bfa_itnim_online(struct bfa_itnim_s *itnim, bfa_boolean_t seq_rec)
{
	itnim->seq_rec = seq_rec;
	bfa_stats(itnim, onlines);
	bfa_sm_send_event(itnim, BFA_ITNIM_SM_ONLINE);
}
1310
/*
 * Take the itnim offline; the state machine handles IO cleanup and the
 * firmware delete handshake.
 */
void
bfa_itnim_offline(struct bfa_itnim_s *itnim)
{
	bfa_stats(itnim, offlines);
	bfa_sm_send_event(itnim, BFA_ITNIM_SM_OFFLINE);
}
1317
Jing Huang5fbe25c2010-10-18 17:17:23 -07001318/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001319 * Return true if itnim is considered offline for holding off IO request.
1320 * IO is not held if itnim is being deleted.
1321 */
bfa_boolean_t
bfa_itnim_hold_io(struct bfa_itnim_s *itnim)
{
	/*
	 * Hold (queue) new IOs only while the IO TOV timer is armed and the
	 * itnim is in one of the transient/offline states listed below;
	 * path_tov == 0 disables holding altogether.
	 */
	return itnim->fcpim->path_tov && itnim->iotov_active &&
		(bfa_sm_cmp_state(itnim, bfa_itnim_sm_fwcreate) ||
		 bfa_sm_cmp_state(itnim, bfa_itnim_sm_sler) ||
		 bfa_sm_cmp_state(itnim, bfa_itnim_sm_cleanup_offline) ||
		 bfa_sm_cmp_state(itnim, bfa_itnim_sm_fwdelete) ||
		 bfa_sm_cmp_state(itnim, bfa_itnim_sm_offline) ||
		 bfa_sm_cmp_state(itnim, bfa_itnim_sm_iocdisable));
}
1333
/*
 * Reset itnim statistics and the IO latency profile; latency minimums
 * are re-seeded to all-ones so the first new sample becomes the minimum.
 */
void
bfa_itnim_clear_stats(struct bfa_itnim_s *itnim)
{
	int j;
	memset(&itnim->stats, 0, sizeof(itnim->stats));
	memset(&itnim->ioprofile, 0, sizeof(itnim->ioprofile));
	for (j = 0; j < BFA_IOBUCKET_MAX; j++)
		itnim->ioprofile.io_latency.min[j] = ~0;
}
1343
Jing Huang5fbe25c2010-10-18 17:17:23 -07001344/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001345 * BFA IO module state machine functions
1346 */
1347
Jing Huang5fbe25c2010-10-18 17:17:23 -07001348/*
Maggie Zhangda99dcc2010-12-09 19:13:20 -08001349 * IO is not started (unallocated).
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001350 */
static void
bfa_ioim_sm_uninit(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
{
	switch (event) {
	case BFA_IOIM_SM_START:
		if (!bfa_itnim_is_online(ioim->itnim)) {
			if (!bfa_itnim_hold_io(ioim->itnim)) {
				/* target gone and not holding: fail the IO
				 * back immediately as a path timeout */
				bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
				list_del(&ioim->qe);
				list_add_tail(&ioim->qe,
					&ioim->fcpim->ioim_comp_q);
				bfa_cb_queue(ioim->bfa, &ioim->hcb_qe,
						__bfa_cb_ioim_pathtov, ioim);
			} else {
				/* park the IO until the itnim comes back
				 * online or the IO TOV timer fires */
				list_del(&ioim->qe);
				list_add_tail(&ioim->qe,
					&ioim->itnim->pending_q);
			}
			break;
		}

		/* IOs with more SGEs than fit inline need SG pages first */
		if (ioim->nsges > BFI_SGE_INLINE) {
			if (!bfa_ioim_sgpg_alloc(ioim)) {
				bfa_sm_set_state(ioim, bfa_ioim_sm_sgalloc);
				return;
			}
		}

		if (!bfa_ioim_send_ioreq(ioim)) {
			/* request queue full; wait for queue resume */
			bfa_sm_set_state(ioim, bfa_ioim_sm_qfull);
			break;
		}

		bfa_sm_set_state(ioim, bfa_ioim_sm_active);
		break;

	case BFA_IOIM_SM_IOTOV:
		/* IO TOV fired while the IO sat on the pending queue */
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_ioim_move_to_comp_q(ioim);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe,
				__bfa_cb_ioim_pathtov, ioim);
		break;

	case BFA_IOIM_SM_ABORT:
		/*
		 * IO in pending queue can get abort requests. Complete abort
		 * requests immediately.
		 */
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		WARN_ON(!bfa_q_is_on_q(&ioim->itnim->pending_q, ioim));
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe,
				__bfa_cb_ioim_abort, ioim);
		break;

	default:
		bfa_sm_fault(ioim->bfa, event);
	}
}
1409
Jing Huang5fbe25c2010-10-18 17:17:23 -07001410/*
Maggie Zhangda99dcc2010-12-09 19:13:20 -08001411 * IO is waiting for SG pages.
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001412 */
static void
bfa_ioim_sm_sgalloc(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
{
	bfa_trc(ioim->bfa, ioim->iotag);
	bfa_trc(ioim->bfa, event);

	switch (event) {
	case BFA_IOIM_SM_SGALLOCED:
		/* SG pages arrived; now the IO request can go to firmware */
		if (!bfa_ioim_send_ioreq(ioim)) {
			bfa_sm_set_state(ioim, bfa_ioim_sm_qfull);
			break;
		}
		bfa_sm_set_state(ioim, bfa_ioim_sm_active);
		break;

	case BFA_IOIM_SM_CLEANUP:
		/* cancel the SG-page wait and fail the IO back */
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_sgpg_wcancel(ioim->bfa, &ioim->iosp->sgpg_wqe);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
			      ioim);
		bfa_ioim_notify_cleanup(ioim);
		break;

	case BFA_IOIM_SM_ABORT:
		/* nothing sent to firmware yet; abort completes locally */
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_sgpg_wcancel(ioim->bfa, &ioim->iosp->sgpg_wqe);
		bfa_ioim_move_to_comp_q(ioim);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
			      ioim);
		break;

	case BFA_IOIM_SM_HWFAIL:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_sgpg_wcancel(ioim->bfa, &ioim->iosp->sgpg_wqe);
		bfa_ioim_move_to_comp_q(ioim);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
			      ioim);
		break;

	default:
		bfa_sm_fault(ioim->bfa, event);
	}
}
1456
Jing Huang5fbe25c2010-10-18 17:17:23 -07001457/*
Maggie Zhangda99dcc2010-12-09 19:13:20 -08001458 * IO is active.
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001459 */
static void
bfa_ioim_sm_active(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
{
	switch (event) {
	case BFA_IOIM_SM_COMP_GOOD:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_ioim_move_to_comp_q(ioim);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe,
			     __bfa_cb_ioim_good_comp, ioim);
		break;

	case BFA_IOIM_SM_COMP:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_ioim_move_to_comp_q(ioim);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_comp,
			     ioim);
		break;

	case BFA_IOIM_SM_DONE:
		/* like COMP, but the IO tag cannot be freed yet (hcb_free) */
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb_free);
		bfa_ioim_move_to_comp_q(ioim);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_comp,
			     ioim);
		break;

	case BFA_IOIM_SM_ABORT:
		/* host-requested (explicit) abort */
		ioim->iosp->abort_explicit = BFA_TRUE;
		ioim->io_cbfn = __bfa_cb_ioim_abort;

		if (bfa_ioim_send_abort(ioim))
			bfa_sm_set_state(ioim, bfa_ioim_sm_abort);
		else {
			/* no reqq space for the abort; wait for resume */
			bfa_sm_set_state(ioim, bfa_ioim_sm_abort_qfull);
			bfa_stats(ioim->itnim, qwait);
			bfa_reqq_wait(ioim->bfa, ioim->reqq,
					&ioim->iosp->reqq_wait);
		}
		break;

	case BFA_IOIM_SM_CLEANUP:
		/* implicit abort due to itnim offline/TM cleanup */
		ioim->iosp->abort_explicit = BFA_FALSE;
		ioim->io_cbfn = __bfa_cb_ioim_failed;

		if (bfa_ioim_send_abort(ioim))
			bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup);
		else {
			bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup_qfull);
			bfa_stats(ioim->itnim, qwait);
			bfa_reqq_wait(ioim->bfa, ioim->reqq,
					&ioim->iosp->reqq_wait);
		}
		break;

	case BFA_IOIM_SM_HWFAIL:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_ioim_move_to_comp_q(ioim);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
			     ioim);
		break;

	case BFA_IOIM_SM_SQRETRY:
		/* firmware asked for a sequence-retry of this IO */
		if (bfa_ioim_maxretry_reached(ioim)) {
			/* max retry reached, free IO */
			bfa_sm_set_state(ioim, bfa_ioim_sm_hcb_free);
			bfa_ioim_move_to_comp_q(ioim);
			bfa_cb_queue(ioim->bfa, &ioim->hcb_qe,
					__bfa_cb_ioim_failed, ioim);
			break;
		}
		/* waiting for IO tag resource free */
		bfa_sm_set_state(ioim, bfa_ioim_sm_cmnd_retry);
		break;

	default:
		bfa_sm_fault(ioim->bfa, event);
	}
}
1537
Jing Huang5fbe25c2010-10-18 17:17:23 -07001538/*
Maggie Zhangda99dcc2010-12-09 19:13:20 -08001539 * IO is retried with new tag.
1540 */
static void
bfa_ioim_sm_cmnd_retry(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
{
	switch (event) {
	case BFA_IOIM_SM_FREE:
		/* abts and rrq done. Now retry the IO with new tag */
		bfa_ioim_update_iotag(ioim);
		if (!bfa_ioim_send_ioreq(ioim)) {
			bfa_sm_set_state(ioim, bfa_ioim_sm_qfull);
			break;
		}
		bfa_sm_set_state(ioim, bfa_ioim_sm_active);
		break;

	case BFA_IOIM_SM_CLEANUP:
		/* itnim cleanup overrides the retry: abort implicitly */
		ioim->iosp->abort_explicit = BFA_FALSE;
		ioim->io_cbfn = __bfa_cb_ioim_failed;

		if (bfa_ioim_send_abort(ioim))
			bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup);
		else {
			bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup_qfull);
			bfa_stats(ioim->itnim, qwait);
			bfa_reqq_wait(ioim->bfa, ioim->reqq,
				&ioim->iosp->reqq_wait);
		}
		break;

	case BFA_IOIM_SM_HWFAIL:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_ioim_move_to_comp_q(ioim);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe,
			 __bfa_cb_ioim_failed, ioim);
		break;

	case BFA_IOIM_SM_ABORT:
		/* in this state IO abort is done.
		 * Waiting for IO tag resource free.
		 */
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb_free);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
			ioim);
		break;

	default:
		bfa_sm_fault(ioim->bfa, event);
	}
}
1589
Jing Huang5fbe25c2010-10-18 17:17:23 -07001590/*
Maggie Zhangda99dcc2010-12-09 19:13:20 -08001591 * IO is being aborted, waiting for completion from firmware.
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001592 */
static void
bfa_ioim_sm_abort(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
{
	bfa_trc(ioim->bfa, ioim->iotag);
	bfa_trc(ioim->bfa, event);

	switch (event) {
	case BFA_IOIM_SM_COMP_GOOD:
	case BFA_IOIM_SM_COMP:
	case BFA_IOIM_SM_DONE:
	case BFA_IOIM_SM_FREE:
		/* IO completions racing with the abort are ignored; the
		 * abort response decides the final disposition */
		break;

	case BFA_IOIM_SM_ABORT_DONE:
		/* abort acknowledged; tag held until FREE (hcb_free) */
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb_free);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
			      ioim);
		break;

	case BFA_IOIM_SM_ABORT_COMP:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_ioim_move_to_comp_q(ioim);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
			      ioim);
		break;

	case BFA_IOIM_SM_COMP_UTAG:
		/* firmware reported unknown tag; treat as abort complete */
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_ioim_move_to_comp_q(ioim);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
			      ioim);
		break;

	case BFA_IOIM_SM_CLEANUP:
		/* downgrade the explicit abort to an implicit one */
		WARN_ON(ioim->iosp->abort_explicit != BFA_TRUE);
		ioim->iosp->abort_explicit = BFA_FALSE;

		if (bfa_ioim_send_abort(ioim))
			bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup);
		else {
			bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup_qfull);
			bfa_stats(ioim->itnim, qwait);
			bfa_reqq_wait(ioim->bfa, ioim->reqq,
				&ioim->iosp->reqq_wait);
		}
		break;

	case BFA_IOIM_SM_HWFAIL:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_ioim_move_to_comp_q(ioim);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
			     ioim);
		break;

	default:
		bfa_sm_fault(ioim->bfa, event);
	}
}
1651
Jing Huang5fbe25c2010-10-18 17:17:23 -07001652/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001653 * IO is being cleaned up (implicit abort), waiting for completion from
1654 * firmware.
1655 */
static void
bfa_ioim_sm_cleanup(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
{
	bfa_trc(ioim->bfa, ioim->iotag);
	bfa_trc(ioim->bfa, event);

	switch (event) {
	case BFA_IOIM_SM_COMP_GOOD:
	case BFA_IOIM_SM_COMP:
	case BFA_IOIM_SM_DONE:
	case BFA_IOIM_SM_FREE:
		/* racing IO completions are ignored; the abort response
		 * (below) finishes the cleanup */
		break;

	case BFA_IOIM_SM_ABORT:
		/*
		 * IO is already being aborted implicitly
		 */
		ioim->io_cbfn = __bfa_cb_ioim_abort;
		break;

	case BFA_IOIM_SM_ABORT_DONE:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb_free);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, ioim->io_cbfn, ioim);
		/* tell the itnim this cleanup worker is done */
		bfa_ioim_notify_cleanup(ioim);
		break;

	case BFA_IOIM_SM_ABORT_COMP:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, ioim->io_cbfn, ioim);
		bfa_ioim_notify_cleanup(ioim);
		break;

	case BFA_IOIM_SM_COMP_UTAG:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, ioim->io_cbfn, ioim);
		bfa_ioim_notify_cleanup(ioim);
		break;

	case BFA_IOIM_SM_HWFAIL:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_ioim_move_to_comp_q(ioim);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
			     ioim);
		break;

	case BFA_IOIM_SM_CLEANUP:
		/*
		 * IO can be in cleanup state already due to TM command.
		 * 2nd cleanup request comes from ITN offline event.
		 */
		break;

	default:
		bfa_sm_fault(ioim->bfa, event);
	}
}
1712
Jing Huang5fbe25c2010-10-18 17:17:23 -07001713/*
Maggie Zhangda99dcc2010-12-09 19:13:20 -08001714 * IO is waiting for room in request CQ
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001715 */
static void
bfa_ioim_sm_qfull(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
{
	bfa_trc(ioim->bfa, ioim->iotag);
	bfa_trc(ioim->bfa, event);

	switch (event) {
	case BFA_IOIM_SM_QRESUME:
		/* reqq space freed up; send the original IO request now */
		bfa_sm_set_state(ioim, bfa_ioim_sm_active);
		bfa_ioim_send_ioreq(ioim);
		break;

	case BFA_IOIM_SM_ABORT:
		/* nothing reached firmware; abort completes locally */
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
		bfa_ioim_move_to_comp_q(ioim);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
			      ioim);
		break;

	case BFA_IOIM_SM_CLEANUP:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
			      ioim);
		bfa_ioim_notify_cleanup(ioim);
		break;

	case BFA_IOIM_SM_HWFAIL:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
		bfa_ioim_move_to_comp_q(ioim);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
			     ioim);
		break;

	default:
		bfa_sm_fault(ioim->bfa, event);
	}
}
1756
Jing Huang5fbe25c2010-10-18 17:17:23 -07001757/*
Maggie Zhangda99dcc2010-12-09 19:13:20 -08001758 * Active IO is being aborted, waiting for room in request CQ.
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001759 */
static void
bfa_ioim_sm_abort_qfull(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
{
	bfa_trc(ioim->bfa, ioim->iotag);
	bfa_trc(ioim->bfa, event);

	switch (event) {
	case BFA_IOIM_SM_QRESUME:
		/* reqq space available; send the queued abort request */
		bfa_sm_set_state(ioim, bfa_ioim_sm_abort);
		bfa_ioim_send_abort(ioim);
		break;

	case BFA_IOIM_SM_CLEANUP:
		/* downgrade to implicit abort; keep waiting for reqq */
		WARN_ON(ioim->iosp->abort_explicit != BFA_TRUE);
		ioim->iosp->abort_explicit = BFA_FALSE;
		bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup_qfull);
		break;

	case BFA_IOIM_SM_COMP_GOOD:
	case BFA_IOIM_SM_COMP:
		/* IO completed before the abort was ever sent */
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
		bfa_ioim_move_to_comp_q(ioim);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
			      ioim);
		break;

	case BFA_IOIM_SM_DONE:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb_free);
		bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
		bfa_ioim_move_to_comp_q(ioim);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
			      ioim);
		break;

	case BFA_IOIM_SM_HWFAIL:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
		bfa_ioim_move_to_comp_q(ioim);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
			     ioim);
		break;

	default:
		bfa_sm_fault(ioim->bfa, event);
	}
}
1807
Jing Huang5fbe25c2010-10-18 17:17:23 -07001808/*
Maggie Zhangda99dcc2010-12-09 19:13:20 -08001809 * Active IO is being cleaned up, waiting for room in request CQ.
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001810 */
static void
bfa_ioim_sm_cleanup_qfull(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
{
	bfa_trc(ioim->bfa, ioim->iotag);
	bfa_trc(ioim->bfa, event);

	switch (event) {
	case BFA_IOIM_SM_QRESUME:
		/* reqq space available; send the implicit abort */
		bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup);
		bfa_ioim_send_abort(ioim);
		break;

	case BFA_IOIM_SM_ABORT:
		/*
		 * IO is already being cleaned up implicitly
		 */
		ioim->io_cbfn = __bfa_cb_ioim_abort;
		break;

	case BFA_IOIM_SM_COMP_GOOD:
	case BFA_IOIM_SM_COMP:
		/* IO completed before the cleanup abort was sent */
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, ioim->io_cbfn, ioim);
		bfa_ioim_notify_cleanup(ioim);
		break;

	case BFA_IOIM_SM_DONE:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb_free);
		bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, ioim->io_cbfn, ioim);
		bfa_ioim_notify_cleanup(ioim);
		break;

	case BFA_IOIM_SM_HWFAIL:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
		bfa_ioim_move_to_comp_q(ioim);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
			     ioim);
		break;

	default:
		bfa_sm_fault(ioim->bfa, event);
	}
}
1857
Jing Huang5fbe25c2010-10-18 17:17:23 -07001858/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001859 * IO bfa callback is pending.
1860 */
static void
bfa_ioim_sm_hcb(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
{
	switch (event) {
	case BFA_IOIM_SM_HCB:
		/* host completion callback executed; recycle the IO */
		bfa_sm_set_state(ioim, bfa_ioim_sm_uninit);
		bfa_ioim_free(ioim);
		break;

	case BFA_IOIM_SM_CLEANUP:
		/* itnim going away - account this IO to the cleanup waiter */
		bfa_ioim_notify_cleanup(ioim);
		break;

	case BFA_IOIM_SM_HWFAIL:
		/* callback already queued; nothing extra on IOC failure */
		break;

	default:
		bfa_sm_fault(ioim->bfa, event);
	}
}
1881
Jing Huang5fbe25c2010-10-18 17:17:23 -07001882/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001883 * IO bfa callback is pending. IO resource cannot be freed.
1884 */
static void
bfa_ioim_sm_hcb_free(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
{
	bfa_trc(ioim->bfa, ioim->iotag);
	bfa_trc(ioim->bfa, event);

	switch (event) {
	case BFA_IOIM_SM_HCB:
		/* host callback done first; still must wait for firmware to
		 * release the resource, so park on the resfree queue */
		bfa_sm_set_state(ioim, bfa_ioim_sm_resfree);
		list_del(&ioim->qe);
		list_add_tail(&ioim->qe, &ioim->fcpim->ioim_resfree_q);
		break;

	case BFA_IOIM_SM_FREE:
		/* firmware freed the resource first; only the host
		 * callback remains outstanding */
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		break;

	case BFA_IOIM_SM_CLEANUP:
		bfa_ioim_notify_cleanup(ioim);
		break;

	case BFA_IOIM_SM_HWFAIL:
		/* IOC down implies firmware resources are gone */
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		break;

	default:
		bfa_sm_fault(ioim->bfa, event);
	}
}
1914
Jing Huang5fbe25c2010-10-18 17:17:23 -07001915/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001916 * IO is completed, waiting resource free from firmware.
1917 */
static void
bfa_ioim_sm_resfree(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
{
	bfa_trc(ioim->bfa, ioim->iotag);
	bfa_trc(ioim->bfa, event);

	switch (event) {
	case BFA_IOIM_SM_FREE:
		/* firmware released the IO tag - IO fully done, recycle */
		bfa_sm_set_state(ioim, bfa_ioim_sm_uninit);
		bfa_ioim_free(ioim);
		break;

	case BFA_IOIM_SM_CLEANUP:
		bfa_ioim_notify_cleanup(ioim);
		break;

	case BFA_IOIM_SM_HWFAIL:
		/* IOC failure: wait for the FREE that cleanup will fake */
		break;

	default:
		bfa_sm_fault(ioim->bfa, event);
	}
}
1941
1942
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001943static void
1944__bfa_cb_ioim_good_comp(void *cbarg, bfa_boolean_t complete)
1945{
1946 struct bfa_ioim_s *ioim = cbarg;
1947
1948 if (!complete) {
1949 bfa_sm_send_event(ioim, BFA_IOIM_SM_HCB);
1950 return;
1951 }
1952
1953 bfa_cb_ioim_good_comp(ioim->bfa->bfad, ioim->dio);
1954}
1955
static void
__bfa_cb_ioim_comp(void *cbarg, bfa_boolean_t complete)
{
	struct bfa_ioim_s *ioim = cbarg;
	struct bfi_ioim_rsp_s *m;
	u8	*snsinfo = NULL;
	u8	sns_len = 0;
	s32	residue = 0;

	/* callback is being flushed, not executed - just drive the SM */
	if (!complete) {
		bfa_sm_send_event(ioim, BFA_IOIM_SM_HCB);
		return;
	}

	/* response message was snapshotted at interrupt time */
	m = (struct bfi_ioim_rsp_s *) &ioim->iosp->comp_rspmsg;
	if (m->io_status == BFI_IOIM_STS_OK) {
		/*
		 * setup sense information, if present
		 */
		if ((m->scsi_status == SCSI_STATUS_CHECK_CONDITION) &&
		    m->sns_len) {
			sns_len = m->sns_len;
			snsinfo = BFA_SNSINFO_FROM_TAG(ioim->fcpim->fcp,
						ioim->iotag);
		}

		/*
		 * setup residue value correctly for normal completions
		 */
		if (m->resid_flags == FCP_RESID_UNDER) {
			residue = be32_to_cpu(m->residue);
			bfa_stats(ioim->itnim, iocomp_underrun);
		}
		if (m->resid_flags == FCP_RESID_OVER) {
			/* overrun is reported as a negative residue */
			residue = be32_to_cpu(m->residue);
			residue = -residue;
			bfa_stats(ioim->itnim, iocomp_overrun);
		}
	}

	bfa_cb_ioim_done(ioim->bfa->bfad, ioim->dio, m->io_status,
			  m->scsi_status, sns_len, snsinfo, residue);
}
1999
2000static void
2001__bfa_cb_ioim_failed(void *cbarg, bfa_boolean_t complete)
2002{
2003 struct bfa_ioim_s *ioim = cbarg;
2004
2005 if (!complete) {
2006 bfa_sm_send_event(ioim, BFA_IOIM_SM_HCB);
2007 return;
2008 }
2009
2010 bfa_cb_ioim_done(ioim->bfa->bfad, ioim->dio, BFI_IOIM_STS_ABORTED,
2011 0, 0, NULL, 0);
2012}
2013
2014static void
2015__bfa_cb_ioim_pathtov(void *cbarg, bfa_boolean_t complete)
2016{
2017 struct bfa_ioim_s *ioim = cbarg;
2018
2019 bfa_stats(ioim->itnim, path_tov_expired);
2020 if (!complete) {
2021 bfa_sm_send_event(ioim, BFA_IOIM_SM_HCB);
2022 return;
2023 }
2024
2025 bfa_cb_ioim_done(ioim->bfa->bfad, ioim->dio, BFI_IOIM_STS_PATHTOV,
2026 0, 0, NULL, 0);
2027}
2028
2029static void
2030__bfa_cb_ioim_abort(void *cbarg, bfa_boolean_t complete)
2031{
2032 struct bfa_ioim_s *ioim = cbarg;
2033
2034 if (!complete) {
2035 bfa_sm_send_event(ioim, BFA_IOIM_SM_HCB);
2036 return;
2037 }
2038
2039 bfa_cb_ioim_abort(ioim->bfa->bfad, ioim->dio);
2040}
2041
/*
 * SG page wait-queue callback: the requested SG pages are now available,
 * claim them and resume the IO state machine.
 */
static void
bfa_ioim_sgpg_alloced(void *cbarg)
{
	struct bfa_ioim_s *ioim = cbarg;

	ioim->nsgpgs = BFA_SGPG_NPAGE(ioim->nsges);
	/* move allocated pages from the wait element onto this IO's queue */
	list_splice_tail_init(&ioim->iosp->sgpg_wqe.sgpg_q, &ioim->sgpg_q);
	ioim->sgpg = bfa_q_first(&ioim->sgpg_q);
	bfa_sm_send_event(ioim, BFA_IOIM_SM_SGALLOCED);
}
2052
Jing Huang5fbe25c2010-10-18 17:17:23 -07002053/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002054 * Send I/O request to firmware.
2055 */
static bfa_boolean_t
bfa_ioim_send_ioreq(struct bfa_ioim_s *ioim)
{
	struct bfa_itnim_s *itnim = ioim->itnim;
	struct bfi_ioim_req_s *m;
	static struct fcp_cmnd_s cmnd_z0 = { { { 0 } } };
	struct bfi_sge_s *sge, *sgpge;
	u32	pgdlen = 0;
	u32	fcp_dl;
	u64	addr;
	struct scatterlist *sg;
	struct bfa_sgpg_s *sgpg;
	struct scsi_cmnd *cmnd = (struct scsi_cmnd *) ioim->dio;
	u32	i, sge_id, pgcumsz;
	enum dma_data_direction dmadir;

	/*
	 * check for room in queue to send request now
	 */
	m = bfa_reqq_next(ioim->bfa, ioim->reqq);
	if (!m) {
		bfa_stats(ioim->itnim, qwait);
		bfa_reqq_wait(ioim->bfa, ioim->reqq,
			&ioim->iosp->reqq_wait);
		return BFA_FALSE;
	}

	/*
	 * build i/o request message next
	 */
	m->io_tag = cpu_to_be16(ioim->iotag);
	m->rport_hdl = ioim->itnim->rport->fw_handle;
	m->io_timeout = 0;

	/* First SG element goes inline in the request message; the rest,
	 * if any, are chained through externally allocated SG pages. */
	sge = &m->sges[0];
	sgpg = ioim->sgpg;
	sge_id = 0;
	sgpge = NULL;
	pgcumsz = 0;
	scsi_for_each_sg(cmnd, sg, ioim->nsges, i) {
		if (i == 0) {
			/* build inline IO SG element */
			addr = bfa_sgaddr_le(sg_dma_address(sg));
			sge->sga = *(union bfi_addr_u *) &addr;
			pgdlen = sg_dma_len(sg);
			sge->sg_len = pgdlen;
			sge->flags = (ioim->nsges > BFI_SGE_INLINE) ?
					BFI_SGE_DATA_CPL : BFI_SGE_DATA_LAST;
			bfa_sge_to_be(sge);
			sge++;
		} else {
			if (sge_id == 0)
				sgpge = sgpg->sgpg->sges;

			addr = bfa_sgaddr_le(sg_dma_address(sg));
			sgpge->sga = *(union bfi_addr_u *) &addr;
			sgpge->sg_len = sg_dma_len(sg);
			pgcumsz += sgpge->sg_len;

			/* set flags */
			if (i < (ioim->nsges - 1) &&
					sge_id < (BFI_SGPG_DATA_SGES - 1))
				sgpge->flags = BFI_SGE_DATA;
			else if (i < (ioim->nsges - 1))
				sgpge->flags = BFI_SGE_DATA_CPL;
			else
				sgpge->flags = BFI_SGE_DATA_LAST;

			bfa_sge_to_le(sgpge);

			sgpge++;
			if (i == (ioim->nsges - 1)) {
				/* terminate the page with a PGDLEN element
				 * carrying the cumulative page length */
				sgpge->flags = BFI_SGE_PGDLEN;
				sgpge->sga.a32.addr_lo = 0;
				sgpge->sga.a32.addr_hi = 0;
				sgpge->sg_len = pgcumsz;
				bfa_sge_to_le(sgpge);
			} else if (++sge_id == BFI_SGPG_DATA_SGES) {
				/* page full - link to the next SG page */
				sgpg = (struct bfa_sgpg_s *) bfa_q_next(sgpg);
				sgpge->flags = BFI_SGE_LINK;
				sgpge->sga = sgpg->sgpg_pa;
				sgpge->sg_len = pgcumsz;
				bfa_sge_to_le(sgpge);
				sge_id = 0;
				pgcumsz = 0;
			}
		}
	}

	/* second inline element: link to first SG page, or empty PGDLEN */
	if (ioim->nsges > BFI_SGE_INLINE) {
		sge->sga = ioim->sgpg->sgpg_pa;
	} else {
		sge->sga.a32.addr_lo = 0;
		sge->sga.a32.addr_hi = 0;
	}
	sge->sg_len = pgdlen;
	sge->flags = BFI_SGE_PGDLEN;
	bfa_sge_to_be(sge);

	/*
	 * set up I/O command parameters
	 */
	m->cmnd = cmnd_z0;
	int_to_scsilun(cmnd->device->lun, &m->cmnd.lun);
	dmadir = cmnd->sc_data_direction;
	if (dmadir == DMA_TO_DEVICE)
		m->cmnd.iodir = FCP_IODIR_WRITE;
	else if (dmadir == DMA_FROM_DEVICE)
		m->cmnd.iodir = FCP_IODIR_READ;
	else
		m->cmnd.iodir = FCP_IODIR_NONE;

	m->cmnd.cdb = *(struct scsi_cdb_s *) cmnd->cmnd;
	fcp_dl = scsi_bufflen(cmnd);
	m->cmnd.fcp_dl = cpu_to_be32(fcp_dl);

	/*
	 * set up I/O message header
	 */
	switch (m->cmnd.iodir) {
	case FCP_IODIR_READ:
		bfi_h2i_set(m->mh, BFI_MC_IOIM_READ, 0, bfa_fn_lpu(ioim->bfa));
		bfa_stats(itnim, input_reqs);
		ioim->itnim->stats.rd_throughput += fcp_dl;
		break;
	case FCP_IODIR_WRITE:
		bfi_h2i_set(m->mh, BFI_MC_IOIM_WRITE, 0, bfa_fn_lpu(ioim->bfa));
		bfa_stats(itnim, output_reqs);
		ioim->itnim->stats.wr_throughput += fcp_dl;
		break;
	case FCP_IODIR_RW:
		bfa_stats(itnim, input_reqs);
		bfa_stats(itnim, output_reqs);
		/* fallthrough - bidirectional IO uses the generic opcode */
	default:
		bfi_h2i_set(m->mh, BFI_MC_IOIM_IO, 0, bfa_fn_lpu(ioim->bfa));
	}
	/* sequence-level recovery or unaligned length forces generic IO */
	if (itnim->seq_rec ||
	    (scsi_bufflen(cmnd) & (sizeof(u32) - 1)))
		bfi_h2i_set(m->mh, BFI_MC_IOIM_IO, 0, bfa_fn_lpu(ioim->bfa));

	/*
	 * queue I/O message to firmware
	 */
	bfa_reqq_produce(ioim->bfa, ioim->reqq, m->mh);
	return BFA_TRUE;
}
2202
Jing Huang5fbe25c2010-10-18 17:17:23 -07002203/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002204 * Setup any additional SG pages needed.Inline SG element is setup
2205 * at queuing time.
2206 */
static bfa_boolean_t
bfa_ioim_sgpg_alloc(struct bfa_ioim_s *ioim)
{
	u16	nsgpgs;

	/* caller guarantees the inline SG elements are insufficient */
	WARN_ON(ioim->nsges <= BFI_SGE_INLINE);

	/*
	 * allocate SG pages needed
	 */
	nsgpgs = BFA_SGPG_NPAGE(ioim->nsges);
	if (!nsgpgs)
		return BFA_TRUE;

	if (bfa_sgpg_malloc(ioim->bfa, &ioim->sgpg_q, nsgpgs)
	    != BFA_STATUS_OK) {
		/* pool exhausted - queue for callback when pages free up */
		bfa_sgpg_wait(ioim->bfa, &ioim->iosp->sgpg_wqe, nsgpgs);
		return BFA_FALSE;
	}

	ioim->nsgpgs = nsgpgs;
	ioim->sgpg = bfa_q_first(&ioim->sgpg_q);

	return BFA_TRUE;
}
2232
Jing Huang5fbe25c2010-10-18 17:17:23 -07002233/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002234 * Send I/O abort request to firmware.
2235 */
2236static bfa_boolean_t
2237bfa_ioim_send_abort(struct bfa_ioim_s *ioim)
2238{
2239 struct bfi_ioim_abort_req_s *m;
2240 enum bfi_ioim_h2i msgop;
2241
Jing Huang5fbe25c2010-10-18 17:17:23 -07002242 /*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002243 * check for room in queue to send request now
2244 */
2245 m = bfa_reqq_next(ioim->bfa, ioim->reqq);
2246 if (!m)
2247 return BFA_FALSE;
2248
Jing Huang5fbe25c2010-10-18 17:17:23 -07002249 /*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002250 * build i/o request message next
2251 */
2252 if (ioim->iosp->abort_explicit)
2253 msgop = BFI_IOIM_H2I_IOABORT_REQ;
2254 else
2255 msgop = BFI_IOIM_H2I_IOCLEANUP_REQ;
2256
Krishna Gudipati3fd45982011-06-24 20:24:08 -07002257 bfi_h2i_set(m->mh, BFI_MC_IOIM, msgop, bfa_fn_lpu(ioim->bfa));
Jing Huangba816ea2010-10-18 17:10:50 -07002258 m->io_tag = cpu_to_be16(ioim->iotag);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002259 m->abort_tag = ++ioim->abort_tag;
2260
Jing Huang5fbe25c2010-10-18 17:17:23 -07002261 /*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002262 * queue I/O message to firmware
2263 */
Krishna Gudipati3fd45982011-06-24 20:24:08 -07002264 bfa_reqq_produce(ioim->bfa, ioim->reqq, m->mh);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002265 return BFA_TRUE;
2266}
2267
Jing Huang5fbe25c2010-10-18 17:17:23 -07002268/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002269 * Call to resume any I/O requests waiting for room in request queue.
2270 */
/*
 * Request-queue wait callback: CQ space is available again, resume the
 * IO state machine.
 */
static void
bfa_ioim_qresume(void *cbarg)
{
	struct bfa_ioim_s *ioim = cbarg;

	bfa_stats(ioim->itnim, qresumes);
	bfa_sm_send_event(ioim, BFA_IOIM_SM_QRESUME);
}
2279
2280
static void
bfa_ioim_notify_cleanup(struct bfa_ioim_s *ioim)
{
	/*
	 * Move IO from itnim queue to fcpim global queue since itnim will be
	 * freed.
	 */
	list_del(&ioim->qe);
	list_add_tail(&ioim->qe, &ioim->fcpim->ioim_comp_q);

	if (!ioim->iosp->tskim) {
		/* itnim-initiated cleanup: optionally park the IO on the
		 * delayed-completion queue while the IO TOV timer runs */
		if (ioim->fcpim->delay_comp && ioim->itnim->iotov_active) {
			bfa_cb_dequeue(&ioim->hcb_qe);
			list_del(&ioim->qe);
			list_add_tail(&ioim->qe, &ioim->itnim->delay_comp_q);
		}
		bfa_itnim_iodone(ioim->itnim);
	} else
		/* TM-initiated cleanup: credit the task management command */
		bfa_wc_down(&ioim->iosp->tskim->wc);
}
2301
2302static bfa_boolean_t
2303bfa_ioim_is_abortable(struct bfa_ioim_s *ioim)
2304{
2305 if ((bfa_sm_cmp_state(ioim, bfa_ioim_sm_uninit) &&
2306 (!bfa_q_is_on_q(&ioim->itnim->pending_q, ioim))) ||
2307 (bfa_sm_cmp_state(ioim, bfa_ioim_sm_abort)) ||
2308 (bfa_sm_cmp_state(ioim, bfa_ioim_sm_abort_qfull)) ||
2309 (bfa_sm_cmp_state(ioim, bfa_ioim_sm_hcb)) ||
2310 (bfa_sm_cmp_state(ioim, bfa_ioim_sm_hcb_free)) ||
2311 (bfa_sm_cmp_state(ioim, bfa_ioim_sm_resfree)))
2312 return BFA_FALSE;
2313
2314 return BFA_TRUE;
2315}
2316
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002317void
2318bfa_ioim_delayed_comp(struct bfa_ioim_s *ioim, bfa_boolean_t iotov)
2319{
Jing Huang5fbe25c2010-10-18 17:17:23 -07002320 /*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002321 * If path tov timer expired, failback with PATHTOV status - these
2322 * IO requests are not normally retried by IO stack.
2323 *
2324 * Otherwise device cameback online and fail it with normal failed
2325 * status so that IO stack retries these failed IO requests.
2326 */
2327 if (iotov)
2328 ioim->io_cbfn = __bfa_cb_ioim_pathtov;
2329 else {
2330 ioim->io_cbfn = __bfa_cb_ioim_failed;
2331 bfa_stats(ioim->itnim, iocom_nexus_abort);
2332 }
2333 bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, ioim->io_cbfn, ioim);
2334
Jing Huang5fbe25c2010-10-18 17:17:23 -07002335 /*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002336 * Move IO to fcpim global queue since itnim will be
2337 * freed.
2338 */
2339 list_del(&ioim->qe);
2340 list_add_tail(&ioim->qe, &ioim->fcpim->ioim_comp_q);
2341}
2342
2343
Jing Huang5fbe25c2010-10-18 17:17:23 -07002344/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002345 * Memory allocation and initialization.
2346 */
void
bfa_ioim_attach(struct bfa_fcpim_s *fcpim, struct bfa_meminfo_s *minfo)
{
	struct bfa_ioim_s		*ioim;
	struct bfa_ioim_sp_s	*iosp;
	u16		i;

	/*
	 * claim memory first
	 */
	/* carve the IOIM array out of the pre-sized KVA region and advance
	 * the cursor past it */
	ioim = (struct bfa_ioim_s *) bfa_meminfo_kva(minfo);
	fcpim->ioim_arr = ioim;
	bfa_meminfo_kva(minfo) = (u8 *) (ioim + fcpim->fcp->num_ioim_reqs);

	/* parallel array of slow-path state, one entry per IOIM */
	iosp = (struct bfa_ioim_sp_s *) bfa_meminfo_kva(minfo);
	fcpim->ioim_sp_arr = iosp;
	bfa_meminfo_kva(minfo) = (u8 *) (iosp + fcpim->fcp->num_ioim_reqs);

	/*
	 * Initialize ioim free queues
	 */
	INIT_LIST_HEAD(&fcpim->ioim_resfree_q);
	INIT_LIST_HEAD(&fcpim->ioim_comp_q);

	for (i = 0; i < fcpim->fcp->num_ioim_reqs;
	     i++, ioim++, iosp++) {
		/*
		 * initialize IOIM
		 */
		memset(ioim, 0, sizeof(struct bfa_ioim_s));
		ioim->iotag   = i;
		ioim->bfa     = fcpim->bfa;
		ioim->fcpim   = fcpim;
		ioim->iosp    = iosp;
		INIT_LIST_HEAD(&ioim->sgpg_q);
		bfa_reqq_winit(&ioim->iosp->reqq_wait,
				   bfa_ioim_qresume, ioim);
		bfa_sgpg_winit(&ioim->iosp->sgpg_wqe,
				   bfa_ioim_sgpg_alloced, ioim);
		bfa_sm_set_state(ioim, bfa_ioim_sm_uninit);
	}
}
2389
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002390void
2391bfa_ioim_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
2392{
Krishna Gudipatie2187d72011-06-13 15:53:58 -07002393 struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002394 struct bfi_ioim_rsp_s *rsp = (struct bfi_ioim_rsp_s *) m;
2395 struct bfa_ioim_s *ioim;
2396 u16 iotag;
2397 enum bfa_ioim_event evt = BFA_IOIM_SM_COMP;
2398
Jing Huangba816ea2010-10-18 17:10:50 -07002399 iotag = be16_to_cpu(rsp->io_tag);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002400
2401 ioim = BFA_IOIM_FROM_TAG(fcpim, iotag);
Jing Huangd4b671c2010-12-26 21:46:35 -08002402 WARN_ON(ioim->iotag != iotag);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002403
2404 bfa_trc(ioim->bfa, ioim->iotag);
2405 bfa_trc(ioim->bfa, rsp->io_status);
2406 bfa_trc(ioim->bfa, rsp->reuse_io_tag);
2407
2408 if (bfa_sm_cmp_state(ioim, bfa_ioim_sm_active))
Jing Huang6a18b162010-10-18 17:08:54 -07002409 ioim->iosp->comp_rspmsg = *m;
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002410
2411 switch (rsp->io_status) {
2412 case BFI_IOIM_STS_OK:
2413 bfa_stats(ioim->itnim, iocomp_ok);
2414 if (rsp->reuse_io_tag == 0)
2415 evt = BFA_IOIM_SM_DONE;
2416 else
2417 evt = BFA_IOIM_SM_COMP;
2418 break;
2419
2420 case BFI_IOIM_STS_TIMEDOUT:
2421 bfa_stats(ioim->itnim, iocomp_timedout);
2422 case BFI_IOIM_STS_ABORTED:
2423 rsp->io_status = BFI_IOIM_STS_ABORTED;
2424 bfa_stats(ioim->itnim, iocomp_aborted);
2425 if (rsp->reuse_io_tag == 0)
2426 evt = BFA_IOIM_SM_DONE;
2427 else
2428 evt = BFA_IOIM_SM_COMP;
2429 break;
2430
2431 case BFI_IOIM_STS_PROTO_ERR:
2432 bfa_stats(ioim->itnim, iocom_proto_err);
Jing Huangd4b671c2010-12-26 21:46:35 -08002433 WARN_ON(!rsp->reuse_io_tag);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002434 evt = BFA_IOIM_SM_COMP;
2435 break;
2436
2437 case BFI_IOIM_STS_SQER_NEEDED:
2438 bfa_stats(ioim->itnim, iocom_sqer_needed);
Jing Huangd4b671c2010-12-26 21:46:35 -08002439 WARN_ON(rsp->reuse_io_tag != 0);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002440 evt = BFA_IOIM_SM_SQRETRY;
2441 break;
2442
2443 case BFI_IOIM_STS_RES_FREE:
2444 bfa_stats(ioim->itnim, iocom_res_free);
2445 evt = BFA_IOIM_SM_FREE;
2446 break;
2447
2448 case BFI_IOIM_STS_HOST_ABORTED:
2449 bfa_stats(ioim->itnim, iocom_hostabrts);
2450 if (rsp->abort_tag != ioim->abort_tag) {
2451 bfa_trc(ioim->bfa, rsp->abort_tag);
2452 bfa_trc(ioim->bfa, ioim->abort_tag);
2453 return;
2454 }
2455
2456 if (rsp->reuse_io_tag)
2457 evt = BFA_IOIM_SM_ABORT_COMP;
2458 else
2459 evt = BFA_IOIM_SM_ABORT_DONE;
2460 break;
2461
2462 case BFI_IOIM_STS_UTAG:
2463 bfa_stats(ioim->itnim, iocom_utags);
2464 evt = BFA_IOIM_SM_COMP_UTAG;
2465 break;
2466
2467 default:
Jing Huangd4b671c2010-12-26 21:46:35 -08002468 WARN_ON(1);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002469 }
2470
2471 bfa_sm_send_event(ioim, evt);
2472}
2473
/*
 * Interrupt-context handler for the firmware good-completion fast path:
 * no response parsing needed, just drive the COMP_GOOD event.
 */
void
bfa_ioim_good_comp_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
{
	struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa);
	struct bfi_ioim_rsp_s *rsp = (struct bfi_ioim_rsp_s *) m;
	struct bfa_ioim_s *ioim;
	u16	iotag;

	iotag = be16_to_cpu(rsp->io_tag);

	ioim = BFA_IOIM_FROM_TAG(fcpim, iotag);
	WARN_ON(BFA_IOIM_TAG_2_ID(ioim->iotag) != iotag);

	bfa_ioim_cb_profile_comp(fcpim, ioim);
	bfa_sm_send_event(ioim, BFA_IOIM_SM_COMP_GOOD);
}
2490
Jing Huang5fbe25c2010-10-18 17:17:23 -07002491/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002492 * Called by itnim to clean up IO while going offline.
2493 */
void
bfa_ioim_cleanup(struct bfa_ioim_s *ioim)
{
	bfa_trc(ioim->bfa, ioim->iotag);
	bfa_stats(ioim->itnim, io_cleanups);

	/* no task management command is driving this cleanup */
	ioim->iosp->tskim = NULL;
	bfa_sm_send_event(ioim, BFA_IOIM_SM_CLEANUP);
}
2503
/*
 * Clean up an IO on behalf of a task management command; completion is
 * credited to @tskim's wait counter instead of the itnim.
 */
void
bfa_ioim_cleanup_tm(struct bfa_ioim_s *ioim, struct bfa_tskim_s *tskim)
{
	bfa_trc(ioim->bfa, ioim->iotag);
	bfa_stats(ioim->itnim, io_tmaborts);

	ioim->iosp->tskim = tskim;
	bfa_sm_send_event(ioim, BFA_IOIM_SM_CLEANUP);
}
2513
Jing Huang5fbe25c2010-10-18 17:17:23 -07002514/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002515 * IOC failure handling.
2516 */
void
bfa_ioim_iocdisable(struct bfa_ioim_s *ioim)
{
	bfa_trc(ioim->bfa, ioim->iotag);
	bfa_stats(ioim->itnim, io_iocdowns);
	/* IOC is gone - fail the IO through the state machine */
	bfa_sm_send_event(ioim, BFA_IOIM_SM_HWFAIL);
}
2524
Jing Huang5fbe25c2010-10-18 17:17:23 -07002525/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002526 * IO offline TOV popped. Fail the pending IO.
2527 */
void
bfa_ioim_tov(struct bfa_ioim_s *ioim)
{
	bfa_trc(ioim->bfa, ioim->iotag);
	bfa_sm_send_event(ioim, BFA_IOIM_SM_IOTOV);
}
2534
2535
Jing Huang5fbe25c2010-10-18 17:17:23 -07002536/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002537 * Allocate IOIM resource for initiator mode I/O request.
2538 */
struct bfa_ioim_s *
bfa_ioim_alloc(struct bfa_s *bfa, struct bfad_ioim_s *dio,
		struct bfa_itnim_s *itnim, u16 nsges)
{
	struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa);
	struct bfa_ioim_s *ioim;
	struct bfa_iotag_s *iotag = NULL;

	/*
	 * alocate IOIM resource
	 */
	bfa_q_deq(&fcpim->fcp->iotag_ioim_free_q, &iotag);
	if (!iotag) {
		/* tag pool exhausted - caller must retry the request later */
		bfa_stats(itnim, no_iotags);
		return NULL;
	}

	ioim = BFA_IOIM_FROM_TAG(fcpim, iotag->tag);

	ioim->dio = dio;
	ioim->itnim = itnim;
	ioim->nsges = nsges;
	ioim->nsgpgs = 0;

	bfa_stats(itnim, total_ios);
	fcpim->ios_active++;

	/* IO is owned by the itnim until completed or cleaned up */
	list_add_tail(&ioim->qe, &itnim->io_q);

	return ioim;
}
2570
void
bfa_ioim_free(struct bfa_ioim_s *ioim)
{
	struct bfa_fcpim_s *fcpim = ioim->fcpim;
	struct bfa_iotag_s *iotag;

	if (ioim->nsgpgs > 0)
		bfa_sgpg_mfree(ioim->bfa, &ioim->sgpg_q, ioim->nsgpgs);

	bfa_stats(ioim->itnim, io_comps);
	fcpim->ios_active--;

	/* strip any high tag bits so the tag indexes the arrays again */
	ioim->iotag &= BFA_IOIM_IOTAG_MASK;

	WARN_ON(!(ioim->iotag <
		 (fcpim->fcp->num_ioim_reqs + fcpim->fcp->num_fwtio_reqs)));
	iotag = BFA_IOTAG_FROM_TAG(fcpim->fcp, ioim->iotag);

	/* return the tag to the pool it came from (ioim vs fw-tio range) */
	if (ioim->iotag < fcpim->fcp->num_ioim_reqs)
		list_add_tail(&iotag->qe, &fcpim->fcp->iotag_ioim_free_q);
	else
		list_add_tail(&iotag->qe, &fcpim->fcp->iotag_tio_free_q);

	list_del(&ioim->qe);
}
2596
void
bfa_ioim_start(struct bfa_ioim_s *ioim)
{
	bfa_ioim_cb_profile_start(ioim->fcpim, ioim);

	/*
	 * Obtain the queue over which this request has to be issued
	 */
	/* NOTE(review): with ioredirect enabled, reqq is set to BFA_FALSE
	 * (queue 0); presumably the redirect path selects the real queue
	 * later - confirm against bfa_fcpim ioredirect handling. */
	ioim->reqq = bfa_fcpim_ioredirect_enabled(ioim->bfa) ?
			BFA_FALSE : bfa_itnim_get_reqq(ioim);

	bfa_sm_send_event(ioim, BFA_IOIM_SM_START);
}
2610
Jing Huang5fbe25c2010-10-18 17:17:23 -07002611/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002612 * Driver I/O abort request.
2613 */
2614bfa_status_t
2615bfa_ioim_abort(struct bfa_ioim_s *ioim)
2616{
2617
2618 bfa_trc(ioim->bfa, ioim->iotag);
2619
2620 if (!bfa_ioim_is_abortable(ioim))
2621 return BFA_STATUS_FAILED;
2622
2623 bfa_stats(ioim->itnim, io_aborts);
2624 bfa_sm_send_event(ioim, BFA_IOIM_SM_ABORT);
2625
2626 return BFA_STATUS_OK;
2627}
2628
Jing Huang5fbe25c2010-10-18 17:17:23 -07002629/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002630 * BFA TSKIM state machine functions
2631 */
2632
Jing Huang5fbe25c2010-10-18 17:17:23 -07002633/*
Maggie Zhangda99dcc2010-12-09 19:13:20 -08002634 * Task management command beginning state.
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002635 */
static void
bfa_tskim_sm_uninit(struct bfa_tskim_s *tskim, enum bfa_tskim_event event)
{
	bfa_trc(tskim->bfa, event);

	switch (event) {
	case BFA_TSKIM_SM_START:
		bfa_sm_set_state(tskim, bfa_tskim_sm_active);
		/* collect all IOs that fall within this TM's scope */
		bfa_tskim_gather_ios(tskim);

		/*
		 * If device is offline, do not send TM on wire. Just cleanup
		 * any pending IO requests and complete TM request.
		 */
		if (!bfa_itnim_is_online(tskim->itnim)) {
			bfa_sm_set_state(tskim, bfa_tskim_sm_iocleanup);
			tskim->tsk_status = BFI_TSKIM_STS_OK;
			bfa_tskim_cleanup_ios(tskim);
			return;
		}

		/* request CQ full - wait for space, then resume via QRESUME */
		if (!bfa_tskim_send(tskim)) {
			bfa_sm_set_state(tskim, bfa_tskim_sm_qfull);
			bfa_stats(tskim->itnim, tm_qwait);
			bfa_reqq_wait(tskim->bfa, tskim->itnim->reqq,
					  &tskim->reqq_wait);
		}
		break;

	default:
		bfa_sm_fault(tskim->bfa, event);
	}
}
2669
Jing Huang5fbe25c2010-10-18 17:17:23 -07002670/*
Maggie Zhangda99dcc2010-12-09 19:13:20 -08002671 * TM command is active, awaiting completion from firmware to
2672 * cleanup IO requests in TM scope.
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002673 */
static void
bfa_tskim_sm_active(struct bfa_tskim_s *tskim, enum bfa_tskim_event event)
{
	bfa_trc(tskim->bfa, event);

	switch (event) {
	case BFA_TSKIM_SM_DONE:
		/* firmware completed the TM; now reap IOs in its scope */
		bfa_sm_set_state(tskim, bfa_tskim_sm_iocleanup);
		bfa_tskim_cleanup_ios(tskim);
		break;

	case BFA_TSKIM_SM_CLEANUP:
		/* itnim went offline - abort the TM on the wire */
		bfa_sm_set_state(tskim, bfa_tskim_sm_cleanup);
		if (!bfa_tskim_send_abort(tskim)) {
			bfa_sm_set_state(tskim, bfa_tskim_sm_cleanup_qfull);
			bfa_stats(tskim->itnim, tm_qwait);
			bfa_reqq_wait(tskim->bfa, tskim->itnim->reqq,
				&tskim->reqq_wait);
		}
		break;

	case BFA_TSKIM_SM_HWFAIL:
		/* IOC died - fail the TM and its IOs back to the host */
		bfa_sm_set_state(tskim, bfa_tskim_sm_hcb);
		bfa_tskim_iocdisable_ios(tskim);
		bfa_tskim_qcomp(tskim, __bfa_cb_tskim_failed);
		break;

	default:
		bfa_sm_fault(tskim->bfa, event);
	}
}
2705
Jing Huang5fbe25c2010-10-18 17:17:23 -07002706/*
Maggie Zhangda99dcc2010-12-09 19:13:20 -08002707 * An active TM is being cleaned up since ITN is offline. Awaiting cleanup
2708 * completion event from firmware.
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002709 */
static void
bfa_tskim_sm_cleanup(struct bfa_tskim_s *tskim, enum bfa_tskim_event event)
{
	bfa_trc(tskim->bfa, event);

	switch (event) {
	case BFA_TSKIM_SM_DONE:
		/*
		 * Ignore and wait for ABORT completion from firmware.
		 */
		break;

	case BFA_TSKIM_SM_CLEANUP_DONE:
		/* abort acknowledged - proceed to reap the scoped IOs */
		bfa_sm_set_state(tskim, bfa_tskim_sm_iocleanup);
		bfa_tskim_cleanup_ios(tskim);
		break;

	case BFA_TSKIM_SM_HWFAIL:
		bfa_sm_set_state(tskim, bfa_tskim_sm_hcb);
		bfa_tskim_iocdisable_ios(tskim);
		bfa_tskim_qcomp(tskim, __bfa_cb_tskim_failed);
		break;

	default:
		bfa_sm_fault(tskim->bfa, event);
	}
}
2737
/*
 * IO cleanup for the TM scope is in progress; awaiting completion of
 * all affected IO requests.
 */
static void
bfa_tskim_sm_iocleanup(struct bfa_tskim_s *tskim, enum bfa_tskim_event event)
{
	bfa_trc(tskim->bfa, event);

	switch (event) {
	case BFA_TSKIM_SM_IOS_DONE:
		/* All IOs in scope finished; schedule completion callback. */
		bfa_sm_set_state(tskim, bfa_tskim_sm_hcb);
		bfa_tskim_qcomp(tskim, __bfa_cb_tskim_done);
		break;

	case BFA_TSKIM_SM_CLEANUP:
		/*
		 * Ignore, TM command completed on wire.
		 * Notify TM completion on IO cleanup completion.
		 */
		break;

	case BFA_TSKIM_SM_HWFAIL:
		/* IOC failure: fail remaining IOs locally and complete. */
		bfa_sm_set_state(tskim, bfa_tskim_sm_hcb);
		bfa_tskim_iocdisable_ios(tskim);
		bfa_tskim_qcomp(tskim, __bfa_cb_tskim_failed);
		break;

	default:
		bfa_sm_fault(tskim->bfa, event);
	}
}
2766
/*
 * Task management command is waiting for room in request CQ
 */
static void
bfa_tskim_sm_qfull(struct bfa_tskim_s *tskim, enum bfa_tskim_event event)
{
	bfa_trc(tskim->bfa, event);

	switch (event) {
	case BFA_TSKIM_SM_QRESUME:
		/* Queue space became available; send the TM request now. */
		bfa_sm_set_state(tskim, bfa_tskim_sm_active);
		bfa_tskim_send(tskim);
		break;

	case BFA_TSKIM_SM_CLEANUP:
		/*
		 * No need to send TM on wire since ITN is offline.
		 */
		bfa_sm_set_state(tskim, bfa_tskim_sm_iocleanup);
		bfa_reqq_wcancel(&tskim->reqq_wait);
		bfa_tskim_cleanup_ios(tskim);
		break;

	case BFA_TSKIM_SM_HWFAIL:
		/* IOC failure while queued: cancel wait, fail IOs locally. */
		bfa_sm_set_state(tskim, bfa_tskim_sm_hcb);
		bfa_reqq_wcancel(&tskim->reqq_wait);
		bfa_tskim_iocdisable_ios(tskim);
		bfa_tskim_qcomp(tskim, __bfa_cb_tskim_failed);
		break;

	default:
		bfa_sm_fault(tskim->bfa, event);
	}
}
2801
/*
 * Task management command is active, awaiting for room in request CQ
 * to send clean up request.
 */
static void
bfa_tskim_sm_cleanup_qfull(struct bfa_tskim_s *tskim,
		enum bfa_tskim_event event)
{
	bfa_trc(tskim->bfa, event);

	switch (event) {
	case BFA_TSKIM_SM_DONE:
		/* TM completed on its own; the abort is still sent below. */
		bfa_reqq_wcancel(&tskim->reqq_wait);
		/*
		 * Fall through !!!
		 */
	case BFA_TSKIM_SM_QRESUME:
		bfa_sm_set_state(tskim, bfa_tskim_sm_cleanup);
		bfa_tskim_send_abort(tskim);
		break;

	case BFA_TSKIM_SM_HWFAIL:
		/* IOC failure: cancel the queue wait and fail IOs locally. */
		bfa_sm_set_state(tskim, bfa_tskim_sm_hcb);
		bfa_reqq_wcancel(&tskim->reqq_wait);
		bfa_tskim_iocdisable_ios(tskim);
		bfa_tskim_qcomp(tskim, __bfa_cb_tskim_failed);
		break;

	default:
		bfa_sm_fault(tskim->bfa, event);
	}
}
2834
/*
 * BFA callback is pending
 */
static void
bfa_tskim_sm_hcb(struct bfa_tskim_s *tskim, enum bfa_tskim_event event)
{
	bfa_trc(tskim->bfa, event);

	switch (event) {
	case BFA_TSKIM_SM_HCB:
		/* Callback delivered; return the TM instance to the pool. */
		bfa_sm_set_state(tskim, bfa_tskim_sm_uninit);
		bfa_tskim_free(tskim);
		break;

	case BFA_TSKIM_SM_CLEANUP:
		/* ITN offline while callback pending; notify cleanup. */
		bfa_tskim_notify_comp(tskim);
		break;

	case BFA_TSKIM_SM_HWFAIL:
		/* Already completing; nothing more to do on IOC failure. */
		break;

	default:
		bfa_sm_fault(tskim->bfa, event);
	}
}
2860
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002861static void
2862__bfa_cb_tskim_done(void *cbarg, bfa_boolean_t complete)
2863{
2864 struct bfa_tskim_s *tskim = cbarg;
2865
2866 if (!complete) {
2867 bfa_sm_send_event(tskim, BFA_TSKIM_SM_HCB);
2868 return;
2869 }
2870
2871 bfa_stats(tskim->itnim, tm_success);
2872 bfa_cb_tskim_done(tskim->bfa->bfad, tskim->dtsk, tskim->tsk_status);
2873}
2874
2875static void
2876__bfa_cb_tskim_failed(void *cbarg, bfa_boolean_t complete)
2877{
2878 struct bfa_tskim_s *tskim = cbarg;
2879
2880 if (!complete) {
2881 bfa_sm_send_event(tskim, BFA_TSKIM_SM_HCB);
2882 return;
2883 }
2884
2885 bfa_stats(tskim->itnim, tm_failures);
2886 bfa_cb_tskim_done(tskim->bfa->bfad, tskim->dtsk,
2887 BFI_TSKIM_STS_FAILED);
2888}
2889
Maggie Zhangda99dcc2010-12-09 19:13:20 -08002890static bfa_boolean_t
Maggie Zhangf3148782010-12-09 19:11:39 -08002891bfa_tskim_match_scope(struct bfa_tskim_s *tskim, struct scsi_lun lun)
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002892{
2893 switch (tskim->tm_cmnd) {
2894 case FCP_TM_TARGET_RESET:
2895 return BFA_TRUE;
2896
2897 case FCP_TM_ABORT_TASK_SET:
2898 case FCP_TM_CLEAR_TASK_SET:
2899 case FCP_TM_LUN_RESET:
2900 case FCP_TM_CLEAR_ACA:
Maggie Zhangda99dcc2010-12-09 19:13:20 -08002901 return !memcmp(&tskim->lun, &lun, sizeof(lun));
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002902
2903 default:
Jing Huangd4b671c2010-12-26 21:46:35 -08002904 WARN_ON(1);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002905 }
2906
2907 return BFA_FALSE;
2908}
2909
Jing Huang5fbe25c2010-10-18 17:17:23 -07002910/*
Maggie Zhangda99dcc2010-12-09 19:13:20 -08002911 * Gather affected IO requests and task management commands.
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002912 */
2913static void
2914bfa_tskim_gather_ios(struct bfa_tskim_s *tskim)
2915{
2916 struct bfa_itnim_s *itnim = tskim->itnim;
2917 struct bfa_ioim_s *ioim;
Maggie Zhangf3148782010-12-09 19:11:39 -08002918 struct list_head *qe, *qen;
2919 struct scsi_cmnd *cmnd;
2920 struct scsi_lun scsilun;
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002921
2922 INIT_LIST_HEAD(&tskim->io_q);
2923
Jing Huang5fbe25c2010-10-18 17:17:23 -07002924 /*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002925 * Gather any active IO requests first.
2926 */
2927 list_for_each_safe(qe, qen, &itnim->io_q) {
2928 ioim = (struct bfa_ioim_s *) qe;
Maggie Zhangf3148782010-12-09 19:11:39 -08002929 cmnd = (struct scsi_cmnd *) ioim->dio;
2930 int_to_scsilun(cmnd->device->lun, &scsilun);
2931 if (bfa_tskim_match_scope(tskim, scsilun)) {
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002932 list_del(&ioim->qe);
2933 list_add_tail(&ioim->qe, &tskim->io_q);
2934 }
2935 }
2936
Jing Huang5fbe25c2010-10-18 17:17:23 -07002937 /*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002938 * Failback any pending IO requests immediately.
2939 */
2940 list_for_each_safe(qe, qen, &itnim->pending_q) {
2941 ioim = (struct bfa_ioim_s *) qe;
Maggie Zhangf3148782010-12-09 19:11:39 -08002942 cmnd = (struct scsi_cmnd *) ioim->dio;
2943 int_to_scsilun(cmnd->device->lun, &scsilun);
2944 if (bfa_tskim_match_scope(tskim, scsilun)) {
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002945 list_del(&ioim->qe);
2946 list_add_tail(&ioim->qe, &ioim->fcpim->ioim_comp_q);
2947 bfa_ioim_tov(ioim);
2948 }
2949 }
2950}
2951
/*
 * IO cleanup completion: the wait-counter for all IOs in TM scope
 * reached zero, so advance the TM state machine.
 */
static void
bfa_tskim_cleanp_comp(void *tskim_cbarg)
{
	struct bfa_tskim_s *tskim = tskim_cbarg;

	bfa_stats(tskim->itnim, tm_io_comps);
	bfa_sm_send_event(tskim, BFA_TSKIM_SM_IOS_DONE);
}
2963
Jing Huang5fbe25c2010-10-18 17:17:23 -07002964/*
Maggie Zhangda99dcc2010-12-09 19:13:20 -08002965 * Gather affected IO requests and task management commands.
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002966 */
2967static void
2968bfa_tskim_cleanup_ios(struct bfa_tskim_s *tskim)
2969{
2970 struct bfa_ioim_s *ioim;
2971 struct list_head *qe, *qen;
2972
2973 bfa_wc_init(&tskim->wc, bfa_tskim_cleanp_comp, tskim);
2974
2975 list_for_each_safe(qe, qen, &tskim->io_q) {
2976 ioim = (struct bfa_ioim_s *) qe;
2977 bfa_wc_up(&tskim->wc);
2978 bfa_ioim_cleanup_tm(ioim, tskim);
2979 }
2980
2981 bfa_wc_wait(&tskim->wc);
2982}
2983
Jing Huang5fbe25c2010-10-18 17:17:23 -07002984/*
Maggie Zhangda99dcc2010-12-09 19:13:20 -08002985 * Send task management request to firmware.
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002986 */
2987static bfa_boolean_t
2988bfa_tskim_send(struct bfa_tskim_s *tskim)
2989{
2990 struct bfa_itnim_s *itnim = tskim->itnim;
2991 struct bfi_tskim_req_s *m;
2992
Jing Huang5fbe25c2010-10-18 17:17:23 -07002993 /*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002994 * check for room in queue to send request now
2995 */
2996 m = bfa_reqq_next(tskim->bfa, itnim->reqq);
2997 if (!m)
2998 return BFA_FALSE;
2999
Jing Huang5fbe25c2010-10-18 17:17:23 -07003000 /*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07003001 * build i/o request message next
3002 */
3003 bfi_h2i_set(m->mh, BFI_MC_TSKIM, BFI_TSKIM_H2I_TM_REQ,
Krishna Gudipati3fd45982011-06-24 20:24:08 -07003004 bfa_fn_lpu(tskim->bfa));
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07003005
Jing Huangba816ea2010-10-18 17:10:50 -07003006 m->tsk_tag = cpu_to_be16(tskim->tsk_tag);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07003007 m->itn_fhdl = tskim->itnim->rport->fw_handle;
3008 m->t_secs = tskim->tsecs;
3009 m->lun = tskim->lun;
3010 m->tm_flags = tskim->tm_cmnd;
3011
Jing Huang5fbe25c2010-10-18 17:17:23 -07003012 /*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07003013 * queue I/O message to firmware
3014 */
Krishna Gudipati3fd45982011-06-24 20:24:08 -07003015 bfa_reqq_produce(tskim->bfa, itnim->reqq, m->mh);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07003016 return BFA_TRUE;
3017}
3018
Jing Huang5fbe25c2010-10-18 17:17:23 -07003019/*
Maggie Zhangda99dcc2010-12-09 19:13:20 -08003020 * Send abort request to cleanup an active TM to firmware.
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07003021 */
3022static bfa_boolean_t
3023bfa_tskim_send_abort(struct bfa_tskim_s *tskim)
3024{
3025 struct bfa_itnim_s *itnim = tskim->itnim;
3026 struct bfi_tskim_abortreq_s *m;
3027
Jing Huang5fbe25c2010-10-18 17:17:23 -07003028 /*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07003029 * check for room in queue to send request now
3030 */
3031 m = bfa_reqq_next(tskim->bfa, itnim->reqq);
3032 if (!m)
3033 return BFA_FALSE;
3034
Jing Huang5fbe25c2010-10-18 17:17:23 -07003035 /*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07003036 * build i/o request message next
3037 */
3038 bfi_h2i_set(m->mh, BFI_MC_TSKIM, BFI_TSKIM_H2I_ABORT_REQ,
Krishna Gudipati3fd45982011-06-24 20:24:08 -07003039 bfa_fn_lpu(tskim->bfa));
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07003040
Jing Huangba816ea2010-10-18 17:10:50 -07003041 m->tsk_tag = cpu_to_be16(tskim->tsk_tag);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07003042
Jing Huang5fbe25c2010-10-18 17:17:23 -07003043 /*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07003044 * queue I/O message to firmware
3045 */
Krishna Gudipati3fd45982011-06-24 20:24:08 -07003046 bfa_reqq_produce(tskim->bfa, itnim->reqq, m->mh);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07003047 return BFA_TRUE;
3048}
3049
/*
 * Call to resume task management cmnd waiting for room in request queue.
 */
static void
bfa_tskim_qresume(void *cbarg)
{
	struct bfa_tskim_s *tskim = cbarg;	/* reqq-wait callback arg */

	bfa_stats(tskim->itnim, tm_qresumes);
	bfa_sm_send_event(tskim, BFA_TSKIM_SM_QRESUME);
}
3061
Jing Huang5fbe25c2010-10-18 17:17:23 -07003062/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07003063 * Cleanup IOs associated with a task mangement command on IOC failures.
3064 */
3065static void
3066bfa_tskim_iocdisable_ios(struct bfa_tskim_s *tskim)
3067{
3068 struct bfa_ioim_s *ioim;
3069 struct list_head *qe, *qen;
3070
3071 list_for_each_safe(qe, qen, &tskim->io_q) {
3072 ioim = (struct bfa_ioim_s *) qe;
3073 bfa_ioim_iocdisable(ioim);
3074 }
3075}
3076
/*
 * Notification on completions from related ioim.
 */
void
bfa_tskim_iodone(struct bfa_tskim_s *tskim)
{
	/*
	 * One IO in TM scope finished; when the wait-counter drops to
	 * zero, bfa_tskim_cleanp_comp() fires BFA_TSKIM_SM_IOS_DONE.
	 */
	bfa_wc_down(&tskim->wc);
}
3085
/*
 * Handle IOC h/w failure notification from itnim.
 */
void
bfa_tskim_iocdisable(struct bfa_tskim_s *tskim)
{
	/* Clear notify before driving the state machine. */
	tskim->notify = BFA_FALSE;
	bfa_stats(tskim->itnim, tm_iocdowns);
	bfa_sm_send_event(tskim, BFA_TSKIM_SM_HWFAIL);
}
3096
/*
 * Cleanup TM command and associated IOs as part of ITNIM offline.
 */
void
bfa_tskim_cleanup(struct bfa_tskim_s *tskim)
{
	/* Set notify before driving the state machine. */
	tskim->notify = BFA_TRUE;
	bfa_stats(tskim->itnim, tm_cleanups);
	bfa_sm_send_event(tskim, BFA_TSKIM_SM_CLEANUP);
}
3107
Jing Huang5fbe25c2010-10-18 17:17:23 -07003108/*
Maggie Zhangda99dcc2010-12-09 19:13:20 -08003109 * Memory allocation and initialization.
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07003110 */
3111void
Krishna Gudipatie2187d72011-06-13 15:53:58 -07003112bfa_tskim_attach(struct bfa_fcpim_s *fcpim, struct bfa_meminfo_s *minfo)
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07003113{
3114 struct bfa_tskim_s *tskim;
3115 u16 i;
3116
3117 INIT_LIST_HEAD(&fcpim->tskim_free_q);
Krishna Gudipati3fd45982011-06-24 20:24:08 -07003118 INIT_LIST_HEAD(&fcpim->tskim_unused_q);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07003119
3120 tskim = (struct bfa_tskim_s *) bfa_meminfo_kva(minfo);
3121 fcpim->tskim_arr = tskim;
3122
3123 for (i = 0; i < fcpim->num_tskim_reqs; i++, tskim++) {
3124 /*
3125 * initialize TSKIM
3126 */
Jing Huang6a18b162010-10-18 17:08:54 -07003127 memset(tskim, 0, sizeof(struct bfa_tskim_s));
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07003128 tskim->tsk_tag = i;
3129 tskim->bfa = fcpim->bfa;
3130 tskim->fcpim = fcpim;
3131 tskim->notify = BFA_FALSE;
3132 bfa_reqq_winit(&tskim->reqq_wait, bfa_tskim_qresume,
3133 tskim);
3134 bfa_sm_set_state(tskim, bfa_tskim_sm_uninit);
3135
3136 list_add_tail(&tskim->qe, &fcpim->tskim_free_q);
3137 }
3138
3139 bfa_meminfo_kva(minfo) = (u8 *) tskim;
3140}
3141
3142void
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07003143bfa_tskim_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
3144{
Krishna Gudipatie2187d72011-06-13 15:53:58 -07003145 struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07003146 struct bfi_tskim_rsp_s *rsp = (struct bfi_tskim_rsp_s *) m;
3147 struct bfa_tskim_s *tskim;
Jing Huangba816ea2010-10-18 17:10:50 -07003148 u16 tsk_tag = be16_to_cpu(rsp->tsk_tag);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07003149
3150 tskim = BFA_TSKIM_FROM_TAG(fcpim, tsk_tag);
Jing Huangd4b671c2010-12-26 21:46:35 -08003151 WARN_ON(tskim->tsk_tag != tsk_tag);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07003152
3153 tskim->tsk_status = rsp->tsk_status;
3154
Jing Huang5fbe25c2010-10-18 17:17:23 -07003155 /*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07003156 * Firmware sends BFI_TSKIM_STS_ABORTED status for abort
3157 * requests. All other statuses are for normal completions.
3158 */
3159 if (rsp->tsk_status == BFI_TSKIM_STS_ABORTED) {
3160 bfa_stats(tskim->itnim, tm_cleanup_comps);
3161 bfa_sm_send_event(tskim, BFA_TSKIM_SM_CLEANUP_DONE);
3162 } else {
3163 bfa_stats(tskim->itnim, tm_fw_rsps);
3164 bfa_sm_send_event(tskim, BFA_TSKIM_SM_DONE);
3165 }
3166}
3167
3168
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07003169struct bfa_tskim_s *
3170bfa_tskim_alloc(struct bfa_s *bfa, struct bfad_tskim_s *dtsk)
3171{
Krishna Gudipatie2187d72011-06-13 15:53:58 -07003172 struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07003173 struct bfa_tskim_s *tskim;
3174
3175 bfa_q_deq(&fcpim->tskim_free_q, &tskim);
3176
3177 if (tskim)
3178 tskim->dtsk = dtsk;
3179
3180 return tskim;
3181}
3182
/*
 * Return a TM instance to the free pool.
 */
void
bfa_tskim_free(struct bfa_tskim_s *tskim)
{
	/* Must still be linked on the owning itnim's TM queue. */
	WARN_ON(!bfa_q_is_on_q_func(&tskim->itnim->tsk_q, &tskim->qe));
	list_del(&tskim->qe);
	list_add_tail(&tskim->qe, &tskim->fcpim->tskim_free_q);
}
3190
/*
 * Start a task management command.
 *
 * @param[in]	tskim	BFA task management command instance
 * @param[in]	itnim	i-t nexus for the task management command
 * @param[in]	lun	lun, if applicable
 * @param[in]	tm_cmnd	Task management command code.
 * @param[in]	tsecs	Timeout in seconds
 *
 * @return None.
 */
void
bfa_tskim_start(struct bfa_tskim_s *tskim, struct bfa_itnim_s *itnim,
		struct scsi_lun lun,
		enum fcp_tm_cmnd tm_cmnd, u8 tsecs)
{
	tskim->itnim	= itnim;
	tskim->lun	= lun;
	tskim->tm_cmnd	= tm_cmnd;
	tskim->tsecs	= tsecs;
	tskim->notify	= BFA_FALSE;
	bfa_stats(itnim, tm_cmnds);

	/* Track the TM on the itnim and kick off the state machine. */
	list_add_tail(&tskim->qe, &itnim->tsk_q);
	bfa_sm_send_event(tskim, BFA_TSKIM_SM_START);
}
Krishna Gudipatie2187d72011-06-13 15:53:58 -07003217
Krishna Gudipati3fd45982011-06-24 20:24:08 -07003218void
3219bfa_tskim_res_recfg(struct bfa_s *bfa, u16 num_tskim_fw)
3220{
3221 struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa);
3222 struct list_head *qe;
3223 int i;
3224
3225 for (i = 0; i < (fcpim->num_tskim_reqs - num_tskim_fw); i++) {
3226 bfa_q_deq_tail(&fcpim->tskim_free_q, &qe);
3227 list_add_tail(qe, &fcpim->tskim_unused_q);
3228 }
3229}
3230
/* BFA FCP module - parent module for fcpim */

/*
 * NOTE(review): BFA_MODULE() presumably registers the bfa_fcp_meminfo/
 * attach/detach/start/stop/iocdisable hooks defined below — confirm
 * against the macro definition in bfa_modules.h.
 */
BFA_MODULE(fcp);
3234
/*
 * Clamp the requested IO counts to supported limits and accumulate the
 * kernel-memory (km) and DMA-memory (dm) requirements for the FCP module.
 */
static void
bfa_fcp_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *km_len, u32 *dm_len)
{
	u16 num_io_req;

	/*
	 * ZERO for num_ioim_reqs and num_fwtio_reqs is allowed config value.
	 * So if the values are non zero, adjust them appropriately.
	 */
	if (cfg->fwcfg.num_ioim_reqs &&
	    cfg->fwcfg.num_ioim_reqs < BFA_IOIM_MIN)
		cfg->fwcfg.num_ioim_reqs = BFA_IOIM_MIN;
	else if (cfg->fwcfg.num_ioim_reqs > BFA_IOIM_MAX)
		cfg->fwcfg.num_ioim_reqs = BFA_IOIM_MAX;

	if (cfg->fwcfg.num_fwtio_reqs > BFA_FWTIO_MAX)
		cfg->fwcfg.num_fwtio_reqs = BFA_FWTIO_MAX;

	/* If the combined total exceeds BFA_IO_MAX, scale the counts back. */
	num_io_req = (cfg->fwcfg.num_ioim_reqs + cfg->fwcfg.num_fwtio_reqs);
	if (num_io_req > BFA_IO_MAX) {
		if (cfg->fwcfg.num_ioim_reqs && cfg->fwcfg.num_fwtio_reqs) {
			cfg->fwcfg.num_ioim_reqs = BFA_IO_MAX/2;
			cfg->fwcfg.num_fwtio_reqs = BFA_IO_MAX/2;
		} else if (cfg->fwcfg.num_fwtio_reqs)
			cfg->fwcfg.num_fwtio_reqs = BFA_FWTIO_MAX;
		else
			cfg->fwcfg.num_ioim_reqs = BFA_IOIM_MAX;
	}

	bfa_fcpim_meminfo(cfg, km_len, dm_len);

	/* Account for per-IO tags, per-rport ITNs and sense buffers. */
	num_io_req = (cfg->fwcfg.num_ioim_reqs + cfg->fwcfg.num_fwtio_reqs);
	*km_len += num_io_req * sizeof(struct bfa_iotag_s);
	*km_len += cfg->fwcfg.num_rports * sizeof(struct bfa_itn_s);
	*dm_len += num_io_req * BFI_IOIM_SNSLEN;
}
3271
/*
 * Attach the FCP module: carve DMA memory for sense buffers and kernel
 * memory for the ITN array out of the meminfo regions, then attach the
 * fcpim child module and the iotag pools.
 *
 * NOTE: the meminfo cursors are advanced in place, so the carve order
 * here must match the sizing order in bfa_fcp_meminfo().
 */
static void
bfa_fcp_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
		struct bfa_meminfo_s *meminfo, struct bfa_pcidev_s *pcidev)
{
	struct bfa_fcp_mod_s *fcp = BFA_FCP_MOD(bfa);
	u32 snsbufsz;

	fcp->num_ioim_reqs = cfg->fwcfg.num_ioim_reqs;
	fcp->num_fwtio_reqs  = cfg->fwcfg.num_fwtio_reqs;
	fcp->num_itns   = cfg->fwcfg.num_rports;
	fcp->bfa = bfa;

	/* Sense buffers: one BFI_IOIM_SNSLEN slot per IO request. */
	snsbufsz = (fcp->num_ioim_reqs + fcp->num_fwtio_reqs) * BFI_IOIM_SNSLEN;
	fcp->snsbase.pa  = bfa_meminfo_dma_phys(meminfo);
	bfa_meminfo_dma_phys(meminfo) += snsbufsz;

	fcp->snsbase.kva = bfa_meminfo_dma_virt(meminfo);
	bfa_meminfo_dma_virt(meminfo) += snsbufsz;
	bfa_iocfc_set_snsbase(bfa, fcp->snsbase.pa);

	bfa_fcpim_attach(fcp, bfad, cfg, meminfo, pcidev);

	/* ITN array: one entry per rport, zero-initialized. */
	fcp->itn_arr = (struct bfa_itn_s *) bfa_meminfo_kva(meminfo);
	bfa_meminfo_kva(meminfo) = (u8 *)fcp->itn_arr +
			(fcp->num_itns * sizeof(struct bfa_itn_s));
	memset(fcp->itn_arr, 0,
			(fcp->num_itns * sizeof(struct bfa_itn_s)));

	bfa_iotag_attach(fcp, meminfo);
}
3302
static void
bfa_fcp_detach(struct bfa_s *bfa)
{
	/* Module-framework hook; nothing to tear down for FCP. */
}
3307
static void
bfa_fcp_start(struct bfa_s *bfa)
{
	/* Module-framework hook; nothing to do at start for FCP. */
}
3312
static void
bfa_fcp_stop(struct bfa_s *bfa)
{
	/* Module-framework hook; nothing to do at stop for FCP. */
}
3317
/*
 * IOC disable: restore parked iotag resources and propagate the
 * disable to the fcpim child module.
 */
static void
bfa_fcp_iocdisable(struct bfa_s *bfa)
{
	struct bfa_fcp_mod_s *fcp = BFA_FCP_MOD(bfa);

	/* Enqueue unused ioim resources to free_q */
	list_splice_tail_init(&fcp->iotag_unused_q, &fcp->iotag_ioim_free_q);

	bfa_fcpim_iocdisable(fcp);
}
3328
3329void
Krishna Gudipati3fd45982011-06-24 20:24:08 -07003330bfa_fcp_res_recfg(struct bfa_s *bfa, u16 num_ioim_fw)
3331{
3332 struct bfa_fcp_mod_s *mod = BFA_FCP_MOD(bfa);
3333 struct list_head *qe;
3334 int i;
3335
3336 for (i = 0; i < (mod->num_ioim_reqs - num_ioim_fw); i++) {
3337 bfa_q_deq_tail(&mod->iotag_ioim_free_q, &qe);
3338 list_add_tail(qe, &mod->iotag_unused_q);
3339 }
3340}
3341
/*
 * Register the per-rport interrupt handler for ITN messages.
 */
void
bfa_itn_create(struct bfa_s *bfa, struct bfa_rport_s *rport,
		void (*isr)(struct bfa_s *bfa, struct bfi_msg_s *m))
{
	struct bfa_fcp_mod_s *fcp = BFA_FCP_MOD(bfa);
	struct bfa_itn_s *itn;

	itn = BFA_ITN_FROM_TAG(fcp, rport->rport_tag);
	itn->isr = isr;
}
3352
3353/*
3354 * Itn interrupt processing.
3355 */
3356void
3357bfa_itn_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
3358{
3359 struct bfa_fcp_mod_s *fcp = BFA_FCP_MOD(bfa);
3360 union bfi_itn_i2h_msg_u msg;
3361 struct bfa_itn_s *itn;
3362
3363 msg.msg = m;
3364 itn = BFA_ITN_FROM_TAG(fcp, msg.create_rsp->bfa_handle);
3365
3366 if (itn->isr)
3367 itn->isr(bfa, m);
3368 else
3369 WARN_ON(1);
3370}
3371
3372void
3373bfa_iotag_attach(struct bfa_fcp_mod_s *fcp, struct bfa_meminfo_s *minfo)
3374{
3375 struct bfa_iotag_s *iotag;
3376 u16 num_io_req, i;
3377
3378 iotag = (struct bfa_iotag_s *) bfa_meminfo_kva(minfo);
3379 fcp->iotag_arr = iotag;
3380
3381 INIT_LIST_HEAD(&fcp->iotag_ioim_free_q);
3382 INIT_LIST_HEAD(&fcp->iotag_tio_free_q);
Krishna Gudipati3fd45982011-06-24 20:24:08 -07003383 INIT_LIST_HEAD(&fcp->iotag_unused_q);
Krishna Gudipatie2187d72011-06-13 15:53:58 -07003384
3385 num_io_req = fcp->num_ioim_reqs + fcp->num_fwtio_reqs;
3386 for (i = 0; i < num_io_req; i++, iotag++) {
3387 memset(iotag, 0, sizeof(struct bfa_iotag_s));
3388 iotag->tag = i;
3389 if (i < fcp->num_ioim_reqs)
3390 list_add_tail(&iotag->qe, &fcp->iotag_ioim_free_q);
3391 else
3392 list_add_tail(&iotag->qe, &fcp->iotag_tio_free_q);
3393 }
3394
3395 bfa_meminfo_kva(minfo) = (u8 *) iotag;
3396}