blob: 27eab36f89a56614ef58920b5ab91734810299bf [file] [log] [blame]
Jing Huang7725ccf2009-09-23 17:46:15 -07001/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
Jing Huang7725ccf2009-09-23 17:46:15 -07003 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
Maggie Zhangf16a1752010-12-09 19:12:32 -080018#include "bfad_drv.h"
Krishna Gudipatia36c61f2010-09-15 11:50:55 -070019#include "bfa_modules.h"
Jing Huang7725ccf2009-09-23 17:46:15 -070020
21BFA_TRC_FILE(HAL, FCPIM);
Jing Huang7725ccf2009-09-23 17:46:15 -070022
Jing Huang5fbe25c2010-10-18 17:17:23 -070023/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -070024 * BFA ITNIM Related definitions
25 */
26static void bfa_itnim_update_del_itn_stats(struct bfa_itnim_s *itnim);
27
28#define BFA_ITNIM_FROM_TAG(_fcpim, _tag) \
29 (((_fcpim)->itnim_arr + ((_tag) & ((_fcpim)->num_itnims - 1))))
30
31#define bfa_fcpim_additn(__itnim) \
32 list_add_tail(&(__itnim)->qe, &(__itnim)->fcpim->itnim_q)
33#define bfa_fcpim_delitn(__itnim) do { \
Jing Huangd4b671c2010-12-26 21:46:35 -080034 WARN_ON(!bfa_q_is_on_q(&(__itnim)->fcpim->itnim_q, __itnim)); \
Krishna Gudipatia36c61f2010-09-15 11:50:55 -070035 bfa_itnim_update_del_itn_stats(__itnim); \
36 list_del(&(__itnim)->qe); \
Jing Huangd4b671c2010-12-26 21:46:35 -080037 WARN_ON(!list_empty(&(__itnim)->io_q)); \
38 WARN_ON(!list_empty(&(__itnim)->io_cleanup_q)); \
39 WARN_ON(!list_empty(&(__itnim)->pending_q)); \
Krishna Gudipatia36c61f2010-09-15 11:50:55 -070040} while (0)
41
42#define bfa_itnim_online_cb(__itnim) do { \
43 if ((__itnim)->bfa->fcs) \
44 bfa_cb_itnim_online((__itnim)->ditn); \
45 else { \
46 bfa_cb_queue((__itnim)->bfa, &(__itnim)->hcb_qe, \
47 __bfa_cb_itnim_online, (__itnim)); \
48 } \
49} while (0)
50
51#define bfa_itnim_offline_cb(__itnim) do { \
52 if ((__itnim)->bfa->fcs) \
53 bfa_cb_itnim_offline((__itnim)->ditn); \
54 else { \
55 bfa_cb_queue((__itnim)->bfa, &(__itnim)->hcb_qe, \
56 __bfa_cb_itnim_offline, (__itnim)); \
57 } \
58} while (0)
59
60#define bfa_itnim_sler_cb(__itnim) do { \
61 if ((__itnim)->bfa->fcs) \
62 bfa_cb_itnim_sler((__itnim)->ditn); \
63 else { \
64 bfa_cb_queue((__itnim)->bfa, &(__itnim)->hcb_qe, \
65 __bfa_cb_itnim_sler, (__itnim)); \
66 } \
67} while (0)
68
Jing Huang5fbe25c2010-10-18 17:17:23 -070069/*
Maggie Zhangda99dcc2010-12-09 19:13:20 -080070 * itnim state machine event
Krishna Gudipatia36c61f2010-09-15 11:50:55 -070071 */
Krishna Gudipatia36c61f2010-09-15 11:50:55 -070072enum bfa_itnim_event {
73 BFA_ITNIM_SM_CREATE = 1, /* itnim is created */
74 BFA_ITNIM_SM_ONLINE = 2, /* itnim is online */
75 BFA_ITNIM_SM_OFFLINE = 3, /* itnim is offline */
76 BFA_ITNIM_SM_FWRSP = 4, /* firmware response */
77 BFA_ITNIM_SM_DELETE = 5, /* deleting an existing itnim */
78 BFA_ITNIM_SM_CLEANUP = 6, /* IO cleanup completion */
79 BFA_ITNIM_SM_SLER = 7, /* second level error recovery */
80 BFA_ITNIM_SM_HWFAIL = 8, /* IOC h/w failure event */
81 BFA_ITNIM_SM_QRESUME = 9, /* queue space available */
82};
83
Jing Huang5fbe25c2010-10-18 17:17:23 -070084/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -070085 * BFA IOIM related definitions
86 */
87#define bfa_ioim_move_to_comp_q(__ioim) do { \
88 list_del(&(__ioim)->qe); \
89 list_add_tail(&(__ioim)->qe, &(__ioim)->fcpim->ioim_comp_q); \
90} while (0)
91
92
93#define bfa_ioim_cb_profile_comp(__fcpim, __ioim) do { \
94 if ((__fcpim)->profile_comp) \
95 (__fcpim)->profile_comp(__ioim); \
96} while (0)
97
98#define bfa_ioim_cb_profile_start(__fcpim, __ioim) do { \
99 if ((__fcpim)->profile_start) \
100 (__fcpim)->profile_start(__ioim); \
101} while (0)
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700102
Jing Huang5fbe25c2010-10-18 17:17:23 -0700103/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700104 * IO state machine events
105 */
106enum bfa_ioim_event {
107 BFA_IOIM_SM_START = 1, /* io start request from host */
108 BFA_IOIM_SM_COMP_GOOD = 2, /* io good comp, resource free */
109 BFA_IOIM_SM_COMP = 3, /* io comp, resource is free */
110 BFA_IOIM_SM_COMP_UTAG = 4, /* io comp, resource is free */
111 BFA_IOIM_SM_DONE = 5, /* io comp, resource not free */
112 BFA_IOIM_SM_FREE = 6, /* io resource is freed */
113 BFA_IOIM_SM_ABORT = 7, /* abort request from scsi stack */
114 BFA_IOIM_SM_ABORT_COMP = 8, /* abort from f/w */
115 BFA_IOIM_SM_ABORT_DONE = 9, /* abort completion from f/w */
116 BFA_IOIM_SM_QRESUME = 10, /* CQ space available to queue IO */
117 BFA_IOIM_SM_SGALLOCED = 11, /* SG page allocation successful */
118 BFA_IOIM_SM_SQRETRY = 12, /* sequence recovery retry */
119 BFA_IOIM_SM_HCB = 13, /* bfa callback complete */
120 BFA_IOIM_SM_CLEANUP = 14, /* IO cleanup from itnim */
121 BFA_IOIM_SM_TMSTART = 15, /* IO cleanup from tskim */
122 BFA_IOIM_SM_TMDONE = 16, /* IO cleanup from tskim */
123 BFA_IOIM_SM_HWFAIL = 17, /* IOC h/w failure event */
124 BFA_IOIM_SM_IOTOV = 18, /* ITN offline TOV */
125};
126
127
Jing Huang5fbe25c2010-10-18 17:17:23 -0700128/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700129 * BFA TSKIM related definitions
130 */
131
Jing Huang5fbe25c2010-10-18 17:17:23 -0700132/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700133 * task management completion handling
134 */
135#define bfa_tskim_qcomp(__tskim, __cbfn) do { \
136 bfa_cb_queue((__tskim)->bfa, &(__tskim)->hcb_qe, __cbfn, (__tskim));\
137 bfa_tskim_notify_comp(__tskim); \
138} while (0)
139
140#define bfa_tskim_notify_comp(__tskim) do { \
141 if ((__tskim)->notify) \
142 bfa_itnim_tskdone((__tskim)->itnim); \
143} while (0)
144
145
146enum bfa_tskim_event {
147 BFA_TSKIM_SM_START = 1, /* TM command start */
148 BFA_TSKIM_SM_DONE = 2, /* TM completion */
149 BFA_TSKIM_SM_QRESUME = 3, /* resume after qfull */
150 BFA_TSKIM_SM_HWFAIL = 5, /* IOC h/w failure event */
151 BFA_TSKIM_SM_HCB = 6, /* BFA callback completion */
152 BFA_TSKIM_SM_IOS_DONE = 7, /* IO and sub TM completions */
153 BFA_TSKIM_SM_CLEANUP = 8, /* TM cleanup on ITN offline */
154 BFA_TSKIM_SM_CLEANUP_DONE = 9, /* TM abort completion */
155};
156
Jing Huang5fbe25c2010-10-18 17:17:23 -0700157/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700158 * forward declaration for BFA ITNIM functions
159 */
160static void bfa_itnim_iocdisable_cleanup(struct bfa_itnim_s *itnim);
161static bfa_boolean_t bfa_itnim_send_fwcreate(struct bfa_itnim_s *itnim);
162static bfa_boolean_t bfa_itnim_send_fwdelete(struct bfa_itnim_s *itnim);
163static void bfa_itnim_cleanp_comp(void *itnim_cbarg);
164static void bfa_itnim_cleanup(struct bfa_itnim_s *itnim);
165static void __bfa_cb_itnim_online(void *cbarg, bfa_boolean_t complete);
166static void __bfa_cb_itnim_offline(void *cbarg, bfa_boolean_t complete);
167static void __bfa_cb_itnim_sler(void *cbarg, bfa_boolean_t complete);
168static void bfa_itnim_iotov_online(struct bfa_itnim_s *itnim);
169static void bfa_itnim_iotov_cleanup(struct bfa_itnim_s *itnim);
170static void bfa_itnim_iotov(void *itnim_arg);
171static void bfa_itnim_iotov_start(struct bfa_itnim_s *itnim);
172static void bfa_itnim_iotov_stop(struct bfa_itnim_s *itnim);
173static void bfa_itnim_iotov_delete(struct bfa_itnim_s *itnim);
174
Jing Huang5fbe25c2010-10-18 17:17:23 -0700175/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700176 * forward declaration of ITNIM state machine
177 */
178static void bfa_itnim_sm_uninit(struct bfa_itnim_s *itnim,
179 enum bfa_itnim_event event);
180static void bfa_itnim_sm_created(struct bfa_itnim_s *itnim,
181 enum bfa_itnim_event event);
182static void bfa_itnim_sm_fwcreate(struct bfa_itnim_s *itnim,
183 enum bfa_itnim_event event);
184static void bfa_itnim_sm_delete_pending(struct bfa_itnim_s *itnim,
185 enum bfa_itnim_event event);
186static void bfa_itnim_sm_online(struct bfa_itnim_s *itnim,
187 enum bfa_itnim_event event);
188static void bfa_itnim_sm_sler(struct bfa_itnim_s *itnim,
189 enum bfa_itnim_event event);
190static void bfa_itnim_sm_cleanup_offline(struct bfa_itnim_s *itnim,
191 enum bfa_itnim_event event);
192static void bfa_itnim_sm_cleanup_delete(struct bfa_itnim_s *itnim,
193 enum bfa_itnim_event event);
194static void bfa_itnim_sm_fwdelete(struct bfa_itnim_s *itnim,
195 enum bfa_itnim_event event);
196static void bfa_itnim_sm_offline(struct bfa_itnim_s *itnim,
197 enum bfa_itnim_event event);
198static void bfa_itnim_sm_iocdisable(struct bfa_itnim_s *itnim,
199 enum bfa_itnim_event event);
200static void bfa_itnim_sm_deleting(struct bfa_itnim_s *itnim,
201 enum bfa_itnim_event event);
202static void bfa_itnim_sm_fwcreate_qfull(struct bfa_itnim_s *itnim,
203 enum bfa_itnim_event event);
204static void bfa_itnim_sm_fwdelete_qfull(struct bfa_itnim_s *itnim,
205 enum bfa_itnim_event event);
206static void bfa_itnim_sm_deleting_qfull(struct bfa_itnim_s *itnim,
207 enum bfa_itnim_event event);
208
Jing Huang5fbe25c2010-10-18 17:17:23 -0700209/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700210 * forward declaration for BFA IOIM functions
211 */
212static bfa_boolean_t bfa_ioim_send_ioreq(struct bfa_ioim_s *ioim);
Maggie Zhange3e7d3e2010-12-09 19:10:27 -0800213static bfa_boolean_t bfa_ioim_sgpg_alloc(struct bfa_ioim_s *ioim);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700214static bfa_boolean_t bfa_ioim_send_abort(struct bfa_ioim_s *ioim);
215static void bfa_ioim_notify_cleanup(struct bfa_ioim_s *ioim);
216static void __bfa_cb_ioim_good_comp(void *cbarg, bfa_boolean_t complete);
217static void __bfa_cb_ioim_comp(void *cbarg, bfa_boolean_t complete);
218static void __bfa_cb_ioim_abort(void *cbarg, bfa_boolean_t complete);
219static void __bfa_cb_ioim_failed(void *cbarg, bfa_boolean_t complete);
220static void __bfa_cb_ioim_pathtov(void *cbarg, bfa_boolean_t complete);
221static bfa_boolean_t bfa_ioim_is_abortable(struct bfa_ioim_s *ioim);
222
Jing Huang5fbe25c2010-10-18 17:17:23 -0700223/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700224 * forward declaration of BFA IO state machine
225 */
226static void bfa_ioim_sm_uninit(struct bfa_ioim_s *ioim,
227 enum bfa_ioim_event event);
228static void bfa_ioim_sm_sgalloc(struct bfa_ioim_s *ioim,
229 enum bfa_ioim_event event);
230static void bfa_ioim_sm_active(struct bfa_ioim_s *ioim,
231 enum bfa_ioim_event event);
232static void bfa_ioim_sm_abort(struct bfa_ioim_s *ioim,
233 enum bfa_ioim_event event);
234static void bfa_ioim_sm_cleanup(struct bfa_ioim_s *ioim,
235 enum bfa_ioim_event event);
236static void bfa_ioim_sm_qfull(struct bfa_ioim_s *ioim,
237 enum bfa_ioim_event event);
238static void bfa_ioim_sm_abort_qfull(struct bfa_ioim_s *ioim,
239 enum bfa_ioim_event event);
240static void bfa_ioim_sm_cleanup_qfull(struct bfa_ioim_s *ioim,
241 enum bfa_ioim_event event);
242static void bfa_ioim_sm_hcb(struct bfa_ioim_s *ioim,
243 enum bfa_ioim_event event);
244static void bfa_ioim_sm_hcb_free(struct bfa_ioim_s *ioim,
245 enum bfa_ioim_event event);
246static void bfa_ioim_sm_resfree(struct bfa_ioim_s *ioim,
247 enum bfa_ioim_event event);
248static void bfa_ioim_sm_cmnd_retry(struct bfa_ioim_s *ioim,
249 enum bfa_ioim_event event);
Jing Huang5fbe25c2010-10-18 17:17:23 -0700250/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700251 * forward declaration for BFA TSKIM functions
252 */
253static void __bfa_cb_tskim_done(void *cbarg, bfa_boolean_t complete);
254static void __bfa_cb_tskim_failed(void *cbarg, bfa_boolean_t complete);
255static bfa_boolean_t bfa_tskim_match_scope(struct bfa_tskim_s *tskim,
Maggie Zhangf3148782010-12-09 19:11:39 -0800256 struct scsi_lun lun);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700257static void bfa_tskim_gather_ios(struct bfa_tskim_s *tskim);
258static void bfa_tskim_cleanp_comp(void *tskim_cbarg);
259static void bfa_tskim_cleanup_ios(struct bfa_tskim_s *tskim);
260static bfa_boolean_t bfa_tskim_send(struct bfa_tskim_s *tskim);
261static bfa_boolean_t bfa_tskim_send_abort(struct bfa_tskim_s *tskim);
262static void bfa_tskim_iocdisable_ios(struct bfa_tskim_s *tskim);
263
Jing Huang5fbe25c2010-10-18 17:17:23 -0700264/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700265 * forward declaration of BFA TSKIM state machine
266 */
267static void bfa_tskim_sm_uninit(struct bfa_tskim_s *tskim,
268 enum bfa_tskim_event event);
269static void bfa_tskim_sm_active(struct bfa_tskim_s *tskim,
270 enum bfa_tskim_event event);
271static void bfa_tskim_sm_cleanup(struct bfa_tskim_s *tskim,
272 enum bfa_tskim_event event);
273static void bfa_tskim_sm_iocleanup(struct bfa_tskim_s *tskim,
274 enum bfa_tskim_event event);
275static void bfa_tskim_sm_qfull(struct bfa_tskim_s *tskim,
276 enum bfa_tskim_event event);
277static void bfa_tskim_sm_cleanup_qfull(struct bfa_tskim_s *tskim,
278 enum bfa_tskim_event event);
279static void bfa_tskim_sm_hcb(struct bfa_tskim_s *tskim,
280 enum bfa_tskim_event event);
Jing Huang5fbe25c2010-10-18 17:17:23 -0700281/*
Maggie Zhangdf0f1932010-12-09 19:07:46 -0800282 * BFA FCP Initiator Mode module
Jing Huang7725ccf2009-09-23 17:46:15 -0700283 */
284
Jing Huang5fbe25c2010-10-18 17:17:23 -0700285/*
Maggie Zhangda99dcc2010-12-09 19:13:20 -0800286 * Compute and return memory needed by FCP(im) module.
Jing Huang7725ccf2009-09-23 17:46:15 -0700287 */
static void
bfa_fcpim_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *km_len)
{
	/* Per-itnim memory is accounted by the itnim module itself. */
	bfa_itnim_meminfo(cfg, km_len);

	/*
	 * IO memory: one bfa_ioim_s plus one bfa_ioim_sp_s per IO request.
	 */
	*km_len += cfg->fwcfg.num_ioim_reqs *
	  (sizeof(struct bfa_ioim_s) + sizeof(struct bfa_ioim_sp_s));

	/*
	 * task management command memory; enforce the driver minimum so a
	 * too-small firmware config cannot starve TM handling.
	 */
	if (cfg->fwcfg.num_tskim_reqs < BFA_TSKIM_MIN)
		cfg->fwcfg.num_tskim_reqs = BFA_TSKIM_MIN;
	*km_len += cfg->fwcfg.num_tskim_reqs * sizeof(struct bfa_tskim_s);
}
306
307
/*
 * Attach the FCP(im) module: initialize the fcpim instance from the IOC
 * configuration and attach the itnim/tskim/ioim sub-modules.
 * NOTE(review): bfad and pcidev are accepted but unused here - presumably
 * kept for attach-callback signature symmetry; confirm against caller.
 */
static void
bfa_fcpim_attach(struct bfa_fcp_mod_s *fcp, void *bfad,
		struct bfa_iocfc_cfg_s *cfg, struct bfa_pcidev_s *pcidev)
{
	struct bfa_fcpim_s *fcpim = &fcp->fcpim;
	struct bfa_s *bfa = fcp->bfa;

	bfa_trc(bfa, cfg->drvcfg.path_tov);
	bfa_trc(bfa, cfg->fwcfg.num_rports);
	bfa_trc(bfa, cfg->fwcfg.num_ioim_reqs);
	bfa_trc(bfa, cfg->fwcfg.num_tskim_reqs);

	fcpim->fcp = fcp;
	fcpim->bfa = bfa;
	fcpim->num_itnims = cfg->fwcfg.num_rports;
	fcpim->num_tskim_reqs = cfg->fwcfg.num_tskim_reqs;
	fcpim->path_tov = cfg->drvcfg.path_tov;
	fcpim->delay_comp = cfg->drvcfg.delay_comp;
	/* IO profiling hooks are off until explicitly enabled. */
	fcpim->profile_comp = NULL;
	fcpim->profile_start = NULL;

	bfa_itnim_attach(fcpim);
	bfa_tskim_attach(fcpim);
	bfa_ioim_attach(fcpim);
}
333
/*
 * IOC disable/failure handling: reclaim unused TM resources and propagate
 * the disable to every itnim.
 */
static void
bfa_fcpim_iocdisable(struct bfa_fcp_mod_s *fcp)
{
	struct bfa_fcpim_s *fcpim = &fcp->fcpim;
	struct bfa_itnim_s *itnim;
	struct list_head *qe, *qen;

	/* Enqueue unused tskim resources to free_q */
	list_splice_tail_init(&fcpim->tskim_unused_q, &fcpim->tskim_free_q);

	list_for_each_safe(qe, qen, &fcpim->itnim_q) {
		itnim = (struct bfa_itnim_s *) qe;
		bfa_itnim_iocdisable(itnim);
	}
}
349
350void
351bfa_fcpim_path_tov_set(struct bfa_s *bfa, u16 path_tov)
352{
Krishna Gudipatie2187d72011-06-13 15:53:58 -0700353 struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa);
Jing Huang7725ccf2009-09-23 17:46:15 -0700354
355 fcpim->path_tov = path_tov * 1000;
356 if (fcpim->path_tov > BFA_FCPIM_PATHTOV_MAX)
357 fcpim->path_tov = BFA_FCPIM_PATHTOV_MAX;
358}
359
360u16
361bfa_fcpim_path_tov_get(struct bfa_s *bfa)
362{
Krishna Gudipatie2187d72011-06-13 15:53:58 -0700363 struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa);
Jing Huang7725ccf2009-09-23 17:46:15 -0700364
Jing Huangf8ceafd2009-09-25 12:29:54 -0700365 return fcpim->path_tov / 1000;
Jing Huang7725ccf2009-09-23 17:46:15 -0700366}
367
Jing Huang7725ccf2009-09-23 17:46:15 -0700368u16
369bfa_fcpim_qdepth_get(struct bfa_s *bfa)
370{
Krishna Gudipatie2187d72011-06-13 15:53:58 -0700371 struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa);
Jing Huang7725ccf2009-09-23 17:46:15 -0700372
Jing Huangf8ceafd2009-09-25 12:29:54 -0700373 return fcpim->q_depth;
Jing Huang7725ccf2009-09-23 17:46:15 -0700374}
375
Jing Huang5fbe25c2010-10-18 17:17:23 -0700376/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700377 * BFA ITNIM module state machine functions
378 */
379
Jing Huang5fbe25c2010-10-18 17:17:23 -0700380/*
Maggie Zhangda99dcc2010-12-09 19:13:20 -0800381 * Beginning/unallocated state - no events expected.
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700382 */
static void
bfa_itnim_sm_uninit(struct bfa_itnim_s *itnim, enum bfa_itnim_event event)
{
	bfa_trc(itnim->bfa, itnim->rport->rport_tag);
	bfa_trc(itnim->bfa, event);

	switch (event) {
	case BFA_ITNIM_SM_CREATE:
		/* Newly created itnim starts offline, queued on fcpim. */
		bfa_sm_set_state(itnim, bfa_itnim_sm_created);
		itnim->is_online = BFA_FALSE;
		bfa_fcpim_additn(itnim);
		break;

	default:
		bfa_sm_fault(itnim->bfa, event);
	}
}
400
Jing Huang5fbe25c2010-10-18 17:17:23 -0700401/*
Maggie Zhangda99dcc2010-12-09 19:13:20 -0800402 * Beginning state, only online event expected.
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700403 */
static void
bfa_itnim_sm_created(struct bfa_itnim_s *itnim, enum bfa_itnim_event event)
{
	bfa_trc(itnim->bfa, itnim->rport->rport_tag);
	bfa_trc(itnim->bfa, event);

	switch (event) {
	case BFA_ITNIM_SM_ONLINE:
		/*
		 * Request firmware create; if request queue is full, park in
		 * the qfull state until space frees up (QRESUME).
		 */
		if (bfa_itnim_send_fwcreate(itnim))
			bfa_sm_set_state(itnim, bfa_itnim_sm_fwcreate);
		else
			bfa_sm_set_state(itnim, bfa_itnim_sm_fwcreate_qfull);
		break;

	case BFA_ITNIM_SM_DELETE:
		/* Nothing sent to firmware yet - free immediately. */
		bfa_sm_set_state(itnim, bfa_itnim_sm_uninit);
		bfa_fcpim_delitn(itnim);
		break;

	case BFA_ITNIM_SM_HWFAIL:
		bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
		break;

	default:
		bfa_sm_fault(itnim->bfa, event);
	}
}
431
Jing Huang5fbe25c2010-10-18 17:17:23 -0700432/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700433 * Waiting for itnim create response from firmware.
434 */
static void
bfa_itnim_sm_fwcreate(struct bfa_itnim_s *itnim, enum bfa_itnim_event event)
{
	bfa_trc(itnim->bfa, itnim->rport->rport_tag);
	bfa_trc(itnim->bfa, event);

	switch (event) {
	case BFA_ITNIM_SM_FWRSP:
		/* Firmware create completed - itnim is now usable for IO. */
		bfa_sm_set_state(itnim, bfa_itnim_sm_online);
		itnim->is_online = BFA_TRUE;
		bfa_itnim_iotov_online(itnim);
		bfa_itnim_online_cb(itnim);
		break;

	case BFA_ITNIM_SM_DELETE:
		/* Hold the delete until the pending f/w create responds. */
		bfa_sm_set_state(itnim, bfa_itnim_sm_delete_pending);
		break;

	case BFA_ITNIM_SM_OFFLINE:
		if (bfa_itnim_send_fwdelete(itnim))
			bfa_sm_set_state(itnim, bfa_itnim_sm_fwdelete);
		else
			bfa_sm_set_state(itnim, bfa_itnim_sm_fwdelete_qfull);
		break;

	case BFA_ITNIM_SM_HWFAIL:
		bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
		break;

	default:
		bfa_sm_fault(itnim->bfa, event);
	}
}
468
/*
 * Waiting for request queue space to send the itnim create to firmware.
 */
static void
bfa_itnim_sm_fwcreate_qfull(struct bfa_itnim_s *itnim,
		enum bfa_itnim_event event)
{
	bfa_trc(itnim->bfa, itnim->rport->rport_tag);
	bfa_trc(itnim->bfa, event);

	switch (event) {
	case BFA_ITNIM_SM_QRESUME:
		/* Queue space available - retry the firmware create. */
		bfa_sm_set_state(itnim, bfa_itnim_sm_fwcreate);
		bfa_itnim_send_fwcreate(itnim);
		break;

	case BFA_ITNIM_SM_DELETE:
		/* Nothing reached firmware - cancel the wait and free. */
		bfa_sm_set_state(itnim, bfa_itnim_sm_uninit);
		bfa_reqq_wcancel(&itnim->reqq_wait);
		bfa_fcpim_delitn(itnim);
		break;

	case BFA_ITNIM_SM_OFFLINE:
		bfa_sm_set_state(itnim, bfa_itnim_sm_offline);
		bfa_reqq_wcancel(&itnim->reqq_wait);
		bfa_itnim_offline_cb(itnim);
		break;

	case BFA_ITNIM_SM_HWFAIL:
		bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
		bfa_reqq_wcancel(&itnim->reqq_wait);
		break;

	default:
		bfa_sm_fault(itnim->bfa, event);
	}
}
503
Jing Huang5fbe25c2010-10-18 17:17:23 -0700504/*
Maggie Zhangda99dcc2010-12-09 19:13:20 -0800505 * Waiting for itnim create response from firmware, a delete is pending.
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700506 */
static void
bfa_itnim_sm_delete_pending(struct bfa_itnim_s *itnim,
		enum bfa_itnim_event event)
{
	bfa_trc(itnim->bfa, itnim->rport->rport_tag);
	bfa_trc(itnim->bfa, event);

	switch (event) {
	case BFA_ITNIM_SM_FWRSP:
		/*
		 * Create response arrived while a delete is pending - now
		 * send the firmware delete (or wait for queue space).
		 */
		if (bfa_itnim_send_fwdelete(itnim))
			bfa_sm_set_state(itnim, bfa_itnim_sm_deleting);
		else
			bfa_sm_set_state(itnim, bfa_itnim_sm_deleting_qfull);
		break;

	case BFA_ITNIM_SM_HWFAIL:
		/* IOC is gone - no f/w delete needed, free directly. */
		bfa_sm_set_state(itnim, bfa_itnim_sm_uninit);
		bfa_fcpim_delitn(itnim);
		break;

	default:
		bfa_sm_fault(itnim->bfa, event);
	}
}
531
Jing Huang5fbe25c2010-10-18 17:17:23 -0700532/*
Maggie Zhangda99dcc2010-12-09 19:13:20 -0800533 * Online state - normal parking state.
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700534 */
static void
bfa_itnim_sm_online(struct bfa_itnim_s *itnim, enum bfa_itnim_event event)
{
	bfa_trc(itnim->bfa, itnim->rport->rport_tag);
	bfa_trc(itnim->bfa, event);

	switch (event) {
	case BFA_ITNIM_SM_OFFLINE:
		/* Start the IO timeout window and clean up active IOs. */
		bfa_sm_set_state(itnim, bfa_itnim_sm_cleanup_offline);
		itnim->is_online = BFA_FALSE;
		bfa_itnim_iotov_start(itnim);
		bfa_itnim_cleanup(itnim);
		break;

	case BFA_ITNIM_SM_DELETE:
		bfa_sm_set_state(itnim, bfa_itnim_sm_cleanup_delete);
		itnim->is_online = BFA_FALSE;
		bfa_itnim_cleanup(itnim);
		break;

	case BFA_ITNIM_SM_SLER:
		/* Second-level error recovery requested by firmware. */
		bfa_sm_set_state(itnim, bfa_itnim_sm_sler);
		itnim->is_online = BFA_FALSE;
		bfa_itnim_iotov_start(itnim);
		bfa_itnim_sler_cb(itnim);
		break;

	case BFA_ITNIM_SM_HWFAIL:
		bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
		itnim->is_online = BFA_FALSE;
		bfa_itnim_iotov_start(itnim);
		bfa_itnim_iocdisable_cleanup(itnim);
		break;

	default:
		bfa_sm_fault(itnim->bfa, event);
	}
}
573
Jing Huang5fbe25c2010-10-18 17:17:23 -0700574/*
Maggie Zhangda99dcc2010-12-09 19:13:20 -0800575 * Second level error recovery need.
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700576 */
static void
bfa_itnim_sm_sler(struct bfa_itnim_s *itnim, enum bfa_itnim_event event)
{
	bfa_trc(itnim->bfa, itnim->rport->rport_tag);
	bfa_trc(itnim->bfa, event);

	switch (event) {
	case BFA_ITNIM_SM_OFFLINE:
		bfa_sm_set_state(itnim, bfa_itnim_sm_cleanup_offline);
		bfa_itnim_cleanup(itnim);
		break;

	case BFA_ITNIM_SM_DELETE:
		bfa_sm_set_state(itnim, bfa_itnim_sm_cleanup_delete);
		bfa_itnim_cleanup(itnim);
		/* Delete also cancels any pending IO timeout handling. */
		bfa_itnim_iotov_delete(itnim);
		break;

	case BFA_ITNIM_SM_HWFAIL:
		bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
		bfa_itnim_iocdisable_cleanup(itnim);
		break;

	default:
		bfa_sm_fault(itnim->bfa, event);
	}
}
604
Jing Huang5fbe25c2010-10-18 17:17:23 -0700605/*
Maggie Zhangda99dcc2010-12-09 19:13:20 -0800606 * Going offline. Waiting for active IO cleanup.
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700607 */
static void
bfa_itnim_sm_cleanup_offline(struct bfa_itnim_s *itnim,
		enum bfa_itnim_event event)
{
	bfa_trc(itnim->bfa, itnim->rport->rport_tag);
	bfa_trc(itnim->bfa, event);

	switch (event) {
	case BFA_ITNIM_SM_CLEANUP:
		/* All IOs cleaned up - now delete the firmware itnim. */
		if (bfa_itnim_send_fwdelete(itnim))
			bfa_sm_set_state(itnim, bfa_itnim_sm_fwdelete);
		else
			bfa_sm_set_state(itnim, bfa_itnim_sm_fwdelete_qfull);
		break;

	case BFA_ITNIM_SM_DELETE:
		bfa_sm_set_state(itnim, bfa_itnim_sm_cleanup_delete);
		bfa_itnim_iotov_delete(itnim);
		break;

	case BFA_ITNIM_SM_HWFAIL:
		bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
		bfa_itnim_iocdisable_cleanup(itnim);
		bfa_itnim_offline_cb(itnim);
		break;

	case BFA_ITNIM_SM_SLER:
		/* SLER is irrelevant while already going offline - ignore. */
		break;

	default:
		bfa_sm_fault(itnim->bfa, event);
	}
}
641
Jing Huang5fbe25c2010-10-18 17:17:23 -0700642/*
Maggie Zhangda99dcc2010-12-09 19:13:20 -0800643 * Deleting itnim. Waiting for active IO cleanup.
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700644 */
static void
bfa_itnim_sm_cleanup_delete(struct bfa_itnim_s *itnim,
		enum bfa_itnim_event event)
{
	bfa_trc(itnim->bfa, itnim->rport->rport_tag);
	bfa_trc(itnim->bfa, event);

	switch (event) {
	case BFA_ITNIM_SM_CLEANUP:
		/* IO cleanup done - proceed with the firmware delete. */
		if (bfa_itnim_send_fwdelete(itnim))
			bfa_sm_set_state(itnim, bfa_itnim_sm_deleting);
		else
			bfa_sm_set_state(itnim, bfa_itnim_sm_deleting_qfull);
		break;

	case BFA_ITNIM_SM_HWFAIL:
		bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
		bfa_itnim_iocdisable_cleanup(itnim);
		break;

	default:
		bfa_sm_fault(itnim->bfa, event);
	}
}
669
Jing Huang5fbe25c2010-10-18 17:17:23 -0700670/*
 * Rport offline. Firmware itnim is being deleted - awaiting f/w response.
672 */
static void
bfa_itnim_sm_fwdelete(struct bfa_itnim_s *itnim, enum bfa_itnim_event event)
{
	bfa_trc(itnim->bfa, itnim->rport->rport_tag);
	bfa_trc(itnim->bfa, event);

	switch (event) {
	case BFA_ITNIM_SM_FWRSP:
		/* Firmware delete completed - itnim is offline. */
		bfa_sm_set_state(itnim, bfa_itnim_sm_offline);
		bfa_itnim_offline_cb(itnim);
		break;

	case BFA_ITNIM_SM_DELETE:
		bfa_sm_set_state(itnim, bfa_itnim_sm_deleting);
		break;

	case BFA_ITNIM_SM_HWFAIL:
		/* IOC failure implies the f/w delete will never respond. */
		bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
		bfa_itnim_offline_cb(itnim);
		break;

	default:
		bfa_sm_fault(itnim->bfa, event);
	}
}
698
/*
 * Waiting for request queue space to send the itnim delete to firmware.
 */
static void
bfa_itnim_sm_fwdelete_qfull(struct bfa_itnim_s *itnim,
		enum bfa_itnim_event event)
{
	bfa_trc(itnim->bfa, itnim->rport->rport_tag);
	bfa_trc(itnim->bfa, event);

	switch (event) {
	case BFA_ITNIM_SM_QRESUME:
		/* Queue space available - retry the firmware delete. */
		bfa_sm_set_state(itnim, bfa_itnim_sm_fwdelete);
		bfa_itnim_send_fwdelete(itnim);
		break;

	case BFA_ITNIM_SM_DELETE:
		bfa_sm_set_state(itnim, bfa_itnim_sm_deleting_qfull);
		break;

	case BFA_ITNIM_SM_HWFAIL:
		bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
		bfa_reqq_wcancel(&itnim->reqq_wait);
		bfa_itnim_offline_cb(itnim);
		break;

	default:
		bfa_sm_fault(itnim->bfa, event);
	}
}
726
Jing Huang5fbe25c2010-10-18 17:17:23 -0700727/*
Maggie Zhangda99dcc2010-12-09 19:13:20 -0800728 * Offline state.
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700729 */
static void
bfa_itnim_sm_offline(struct bfa_itnim_s *itnim, enum bfa_itnim_event event)
{
	bfa_trc(itnim->bfa, itnim->rport->rport_tag);
	bfa_trc(itnim->bfa, event);

	switch (event) {
	case BFA_ITNIM_SM_DELETE:
		bfa_sm_set_state(itnim, bfa_itnim_sm_uninit);
		bfa_itnim_iotov_delete(itnim);
		bfa_fcpim_delitn(itnim);
		break;

	case BFA_ITNIM_SM_ONLINE:
		/* Rport came back - re-create the firmware itnim. */
		if (bfa_itnim_send_fwcreate(itnim))
			bfa_sm_set_state(itnim, bfa_itnim_sm_fwcreate);
		else
			bfa_sm_set_state(itnim, bfa_itnim_sm_fwcreate_qfull);
		break;

	case BFA_ITNIM_SM_HWFAIL:
		bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
		break;

	default:
		bfa_sm_fault(itnim->bfa, event);
	}
}
758
/*
 * IOC h/w failure state - wait for delete, offline or IOC recovery.
 */
static void
bfa_itnim_sm_iocdisable(struct bfa_itnim_s *itnim,
		enum bfa_itnim_event event)
{
	bfa_trc(itnim->bfa, itnim->rport->rport_tag);
	bfa_trc(itnim->bfa, event);

	switch (event) {
	case BFA_ITNIM_SM_DELETE:
		bfa_sm_set_state(itnim, bfa_itnim_sm_uninit);
		bfa_itnim_iotov_delete(itnim);
		bfa_fcpim_delitn(itnim);
		break;

	case BFA_ITNIM_SM_OFFLINE:
		bfa_itnim_offline_cb(itnim);
		break;

	case BFA_ITNIM_SM_ONLINE:
		/* IOC recovered - attempt a fresh firmware create. */
		if (bfa_itnim_send_fwcreate(itnim))
			bfa_sm_set_state(itnim, bfa_itnim_sm_fwcreate);
		else
			bfa_sm_set_state(itnim, bfa_itnim_sm_fwcreate_qfull);
		break;

	case BFA_ITNIM_SM_HWFAIL:
		/* Already in the IOC-disabled state - nothing to do. */
		break;

	default:
		bfa_sm_fault(itnim->bfa, event);
	}
}
791
Jing Huang5fbe25c2010-10-18 17:17:23 -0700792/*
Maggie Zhangda99dcc2010-12-09 19:13:20 -0800793 * Itnim is deleted, waiting for firmware response to delete.
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700794 */
static void
bfa_itnim_sm_deleting(struct bfa_itnim_s *itnim, enum bfa_itnim_event event)
{
	bfa_trc(itnim->bfa, itnim->rport->rport_tag);
	bfa_trc(itnim->bfa, event);

	switch (event) {
	case BFA_ITNIM_SM_FWRSP:
	case BFA_ITNIM_SM_HWFAIL:
		/* Either f/w confirmed the delete or the IOC died - free. */
		bfa_sm_set_state(itnim, bfa_itnim_sm_uninit);
		bfa_fcpim_delitn(itnim);
		break;

	default:
		bfa_sm_fault(itnim->bfa, event);
	}
}
812
/*
 * Itnim is being deleted, waiting for request queue space for the f/w delete.
 */
static void
bfa_itnim_sm_deleting_qfull(struct bfa_itnim_s *itnim,
		enum bfa_itnim_event event)
{
	bfa_trc(itnim->bfa, itnim->rport->rport_tag);
	bfa_trc(itnim->bfa, event);

	switch (event) {
	case BFA_ITNIM_SM_QRESUME:
		/* Queue space available - send the firmware delete now. */
		bfa_sm_set_state(itnim, bfa_itnim_sm_deleting);
		bfa_itnim_send_fwdelete(itnim);
		break;

	case BFA_ITNIM_SM_HWFAIL:
		bfa_sm_set_state(itnim, bfa_itnim_sm_uninit);
		bfa_reqq_wcancel(&itnim->reqq_wait);
		bfa_fcpim_delitn(itnim);
		break;

	default:
		bfa_sm_fault(itnim->bfa, event);
	}
}
836
Jing Huang5fbe25c2010-10-18 17:17:23 -0700837/*
Maggie Zhangda99dcc2010-12-09 19:13:20 -0800838 * Initiate cleanup of all IOs on an IOC failure.
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700839 */
static void
bfa_itnim_iocdisable_cleanup(struct bfa_itnim_s *itnim)
{
	struct bfa_tskim_s *tskim;
	struct bfa_ioim_s *ioim;
	struct list_head *qe, *qen;

	/* Abort active task-management commands first. */
	list_for_each_safe(qe, qen, &itnim->tsk_q) {
		tskim = (struct bfa_tskim_s *) qe;
		bfa_tskim_iocdisable(tskim);
	}

	/* Then the active IO requests. */
	list_for_each_safe(qe, qen, &itnim->io_q) {
		ioim = (struct bfa_ioim_s *) qe;
		bfa_ioim_iocdisable(ioim);
	}

	/*
	 * For IO request in pending queue, we pretend an early timeout.
	 */
	list_for_each_safe(qe, qen, &itnim->pending_q) {
		ioim = (struct bfa_ioim_s *) qe;
		bfa_ioim_tov(ioim);
	}

	/* Finally, IOs already parked on the cleanup queue. */
	list_for_each_safe(qe, qen, &itnim->io_cleanup_q) {
		ioim = (struct bfa_ioim_s *) qe;
		bfa_ioim_iocdisable(ioim);
	}
}
870
Jing Huang5fbe25c2010-10-18 17:17:23 -0700871/*
Maggie Zhangda99dcc2010-12-09 19:13:20 -0800872 * IO cleanup completion
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700873 */
static void
bfa_itnim_cleanp_comp(void *itnim_cbarg)
{
	struct bfa_itnim_s *itnim = itnim_cbarg;

	/* All outstanding IOs/TMs finished; drive the itnim state machine. */
	bfa_stats(itnim, cleanup_comps);
	bfa_sm_send_event(itnim, BFA_ITNIM_SM_CLEANUP);
}
882
Jing Huang5fbe25c2010-10-18 17:17:23 -0700883/*
Maggie Zhangda99dcc2010-12-09 19:13:20 -0800884 * Initiate cleanup of all IOs.
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700885 */
static void
bfa_itnim_cleanup(struct bfa_itnim_s *itnim)
{
	struct bfa_ioim_s *ioim;
	struct bfa_tskim_s *tskim;
	struct list_head *qe, *qen;

	/* wc counts outstanding cleanups; bfa_itnim_cleanp_comp() runs
	 * once the count drains to zero. */
	bfa_wc_init(&itnim->wc, bfa_itnim_cleanp_comp, itnim);

	list_for_each_safe(qe, qen, &itnim->io_q) {
		ioim = (struct bfa_ioim_s *) qe;

		/*
		 * Move IO to a cleanup queue from active queue so that a later
		 * TM will not pickup this IO.
		 */
		list_del(&ioim->qe);
		list_add_tail(&ioim->qe, &itnim->io_cleanup_q);

		bfa_wc_up(&itnim->wc);
		bfa_ioim_cleanup(ioim);
	}

	/* Clean up active task management commands as well. */
	list_for_each_safe(qe, qen, &itnim->tsk_q) {
		tskim = (struct bfa_tskim_s *) qe;
		bfa_wc_up(&itnim->wc);
		bfa_tskim_cleanup(tskim);
	}

	bfa_wc_wait(&itnim->wc);
}
917
918static void
919__bfa_cb_itnim_online(void *cbarg, bfa_boolean_t complete)
920{
921 struct bfa_itnim_s *itnim = cbarg;
922
923 if (complete)
924 bfa_cb_itnim_online(itnim->ditn);
925}
926
927static void
928__bfa_cb_itnim_offline(void *cbarg, bfa_boolean_t complete)
929{
930 struct bfa_itnim_s *itnim = cbarg;
931
932 if (complete)
933 bfa_cb_itnim_offline(itnim->ditn);
934}
935
936static void
937__bfa_cb_itnim_sler(void *cbarg, bfa_boolean_t complete)
938{
939 struct bfa_itnim_s *itnim = cbarg;
940
941 if (complete)
942 bfa_cb_itnim_sler(itnim->ditn);
943}
944
Jing Huang5fbe25c2010-10-18 17:17:23 -0700945/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700946 * Call to resume any I/O requests waiting for room in request queue.
947 */
static void
bfa_itnim_qresume(void *cbarg)
{
	struct bfa_itnim_s *itnim = cbarg;

	/* Request queue has space again; let the state machine resend. */
	bfa_sm_send_event(itnim, BFA_ITNIM_SM_QRESUME);
}
955
Jing Huang5fbe25c2010-10-18 17:17:23 -0700956/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700957 * bfa_itnim_public
958 */
959
void
bfa_itnim_iodone(struct bfa_itnim_s *itnim)
{
	/* One less outstanding IO cleanup on this itnim. */
	bfa_wc_down(&itnim->wc);
}
965
void
bfa_itnim_tskdone(struct bfa_itnim_s *itnim)
{
	/* One less outstanding TM command cleanup on this itnim. */
	bfa_wc_down(&itnim->wc);
}
971
972void
Krishna Gudipati45070252011-06-24 20:24:29 -0700973bfa_itnim_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *km_len)
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700974{
Jing Huang5fbe25c2010-10-18 17:17:23 -0700975 /*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700976 * ITN memory
977 */
978 *km_len += cfg->fwcfg.num_rports * sizeof(struct bfa_itnim_s);
979}
980
981void
Krishna Gudipati45070252011-06-24 20:24:29 -0700982bfa_itnim_attach(struct bfa_fcpim_s *fcpim)
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700983{
984 struct bfa_s *bfa = fcpim->bfa;
Krishna Gudipati45070252011-06-24 20:24:29 -0700985 struct bfa_fcp_mod_s *fcp = fcpim->fcp;
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700986 struct bfa_itnim_s *itnim;
987 int i, j;
988
989 INIT_LIST_HEAD(&fcpim->itnim_q);
990
Krishna Gudipati45070252011-06-24 20:24:29 -0700991 itnim = (struct bfa_itnim_s *) bfa_mem_kva_curp(fcp);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700992 fcpim->itnim_arr = itnim;
993
994 for (i = 0; i < fcpim->num_itnims; i++, itnim++) {
Jing Huang6a18b162010-10-18 17:08:54 -0700995 memset(itnim, 0, sizeof(struct bfa_itnim_s));
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700996 itnim->bfa = bfa;
997 itnim->fcpim = fcpim;
998 itnim->reqq = BFA_REQQ_QOS_LO;
999 itnim->rport = BFA_RPORT_FROM_TAG(bfa, i);
1000 itnim->iotov_active = BFA_FALSE;
1001 bfa_reqq_winit(&itnim->reqq_wait, bfa_itnim_qresume, itnim);
1002
1003 INIT_LIST_HEAD(&itnim->io_q);
1004 INIT_LIST_HEAD(&itnim->io_cleanup_q);
1005 INIT_LIST_HEAD(&itnim->pending_q);
1006 INIT_LIST_HEAD(&itnim->tsk_q);
1007 INIT_LIST_HEAD(&itnim->delay_comp_q);
1008 for (j = 0; j < BFA_IOBUCKET_MAX; j++)
1009 itnim->ioprofile.io_latency.min[j] = ~0;
1010 bfa_sm_set_state(itnim, bfa_itnim_sm_uninit);
1011 }
1012
Krishna Gudipati45070252011-06-24 20:24:29 -07001013 bfa_mem_kva_curp(fcp) = (u8 *) itnim;
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001014}
1015
void
bfa_itnim_iocdisable(struct bfa_itnim_s *itnim)
{
	/* IOC went down; fail the itnim through its state machine. */
	bfa_stats(itnim, ioc_disabled);
	bfa_sm_send_event(itnim, BFA_ITNIM_SM_HWFAIL);
}
1022
static bfa_boolean_t
bfa_itnim_send_fwcreate(struct bfa_itnim_s *itnim)
{
	struct bfi_itn_create_req_s *m;

	itnim->msg_no++;

	/*
	 * check for room in queue to send request now
	 */
	m = bfa_reqq_next(itnim->bfa, itnim->reqq);
	if (!m) {
		/* No room; bfa_itnim_qresume() retries when space frees. */
		bfa_reqq_wait(itnim->bfa, itnim->reqq, &itnim->reqq_wait);
		return BFA_FALSE;
	}

	bfi_h2i_set(m->mh, BFI_MC_ITN, BFI_ITN_H2I_CREATE_REQ,
			bfa_fn_lpu(itnim->bfa));
	m->fw_handle = itnim->rport->fw_handle;
	m->class = FC_CLASS_3;
	m->seq_rec = itnim->seq_rec;
	m->msg_no = itnim->msg_no;
	bfa_stats(itnim, fw_create);

	/*
	 * queue I/O message to firmware
	 */
	bfa_reqq_produce(itnim->bfa, itnim->reqq, m->mh);
	return BFA_TRUE;
}
1053
1054static bfa_boolean_t
1055bfa_itnim_send_fwdelete(struct bfa_itnim_s *itnim)
1056{
Krishna Gudipatidd5aaf42011-06-13 15:51:24 -07001057 struct bfi_itn_delete_req_s *m;
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001058
Jing Huang5fbe25c2010-10-18 17:17:23 -07001059 /*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001060 * check for room in queue to send request now
1061 */
1062 m = bfa_reqq_next(itnim->bfa, itnim->reqq);
1063 if (!m) {
1064 bfa_reqq_wait(itnim->bfa, itnim->reqq, &itnim->reqq_wait);
1065 return BFA_FALSE;
1066 }
1067
Krishna Gudipatidd5aaf42011-06-13 15:51:24 -07001068 bfi_h2i_set(m->mh, BFI_MC_ITN, BFI_ITN_H2I_DELETE_REQ,
Krishna Gudipati3fd45982011-06-24 20:24:08 -07001069 bfa_fn_lpu(itnim->bfa));
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001070 m->fw_handle = itnim->rport->fw_handle;
1071 bfa_stats(itnim, fw_delete);
1072
Jing Huang5fbe25c2010-10-18 17:17:23 -07001073 /*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001074 * queue I/O message to firmware
1075 */
Krishna Gudipati3fd45982011-06-24 20:24:08 -07001076 bfa_reqq_produce(itnim->bfa, itnim->reqq, m->mh);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001077 return BFA_TRUE;
1078}
1079
Jing Huang5fbe25c2010-10-18 17:17:23 -07001080/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001081 * Cleanup all pending failed inflight requests.
1082 */
1083static void
1084bfa_itnim_delayed_comp(struct bfa_itnim_s *itnim, bfa_boolean_t iotov)
1085{
1086 struct bfa_ioim_s *ioim;
1087 struct list_head *qe, *qen;
1088
1089 list_for_each_safe(qe, qen, &itnim->delay_comp_q) {
1090 ioim = (struct bfa_ioim_s *)qe;
1091 bfa_ioim_delayed_comp(ioim, iotov);
1092 }
1093}
1094
Jing Huang5fbe25c2010-10-18 17:17:23 -07001095/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001096 * Start all pending IO requests.
1097 */
static void
bfa_itnim_iotov_online(struct bfa_itnim_s *itnim)
{
	struct bfa_ioim_s *ioim;

	/* Itnim is back online; stop the path TOV timer if running. */
	bfa_itnim_iotov_stop(itnim);

	/*
	 * Abort all inflight IO requests in the queue
	 */
	bfa_itnim_delayed_comp(itnim, BFA_FALSE);

	/*
	 * Start all pending IO requests.
	 */
	while (!list_empty(&itnim->pending_q)) {
		bfa_q_deq(&itnim->pending_q, &ioim);
		list_add_tail(&ioim->qe, &itnim->io_q);
		bfa_ioim_start(ioim);
	}
}
1119
Jing Huang5fbe25c2010-10-18 17:17:23 -07001120/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001121 * Fail all pending IO requests
1122 */
static void
bfa_itnim_iotov_cleanup(struct bfa_itnim_s *itnim)
{
	struct bfa_ioim_s *ioim;

	/*
	 * Fail all inflight IO requests in the queue
	 */
	bfa_itnim_delayed_comp(itnim, BFA_TRUE);

	/*
	 * Fail any pending IO requests.
	 */
	while (!list_empty(&itnim->pending_q)) {
		bfa_q_deq(&itnim->pending_q, &ioim);
		/* Move to the completion queue and fake an early timeout. */
		list_add_tail(&ioim->qe, &ioim->fcpim->ioim_comp_q);
		bfa_ioim_tov(ioim);
	}
}
1142
Jing Huang5fbe25c2010-10-18 17:17:23 -07001143/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001144 * IO TOV timer callback. Fail any pending IO requests.
1145 */
static void
bfa_itnim_iotov(void *itnim_arg)
{
	struct bfa_itnim_s *itnim = itnim_arg;

	/* The timer has fired, so it is no longer active. */
	itnim->iotov_active = BFA_FALSE;

	/* Notify the initiator driver and fail back all held IOs. */
	bfa_cb_itnim_tov_begin(itnim->ditn);
	bfa_itnim_iotov_cleanup(itnim);
	bfa_cb_itnim_tov(itnim->ditn);
}
1157
Jing Huang5fbe25c2010-10-18 17:17:23 -07001158/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001159 * Start IO TOV timer for failing back pending IO requests in offline state.
1160 */
static void
bfa_itnim_iotov_start(struct bfa_itnim_s *itnim)
{
	/* Timer is only armed when path TOV is configured (non-zero). */
	if (itnim->fcpim->path_tov > 0) {

		itnim->iotov_active = BFA_TRUE;
		/* The timer only makes sense while IOs are being held. */
		WARN_ON(!bfa_itnim_hold_io(itnim));
		bfa_timer_start(itnim->bfa, &itnim->timer,
			bfa_itnim_iotov, itnim, itnim->fcpim->path_tov);
	}
}
1172
Jing Huang5fbe25c2010-10-18 17:17:23 -07001173/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001174 * Stop IO TOV timer.
1175 */
1176static void
1177bfa_itnim_iotov_stop(struct bfa_itnim_s *itnim)
1178{
1179 if (itnim->iotov_active) {
1180 itnim->iotov_active = BFA_FALSE;
1181 bfa_timer_stop(&itnim->timer);
1182 }
1183}
1184
Jing Huang5fbe25c2010-10-18 17:17:23 -07001185/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001186 * Stop IO TOV timer.
1187 */
1188static void
1189bfa_itnim_iotov_delete(struct bfa_itnim_s *itnim)
1190{
1191 bfa_boolean_t pathtov_active = BFA_FALSE;
1192
1193 if (itnim->iotov_active)
1194 pathtov_active = BFA_TRUE;
1195
1196 bfa_itnim_iotov_stop(itnim);
1197 if (pathtov_active)
1198 bfa_cb_itnim_tov_begin(itnim->ditn);
1199 bfa_itnim_iotov_cleanup(itnim);
1200 if (pathtov_active)
1201 bfa_cb_itnim_tov(itnim->ditn);
1202}
1203
static void
bfa_itnim_update_del_itn_stats(struct bfa_itnim_s *itnim)
{
	struct bfa_fcpim_s *fcpim = BFA_FCPIM(itnim->bfa);
	/* Fold the dying itnim's counters into module-wide totals so they
	 * survive after the itnim is removed. */
	fcpim->del_itn_stats.del_itn_iocomp_aborted +=
		itnim->stats.iocomp_aborted;
	fcpim->del_itn_stats.del_itn_iocomp_timedout +=
		itnim->stats.iocomp_timedout;
	fcpim->del_itn_stats.del_itn_iocom_sqer_needed +=
		itnim->stats.iocom_sqer_needed;
	fcpim->del_itn_stats.del_itn_iocom_res_free +=
		itnim->stats.iocom_res_free;
	fcpim->del_itn_stats.del_itn_iocom_hostabrts +=
		itnim->stats.iocom_hostabrts;
	fcpim->del_itn_stats.del_itn_total_ios += itnim->stats.total_ios;
	fcpim->del_itn_stats.del_io_iocdowns += itnim->stats.io_iocdowns;
	fcpim->del_itn_stats.del_tm_iocdowns += itnim->stats.tm_iocdowns;
}
1222
Jing Huang5fbe25c2010-10-18 17:17:23 -07001223/*
Maggie Zhangda99dcc2010-12-09 19:13:20 -08001224 * bfa_itnim_public
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001225 */
1226
Jing Huang5fbe25c2010-10-18 17:17:23 -07001227/*
Maggie Zhangda99dcc2010-12-09 19:13:20 -08001228 * Itnim interrupt processing.
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001229 */
void
bfa_itnim_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
{
	struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa);
	union bfi_itn_i2h_msg_u msg;
	struct bfa_itnim_s *itnim;

	bfa_trc(bfa, m->mhdr.msg_id);

	msg.msg = m;

	switch (m->mhdr.msg_id) {
	case BFI_ITN_I2H_CREATE_RSP:
		/* Firmware acknowledged the itnim create request. */
		itnim = BFA_ITNIM_FROM_TAG(fcpim,
						msg.create_rsp->bfa_handle);
		WARN_ON(msg.create_rsp->status != BFA_STATUS_OK);
		bfa_stats(itnim, create_comps);
		bfa_sm_send_event(itnim, BFA_ITNIM_SM_FWRSP);
		break;

	case BFI_ITN_I2H_DELETE_RSP:
		/* Firmware acknowledged the itnim delete request. */
		itnim = BFA_ITNIM_FROM_TAG(fcpim,
						msg.delete_rsp->bfa_handle);
		WARN_ON(msg.delete_rsp->status != BFA_STATUS_OK);
		bfa_stats(itnim, delete_comps);
		bfa_sm_send_event(itnim, BFA_ITNIM_SM_FWRSP);
		break;

	case BFI_ITN_I2H_SLER_EVENT:
		/* Sequence-level error recovery event from firmware. */
		itnim = BFA_ITNIM_FROM_TAG(fcpim,
						msg.sler_event->bfa_handle);
		bfa_stats(itnim, sler_events);
		bfa_sm_send_event(itnim, BFA_ITNIM_SM_SLER);
		break;

	default:
		bfa_trc(bfa, m->mhdr.msg_id);
		WARN_ON(1);
	}
}
1270
Jing Huang5fbe25c2010-10-18 17:17:23 -07001271/*
Maggie Zhangda99dcc2010-12-09 19:13:20 -08001272 * bfa_itnim_api
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001273 */
1274
struct bfa_itnim_s *
bfa_itnim_create(struct bfa_s *bfa, struct bfa_rport_s *rport, void *ditn)
{
	struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa);
	struct bfa_itnim_s *itnim;

	/* Register bfa_itnim_isr to receive ITN messages for this rport. */
	bfa_itn_create(bfa, rport, bfa_itnim_isr);

	/* itnim slots are pre-allocated 1:1 with rport tags. */
	itnim = BFA_ITNIM_FROM_TAG(fcpim, rport->rport_tag);
	WARN_ON(itnim->rport != rport);

	itnim->ditn = ditn;

	bfa_stats(itnim, creates);
	bfa_sm_send_event(itnim, BFA_ITNIM_SM_CREATE);

	return itnim;
}
1293
void
bfa_itnim_delete(struct bfa_itnim_s *itnim)
{
	/* Kick off itnim teardown through the state machine. */
	bfa_stats(itnim, deletes);
	bfa_sm_send_event(itnim, BFA_ITNIM_SM_DELETE);
}
1300
void
bfa_itnim_online(struct bfa_itnim_s *itnim, bfa_boolean_t seq_rec)
{
	/* Remember the sequence-recovery setting for the fwcreate request. */
	itnim->seq_rec = seq_rec;
	bfa_stats(itnim, onlines);
	bfa_sm_send_event(itnim, BFA_ITNIM_SM_ONLINE);
}
1308
void
bfa_itnim_offline(struct bfa_itnim_s *itnim)
{
	/* Take the itnim offline through its state machine. */
	bfa_stats(itnim, offlines);
	bfa_sm_send_event(itnim, BFA_ITNIM_SM_OFFLINE);
}
1315
Jing Huang5fbe25c2010-10-18 17:17:23 -07001316/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001317 * Return true if itnim is considered offline for holding off IO request.
1318 * IO is not held if itnim is being deleted.
1319 */
1320bfa_boolean_t
1321bfa_itnim_hold_io(struct bfa_itnim_s *itnim)
1322{
1323 return itnim->fcpim->path_tov && itnim->iotov_active &&
1324 (bfa_sm_cmp_state(itnim, bfa_itnim_sm_fwcreate) ||
1325 bfa_sm_cmp_state(itnim, bfa_itnim_sm_sler) ||
1326 bfa_sm_cmp_state(itnim, bfa_itnim_sm_cleanup_offline) ||
1327 bfa_sm_cmp_state(itnim, bfa_itnim_sm_fwdelete) ||
1328 bfa_sm_cmp_state(itnim, bfa_itnim_sm_offline) ||
1329 bfa_sm_cmp_state(itnim, bfa_itnim_sm_iocdisable));
1330}
1331
void
bfa_itnim_clear_stats(struct bfa_itnim_s *itnim)
{
	int j;
	memset(&itnim->stats, 0, sizeof(itnim->stats));
	memset(&itnim->ioprofile, 0, sizeof(itnim->ioprofile));
	/* Re-seed latency minimums with ~0 so the next sample wins. */
	for (j = 0; j < BFA_IOBUCKET_MAX; j++)
		itnim->ioprofile.io_latency.min[j] = ~0;
}
1341
Jing Huang5fbe25c2010-10-18 17:17:23 -07001342/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001343 * BFA IO module state machine functions
1344 */
1345
Jing Huang5fbe25c2010-10-18 17:17:23 -07001346/*
Maggie Zhangda99dcc2010-12-09 19:13:20 -08001347 * IO is not started (unallocated).
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001348 */
static void
bfa_ioim_sm_uninit(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
{
	switch (event) {
	case BFA_IOIM_SM_START:
		if (!bfa_itnim_is_online(ioim->itnim)) {
			if (!bfa_itnim_hold_io(ioim->itnim)) {
				/* Itnim offline and not holding IO: fail
				 * back right away with path TOV status. */
				bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
				list_del(&ioim->qe);
				list_add_tail(&ioim->qe,
					&ioim->fcpim->ioim_comp_q);
				bfa_cb_queue(ioim->bfa, &ioim->hcb_qe,
						__bfa_cb_ioim_pathtov, ioim);
			} else {
				/* Park the IO on the pending queue until
				 * online or path TOV expiry. */
				list_del(&ioim->qe);
				list_add_tail(&ioim->qe,
					&ioim->itnim->pending_q);
			}
			break;
		}

		/* IOs with more SGEs than fit inline need SG pages first. */
		if (ioim->nsges > BFI_SGE_INLINE) {
			if (!bfa_ioim_sgpg_alloc(ioim)) {
				bfa_sm_set_state(ioim, bfa_ioim_sm_sgalloc);
				return;
			}
		}

		if (!bfa_ioim_send_ioreq(ioim)) {
			bfa_sm_set_state(ioim, bfa_ioim_sm_qfull);
			break;
		}

		bfa_sm_set_state(ioim, bfa_ioim_sm_active);
		break;

	case BFA_IOIM_SM_IOTOV:
		/* Path TOV fired while parked: complete with pathtov status. */
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_ioim_move_to_comp_q(ioim);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe,
				__bfa_cb_ioim_pathtov, ioim);
		break;

	case BFA_IOIM_SM_ABORT:
		/*
		 * IO in pending queue can get abort requests. Complete abort
		 * requests immediately.
		 */
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		WARN_ON(!bfa_q_is_on_q(&ioim->itnim->pending_q, ioim));
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe,
				__bfa_cb_ioim_abort, ioim);
		break;

	default:
		bfa_sm_fault(ioim->bfa, event);
	}
}
1407
Jing Huang5fbe25c2010-10-18 17:17:23 -07001408/*
Maggie Zhangda99dcc2010-12-09 19:13:20 -08001409 * IO is waiting for SG pages.
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001410 */
static void
bfa_ioim_sm_sgalloc(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
{
	bfa_trc(ioim->bfa, ioim->iotag);
	bfa_trc(ioim->bfa, event);

	switch (event) {
	case BFA_IOIM_SM_SGALLOCED:
		/* SG pages are available; try to issue the IO to firmware. */
		if (!bfa_ioim_send_ioreq(ioim)) {
			bfa_sm_set_state(ioim, bfa_ioim_sm_qfull);
			break;
		}
		bfa_sm_set_state(ioim, bfa_ioim_sm_active);
		break;

	case BFA_IOIM_SM_CLEANUP:
		/* Cancel the SG page wait before failing the IO back. */
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_sgpg_wcancel(ioim->bfa, &ioim->iosp->sgpg_wqe);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
			      ioim);
		bfa_ioim_notify_cleanup(ioim);
		break;

	case BFA_IOIM_SM_ABORT:
		/* Never reached firmware: abort completes immediately. */
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_sgpg_wcancel(ioim->bfa, &ioim->iosp->sgpg_wqe);
		bfa_ioim_move_to_comp_q(ioim);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
			      ioim);
		break;

	case BFA_IOIM_SM_HWFAIL:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_sgpg_wcancel(ioim->bfa, &ioim->iosp->sgpg_wqe);
		bfa_ioim_move_to_comp_q(ioim);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
			      ioim);
		break;

	default:
		bfa_sm_fault(ioim->bfa, event);
	}
}
1454
Jing Huang5fbe25c2010-10-18 17:17:23 -07001455/*
Maggie Zhangda99dcc2010-12-09 19:13:20 -08001456 * IO is active.
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001457 */
static void
bfa_ioim_sm_active(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
{
	switch (event) {
	case BFA_IOIM_SM_COMP_GOOD:
		/* Good completion from firmware. */
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_ioim_move_to_comp_q(ioim);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe,
				__bfa_cb_ioim_good_comp, ioim);
		break;

	case BFA_IOIM_SM_COMP:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_ioim_move_to_comp_q(ioim);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_comp,
			      ioim);
		break;

	case BFA_IOIM_SM_DONE:
		/* Like COMP, but the IO resource stays held until the HCB
		 * runs (hcb_free state). */
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb_free);
		bfa_ioim_move_to_comp_q(ioim);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_comp,
			      ioim);
		break;

	case BFA_IOIM_SM_ABORT:
		/* Host-requested (explicit) abort. */
		ioim->iosp->abort_explicit = BFA_TRUE;
		ioim->io_cbfn = __bfa_cb_ioim_abort;

		if (bfa_ioim_send_abort(ioim))
			bfa_sm_set_state(ioim, bfa_ioim_sm_abort);
		else {
			/* No room in request queue; wait for QRESUME. */
			bfa_sm_set_state(ioim, bfa_ioim_sm_abort_qfull);
			bfa_stats(ioim->itnim, qwait);
			bfa_reqq_wait(ioim->bfa, ioim->reqq,
					&ioim->iosp->reqq_wait);
		}
		break;

	case BFA_IOIM_SM_CLEANUP:
		/* Implicit abort driven by itnim cleanup. */
		ioim->iosp->abort_explicit = BFA_FALSE;
		ioim->io_cbfn = __bfa_cb_ioim_failed;

		if (bfa_ioim_send_abort(ioim))
			bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup);
		else {
			bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup_qfull);
			bfa_stats(ioim->itnim, qwait);
			bfa_reqq_wait(ioim->bfa, ioim->reqq,
					&ioim->iosp->reqq_wait);
		}
		break;

	case BFA_IOIM_SM_HWFAIL:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_ioim_move_to_comp_q(ioim);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
			      ioim);
		break;

	case BFA_IOIM_SM_SQRETRY:
		if (bfa_ioim_maxretry_reached(ioim)) {
			/* max retry reached, free IO */
			bfa_sm_set_state(ioim, bfa_ioim_sm_hcb_free);
			bfa_ioim_move_to_comp_q(ioim);
			bfa_cb_queue(ioim->bfa, &ioim->hcb_qe,
					__bfa_cb_ioim_failed, ioim);
			break;
		}
		/* waiting for IO tag resource free */
		bfa_sm_set_state(ioim, bfa_ioim_sm_cmnd_retry);
		break;

	default:
		bfa_sm_fault(ioim->bfa, event);
	}
}
1535
Jing Huang5fbe25c2010-10-18 17:17:23 -07001536/*
Maggie Zhangda99dcc2010-12-09 19:13:20 -08001537 * IO is retried with new tag.
1538 */
static void
bfa_ioim_sm_cmnd_retry(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
{
	switch (event) {
	case BFA_IOIM_SM_FREE:
		/* abts and rrq done. Now retry the IO with new tag */
		bfa_ioim_update_iotag(ioim);
		if (!bfa_ioim_send_ioreq(ioim)) {
			bfa_sm_set_state(ioim, bfa_ioim_sm_qfull);
			break;
		}
		bfa_sm_set_state(ioim, bfa_ioim_sm_active);
		break;

	case BFA_IOIM_SM_CLEANUP:
		/* Itnim cleanup overrides the retry: abort implicitly. */
		ioim->iosp->abort_explicit = BFA_FALSE;
		ioim->io_cbfn = __bfa_cb_ioim_failed;

		if (bfa_ioim_send_abort(ioim))
			bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup);
		else {
			bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup_qfull);
			bfa_stats(ioim->itnim, qwait);
			bfa_reqq_wait(ioim->bfa, ioim->reqq,
					&ioim->iosp->reqq_wait);
		}
		break;

	case BFA_IOIM_SM_HWFAIL:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_ioim_move_to_comp_q(ioim);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe,
				__bfa_cb_ioim_failed, ioim);
		break;

	case BFA_IOIM_SM_ABORT:
		/* in this state IO abort is done.
		 * Waiting for IO tag resource free.
		 */
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb_free);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
			      ioim);
		break;

	default:
		bfa_sm_fault(ioim->bfa, event);
	}
}
1587
Jing Huang5fbe25c2010-10-18 17:17:23 -07001588/*
Maggie Zhangda99dcc2010-12-09 19:13:20 -08001589 * IO is being aborted, waiting for completion from firmware.
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001590 */
static void
bfa_ioim_sm_abort(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
{
	bfa_trc(ioim->bfa, ioim->iotag);
	bfa_trc(ioim->bfa, event);

	switch (event) {
	case BFA_IOIM_SM_COMP_GOOD:
	case BFA_IOIM_SM_COMP:
	case BFA_IOIM_SM_DONE:
	case BFA_IOIM_SM_FREE:
		/* Completion events are ignored while the abort is
		 * outstanding in firmware. */
		break;

	case BFA_IOIM_SM_ABORT_DONE:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb_free);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
			      ioim);
		break;

	case BFA_IOIM_SM_ABORT_COMP:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_ioim_move_to_comp_q(ioim);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
			      ioim);
		break;

	case BFA_IOIM_SM_COMP_UTAG:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_ioim_move_to_comp_q(ioim);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
			      ioim);
		break;

	case BFA_IOIM_SM_CLEANUP:
		/* Cleanup overrides the explicit abort: resend implicitly. */
		WARN_ON(ioim->iosp->abort_explicit != BFA_TRUE);
		ioim->iosp->abort_explicit = BFA_FALSE;

		if (bfa_ioim_send_abort(ioim))
			bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup);
		else {
			bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup_qfull);
			bfa_stats(ioim->itnim, qwait);
			bfa_reqq_wait(ioim->bfa, ioim->reqq,
					&ioim->iosp->reqq_wait);
		}
		break;

	case BFA_IOIM_SM_HWFAIL:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_ioim_move_to_comp_q(ioim);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
			      ioim);
		break;

	default:
		bfa_sm_fault(ioim->bfa, event);
	}
}
1649
Jing Huang5fbe25c2010-10-18 17:17:23 -07001650/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001651 * IO is being cleaned up (implicit abort), waiting for completion from
1652 * firmware.
1653 */
static void
bfa_ioim_sm_cleanup(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
{
	bfa_trc(ioim->bfa, ioim->iotag);
	bfa_trc(ioim->bfa, event);

	switch (event) {
	case BFA_IOIM_SM_COMP_GOOD:
	case BFA_IOIM_SM_COMP:
	case BFA_IOIM_SM_DONE:
	case BFA_IOIM_SM_FREE:
		/* Completion events are ignored while the implicit abort
		 * is outstanding in firmware. */
		break;

	case BFA_IOIM_SM_ABORT:
		/*
		 * IO is already being aborted implicitly
		 */
		ioim->io_cbfn = __bfa_cb_ioim_abort;
		break;

	case BFA_IOIM_SM_ABORT_DONE:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb_free);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, ioim->io_cbfn, ioim);
		bfa_ioim_notify_cleanup(ioim);
		break;

	case BFA_IOIM_SM_ABORT_COMP:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, ioim->io_cbfn, ioim);
		bfa_ioim_notify_cleanup(ioim);
		break;

	case BFA_IOIM_SM_COMP_UTAG:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, ioim->io_cbfn, ioim);
		bfa_ioim_notify_cleanup(ioim);
		break;

	case BFA_IOIM_SM_HWFAIL:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_ioim_move_to_comp_q(ioim);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
			      ioim);
		break;

	case BFA_IOIM_SM_CLEANUP:
		/*
		 * IO can be in cleanup state already due to TM command.
		 * 2nd cleanup request comes from ITN offline event.
		 */
		break;

	default:
		bfa_sm_fault(ioim->bfa, event);
	}
}
1710
Jing Huang5fbe25c2010-10-18 17:17:23 -07001711/*
Maggie Zhangda99dcc2010-12-09 19:13:20 -08001712 * IO is waiting for room in request CQ
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001713 */
static void
bfa_ioim_sm_qfull(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
{
	bfa_trc(ioim->bfa, ioim->iotag);
	bfa_trc(ioim->bfa, event);

	switch (event) {
	case BFA_IOIM_SM_QRESUME:
		/* Request CQ has room again; send the IO request. */
		bfa_sm_set_state(ioim, bfa_ioim_sm_active);
		bfa_ioim_send_ioreq(ioim);
		break;

	case BFA_IOIM_SM_ABORT:
		/* Never reached firmware: cancel the queue wait and
		 * complete the abort immediately. */
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
		bfa_ioim_move_to_comp_q(ioim);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
			      ioim);
		break;

	case BFA_IOIM_SM_CLEANUP:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
			      ioim);
		bfa_ioim_notify_cleanup(ioim);
		break;

	case BFA_IOIM_SM_HWFAIL:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
		bfa_ioim_move_to_comp_q(ioim);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
			      ioim);
		break;

	default:
		bfa_sm_fault(ioim->bfa, event);
	}
}
1753}
1754
Jing Huang5fbe25c2010-10-18 17:17:23 -07001755/*
Maggie Zhangda99dcc2010-12-09 19:13:20 -08001756 * Active IO is being aborted, waiting for room in request CQ.
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001757 */
1758static void
1759bfa_ioim_sm_abort_qfull(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
1760{
1761 bfa_trc(ioim->bfa, ioim->iotag);
1762 bfa_trc(ioim->bfa, event);
1763
1764 switch (event) {
1765 case BFA_IOIM_SM_QRESUME:
1766 bfa_sm_set_state(ioim, bfa_ioim_sm_abort);
1767 bfa_ioim_send_abort(ioim);
1768 break;
1769
1770 case BFA_IOIM_SM_CLEANUP:
Jing Huangd4b671c2010-12-26 21:46:35 -08001771 WARN_ON(ioim->iosp->abort_explicit != BFA_TRUE);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001772 ioim->iosp->abort_explicit = BFA_FALSE;
1773 bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup_qfull);
1774 break;
1775
1776 case BFA_IOIM_SM_COMP_GOOD:
1777 case BFA_IOIM_SM_COMP:
1778 bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
1779 bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
1780 bfa_ioim_move_to_comp_q(ioim);
1781 bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
1782 ioim);
1783 break;
1784
1785 case BFA_IOIM_SM_DONE:
1786 bfa_sm_set_state(ioim, bfa_ioim_sm_hcb_free);
1787 bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
1788 bfa_ioim_move_to_comp_q(ioim);
1789 bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
1790 ioim);
1791 break;
1792
1793 case BFA_IOIM_SM_HWFAIL:
1794 bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
1795 bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
1796 bfa_ioim_move_to_comp_q(ioim);
1797 bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
1798 ioim);
1799 break;
1800
1801 default:
1802 bfa_sm_fault(ioim->bfa, event);
1803 }
1804}
1805
Jing Huang5fbe25c2010-10-18 17:17:23 -07001806/*
Maggie Zhangda99dcc2010-12-09 19:13:20 -08001807 * Active IO is being cleaned up, waiting for room in request CQ.
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001808 */
static void
bfa_ioim_sm_cleanup_qfull(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
{
	bfa_trc(ioim->bfa, ioim->iotag);
	bfa_trc(ioim->bfa, event);

	switch (event) {
	case BFA_IOIM_SM_QRESUME:
		/* Room available; issue the implicit abort. */
		bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup);
		bfa_ioim_send_abort(ioim);
		break;

	case BFA_IOIM_SM_ABORT:
		/*
		 * IO is already being cleaned up implicitly
		 */
		ioim->io_cbfn = __bfa_cb_ioim_abort;
		break;

	case BFA_IOIM_SM_COMP_GOOD:
	case BFA_IOIM_SM_COMP:
		/* IO completed before the abort could be queued. */
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, ioim->io_cbfn, ioim);
		bfa_ioim_notify_cleanup(ioim);
		break;

	case BFA_IOIM_SM_DONE:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb_free);
		bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, ioim->io_cbfn, ioim);
		bfa_ioim_notify_cleanup(ioim);
		break;

	case BFA_IOIM_SM_HWFAIL:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
		bfa_ioim_move_to_comp_q(ioim);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
			      ioim);
		break;

	default:
		bfa_sm_fault(ioim->bfa, event);
	}
}
1855
Jing Huang5fbe25c2010-10-18 17:17:23 -07001856/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001857 * IO bfa callback is pending.
1858 */
1859static void
1860bfa_ioim_sm_hcb(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
1861{
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001862 switch (event) {
1863 case BFA_IOIM_SM_HCB:
1864 bfa_sm_set_state(ioim, bfa_ioim_sm_uninit);
1865 bfa_ioim_free(ioim);
1866 break;
1867
1868 case BFA_IOIM_SM_CLEANUP:
1869 bfa_ioim_notify_cleanup(ioim);
1870 break;
1871
1872 case BFA_IOIM_SM_HWFAIL:
1873 break;
1874
1875 default:
1876 bfa_sm_fault(ioim->bfa, event);
1877 }
1878}
1879
Jing Huang5fbe25c2010-10-18 17:17:23 -07001880/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001881 * IO bfa callback is pending. IO resource cannot be freed.
1882 */
static void
bfa_ioim_sm_hcb_free(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
{
	bfa_trc(ioim->bfa, ioim->iotag);
	bfa_trc(ioim->bfa, event);

	switch (event) {
	case BFA_IOIM_SM_HCB:
		/* Callback ran first; park the IO on the resource-free
		 * queue until firmware returns the tag. */
		bfa_sm_set_state(ioim, bfa_ioim_sm_resfree);
		list_del(&ioim->qe);
		list_add_tail(&ioim->qe, &ioim->fcpim->ioim_resfree_q);
		break;

	case BFA_IOIM_SM_FREE:
		/* Firmware freed the tag first; only the callback remains. */
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		break;

	case BFA_IOIM_SM_CLEANUP:
		bfa_ioim_notify_cleanup(ioim);
		break;

	case BFA_IOIM_SM_HWFAIL:
		/* IOC down implies no firmware free will arrive. */
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		break;

	default:
		bfa_sm_fault(ioim->bfa, event);
	}
}
1912
Jing Huang5fbe25c2010-10-18 17:17:23 -07001913/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001914 * IO is completed, waiting resource free from firmware.
1915 */
static void
bfa_ioim_sm_resfree(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
{
	bfa_trc(ioim->bfa, ioim->iotag);
	bfa_trc(ioim->bfa, event);

	switch (event) {
	case BFA_IOIM_SM_FREE:
		/* Firmware released the IO tag - the IO can be recycled. */
		bfa_sm_set_state(ioim, bfa_ioim_sm_uninit);
		bfa_ioim_free(ioim);
		break;

	case BFA_IOIM_SM_CLEANUP:
		bfa_ioim_notify_cleanup(ioim);
		break;

	case BFA_IOIM_SM_HWFAIL:
		/* IOC failure is handled elsewhere; nothing to do here. */
		break;

	default:
		bfa_sm_fault(ioim->bfa, event);
	}
}
1939
1940
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001941static void
1942__bfa_cb_ioim_good_comp(void *cbarg, bfa_boolean_t complete)
1943{
1944 struct bfa_ioim_s *ioim = cbarg;
1945
1946 if (!complete) {
1947 bfa_sm_send_event(ioim, BFA_IOIM_SM_HCB);
1948 return;
1949 }
1950
1951 bfa_cb_ioim_good_comp(ioim->bfa->bfad, ioim->dio);
1952}
1953
/*
 * bfa callback: complete an IO to the OS driver, decoding SCSI status,
 * sense data and residual count from the saved firmware response.
 */
static void
__bfa_cb_ioim_comp(void *cbarg, bfa_boolean_t complete)
{
	struct bfa_ioim_s	*ioim = cbarg;
	struct bfi_ioim_rsp_s *m;
	u8	*snsinfo = NULL;
	u8	sns_len = 0;
	s32	residue = 0;

	if (!complete) {
		bfa_sm_send_event(ioim, BFA_IOIM_SM_HCB);
		return;
	}

	/* Response message was saved by the ISR while in the active state. */
	m = (struct bfi_ioim_rsp_s *) &ioim->iosp->comp_rspmsg;
	if (m->io_status == BFI_IOIM_STS_OK) {
		/*
		 * setup sense information, if present
		 */
		if ((m->scsi_status == SCSI_STATUS_CHECK_CONDITION) &&
					m->sns_len) {
			sns_len = m->sns_len;
			snsinfo = BFA_SNSINFO_FROM_TAG(ioim->fcpim->fcp,
						ioim->iotag);
		}

		/*
		 * setup residue value correctly for normal completions
		 * (underrun reported positive, overrun negative)
		 */
		if (m->resid_flags == FCP_RESID_UNDER) {
			residue = be32_to_cpu(m->residue);
			bfa_stats(ioim->itnim, iocomp_underrun);
		}
		if (m->resid_flags == FCP_RESID_OVER) {
			residue = be32_to_cpu(m->residue);
			residue = -residue;
			bfa_stats(ioim->itnim, iocomp_overrun);
		}
	}

	bfa_cb_ioim_done(ioim->bfa->bfad, ioim->dio, m->io_status,
			  m->scsi_status, sns_len, snsinfo, residue);
}
1997
1998static void
1999__bfa_cb_ioim_failed(void *cbarg, bfa_boolean_t complete)
2000{
2001 struct bfa_ioim_s *ioim = cbarg;
2002
2003 if (!complete) {
2004 bfa_sm_send_event(ioim, BFA_IOIM_SM_HCB);
2005 return;
2006 }
2007
2008 bfa_cb_ioim_done(ioim->bfa->bfad, ioim->dio, BFI_IOIM_STS_ABORTED,
2009 0, 0, NULL, 0);
2010}
2011
2012static void
2013__bfa_cb_ioim_pathtov(void *cbarg, bfa_boolean_t complete)
2014{
2015 struct bfa_ioim_s *ioim = cbarg;
2016
2017 bfa_stats(ioim->itnim, path_tov_expired);
2018 if (!complete) {
2019 bfa_sm_send_event(ioim, BFA_IOIM_SM_HCB);
2020 return;
2021 }
2022
2023 bfa_cb_ioim_done(ioim->bfa->bfad, ioim->dio, BFI_IOIM_STS_PATHTOV,
2024 0, 0, NULL, 0);
2025}
2026
2027static void
2028__bfa_cb_ioim_abort(void *cbarg, bfa_boolean_t complete)
2029{
2030 struct bfa_ioim_s *ioim = cbarg;
2031
2032 if (!complete) {
2033 bfa_sm_send_event(ioim, BFA_IOIM_SM_HCB);
2034 return;
2035 }
2036
2037 bfa_cb_ioim_abort(ioim->bfa->bfad, ioim->dio);
2038}
2039
2040static void
2041bfa_ioim_sgpg_alloced(void *cbarg)
2042{
2043 struct bfa_ioim_s *ioim = cbarg;
2044
2045 ioim->nsgpgs = BFA_SGPG_NPAGE(ioim->nsges);
2046 list_splice_tail_init(&ioim->iosp->sgpg_wqe.sgpg_q, &ioim->sgpg_q);
Maggie Zhange3e7d3e2010-12-09 19:10:27 -08002047 ioim->sgpg = bfa_q_first(&ioim->sgpg_q);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002048 bfa_sm_send_event(ioim, BFA_IOIM_SM_SGALLOCED);
2049}
2050
Jing Huang5fbe25c2010-10-18 17:17:23 -07002051/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002052 * Send I/O request to firmware.
2053 */
/*
 * Build and queue an FCP IO request message to firmware. The first SG
 * entry is placed inline in the message; the rest go into pre-allocated
 * SG pages chained with link elements. Returns BFA_FALSE (and arms a
 * request-queue wait) when the request CQ is full.
 */
static bfa_boolean_t
bfa_ioim_send_ioreq(struct bfa_ioim_s *ioim)
{
	struct bfa_itnim_s *itnim = ioim->itnim;
	struct bfi_ioim_req_s *m;
	static struct fcp_cmnd_s cmnd_z0 = { { { 0 } } };
	struct bfi_sge_s *sge, *sgpge;
	u32	pgdlen = 0;
	u32	fcp_dl;
	u64	addr;
	struct scatterlist *sg;
	struct bfa_sgpg_s *sgpg;
	struct scsi_cmnd *cmnd = (struct scsi_cmnd *) ioim->dio;
	u32	i, sge_id, pgcumsz;
	enum dma_data_direction dmadir;

	/*
	 * check for room in queue to send request now
	 */
	m = bfa_reqq_next(ioim->bfa, ioim->reqq);
	if (!m) {
		bfa_stats(ioim->itnim, qwait);
		bfa_reqq_wait(ioim->bfa, ioim->reqq,
			&ioim->iosp->reqq_wait);
		return BFA_FALSE;
	}

	/*
	 * build i/o request message next
	 */
	m->io_tag = cpu_to_be16(ioim->iotag);
	m->rport_hdl = ioim->itnim->rport->fw_handle;
	m->io_timeout = 0;

	sge = &m->sges[0];
	sgpg = ioim->sgpg;
	sge_id = 0;
	sgpge = NULL;
	pgcumsz = 0;
	scsi_for_each_sg(cmnd, sg, ioim->nsges, i) {
		if (i == 0) {
			/* build inline IO SG element (big-endian) */
			addr = bfa_sgaddr_le(sg_dma_address(sg));
			sge->sga = *(union bfi_addr_u *) &addr;
			pgdlen = sg_dma_len(sg);
			sge->sg_len = pgdlen;
			sge->flags = (ioim->nsges > BFI_SGE_INLINE) ?
					BFI_SGE_DATA_CPL : BFI_SGE_DATA_LAST;
			bfa_sge_to_be(sge);
			sge++;
		} else {
			/* start a fresh SG page on the first overflow entry */
			if (sge_id == 0)
				sgpge = sgpg->sgpg->sges;

			addr = bfa_sgaddr_le(sg_dma_address(sg));
			sgpge->sga = *(union bfi_addr_u *) &addr;
			sgpge->sg_len = sg_dma_len(sg);
			pgcumsz += sgpge->sg_len;

			/* set flags */
			if (i < (ioim->nsges - 1) &&
					sge_id < (BFI_SGPG_DATA_SGES - 1))
				sgpge->flags = BFI_SGE_DATA;
			else if (i < (ioim->nsges - 1))
				sgpge->flags = BFI_SGE_DATA_CPL;
			else
				sgpge->flags = BFI_SGE_DATA_LAST;

			bfa_sge_to_le(sgpge);

			sgpge++;
			if (i == (ioim->nsges - 1)) {
				/* terminate the page with a PGDLEN element */
				sgpge->flags = BFI_SGE_PGDLEN;
				sgpge->sga.a32.addr_lo = 0;
				sgpge->sga.a32.addr_hi = 0;
				sgpge->sg_len = pgcumsz;
				bfa_sge_to_le(sgpge);
			} else if (++sge_id == BFI_SGPG_DATA_SGES) {
				/* page full - chain to the next SG page */
				sgpg = (struct bfa_sgpg_s *) bfa_q_next(sgpg);
				sgpge->flags = BFI_SGE_LINK;
				sgpge->sga = sgpg->sgpg_pa;
				sgpge->sg_len = pgcumsz;
				bfa_sge_to_le(sgpge);
				sge_id = 0;
				pgcumsz = 0;
			}
		}
	}

	/* second inline element: SG page chain head, or zero if inline-only */
	if (ioim->nsges > BFI_SGE_INLINE) {
		sge->sga = ioim->sgpg->sgpg_pa;
	} else {
		sge->sga.a32.addr_lo = 0;
		sge->sga.a32.addr_hi = 0;
	}
	sge->sg_len = pgdlen;
	sge->flags = BFI_SGE_PGDLEN;
	bfa_sge_to_be(sge);

	/*
	 * set up I/O command parameters
	 */
	m->cmnd = cmnd_z0;
	int_to_scsilun(cmnd->device->lun, &m->cmnd.lun);
	dmadir = cmnd->sc_data_direction;
	if (dmadir == DMA_TO_DEVICE)
		m->cmnd.iodir = FCP_IODIR_WRITE;
	else if (dmadir == DMA_FROM_DEVICE)
		m->cmnd.iodir = FCP_IODIR_READ;
	else
		m->cmnd.iodir = FCP_IODIR_NONE;

	m->cmnd.cdb = *(struct scsi_cdb_s *) cmnd->cmnd;
	fcp_dl = scsi_bufflen(cmnd);
	m->cmnd.fcp_dl = cpu_to_be32(fcp_dl);

	/*
	 * set up I/O message header
	 */
	switch (m->cmnd.iodir) {
	case FCP_IODIR_READ:
		bfi_h2i_set(m->mh, BFI_MC_IOIM_READ, 0, bfa_fn_lpu(ioim->bfa));
		bfa_stats(itnim, input_reqs);
		ioim->itnim->stats.rd_throughput += fcp_dl;
		break;
	case FCP_IODIR_WRITE:
		bfi_h2i_set(m->mh, BFI_MC_IOIM_WRITE, 0, bfa_fn_lpu(ioim->bfa));
		bfa_stats(itnim, output_reqs);
		ioim->itnim->stats.wr_throughput += fcp_dl;
		break;
	case FCP_IODIR_RW:
		bfa_stats(itnim, input_reqs);
		bfa_stats(itnim, output_reqs);
		/* fall through - RW uses the generic IO message class */
	default:
		bfi_h2i_set(m->mh, BFI_MC_IOIM_IO, 0, bfa_fn_lpu(ioim->bfa));
	}
	/* sequence-recovery and unaligned lengths also use the generic class */
	if (itnim->seq_rec ||
	    (scsi_bufflen(cmnd) & (sizeof(u32) - 1)))
		bfi_h2i_set(m->mh, BFI_MC_IOIM_IO, 0, bfa_fn_lpu(ioim->bfa));

	/*
	 * queue I/O message to firmware
	 */
	bfa_reqq_produce(ioim->bfa, ioim->reqq, m->mh);
	return BFA_TRUE;
}
2200
Jing Huang5fbe25c2010-10-18 17:17:23 -07002201/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002202 * Setup any additional SG pages needed.Inline SG element is setup
2203 * at queuing time.
2204 */
static bfa_boolean_t
bfa_ioim_sgpg_alloc(struct bfa_ioim_s *ioim)
{
	u16	nsgpgs;

	/* Only called when the SG list overflows the inline elements. */
	WARN_ON(ioim->nsges <= BFI_SGE_INLINE);

	/*
	 * allocate SG pages needed
	 */
	nsgpgs = BFA_SGPG_NPAGE(ioim->nsges);
	if (!nsgpgs)
		return BFA_TRUE;

	/* On shortage, queue on the SG page wait list and report failure;
	 * bfa_ioim_sgpg_alloced() resumes the IO when pages free up. */
	if (bfa_sgpg_malloc(ioim->bfa, &ioim->sgpg_q, nsgpgs)
	    != BFA_STATUS_OK) {
		bfa_sgpg_wait(ioim->bfa, &ioim->iosp->sgpg_wqe, nsgpgs);
		return BFA_FALSE;
	}

	ioim->nsgpgs = nsgpgs;
	ioim->sgpg = bfa_q_first(&ioim->sgpg_q);

	return BFA_TRUE;
}
2230
Jing Huang5fbe25c2010-10-18 17:17:23 -07002231/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002232 * Send I/O abort request to firmware.
2233 */
static bfa_boolean_t
bfa_ioim_send_abort(struct bfa_ioim_s *ioim)
{
	struct bfi_ioim_abort_req_s *m;
	enum bfi_ioim_h2i	msgop;

	/*
	 * check for room in queue to send request now
	 */
	m = bfa_reqq_next(ioim->bfa, ioim->reqq);
	if (!m)
		return BFA_FALSE;

	/*
	 * build i/o request message next: explicit host abort vs.
	 * implicit cleanup (itnim going offline)
	 */
	if (ioim->iosp->abort_explicit)
		msgop = BFI_IOIM_H2I_IOABORT_REQ;
	else
		msgop = BFI_IOIM_H2I_IOCLEANUP_REQ;

	bfi_h2i_set(m->mh, BFI_MC_IOIM, msgop, bfa_fn_lpu(ioim->bfa));
	m->io_tag    = cpu_to_be16(ioim->iotag);
	/* abort_tag lets the ISR discard stale abort responses */
	m->abort_tag = ++ioim->abort_tag;

	/*
	 * queue I/O message to firmware
	 */
	bfa_reqq_produce(ioim->bfa, ioim->reqq, m->mh);
	return BFA_TRUE;
}
2265
Jing Huang5fbe25c2010-10-18 17:17:23 -07002266/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002267 * Call to resume any I/O requests waiting for room in request queue.
2268 */
2269static void
2270bfa_ioim_qresume(void *cbarg)
2271{
2272 struct bfa_ioim_s *ioim = cbarg;
2273
2274 bfa_stats(ioim->itnim, qresumes);
2275 bfa_sm_send_event(ioim, BFA_IOIM_SM_QRESUME);
2276}
2277
2278
/*
 * Notify cleanup completion for this IO: rehome it off the itnim and
 * release whoever is waiting (the itnim offline path or a TM command).
 */
static void
bfa_ioim_notify_cleanup(struct bfa_ioim_s *ioim)
{
	/*
	 * Move IO from itnim queue to fcpim global queue since itnim will be
	 * freed.
	 */
	list_del(&ioim->qe);
	list_add_tail(&ioim->qe, &ioim->fcpim->ioim_comp_q);

	if (!ioim->iosp->tskim) {
		/* If delayed completion is active, park the callback on the
		 * itnim delay queue instead of completing immediately. */
		if (ioim->fcpim->delay_comp && ioim->itnim->iotov_active) {
			bfa_cb_dequeue(&ioim->hcb_qe);
			list_del(&ioim->qe);
			list_add_tail(&ioim->qe, &ioim->itnim->delay_comp_q);
		}
		bfa_itnim_iodone(ioim->itnim);
	} else
		/* Cleanup was driven by a TM command - drop its wait count. */
		bfa_wc_down(&ioim->iosp->tskim->wc);
}
2299
2300static bfa_boolean_t
2301bfa_ioim_is_abortable(struct bfa_ioim_s *ioim)
2302{
2303 if ((bfa_sm_cmp_state(ioim, bfa_ioim_sm_uninit) &&
2304 (!bfa_q_is_on_q(&ioim->itnim->pending_q, ioim))) ||
2305 (bfa_sm_cmp_state(ioim, bfa_ioim_sm_abort)) ||
2306 (bfa_sm_cmp_state(ioim, bfa_ioim_sm_abort_qfull)) ||
2307 (bfa_sm_cmp_state(ioim, bfa_ioim_sm_hcb)) ||
2308 (bfa_sm_cmp_state(ioim, bfa_ioim_sm_hcb_free)) ||
2309 (bfa_sm_cmp_state(ioim, bfa_ioim_sm_resfree)))
2310 return BFA_FALSE;
2311
2312 return BFA_TRUE;
2313}
2314
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002315void
2316bfa_ioim_delayed_comp(struct bfa_ioim_s *ioim, bfa_boolean_t iotov)
2317{
Jing Huang5fbe25c2010-10-18 17:17:23 -07002318 /*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002319 * If path tov timer expired, failback with PATHTOV status - these
2320 * IO requests are not normally retried by IO stack.
2321 *
2322 * Otherwise device cameback online and fail it with normal failed
2323 * status so that IO stack retries these failed IO requests.
2324 */
2325 if (iotov)
2326 ioim->io_cbfn = __bfa_cb_ioim_pathtov;
2327 else {
2328 ioim->io_cbfn = __bfa_cb_ioim_failed;
2329 bfa_stats(ioim->itnim, iocom_nexus_abort);
2330 }
2331 bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, ioim->io_cbfn, ioim);
2332
Jing Huang5fbe25c2010-10-18 17:17:23 -07002333 /*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002334 * Move IO to fcpim global queue since itnim will be
2335 * freed.
2336 */
2337 list_del(&ioim->qe);
2338 list_add_tail(&ioim->qe, &ioim->fcpim->ioim_comp_q);
2339}
2340
2341
Jing Huang5fbe25c2010-10-18 17:17:23 -07002342/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002343 * Memory allocation and initialization.
2344 */
void
bfa_ioim_attach(struct bfa_fcpim_s *fcpim)
{
	struct bfa_ioim_s		*ioim;
	struct bfa_fcp_mod_s	*fcp = fcpim->fcp;
	struct bfa_ioim_sp_s	*iosp;
	u16		i;

	/*
	 * claim memory first: the IOIM array, then the slow-path array,
	 * both carved out of the FCP module's KVA block in order.
	 */
	ioim = (struct bfa_ioim_s *) bfa_mem_kva_curp(fcp);
	fcpim->ioim_arr = ioim;
	bfa_mem_kva_curp(fcp) = (u8 *) (ioim + fcpim->fcp->num_ioim_reqs);

	iosp = (struct bfa_ioim_sp_s *) bfa_mem_kva_curp(fcp);
	fcpim->ioim_sp_arr = iosp;
	bfa_mem_kva_curp(fcp) = (u8 *) (iosp + fcpim->fcp->num_ioim_reqs);

	/*
	 * Initialize ioim free queues
	 */
	INIT_LIST_HEAD(&fcpim->ioim_resfree_q);
	INIT_LIST_HEAD(&fcpim->ioim_comp_q);

	for (i = 0; i < fcpim->fcp->num_ioim_reqs;
	     i++, ioim++, iosp++) {
		/*
		 * initialize IOIM: iotag equals the array index
		 */
		memset(ioim, 0, sizeof(struct bfa_ioim_s));
		ioim->iotag   = i;
		ioim->bfa     = fcpim->bfa;
		ioim->fcpim   = fcpim;
		ioim->iosp    = iosp;
		INIT_LIST_HEAD(&ioim->sgpg_q);
		bfa_reqq_winit(&ioim->iosp->reqq_wait,
				   bfa_ioim_qresume, ioim);
		bfa_sgpg_winit(&ioim->iosp->sgpg_wqe,
				   bfa_ioim_sgpg_alloced, ioim);
		bfa_sm_set_state(ioim, bfa_ioim_sm_uninit);
	}
}
2388
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002389void
2390bfa_ioim_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
2391{
Krishna Gudipatie2187d72011-06-13 15:53:58 -07002392 struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002393 struct bfi_ioim_rsp_s *rsp = (struct bfi_ioim_rsp_s *) m;
2394 struct bfa_ioim_s *ioim;
2395 u16 iotag;
2396 enum bfa_ioim_event evt = BFA_IOIM_SM_COMP;
2397
Jing Huangba816ea2010-10-18 17:10:50 -07002398 iotag = be16_to_cpu(rsp->io_tag);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002399
2400 ioim = BFA_IOIM_FROM_TAG(fcpim, iotag);
Jing Huangd4b671c2010-12-26 21:46:35 -08002401 WARN_ON(ioim->iotag != iotag);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002402
2403 bfa_trc(ioim->bfa, ioim->iotag);
2404 bfa_trc(ioim->bfa, rsp->io_status);
2405 bfa_trc(ioim->bfa, rsp->reuse_io_tag);
2406
2407 if (bfa_sm_cmp_state(ioim, bfa_ioim_sm_active))
Jing Huang6a18b162010-10-18 17:08:54 -07002408 ioim->iosp->comp_rspmsg = *m;
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002409
2410 switch (rsp->io_status) {
2411 case BFI_IOIM_STS_OK:
2412 bfa_stats(ioim->itnim, iocomp_ok);
2413 if (rsp->reuse_io_tag == 0)
2414 evt = BFA_IOIM_SM_DONE;
2415 else
2416 evt = BFA_IOIM_SM_COMP;
2417 break;
2418
2419 case BFI_IOIM_STS_TIMEDOUT:
2420 bfa_stats(ioim->itnim, iocomp_timedout);
2421 case BFI_IOIM_STS_ABORTED:
2422 rsp->io_status = BFI_IOIM_STS_ABORTED;
2423 bfa_stats(ioim->itnim, iocomp_aborted);
2424 if (rsp->reuse_io_tag == 0)
2425 evt = BFA_IOIM_SM_DONE;
2426 else
2427 evt = BFA_IOIM_SM_COMP;
2428 break;
2429
2430 case BFI_IOIM_STS_PROTO_ERR:
2431 bfa_stats(ioim->itnim, iocom_proto_err);
Jing Huangd4b671c2010-12-26 21:46:35 -08002432 WARN_ON(!rsp->reuse_io_tag);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002433 evt = BFA_IOIM_SM_COMP;
2434 break;
2435
2436 case BFI_IOIM_STS_SQER_NEEDED:
2437 bfa_stats(ioim->itnim, iocom_sqer_needed);
Jing Huangd4b671c2010-12-26 21:46:35 -08002438 WARN_ON(rsp->reuse_io_tag != 0);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002439 evt = BFA_IOIM_SM_SQRETRY;
2440 break;
2441
2442 case BFI_IOIM_STS_RES_FREE:
2443 bfa_stats(ioim->itnim, iocom_res_free);
2444 evt = BFA_IOIM_SM_FREE;
2445 break;
2446
2447 case BFI_IOIM_STS_HOST_ABORTED:
2448 bfa_stats(ioim->itnim, iocom_hostabrts);
2449 if (rsp->abort_tag != ioim->abort_tag) {
2450 bfa_trc(ioim->bfa, rsp->abort_tag);
2451 bfa_trc(ioim->bfa, ioim->abort_tag);
2452 return;
2453 }
2454
2455 if (rsp->reuse_io_tag)
2456 evt = BFA_IOIM_SM_ABORT_COMP;
2457 else
2458 evt = BFA_IOIM_SM_ABORT_DONE;
2459 break;
2460
2461 case BFI_IOIM_STS_UTAG:
2462 bfa_stats(ioim->itnim, iocom_utags);
2463 evt = BFA_IOIM_SM_COMP_UTAG;
2464 break;
2465
2466 default:
Jing Huangd4b671c2010-12-26 21:46:35 -08002467 WARN_ON(1);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002468 }
2469
2470 bfa_sm_send_event(ioim, evt);
2471}
2472
2473void
2474bfa_ioim_good_comp_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
2475{
Krishna Gudipatie2187d72011-06-13 15:53:58 -07002476 struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002477 struct bfi_ioim_rsp_s *rsp = (struct bfi_ioim_rsp_s *) m;
2478 struct bfa_ioim_s *ioim;
2479 u16 iotag;
2480
Jing Huangba816ea2010-10-18 17:10:50 -07002481 iotag = be16_to_cpu(rsp->io_tag);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002482
2483 ioim = BFA_IOIM_FROM_TAG(fcpim, iotag);
Jing Huangd4b671c2010-12-26 21:46:35 -08002484 WARN_ON(BFA_IOIM_TAG_2_ID(ioim->iotag) != iotag);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002485
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002486 bfa_ioim_cb_profile_comp(fcpim, ioim);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002487 bfa_sm_send_event(ioim, BFA_IOIM_SM_COMP_GOOD);
2488}
2489
Jing Huang5fbe25c2010-10-18 17:17:23 -07002490/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002491 * Called by itnim to clean up IO while going offline.
2492 */
void
bfa_ioim_cleanup(struct bfa_ioim_s *ioim)
{
	bfa_trc(ioim->bfa, ioim->iotag);
	bfa_stats(ioim->itnim, io_cleanups);

	/* No TM command waits on this cleanup (itnim offline path). */
	ioim->iosp->tskim = NULL;
	bfa_sm_send_event(ioim, BFA_IOIM_SM_CLEANUP);
}
2502
/*
 * Clean up an IO on behalf of a task-management command; the tskim's
 * wait counter is released via bfa_ioim_notify_cleanup() when done.
 */
void
bfa_ioim_cleanup_tm(struct bfa_ioim_s *ioim, struct bfa_tskim_s *tskim)
{
	bfa_trc(ioim->bfa, ioim->iotag);
	bfa_stats(ioim->itnim, io_tmaborts);

	ioim->iosp->tskim = tskim;
	bfa_sm_send_event(ioim, BFA_IOIM_SM_CLEANUP);
}
2512
Jing Huang5fbe25c2010-10-18 17:17:23 -07002513/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002514 * IOC failure handling.
2515 */
void
bfa_ioim_iocdisable(struct bfa_ioim_s *ioim)
{
	bfa_trc(ioim->bfa, ioim->iotag);
	bfa_stats(ioim->itnim, io_iocdowns);
	/* IOC is down: fail the IO through the state machine. */
	bfa_sm_send_event(ioim, BFA_IOIM_SM_HWFAIL);
}
2523
Jing Huang5fbe25c2010-10-18 17:17:23 -07002524/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002525 * IO offline TOV popped. Fail the pending IO.
2526 */
void
bfa_ioim_tov(struct bfa_ioim_s *ioim)
{
	bfa_trc(ioim->bfa, ioim->iotag);
	/* Offline timeout popped: fail this pending IO. */
	bfa_sm_send_event(ioim, BFA_IOIM_SM_IOTOV);
}
2533
2534
Jing Huang5fbe25c2010-10-18 17:17:23 -07002535/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002536 * Allocate IOIM resource for initiator mode I/O request.
2537 */
struct bfa_ioim_s *
bfa_ioim_alloc(struct bfa_s *bfa, struct bfad_ioim_s *dio,
		struct bfa_itnim_s *itnim, u16 nsges)
{
	struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa);
	struct bfa_ioim_s *ioim;
	struct bfa_iotag_s *iotag = NULL;

	/*
	 * allocate IOIM resource; returns NULL when no IO tags remain
	 */
	bfa_q_deq(&fcpim->fcp->iotag_ioim_free_q, &iotag);
	if (!iotag) {
		bfa_stats(itnim, no_iotags);
		return NULL;
	}

	ioim = BFA_IOIM_FROM_TAG(fcpim, iotag->tag);

	ioim->dio = dio;
	ioim->itnim = itnim;
	ioim->nsges = nsges;
	ioim->nsgpgs = 0;

	bfa_stats(itnim, total_ios);
	fcpim->ios_active++;

	/* IO starts life on the itnim's active IO queue. */
	list_add_tail(&ioim->qe, &itnim->io_q);

	return ioim;
}
2569
void
bfa_ioim_free(struct bfa_ioim_s *ioim)
{
	struct bfa_fcpim_s *fcpim = ioim->fcpim;
	struct bfa_iotag_s *iotag;

	if (ioim->nsgpgs > 0)
		bfa_sgpg_mfree(ioim->bfa, &ioim->sgpg_q, ioim->nsgpgs);

	bfa_stats(ioim->itnim, io_comps);
	fcpim->ios_active--;

	/* Strip retry/reuse bits so the tag indexes the arrays again. */
	ioim->iotag &= BFA_IOIM_IOTAG_MASK;

	WARN_ON(!(ioim->iotag <
		 (fcpim->fcp->num_ioim_reqs + fcpim->fcp->num_fwtio_reqs)))<;
	iotag = BFA_IOTAG_FROM_TAG(fcpim->fcp, ioim->iotag);

	/* Return the tag to the pool it came from (initiator vs. target). */
	if (ioim->iotag < fcpim->fcp->num_ioim_reqs)
		list_add_tail(&iotag->qe, &fcpim->fcp->iotag_ioim_free_q);
	else
		list_add_tail(&iotag->qe, &fcpim->fcp->iotag_tio_free_q);

	list_del(&ioim->qe);
}
2595
void
bfa_ioim_start(struct bfa_ioim_s *ioim)
{
	bfa_ioim_cb_profile_start(ioim->fcpim, ioim);

	/*
	 * Obtain the queue over which this request has to be issued
	 *
	 * NOTE(review): when IO redirection is enabled, reqq is set to
	 * BFA_FALSE (i.e. queue 0) here - presumably the redirect path
	 * picks the real queue later; confirm against callers.
	 */
	ioim->reqq = bfa_fcpim_ioredirect_enabled(ioim->bfa) ?
			BFA_FALSE : bfa_itnim_get_reqq(ioim);

	bfa_sm_send_event(ioim, BFA_IOIM_SM_START);
}
2609
Jing Huang5fbe25c2010-10-18 17:17:23 -07002610/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002611 * Driver I/O abort request.
2612 */
2613bfa_status_t
2614bfa_ioim_abort(struct bfa_ioim_s *ioim)
2615{
2616
2617 bfa_trc(ioim->bfa, ioim->iotag);
2618
2619 if (!bfa_ioim_is_abortable(ioim))
2620 return BFA_STATUS_FAILED;
2621
2622 bfa_stats(ioim->itnim, io_aborts);
2623 bfa_sm_send_event(ioim, BFA_IOIM_SM_ABORT);
2624
2625 return BFA_STATUS_OK;
2626}
2627
Jing Huang5fbe25c2010-10-18 17:17:23 -07002628/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002629 * BFA TSKIM state machine functions
2630 */
2631
Jing Huang5fbe25c2010-10-18 17:17:23 -07002632/*
Maggie Zhangda99dcc2010-12-09 19:13:20 -08002633 * Task management command beginning state.
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002634 */
static void
bfa_tskim_sm_uninit(struct bfa_tskim_s *tskim, enum bfa_tskim_event event)
{
	bfa_trc(tskim->bfa, event);

	switch (event) {
	case BFA_TSKIM_SM_START:
		/* Collect the IOs this TM command covers before sending. */
		bfa_sm_set_state(tskim, bfa_tskim_sm_active);
		bfa_tskim_gather_ios(tskim);

		/*
		 * If device is offline, do not send TM on wire. Just cleanup
		 * any pending IO requests and complete TM request.
		 */
		if (!bfa_itnim_is_online(tskim->itnim)) {
			bfa_sm_set_state(tskim, bfa_tskim_sm_iocleanup);
			tskim->tsk_status = BFI_TSKIM_STS_OK;
			bfa_tskim_cleanup_ios(tskim);
			return;
		}

		/* Request CQ full - wait for space, then resend. */
		if (!bfa_tskim_send(tskim)) {
			bfa_sm_set_state(tskim, bfa_tskim_sm_qfull);
			bfa_stats(tskim->itnim, tm_qwait);
			bfa_reqq_wait(tskim->bfa, tskim->itnim->reqq,
					  &tskim->reqq_wait);
		}
		break;

	default:
		bfa_sm_fault(tskim->bfa, event);
	}
}
2668
Jing Huang5fbe25c2010-10-18 17:17:23 -07002669/*
Maggie Zhangda99dcc2010-12-09 19:13:20 -08002670 * TM command is active, awaiting completion from firmware to
2671 * cleanup IO requests in TM scope.
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002672 */
static void
bfa_tskim_sm_active(struct bfa_tskim_s *tskim, enum bfa_tskim_event event)
{
	bfa_trc(tskim->bfa, event);

	switch (event) {
	case BFA_TSKIM_SM_DONE:
		/* TM completed on the wire; now clean up covered IOs. */
		bfa_sm_set_state(tskim, bfa_tskim_sm_iocleanup);
		bfa_tskim_cleanup_ios(tskim);
		break;

	case BFA_TSKIM_SM_CLEANUP:
		/* ITN going offline - abort the in-flight TM command. */
		bfa_sm_set_state(tskim, bfa_tskim_sm_cleanup);
		if (!bfa_tskim_send_abort(tskim)) {
			bfa_sm_set_state(tskim, bfa_tskim_sm_cleanup_qfull);
			bfa_stats(tskim->itnim, tm_qwait);
			bfa_reqq_wait(tskim->bfa, tskim->itnim->reqq,
				&tskim->reqq_wait);
		}
		break;

	case BFA_TSKIM_SM_HWFAIL:
		/* IOC down - fail the TM and its covered IOs locally. */
		bfa_sm_set_state(tskim, bfa_tskim_sm_hcb);
		bfa_tskim_iocdisable_ios(tskim);
		bfa_tskim_qcomp(tskim, __bfa_cb_tskim_failed);
		break;

	default:
		bfa_sm_fault(tskim->bfa, event);
	}
}
2704
/*
 * An active TM is being cleaned up since ITN is offline. Awaiting cleanup
 * completion event from firmware.
 */
static void
bfa_tskim_sm_cleanup(struct bfa_tskim_s *tskim, enum bfa_tskim_event event)
{
	bfa_trc(tskim->bfa, event);

	switch (event) {
	case BFA_TSKIM_SM_DONE:
		/*
		 * Ignore and wait for ABORT completion from firmware.
		 */
		break;

	case BFA_TSKIM_SM_CLEANUP_DONE:
		/* Abort acknowledged by firmware; clean up IOs in TM scope. */
		bfa_sm_set_state(tskim, bfa_tskim_sm_iocleanup);
		bfa_tskim_cleanup_ios(tskim);
		break;

	case BFA_TSKIM_SM_HWFAIL:
		/* IOC failure: fail gathered IOs, schedule failure callback. */
		bfa_sm_set_state(tskim, bfa_tskim_sm_hcb);
		bfa_tskim_iocdisable_ios(tskim);
		bfa_tskim_qcomp(tskim, __bfa_cb_tskim_failed);
		break;

	default:
		bfa_sm_fault(tskim->bfa, event);
	}
}
2736
/*
 * IO requests in the TM scope are being cleaned up; awaiting
 * BFA_TSKIM_SM_IOS_DONE once all of them have completed.
 */
static void
bfa_tskim_sm_iocleanup(struct bfa_tskim_s *tskim, enum bfa_tskim_event event)
{
	bfa_trc(tskim->bfa, event);

	switch (event) {
	case BFA_TSKIM_SM_IOS_DONE:
		/* All IOs cleaned up; schedule the TM completion callback. */
		bfa_sm_set_state(tskim, bfa_tskim_sm_hcb);
		bfa_tskim_qcomp(tskim, __bfa_cb_tskim_done);
		break;

	case BFA_TSKIM_SM_CLEANUP:
		/*
		 * Ignore, TM command completed on wire.
		 * Notify TM completion on IO cleanup completion.
		 */
		break;

	case BFA_TSKIM_SM_HWFAIL:
		/* IOC failure: fail gathered IOs, schedule failure callback. */
		bfa_sm_set_state(tskim, bfa_tskim_sm_hcb);
		bfa_tskim_iocdisable_ios(tskim);
		bfa_tskim_qcomp(tskim, __bfa_cb_tskim_failed);
		break;

	default:
		bfa_sm_fault(tskim->bfa, event);
	}
}
2765
/*
 * Task management command is waiting for room in request CQ
 */
static void
bfa_tskim_sm_qfull(struct bfa_tskim_s *tskim, enum bfa_tskim_event event)
{
	bfa_trc(tskim->bfa, event);

	switch (event) {
	case BFA_TSKIM_SM_QRESUME:
		/* Queue space is available; transmit the deferred TM request. */
		bfa_sm_set_state(tskim, bfa_tskim_sm_active);
		bfa_tskim_send(tskim);
		break;

	case BFA_TSKIM_SM_CLEANUP:
		/*
		 * No need to send TM on wire since ITN is offline.
		 */
		bfa_sm_set_state(tskim, bfa_tskim_sm_iocleanup);
		bfa_reqq_wcancel(&tskim->reqq_wait);
		bfa_tskim_cleanup_ios(tskim);
		break;

	case BFA_TSKIM_SM_HWFAIL:
		/* Cancel the queue wait before failing gathered IOs. */
		bfa_sm_set_state(tskim, bfa_tskim_sm_hcb);
		bfa_reqq_wcancel(&tskim->reqq_wait);
		bfa_tskim_iocdisable_ios(tskim);
		bfa_tskim_qcomp(tskim, __bfa_cb_tskim_failed);
		break;

	default:
		bfa_sm_fault(tskim->bfa, event);
	}
}
2800
/*
 * Task management command is active, awaiting for room in request CQ
 * to send clean up request.
 */
static void
bfa_tskim_sm_cleanup_qfull(struct bfa_tskim_s *tskim,
		enum bfa_tskim_event event)
{
	bfa_trc(tskim->bfa, event);

	switch (event) {
	case BFA_TSKIM_SM_DONE:
		/* TM completed on wire; stop waiting for queue space. */
		bfa_reqq_wcancel(&tskim->reqq_wait);
		/*
		 * Fall through !!!
		 */
	case BFA_TSKIM_SM_QRESUME:
		/* Queue space available (or DONE above): send the abort. */
		bfa_sm_set_state(tskim, bfa_tskim_sm_cleanup);
		bfa_tskim_send_abort(tskim);
		break;

	case BFA_TSKIM_SM_HWFAIL:
		/* Cancel the queue wait before failing gathered IOs. */
		bfa_sm_set_state(tskim, bfa_tskim_sm_hcb);
		bfa_reqq_wcancel(&tskim->reqq_wait);
		bfa_tskim_iocdisable_ios(tskim);
		bfa_tskim_qcomp(tskim, __bfa_cb_tskim_failed);
		break;

	default:
		bfa_sm_fault(tskim->bfa, event);
	}
}
2833
/*
 * BFA callback is pending
 */
static void
bfa_tskim_sm_hcb(struct bfa_tskim_s *tskim, enum bfa_tskim_event event)
{
	bfa_trc(tskim->bfa, event);

	switch (event) {
	case BFA_TSKIM_SM_HCB:
		/* Callback delivered; recycle the TM command instance. */
		bfa_sm_set_state(tskim, bfa_tskim_sm_uninit);
		bfa_tskim_free(tskim);
		break;

	case BFA_TSKIM_SM_CLEANUP:
		/* ITNIM offline while callback pending; just notify. */
		bfa_tskim_notify_comp(tskim);
		break;

	case BFA_TSKIM_SM_HWFAIL:
		/* Completion callback is already queued; nothing more to do. */
		break;

	default:
		bfa_sm_fault(tskim->bfa, event);
	}
}
2859
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002860static void
2861__bfa_cb_tskim_done(void *cbarg, bfa_boolean_t complete)
2862{
2863 struct bfa_tskim_s *tskim = cbarg;
2864
2865 if (!complete) {
2866 bfa_sm_send_event(tskim, BFA_TSKIM_SM_HCB);
2867 return;
2868 }
2869
2870 bfa_stats(tskim->itnim, tm_success);
2871 bfa_cb_tskim_done(tskim->bfa->bfad, tskim->dtsk, tskim->tsk_status);
2872}
2873
2874static void
2875__bfa_cb_tskim_failed(void *cbarg, bfa_boolean_t complete)
2876{
2877 struct bfa_tskim_s *tskim = cbarg;
2878
2879 if (!complete) {
2880 bfa_sm_send_event(tskim, BFA_TSKIM_SM_HCB);
2881 return;
2882 }
2883
2884 bfa_stats(tskim->itnim, tm_failures);
2885 bfa_cb_tskim_done(tskim->bfa->bfad, tskim->dtsk,
2886 BFI_TSKIM_STS_FAILED);
2887}
2888
Maggie Zhangda99dcc2010-12-09 19:13:20 -08002889static bfa_boolean_t
Maggie Zhangf3148782010-12-09 19:11:39 -08002890bfa_tskim_match_scope(struct bfa_tskim_s *tskim, struct scsi_lun lun)
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002891{
2892 switch (tskim->tm_cmnd) {
2893 case FCP_TM_TARGET_RESET:
2894 return BFA_TRUE;
2895
2896 case FCP_TM_ABORT_TASK_SET:
2897 case FCP_TM_CLEAR_TASK_SET:
2898 case FCP_TM_LUN_RESET:
2899 case FCP_TM_CLEAR_ACA:
Maggie Zhangda99dcc2010-12-09 19:13:20 -08002900 return !memcmp(&tskim->lun, &lun, sizeof(lun));
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002901
2902 default:
Jing Huangd4b671c2010-12-26 21:46:35 -08002903 WARN_ON(1);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002904 }
2905
2906 return BFA_FALSE;
2907}
2908
/*
 * Gather affected IO requests and task management commands.
 */
static void
bfa_tskim_gather_ios(struct bfa_tskim_s *tskim)
{
	struct bfa_itnim_s *itnim = tskim->itnim;
	struct bfa_ioim_s *ioim;
	struct list_head *qe, *qen;
	struct scsi_cmnd *cmnd;
	struct scsi_lun scsilun;

	INIT_LIST_HEAD(&tskim->io_q);

	/*
	 * Gather any active IO requests first: move each IO on the itnim
	 * whose LUN is within the TM scope onto this tskim's io_q.
	 */
	list_for_each_safe(qe, qen, &itnim->io_q) {
		ioim = (struct bfa_ioim_s *) qe;
		cmnd = (struct scsi_cmnd *) ioim->dio;
		int_to_scsilun(cmnd->device->lun, &scsilun);
		if (bfa_tskim_match_scope(tskim, scsilun)) {
			list_del(&ioim->qe);
			list_add_tail(&ioim->qe, &tskim->io_q);
		}
	}

	/*
	 * Failback any pending IO requests immediately: matching IOs on
	 * the pending queue are moved to the completion queue and finished
	 * via bfa_ioim_tov().
	 */
	list_for_each_safe(qe, qen, &itnim->pending_q) {
		ioim = (struct bfa_ioim_s *) qe;
		cmnd = (struct scsi_cmnd *) ioim->dio;
		int_to_scsilun(cmnd->device->lun, &scsilun);
		if (bfa_tskim_match_scope(tskim, scsilun)) {
			list_del(&ioim->qe);
			list_add_tail(&ioim->qe, &ioim->fcpim->ioim_comp_q);
			bfa_ioim_tov(ioim);
		}
	}
}
2950
Jing Huang5fbe25c2010-10-18 17:17:23 -07002951/*
Maggie Zhangda99dcc2010-12-09 19:13:20 -08002952 * IO cleanup completion
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002953 */
2954static void
2955bfa_tskim_cleanp_comp(void *tskim_cbarg)
2956{
2957 struct bfa_tskim_s *tskim = tskim_cbarg;
2958
2959 bfa_stats(tskim->itnim, tm_io_comps);
2960 bfa_sm_send_event(tskim, BFA_TSKIM_SM_IOS_DONE);
2961}
2962
Jing Huang5fbe25c2010-10-18 17:17:23 -07002963/*
Maggie Zhangda99dcc2010-12-09 19:13:20 -08002964 * Gather affected IO requests and task management commands.
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002965 */
2966static void
2967bfa_tskim_cleanup_ios(struct bfa_tskim_s *tskim)
2968{
2969 struct bfa_ioim_s *ioim;
2970 struct list_head *qe, *qen;
2971
2972 bfa_wc_init(&tskim->wc, bfa_tskim_cleanp_comp, tskim);
2973
2974 list_for_each_safe(qe, qen, &tskim->io_q) {
2975 ioim = (struct bfa_ioim_s *) qe;
2976 bfa_wc_up(&tskim->wc);
2977 bfa_ioim_cleanup_tm(ioim, tskim);
2978 }
2979
2980 bfa_wc_wait(&tskim->wc);
2981}
2982
/*
 * Send task management request to firmware.
 *
 * Returns BFA_TRUE when the request was queued, BFA_FALSE when there
 * was no room in the request queue (caller must wait and retry).
 */
static bfa_boolean_t
bfa_tskim_send(struct bfa_tskim_s *tskim)
{
	struct bfa_itnim_s *itnim = tskim->itnim;
	struct bfi_tskim_req_s *m;

	/*
	 * check for room in queue to send request now
	 */
	m = bfa_reqq_next(tskim->bfa, itnim->reqq);
	if (!m)
		return BFA_FALSE;

	/*
	 * build i/o request message next
	 */
	bfi_h2i_set(m->mh, BFI_MC_TSKIM, BFI_TSKIM_H2I_TM_REQ,
			bfa_fn_lpu(tskim->bfa));

	/* tag goes out in wire (big-endian) byte order */
	m->tsk_tag = cpu_to_be16(tskim->tsk_tag);
	m->itn_fhdl = tskim->itnim->rport->fw_handle;
	m->t_secs = tskim->tsecs;
	m->lun = tskim->lun;
	m->tm_flags = tskim->tm_cmnd;

	/*
	 * queue I/O message to firmware
	 */
	bfa_reqq_produce(tskim->bfa, itnim->reqq, m->mh);
	return BFA_TRUE;
}
3017
/*
 * Send abort request to cleanup an active TM to firmware.
 *
 * Returns BFA_TRUE when the abort was queued, BFA_FALSE when there
 * was no room in the request queue (caller must wait and retry).
 */
static bfa_boolean_t
bfa_tskim_send_abort(struct bfa_tskim_s *tskim)
{
	struct bfa_itnim_s *itnim = tskim->itnim;
	struct bfi_tskim_abortreq_s *m;

	/*
	 * check for room in queue to send request now
	 */
	m = bfa_reqq_next(tskim->bfa, itnim->reqq);
	if (!m)
		return BFA_FALSE;

	/*
	 * build i/o request message next
	 */
	bfi_h2i_set(m->mh, BFI_MC_TSKIM, BFI_TSKIM_H2I_ABORT_REQ,
			bfa_fn_lpu(tskim->bfa));

	/* the abort identifies the original TM by its wire-order tag */
	m->tsk_tag = cpu_to_be16(tskim->tsk_tag);

	/*
	 * queue I/O message to firmware
	 */
	bfa_reqq_produce(tskim->bfa, itnim->reqq, m->mh);
	return BFA_TRUE;
}
3048
Jing Huang5fbe25c2010-10-18 17:17:23 -07003049/*
Maggie Zhangda99dcc2010-12-09 19:13:20 -08003050 * Call to resume task management cmnd waiting for room in request queue.
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07003051 */
3052static void
3053bfa_tskim_qresume(void *cbarg)
3054{
3055 struct bfa_tskim_s *tskim = cbarg;
3056
3057 bfa_stats(tskim->itnim, tm_qresumes);
3058 bfa_sm_send_event(tskim, BFA_TSKIM_SM_QRESUME);
3059}
3060
Jing Huang5fbe25c2010-10-18 17:17:23 -07003061/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07003062 * Cleanup IOs associated with a task mangement command on IOC failures.
3063 */
3064static void
3065bfa_tskim_iocdisable_ios(struct bfa_tskim_s *tskim)
3066{
3067 struct bfa_ioim_s *ioim;
3068 struct list_head *qe, *qen;
3069
3070 list_for_each_safe(qe, qen, &tskim->io_q) {
3071 ioim = (struct bfa_ioim_s *) qe;
3072 bfa_ioim_iocdisable(ioim);
3073 }
3074}
3075
/*
 * Notification on completions from related ioim.
 */
void
bfa_tskim_iodone(struct bfa_tskim_s *tskim)
{
	/* One more IO in the TM scope finished; wc fires cleanp_comp at 0. */
	bfa_wc_down(&tskim->wc);
}
3084
/*
 * Handle IOC h/w failure notification from itnim.
 */
void
bfa_tskim_iocdisable(struct bfa_tskim_s *tskim)
{
	/*
	 * NOTE(review): notify is cleared so no completion notification is
	 * issued later (see bfa_tskim_notify_comp usage) -- confirm intent.
	 */
	tskim->notify = BFA_FALSE;
	bfa_stats(tskim->itnim, tm_iocdowns);
	bfa_sm_send_event(tskim, BFA_TSKIM_SM_HWFAIL);
}
3095
/*
 * Cleanup TM command and associated IOs as part of ITNIM offline.
 */
void
bfa_tskim_cleanup(struct bfa_tskim_s *tskim)
{
	/* notify is set so completion is reported when cleanup finishes */
	tskim->notify = BFA_TRUE;
	bfa_stats(tskim->itnim, tm_cleanups);
	bfa_sm_send_event(tskim, BFA_TSKIM_SM_CLEANUP);
}
3106
/*
 * Memory allocation and initialization: carve the tskim array out of
 * the FCP module's pre-allocated KVA region and put every element on
 * the free queue.
 */
void
bfa_tskim_attach(struct bfa_fcpim_s *fcpim)
{
	struct bfa_tskim_s *tskim;
	struct bfa_fcp_mod_s *fcp = fcpim->fcp;
	u16	i;

	INIT_LIST_HEAD(&fcpim->tskim_free_q);
	INIT_LIST_HEAD(&fcpim->tskim_unused_q);

	/* current KVA cursor of the FCP module is the start of the array */
	tskim = (struct bfa_tskim_s *) bfa_mem_kva_curp(fcp);
	fcpim->tskim_arr = tskim;

	for (i = 0; i < fcpim->num_tskim_reqs; i++, tskim++) {
		/*
		 * initialize TSKIM
		 */
		memset(tskim, 0, sizeof(struct bfa_tskim_s));
		tskim->tsk_tag = i;	/* tag doubles as index into tskim_arr */
		tskim->bfa	= fcpim->bfa;
		tskim->fcpim	= fcpim;
		tskim->notify  = BFA_FALSE;
		bfa_reqq_winit(&tskim->reqq_wait, bfa_tskim_qresume,
					tskim);
		bfa_sm_set_state(tskim, bfa_tskim_sm_uninit);

		list_add_tail(&tskim->qe, &fcpim->tskim_free_q);
	}

	/* advance the KVA cursor past the tskim array */
	bfa_mem_kva_curp(fcp) = (u8 *) tskim;
}
3141
3142void
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07003143bfa_tskim_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
3144{
Krishna Gudipatie2187d72011-06-13 15:53:58 -07003145 struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07003146 struct bfi_tskim_rsp_s *rsp = (struct bfi_tskim_rsp_s *) m;
3147 struct bfa_tskim_s *tskim;
Jing Huangba816ea2010-10-18 17:10:50 -07003148 u16 tsk_tag = be16_to_cpu(rsp->tsk_tag);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07003149
3150 tskim = BFA_TSKIM_FROM_TAG(fcpim, tsk_tag);
Jing Huangd4b671c2010-12-26 21:46:35 -08003151 WARN_ON(tskim->tsk_tag != tsk_tag);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07003152
3153 tskim->tsk_status = rsp->tsk_status;
3154
Jing Huang5fbe25c2010-10-18 17:17:23 -07003155 /*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07003156 * Firmware sends BFI_TSKIM_STS_ABORTED status for abort
3157 * requests. All other statuses are for normal completions.
3158 */
3159 if (rsp->tsk_status == BFI_TSKIM_STS_ABORTED) {
3160 bfa_stats(tskim->itnim, tm_cleanup_comps);
3161 bfa_sm_send_event(tskim, BFA_TSKIM_SM_CLEANUP_DONE);
3162 } else {
3163 bfa_stats(tskim->itnim, tm_fw_rsps);
3164 bfa_sm_send_event(tskim, BFA_TSKIM_SM_DONE);
3165 }
3166}
3167
3168
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07003169struct bfa_tskim_s *
3170bfa_tskim_alloc(struct bfa_s *bfa, struct bfad_tskim_s *dtsk)
3171{
Krishna Gudipatie2187d72011-06-13 15:53:58 -07003172 struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07003173 struct bfa_tskim_s *tskim;
3174
3175 bfa_q_deq(&fcpim->tskim_free_q, &tskim);
3176
3177 if (tskim)
3178 tskim->dtsk = dtsk;
3179
3180 return tskim;
3181}
3182
/*
 * Return a completed tskim to the free queue.
 */
void
bfa_tskim_free(struct bfa_tskim_s *tskim)
{
	/* the tskim must still be linked on its itnim's active TM queue */
	WARN_ON(!bfa_q_is_on_q_func(&tskim->itnim->tsk_q, &tskim->qe));
	list_del(&tskim->qe);
	list_add_tail(&tskim->qe, &tskim->fcpim->tskim_free_q);
}
3190
/*
 * Start a task management command.
 *
 * @param[in]	tskim	BFA task management command instance
 * @param[in]	itnim	i-t nexus for the task management command
 * @param[in]	lun	lun, if applicable
 * @param[in]	tm_cmnd	Task management command code.
 * @param[in]	t_secs	Timeout in seconds
 *
 * @return None.
 */
void
bfa_tskim_start(struct bfa_tskim_s *tskim, struct bfa_itnim_s *itnim,
			struct scsi_lun lun,
			enum fcp_tm_cmnd tm_cmnd, u8 tsecs)
{
	tskim->itnim	= itnim;
	tskim->lun	= lun;
	tskim->tm_cmnd	= tm_cmnd;
	tskim->tsecs	= tsecs;
	tskim->notify	= BFA_FALSE;
	bfa_stats(itnim, tm_cmnds);

	/* track the TM on the itnim, then kick its state machine */
	list_add_tail(&tskim->qe, &itnim->tsk_q);
	bfa_sm_send_event(tskim, BFA_TSKIM_SM_START);
}
Krishna Gudipatie2187d72011-06-13 15:53:58 -07003217
Krishna Gudipati3fd45982011-06-24 20:24:08 -07003218void
3219bfa_tskim_res_recfg(struct bfa_s *bfa, u16 num_tskim_fw)
3220{
3221 struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa);
3222 struct list_head *qe;
3223 int i;
3224
3225 for (i = 0; i < (fcpim->num_tskim_reqs - num_tskim_fw); i++) {
3226 bfa_q_deq_tail(&fcpim->tskim_free_q, &qe);
3227 list_add_tail(qe, &fcpim->tskim_unused_q);
3228 }
3229}
3230
/* BFA FCP module - parent module for fcpim */

/*
 * Registers the fcp module with the BFA framework; presumably expands
 * to the module hook table referencing the bfa_fcp_* entry points below
 * -- see the BFA_MODULE() definition in bfa_modules.h to confirm.
 */
BFA_MODULE(fcp);
3234
/*
 * Compute the FCP module's KVA and DMA (sense buffer) memory
 * requirements and register them with @minfo. Also clamps the IO
 * request counts in @cfg to the supported limits.
 */
static void
bfa_fcp_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *minfo,
		struct bfa_s *bfa)
{
	struct bfa_fcp_mod_s *fcp = BFA_FCP_MOD(bfa);
	struct bfa_mem_kva_s *fcp_kva = BFA_MEM_FCP_KVA(bfa);
	struct bfa_mem_dma_s *seg_ptr;
	u16	nsegs, idx, per_seg_ios, num_io_req;
	u32	km_len = 0;

	/*
	 * ZERO for num_ioim_reqs and num_fwtio_reqs is allowed config value.
	 * So if the values are non zero, adjust them appropriately.
	 */
	if (cfg->fwcfg.num_ioim_reqs &&
	    cfg->fwcfg.num_ioim_reqs < BFA_IOIM_MIN)
		cfg->fwcfg.num_ioim_reqs = BFA_IOIM_MIN;
	else if (cfg->fwcfg.num_ioim_reqs > BFA_IOIM_MAX)
		cfg->fwcfg.num_ioim_reqs = BFA_IOIM_MAX;

	if (cfg->fwcfg.num_fwtio_reqs > BFA_FWTIO_MAX)
		cfg->fwcfg.num_fwtio_reqs = BFA_FWTIO_MAX;

	/*
	 * Keep the combined initiator + target IO count within BFA_IO_MAX,
	 * splitting evenly when both modes are configured.
	 */
	num_io_req = (cfg->fwcfg.num_ioim_reqs + cfg->fwcfg.num_fwtio_reqs);
	if (num_io_req > BFA_IO_MAX) {
		if (cfg->fwcfg.num_ioim_reqs && cfg->fwcfg.num_fwtio_reqs) {
			cfg->fwcfg.num_ioim_reqs = BFA_IO_MAX/2;
			cfg->fwcfg.num_fwtio_reqs = BFA_IO_MAX/2;
		} else if (cfg->fwcfg.num_fwtio_reqs)
			cfg->fwcfg.num_fwtio_reqs = BFA_FWTIO_MAX;
		else
			cfg->fwcfg.num_ioim_reqs = BFA_IOIM_MAX;
	}

	bfa_fcpim_meminfo(cfg, &km_len);

	/* KVA: fcpim needs, plus one iotag per IO and one itn per rport */
	num_io_req = (cfg->fwcfg.num_ioim_reqs + cfg->fwcfg.num_fwtio_reqs);
	km_len += num_io_req * sizeof(struct bfa_iotag_s);
	km_len += cfg->fwcfg.num_rports * sizeof(struct bfa_itn_s);

	/* dma memory - per-IO sense buffers, split over DMA segments */
	nsegs = BFI_MEM_DMA_NSEGS(num_io_req, BFI_IOIM_SNSLEN);
	per_seg_ios = BFI_MEM_NREQS_SEG(BFI_IOIM_SNSLEN);

	bfa_mem_dma_seg_iter(fcp, seg_ptr, nsegs, idx) {
		if (num_io_req >= per_seg_ios) {
			num_io_req -= per_seg_ios;
			bfa_mem_dma_setup(minfo, seg_ptr,
				per_seg_ios * BFI_IOIM_SNSLEN);
		} else	/* last, partially filled segment */
			bfa_mem_dma_setup(minfo, seg_ptr,
				num_io_req * BFI_IOIM_SNSLEN);
	}

	/* kva memory */
	bfa_mem_kva_setup(minfo, fcp_kva, km_len);
}
3292
/*
 * Attach-time initialization of the FCP module: record the configured
 * IO counts, hand the sense-buffer DMA segments to the IOCFC, attach
 * the fcpim submodule, and carve the iotag and itn arrays out of the
 * module KVA region.
 */
static void
bfa_fcp_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
		struct bfa_pcidev_s *pcidev)
{
	struct bfa_fcp_mod_s *fcp = BFA_FCP_MOD(bfa);
	struct bfa_mem_dma_s *seg_ptr;
	u16	idx, nsegs, num_io_req;

	fcp->num_ioim_reqs = cfg->fwcfg.num_ioim_reqs;
	fcp->num_fwtio_reqs  = cfg->fwcfg.num_fwtio_reqs;
	fcp->num_itns   = cfg->fwcfg.num_rports;
	fcp->bfa = bfa;

	/*
	 * Setup the pool of snsbase addr's, that is passed to fw as
	 * part of bfi_iocfc_cfg_s.
	 */
	num_io_req = (cfg->fwcfg.num_ioim_reqs + cfg->fwcfg.num_fwtio_reqs);
	nsegs = BFI_MEM_DMA_NSEGS(num_io_req, BFI_IOIM_SNSLEN);

	bfa_mem_dma_seg_iter(fcp, seg_ptr, nsegs, idx) {

		/* stop at the first segment that was not allocated */
		if (!bfa_mem_dma_virt(seg_ptr))
			break;

		fcp->snsbase[idx].pa = bfa_mem_dma_phys(seg_ptr);
		fcp->snsbase[idx].kva = bfa_mem_dma_virt(seg_ptr);
		bfa_iocfc_set_snsbase(bfa, idx, fcp->snsbase[idx].pa);
	}

	bfa_fcpim_attach(fcp, bfad, cfg, pcidev);

	/* iotag array first, then the itn array, in the module KVA region */
	bfa_iotag_attach(fcp);

	fcp->itn_arr = (struct bfa_itn_s *) bfa_mem_kva_curp(fcp);
	bfa_mem_kva_curp(fcp) = (u8 *)fcp->itn_arr +
			(fcp->num_itns * sizeof(struct bfa_itn_s));
	memset(fcp->itn_arr, 0,
			(fcp->num_itns * sizeof(struct bfa_itn_s)));
}
3333
/* FCP module detach hook -- intentionally empty, no teardown needed. */
static void
bfa_fcp_detach(struct bfa_s *bfa)
{
}
3338
/* FCP module start hook -- intentionally empty. */
static void
bfa_fcp_start(struct bfa_s *bfa)
{
}
3343
/* FCP module stop hook -- intentionally empty. */
static void
bfa_fcp_stop(struct bfa_s *bfa)
{
}
3348
/*
 * IOC disable hook: reclaim parked iotag resources, then propagate the
 * disable to the fcpim submodule.
 */
static void
bfa_fcp_iocdisable(struct bfa_s *bfa)
{
	struct bfa_fcp_mod_s *fcp = BFA_FCP_MOD(bfa);

	/* Enqueue unused ioim resources to free_q */
	list_splice_tail_init(&fcp->iotag_unused_q, &fcp->iotag_ioim_free_q);

	bfa_fcpim_iocdisable(fcp);
}
3359
3360void
Krishna Gudipati3fd45982011-06-24 20:24:08 -07003361bfa_fcp_res_recfg(struct bfa_s *bfa, u16 num_ioim_fw)
3362{
3363 struct bfa_fcp_mod_s *mod = BFA_FCP_MOD(bfa);
3364 struct list_head *qe;
3365 int i;
3366
3367 for (i = 0; i < (mod->num_ioim_reqs - num_ioim_fw); i++) {
3368 bfa_q_deq_tail(&mod->iotag_ioim_free_q, &qe);
3369 list_add_tail(qe, &mod->iotag_unused_q);
3370 }
3371}
3372
/*
 * Register the per-rport ITN interrupt handler; it is invoked from
 * bfa_itn_isr() for messages addressed to this rport.
 */
void
bfa_itn_create(struct bfa_s *bfa, struct bfa_rport_s *rport,
		void (*isr)(struct bfa_s *bfa, struct bfi_msg_s *m))
{
	struct bfa_fcp_mod_s *fcp = BFA_FCP_MOD(bfa);
	struct bfa_itn_s *itn;

	itn = BFA_ITN_FROM_TAG(fcp, rport->rport_tag);
	itn->isr = isr;
}
3383
3384/*
3385 * Itn interrupt processing.
3386 */
3387void
3388bfa_itn_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
3389{
3390 struct bfa_fcp_mod_s *fcp = BFA_FCP_MOD(bfa);
3391 union bfi_itn_i2h_msg_u msg;
3392 struct bfa_itn_s *itn;
3393
3394 msg.msg = m;
3395 itn = BFA_ITN_FROM_TAG(fcp, msg.create_rsp->bfa_handle);
3396
3397 if (itn->isr)
3398 itn->isr(bfa, m);
3399 else
3400 WARN_ON(1);
3401}
3402
/*
 * Carve the iotag array out of the FCP module KVA region and seed the
 * free pools: the first num_ioim_reqs tags serve initiator-mode IOs,
 * the remainder serve target-mode (fwtio) IOs.
 */
void
bfa_iotag_attach(struct bfa_fcp_mod_s *fcp)
{
	struct bfa_iotag_s *iotag;
	u16	num_io_req, i;

	iotag = (struct bfa_iotag_s *) bfa_mem_kva_curp(fcp);
	fcp->iotag_arr = iotag;

	INIT_LIST_HEAD(&fcp->iotag_ioim_free_q);
	INIT_LIST_HEAD(&fcp->iotag_tio_free_q);
	INIT_LIST_HEAD(&fcp->iotag_unused_q);

	num_io_req = fcp->num_ioim_reqs + fcp->num_fwtio_reqs;
	for (i = 0; i < num_io_req; i++, iotag++) {
		memset(iotag, 0, sizeof(struct bfa_iotag_s));
		iotag->tag = i;	/* tag doubles as index into iotag_arr */
		if (i < fcp->num_ioim_reqs)
			list_add_tail(&iotag->qe, &fcp->iotag_ioim_free_q);
		else
			list_add_tail(&iotag->qe, &fcp->iotag_tio_free_q);
	}

	/* advance the KVA cursor past the iotag array */
	bfa_mem_kva_curp(fcp) = (u8 *) iotag;
}