/*
 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
 * All rights reserved
 * www.brocade.com
 *
 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License (GPL) Version 2 as
 * published by the Free Software Foundation
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */

#include <bfa.h>
#include <cs/bfa_debug.h>
#include <bfa_cb_ioim_macros.h>

BFA_TRC_FILE(HAL, IOIM);

/*
 * forward declarations.
 */
static bfa_boolean_t bfa_ioim_send_ioreq(struct bfa_ioim_s *ioim);
static bfa_boolean_t bfa_ioim_sge_setup(struct bfa_ioim_s *ioim);
static void bfa_ioim_sgpg_setup(struct bfa_ioim_s *ioim);
static bfa_boolean_t bfa_ioim_send_abort(struct bfa_ioim_s *ioim);
static void bfa_ioim_notify_cleanup(struct bfa_ioim_s *ioim);
static void __bfa_cb_ioim_good_comp(void *cbarg, bfa_boolean_t complete);
static void __bfa_cb_ioim_comp(void *cbarg, bfa_boolean_t complete);
static void __bfa_cb_ioim_abort(void *cbarg, bfa_boolean_t complete);
static void __bfa_cb_ioim_failed(void *cbarg, bfa_boolean_t complete);
static void __bfa_cb_ioim_pathtov(void *cbarg, bfa_boolean_t complete);

/**
 *  bfa_ioim_sm
 */

/**
 * IO state machine events
 */
enum bfa_ioim_event {
	BFA_IOIM_SM_START = 1,		/* io start request from host */
	BFA_IOIM_SM_COMP_GOOD = 2,	/* io good comp, resource free */
	BFA_IOIM_SM_COMP = 3,		/* io comp, resource is free */
	BFA_IOIM_SM_COMP_UTAG = 4,	/* io comp, resource is free */
	BFA_IOIM_SM_DONE = 5,		/* io comp, resource not free */
	BFA_IOIM_SM_FREE = 6,		/* io resource is freed */
	BFA_IOIM_SM_ABORT = 7,		/* abort request from scsi stack */
	BFA_IOIM_SM_ABORT_COMP = 8,	/* abort from f/w */
	BFA_IOIM_SM_ABORT_DONE = 9,	/* abort completion from f/w */
	BFA_IOIM_SM_QRESUME = 10,	/* CQ space available to queue IO */
	BFA_IOIM_SM_SGALLOCED = 11,	/* SG page allocation successful */
	BFA_IOIM_SM_SQRETRY = 12,	/* sequence recovery retry */
	BFA_IOIM_SM_HCB = 13,		/* bfa callback complete */
	BFA_IOIM_SM_CLEANUP = 14,	/* IO cleanup from itnim */
	BFA_IOIM_SM_TMSTART = 15,	/* IO cleanup from tskim */
	BFA_IOIM_SM_TMDONE = 16,	/* IO cleanup from tskim */
	BFA_IOIM_SM_HWFAIL = 17,	/* IOC h/w failure event */
	BFA_IOIM_SM_IOTOV = 18,		/* ITN offline TOV */
};

/*
 * forward declaration of IO state machine
 */
static void bfa_ioim_sm_uninit(struct bfa_ioim_s *ioim,
			       enum bfa_ioim_event event);
static void bfa_ioim_sm_sgalloc(struct bfa_ioim_s *ioim,
				enum bfa_ioim_event event);
static void bfa_ioim_sm_active(struct bfa_ioim_s *ioim,
			       enum bfa_ioim_event event);
static void bfa_ioim_sm_abort(struct bfa_ioim_s *ioim,
			      enum bfa_ioim_event event);
static void bfa_ioim_sm_cleanup(struct bfa_ioim_s *ioim,
				enum bfa_ioim_event event);
static void bfa_ioim_sm_qfull(struct bfa_ioim_s *ioim,
			      enum bfa_ioim_event event);
static void bfa_ioim_sm_abort_qfull(struct bfa_ioim_s *ioim,
				    enum bfa_ioim_event event);
static void bfa_ioim_sm_cleanup_qfull(struct bfa_ioim_s *ioim,
				      enum bfa_ioim_event event);
static void bfa_ioim_sm_hcb(struct bfa_ioim_s *ioim,
			    enum bfa_ioim_event event);
static void bfa_ioim_sm_hcb_free(struct bfa_ioim_s *ioim,
				 enum bfa_ioim_event event);
static void bfa_ioim_sm_resfree(struct bfa_ioim_s *ioim,
				enum bfa_ioim_event event);

/**
 * IO is not started (unallocated).
 */
static void
bfa_ioim_sm_uninit(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
{
	bfa_trc_fp(ioim->bfa, ioim->iotag);
	bfa_trc_fp(ioim->bfa, event);

	switch (event) {
	case BFA_IOIM_SM_START:
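		/*
		 * ITN is offline: either fail the IO back with PATHTOV
		 * status right away, or park it on the itnim pending
		 * queue until the ITN comes back online.
		 */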
		if (!bfa_itnim_is_online(ioim->itnim)) {
			if (!bfa_itnim_hold_io(ioim->itnim)) {
				bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
				list_del(&ioim->qe);
				list_add_tail(&ioim->qe,
					      &ioim->fcpim->ioim_comp_q);
				bfa_cb_queue(ioim->bfa, &ioim->hcb_qe,
					     __bfa_cb_ioim_pathtov, ioim);
			} else {
				list_del(&ioim->qe);
				list_add_tail(&ioim->qe,
					      &ioim->itnim->pending_q);
			}
			break;
		}

		if (ioim->nsges > BFI_SGE_INLINE) {
			if (!bfa_ioim_sge_setup(ioim)) {
				bfa_sm_set_state(ioim, bfa_ioim_sm_sgalloc);
				return;
			}
		}

		if (!bfa_ioim_send_ioreq(ioim)) {
			bfa_sm_set_state(ioim, bfa_ioim_sm_qfull);
			break;
		}

		bfa_sm_set_state(ioim, bfa_ioim_sm_active);
		break;

	case BFA_IOIM_SM_IOTOV:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe,
			     __bfa_cb_ioim_pathtov, ioim);
		break;

	case BFA_IOIM_SM_ABORT:
		/**
		 * IO in pending queue can get abort requests. Complete abort
		 * requests immediately.
		 */
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_assert(bfa_q_is_on_q(&ioim->itnim->pending_q, ioim));
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
			     ioim);
		break;

	default:
		bfa_sm_fault(ioim->bfa, event);
	}
}

/**
 * IO is waiting for SG pages.
 */
static void
bfa_ioim_sm_sgalloc(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
{
	bfa_trc(ioim->bfa, ioim->iotag);
	bfa_trc(ioim->bfa, event);

	switch (event) {
	case BFA_IOIM_SM_SGALLOCED:
		if (!bfa_ioim_send_ioreq(ioim)) {
			bfa_sm_set_state(ioim, bfa_ioim_sm_qfull);
			break;
		}
		bfa_sm_set_state(ioim, bfa_ioim_sm_active);
		break;

	case BFA_IOIM_SM_CLEANUP:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_sgpg_wcancel(ioim->bfa, &ioim->iosp->sgpg_wqe);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
			     ioim);
		bfa_ioim_notify_cleanup(ioim);
		break;

	case BFA_IOIM_SM_ABORT:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_sgpg_wcancel(ioim->bfa, &ioim->iosp->sgpg_wqe);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
			     ioim);
		break;

	case BFA_IOIM_SM_HWFAIL:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_sgpg_wcancel(ioim->bfa, &ioim->iosp->sgpg_wqe);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
			     ioim);
		break;

	default:
		bfa_sm_fault(ioim->bfa, event);
	}
}

/**
 * IO is active.
 */
static void
bfa_ioim_sm_active(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
{
	bfa_trc_fp(ioim->bfa, ioim->iotag);
	bfa_trc_fp(ioim->bfa, event);

	switch (event) {
	case BFA_IOIM_SM_COMP_GOOD:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe,
			     __bfa_cb_ioim_good_comp, ioim);
		break;

	case BFA_IOIM_SM_COMP:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_comp,
			     ioim);
		break;

	case BFA_IOIM_SM_DONE:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb_free);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_comp,
			     ioim);
		break;

	case BFA_IOIM_SM_ABORT:
		ioim->iosp->abort_explicit = BFA_TRUE;
		ioim->io_cbfn = __bfa_cb_ioim_abort;

		if (bfa_ioim_send_abort(ioim))
			bfa_sm_set_state(ioim, bfa_ioim_sm_abort);
		else {
			bfa_sm_set_state(ioim, bfa_ioim_sm_abort_qfull);
			bfa_reqq_wait(ioim->bfa, ioim->reqq,
				      &ioim->iosp->reqq_wait);
		}
		break;

	case BFA_IOIM_SM_CLEANUP:
		ioim->iosp->abort_explicit = BFA_FALSE;
		ioim->io_cbfn = __bfa_cb_ioim_failed;

		if (bfa_ioim_send_abort(ioim))
			bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup);
		else {
			bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup_qfull);
			bfa_reqq_wait(ioim->bfa, ioim->reqq,
				      &ioim->iosp->reqq_wait);
		}
		break;

	case BFA_IOIM_SM_HWFAIL:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
			     ioim);
		break;

	default:
		bfa_sm_fault(ioim->bfa, event);
	}
}

/**
 * IO is being aborted, waiting for completion from firmware.
 */
static void
bfa_ioim_sm_abort(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
{
	bfa_trc(ioim->bfa, ioim->iotag);
	bfa_trc(ioim->bfa, event);

	switch (event) {
	case BFA_IOIM_SM_COMP_GOOD:
	case BFA_IOIM_SM_COMP:
	case BFA_IOIM_SM_DONE:
	case BFA_IOIM_SM_FREE:
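		/*
		 * Normal completions racing with the abort are ignored;
		 * the abort response from firmware drives the state machine.
		 */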
		break;

	case BFA_IOIM_SM_ABORT_DONE:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb_free);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
			     ioim);
		break;

	case BFA_IOIM_SM_ABORT_COMP:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
			     ioim);
		break;

	case BFA_IOIM_SM_COMP_UTAG:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
			     ioim);
		break;

	case BFA_IOIM_SM_CLEANUP:
		bfa_assert(ioim->iosp->abort_explicit == BFA_TRUE);
		ioim->iosp->abort_explicit = BFA_FALSE;

		if (bfa_ioim_send_abort(ioim))
			bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup);
		else {
			bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup_qfull);
			bfa_reqq_wait(ioim->bfa, ioim->reqq,
				      &ioim->iosp->reqq_wait);
		}
		break;

	case BFA_IOIM_SM_HWFAIL:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
			     ioim);
		break;

	default:
		bfa_sm_fault(ioim->bfa, event);
	}
}

/**
 * IO is being cleaned up (implicit abort), waiting for completion from
 * firmware.
 */
static void
bfa_ioim_sm_cleanup(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
{
	bfa_trc(ioim->bfa, ioim->iotag);
	bfa_trc(ioim->bfa, event);

	switch (event) {
	case BFA_IOIM_SM_COMP_GOOD:
	case BFA_IOIM_SM_COMP:
	case BFA_IOIM_SM_DONE:
	case BFA_IOIM_SM_FREE:
		break;

	case BFA_IOIM_SM_ABORT:
		/**
		 * IO is already being aborted implicitly
		 */
		ioim->io_cbfn = __bfa_cb_ioim_abort;
		break;

	case BFA_IOIM_SM_ABORT_DONE:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb_free);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, ioim->io_cbfn, ioim);
		bfa_ioim_notify_cleanup(ioim);
		break;

	case BFA_IOIM_SM_ABORT_COMP:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, ioim->io_cbfn, ioim);
		bfa_ioim_notify_cleanup(ioim);
		break;

	case BFA_IOIM_SM_COMP_UTAG:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, ioim->io_cbfn, ioim);
		bfa_ioim_notify_cleanup(ioim);
		break;

	case BFA_IOIM_SM_HWFAIL:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
			     ioim);
		break;

	case BFA_IOIM_SM_CLEANUP:
		/**
		 * IO can be in cleanup state already due to TM command.
		 * 2nd cleanup request comes from ITN offline event.
		 */
		break;

	default:
		bfa_sm_fault(ioim->bfa, event);
	}
}

/**
 * IO is waiting for room in request CQ
 */
static void
bfa_ioim_sm_qfull(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
{
	bfa_trc(ioim->bfa, ioim->iotag);
	bfa_trc(ioim->bfa, event);

	switch (event) {
	case BFA_IOIM_SM_QRESUME:
		bfa_sm_set_state(ioim, bfa_ioim_sm_active);
		bfa_ioim_send_ioreq(ioim);
		break;

	case BFA_IOIM_SM_ABORT:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
			     ioim);
		break;

	case BFA_IOIM_SM_CLEANUP:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
			     ioim);
		bfa_ioim_notify_cleanup(ioim);
		break;

	case BFA_IOIM_SM_HWFAIL:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
			     ioim);
		break;

	default:
		bfa_sm_fault(ioim->bfa, event);
	}
}

/**
 * Active IO is being aborted, waiting for room in request CQ.
 */
static void
bfa_ioim_sm_abort_qfull(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
{
	bfa_trc(ioim->bfa, ioim->iotag);
	bfa_trc(ioim->bfa, event);

	switch (event) {
	case BFA_IOIM_SM_QRESUME:
		bfa_sm_set_state(ioim, bfa_ioim_sm_abort);
		bfa_ioim_send_abort(ioim);
		break;

	case BFA_IOIM_SM_CLEANUP:
		bfa_assert(ioim->iosp->abort_explicit == BFA_TRUE);
		ioim->iosp->abort_explicit = BFA_FALSE;
		bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup_qfull);
		break;

	case BFA_IOIM_SM_COMP_GOOD:
	case BFA_IOIM_SM_COMP:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
			     ioim);
		break;

	case BFA_IOIM_SM_DONE:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb_free);
		bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
			     ioim);
		break;

	case BFA_IOIM_SM_HWFAIL:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
			     ioim);
		break;

	default:
		bfa_sm_fault(ioim->bfa, event);
	}
}

/**
 * Active IO is being cleaned up, waiting for room in request CQ.
 */
static void
bfa_ioim_sm_cleanup_qfull(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
{
	bfa_trc(ioim->bfa, ioim->iotag);
	bfa_trc(ioim->bfa, event);

	switch (event) {
	case BFA_IOIM_SM_QRESUME:
		bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup);
		bfa_ioim_send_abort(ioim);
		break;

	case BFA_IOIM_SM_ABORT:
		/**
		 * IO is already being cleaned up implicitly
		 */
		ioim->io_cbfn = __bfa_cb_ioim_abort;
		break;

	case BFA_IOIM_SM_COMP_GOOD:
	case BFA_IOIM_SM_COMP:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, ioim->io_cbfn, ioim);
		bfa_ioim_notify_cleanup(ioim);
		break;

	case BFA_IOIM_SM_DONE:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb_free);
		bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, ioim->io_cbfn, ioim);
		bfa_ioim_notify_cleanup(ioim);
		break;

	case BFA_IOIM_SM_HWFAIL:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
			     ioim);
		break;

	default:
		bfa_sm_fault(ioim->bfa, event);
	}
}

/**
 * IO bfa callback is pending.
 */
static void
bfa_ioim_sm_hcb(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
{
	bfa_trc_fp(ioim->bfa, ioim->iotag);
	bfa_trc_fp(ioim->bfa, event);

	switch (event) {
	case BFA_IOIM_SM_HCB:
		bfa_sm_set_state(ioim, bfa_ioim_sm_uninit);
		bfa_ioim_free(ioim);
		bfa_cb_ioim_resfree(ioim->bfa->bfad);
		break;

	case BFA_IOIM_SM_CLEANUP:
		bfa_ioim_notify_cleanup(ioim);
		break;

	case BFA_IOIM_SM_HWFAIL:
		break;

	default:
		bfa_sm_fault(ioim->bfa, event);
	}
}

/**
 * IO bfa callback is pending. IO resource cannot be freed.
 */
static void
bfa_ioim_sm_hcb_free(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
{
	bfa_trc(ioim->bfa, ioim->iotag);
	bfa_trc(ioim->bfa, event);

	switch (event) {
	case BFA_IOIM_SM_HCB:
		bfa_sm_set_state(ioim, bfa_ioim_sm_resfree);
		list_del(&ioim->qe);
		list_add_tail(&ioim->qe, &ioim->fcpim->ioim_resfree_q);
		break;

	case BFA_IOIM_SM_FREE:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		break;

	case BFA_IOIM_SM_CLEANUP:
		bfa_ioim_notify_cleanup(ioim);
		break;

	case BFA_IOIM_SM_HWFAIL:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		break;

	default:
		bfa_sm_fault(ioim->bfa, event);
	}
}

/**
 * IO is completed, waiting for resource free from firmware.
 */
static void
bfa_ioim_sm_resfree(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
{
	bfa_trc(ioim->bfa, ioim->iotag);
	bfa_trc(ioim->bfa, event);

	switch (event) {
	case BFA_IOIM_SM_FREE:
		bfa_sm_set_state(ioim, bfa_ioim_sm_uninit);
		bfa_ioim_free(ioim);
		bfa_cb_ioim_resfree(ioim->bfa->bfad);
		break;

	case BFA_IOIM_SM_CLEANUP:
		bfa_ioim_notify_cleanup(ioim);
		break;

	case BFA_IOIM_SM_HWFAIL:
		break;

	default:
		bfa_sm_fault(ioim->bfa, event);
	}
}


/**
 *  bfa_ioim_private
 */

static void
__bfa_cb_ioim_good_comp(void *cbarg, bfa_boolean_t complete)
{
	struct bfa_ioim_s *ioim = cbarg;

	if (!complete) {
		bfa_sm_send_event(ioim, BFA_IOIM_SM_HCB);
		return;
	}

	bfa_cb_ioim_good_comp(ioim->bfa->bfad, ioim->dio);
}

static void
__bfa_cb_ioim_comp(void *cbarg, bfa_boolean_t complete)
{
	struct bfa_ioim_s *ioim = cbarg;
	struct bfi_ioim_rsp_s *m;
	u8 *snsinfo = NULL;
	u8 sns_len = 0;
	s32 residue = 0;

	if (!complete) {
		bfa_sm_send_event(ioim, BFA_IOIM_SM_HCB);
		return;
	}

	m = (struct bfi_ioim_rsp_s *) &ioim->iosp->comp_rspmsg;
	if (m->io_status == BFI_IOIM_STS_OK) {
		/**
		 * setup sense information, if present
		 */
		if (m->scsi_status == SCSI_STATUS_CHECK_CONDITION
		    && m->sns_len) {
			sns_len = m->sns_len;
			snsinfo = ioim->iosp->snsinfo;
		}

		/**
		 * setup residue value correctly for normal completions
		 */
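		/* underrun yields a positive residue, overrun a negative one */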
		if (m->resid_flags == FCP_RESID_UNDER)
			residue = bfa_os_ntohl(m->residue);
		if (m->resid_flags == FCP_RESID_OVER) {
			residue = bfa_os_ntohl(m->residue);
			residue = -residue;
		}
	}

	bfa_cb_ioim_done(ioim->bfa->bfad, ioim->dio, m->io_status,
			 m->scsi_status, sns_len, snsinfo, residue);
}

static void
__bfa_cb_ioim_failed(void *cbarg, bfa_boolean_t complete)
{
	struct bfa_ioim_s *ioim = cbarg;

	if (!complete) {
		bfa_sm_send_event(ioim, BFA_IOIM_SM_HCB);
		return;
	}

	bfa_cb_ioim_done(ioim->bfa->bfad, ioim->dio, BFI_IOIM_STS_ABORTED,
			 0, 0, NULL, 0);
}

static void
__bfa_cb_ioim_pathtov(void *cbarg, bfa_boolean_t complete)
{
	struct bfa_ioim_s *ioim = cbarg;

	if (!complete) {
		bfa_sm_send_event(ioim, BFA_IOIM_SM_HCB);
		return;
	}

	bfa_cb_ioim_done(ioim->bfa->bfad, ioim->dio, BFI_IOIM_STS_PATHTOV,
			 0, 0, NULL, 0);
}

static void
__bfa_cb_ioim_abort(void *cbarg, bfa_boolean_t complete)
{
	struct bfa_ioim_s *ioim = cbarg;

	if (!complete) {
		bfa_sm_send_event(ioim, BFA_IOIM_SM_HCB);
		return;
	}

	bfa_cb_ioim_abort(ioim->bfa->bfad, ioim->dio);
}

static void
bfa_ioim_sgpg_alloced(void *cbarg)
{
	struct bfa_ioim_s *ioim = cbarg;

	ioim->nsgpgs = BFA_SGPG_NPAGE(ioim->nsges);
	list_splice_tail_init(&ioim->iosp->sgpg_wqe.sgpg_q, &ioim->sgpg_q);
	bfa_ioim_sgpg_setup(ioim);
	bfa_sm_send_event(ioim, BFA_IOIM_SM_SGALLOCED);
}

/**
 * Send I/O request to firmware.
 */
static bfa_boolean_t
bfa_ioim_send_ioreq(struct bfa_ioim_s *ioim)
{
	struct bfa_itnim_s *itnim = ioim->itnim;
	struct bfi_ioim_req_s *m;
	static struct fcp_cmnd_s cmnd_z0 = { 0 };
	struct bfi_sge_s *sge;
	u32 pgdlen = 0;
	u64 addr;
	struct scatterlist *sg;
	struct scsi_cmnd *cmnd = (struct scsi_cmnd *) ioim->dio;

	/**
	 * check for room in queue to send request now
	 */
	m = bfa_reqq_next(ioim->bfa, ioim->reqq);
	if (!m) {
		bfa_reqq_wait(ioim->bfa, ioim->reqq,
			      &ioim->iosp->reqq_wait);
		return BFA_FALSE;
	}

	/**
	 * build i/o request message next
	 */
	m->io_tag = bfa_os_htons(ioim->iotag);
	m->rport_hdl = ioim->itnim->rport->fw_handle;
	m->io_timeout = bfa_cb_ioim_get_timeout(ioim->dio);

	/**
	 * build inline IO SG element here
	 */
	sge = &m->sges[0];
	if (ioim->nsges) {
		sg = (struct scatterlist *)scsi_sglist(cmnd);
		addr = bfa_os_sgaddr(sg_dma_address(sg));
		sge->sga = *(union bfi_addr_u *) &addr;
		pgdlen = sg_dma_len(sg);
		sge->sg_len = pgdlen;
		sge->flags = (ioim->nsges > BFI_SGE_INLINE) ?
			BFI_SGE_DATA_CPL : BFI_SGE_DATA_LAST;
		bfa_sge_to_be(sge);
		sge++;
	}

	if (ioim->nsges > BFI_SGE_INLINE) {
		sge->sga = ioim->sgpg->sgpg_pa;
	} else {
		sge->sga.a32.addr_lo = 0;
		sge->sga.a32.addr_hi = 0;
	}
	sge->sg_len = pgdlen;
	sge->flags = BFI_SGE_PGDLEN;
	bfa_sge_to_be(sge);

	/**
	 * set up I/O command parameters
	 */
	bfa_os_assign(m->cmnd, cmnd_z0);
	m->cmnd.lun = bfa_cb_ioim_get_lun(ioim->dio);
	m->cmnd.iodir = bfa_cb_ioim_get_iodir(ioim->dio);
	bfa_os_assign(m->cmnd.cdb,
		      *(struct scsi_cdb_s *)bfa_cb_ioim_get_cdb(ioim->dio));
	m->cmnd.fcp_dl = bfa_os_htonl(bfa_cb_ioim_get_size(ioim->dio));

	/**
	 * set up I/O message header
	 */
	switch (m->cmnd.iodir) {
	case FCP_IODIR_READ:
		bfi_h2i_set(m->mh, BFI_MC_IOIM_READ, 0, bfa_lpuid(ioim->bfa));
		bfa_stats(itnim, input_reqs);
		break;
	case FCP_IODIR_WRITE:
		bfi_h2i_set(m->mh, BFI_MC_IOIM_WRITE, 0, bfa_lpuid(ioim->bfa));
		bfa_stats(itnim, output_reqs);
		break;
	case FCP_IODIR_RW:
		bfa_stats(itnim, input_reqs);
		bfa_stats(itnim, output_reqs);
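		/* fall through: read-write IO uses the generic I/O opcode */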
	default:
		bfi_h2i_set(m->mh, BFI_MC_IOIM_IO, 0, bfa_lpuid(ioim->bfa));
	}
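	/*
	 * Sequence level error recovery, and transfer sizes that are not
	 * a multiple of a word, cannot use the read/write opcodes; use the
	 * generic I/O opcode instead.
	 */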
	if (itnim->seq_rec ||
	    (bfa_cb_ioim_get_size(ioim->dio) & (sizeof(u32) - 1)))
		bfi_h2i_set(m->mh, BFI_MC_IOIM_IO, 0, bfa_lpuid(ioim->bfa));

#ifdef IOIM_ADVANCED
	m->cmnd.crn = bfa_cb_ioim_get_crn(ioim->dio);
	m->cmnd.priority = bfa_cb_ioim_get_priority(ioim->dio);
	m->cmnd.taskattr = bfa_cb_ioim_get_taskattr(ioim->dio);

	/**
	 * Handle large CDB (>16 bytes).
	 */
	m->cmnd.addl_cdb_len = (bfa_cb_ioim_get_cdblen(ioim->dio) -
				FCP_CMND_CDB_LEN) / sizeof(u32);
	if (m->cmnd.addl_cdb_len) {
		bfa_os_memcpy(&m->cmnd.cdb + 1, (struct scsi_cdb_s *)
			      bfa_cb_ioim_get_cdb(ioim->dio) + 1,
			      m->cmnd.addl_cdb_len * sizeof(u32));
		fcp_cmnd_fcpdl(&m->cmnd) =
			bfa_os_htonl(bfa_cb_ioim_get_size(ioim->dio));
	}
#endif

	/**
	 * queue I/O message to firmware
	 */
	bfa_reqq_produce(ioim->bfa, ioim->reqq);
	return BFA_TRUE;
}

/**
 * Setup any additional SG pages needed. The inline SG element is set up
 * at queuing time.
 */
static bfa_boolean_t
bfa_ioim_sge_setup(struct bfa_ioim_s *ioim)
{
	u16 nsgpgs;

	bfa_assert(ioim->nsges > BFI_SGE_INLINE);

	/**
	 * allocate SG pages needed
	 */
	nsgpgs = BFA_SGPG_NPAGE(ioim->nsges);
	if (!nsgpgs)
		return BFA_TRUE;

	if (bfa_sgpg_malloc(ioim->bfa, &ioim->sgpg_q, nsgpgs)
	    != BFA_STATUS_OK) {
		bfa_sgpg_wait(ioim->bfa, &ioim->iosp->sgpg_wqe, nsgpgs);
		return BFA_FALSE;
	}

	ioim->nsgpgs = nsgpgs;
	bfa_ioim_sgpg_setup(ioim);

	return BFA_TRUE;
}

static void
bfa_ioim_sgpg_setup(struct bfa_ioim_s *ioim)
{
	int sgeid, nsges, i;
	struct bfi_sge_s *sge;
	struct bfa_sgpg_s *sgpg;
	u32 pgcumsz;
	u64 addr;
	struct scatterlist *sg;
	struct scsi_cmnd *cmnd = (struct scsi_cmnd *) ioim->dio;

	sgeid = BFI_SGE_INLINE;
	ioim->sgpg = sgpg = bfa_q_first(&ioim->sgpg_q);

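	/*
	 * The first SG element was consumed by the inline SGE of the
	 * request message; start filling SG pages from the second element.
	 */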
	sg = scsi_sglist(cmnd);
	sg = sg_next(sg);

	do {
		sge = sgpg->sgpg->sges;
		nsges = ioim->nsges - sgeid;
		if (nsges > BFI_SGPG_DATA_SGES)
			nsges = BFI_SGPG_DATA_SGES;

		pgcumsz = 0;
		for (i = 0; i < nsges; i++, sge++, sgeid++, sg = sg_next(sg)) {
			addr = bfa_os_sgaddr(sg_dma_address(sg));
			sge->sga = *(union bfi_addr_u *) &addr;
			sge->sg_len = sg_dma_len(sg);
			pgcumsz += sge->sg_len;

			/**
			 * set flags
			 */
			if (i < (nsges - 1))
				sge->flags = BFI_SGE_DATA;
			else if (sgeid < (ioim->nsges - 1))
				sge->flags = BFI_SGE_DATA_CPL;
			else
				sge->flags = BFI_SGE_DATA_LAST;
		}

		sgpg = (struct bfa_sgpg_s *) bfa_q_next(sgpg);

		/**
		 * set the link element of each page
		 */
		if (sgeid == ioim->nsges) {
			sge->flags = BFI_SGE_PGDLEN;
			sge->sga.a32.addr_lo = 0;
			sge->sga.a32.addr_hi = 0;
		} else {
			sge->flags = BFI_SGE_LINK;
			sge->sga = sgpg->sgpg_pa;
		}
		sge->sg_len = pgcumsz;
	} while (sgeid < ioim->nsges);
}

/**
 * Send I/O abort request to firmware.
 */
static bfa_boolean_t
bfa_ioim_send_abort(struct bfa_ioim_s *ioim)
{
	struct bfi_ioim_abort_req_s *m;
	enum bfi_ioim_h2i msgop;

	/**
	 * check for room in queue to send request now
	 */
	m = bfa_reqq_next(ioim->bfa, ioim->reqq);
	if (!m)
		return BFA_FALSE;

	/**
	 * build i/o request message next
	 */
	if (ioim->iosp->abort_explicit)
		msgop = BFI_IOIM_H2I_IOABORT_REQ;
	else
		msgop = BFI_IOIM_H2I_IOCLEANUP_REQ;

	bfi_h2i_set(m->mh, BFI_MC_IOIM, msgop, bfa_lpuid(ioim->bfa));
	m->io_tag = bfa_os_htons(ioim->iotag);
	m->abort_tag = ++ioim->abort_tag;

	/**
	 * queue I/O message to firmware
	 */
	bfa_reqq_produce(ioim->bfa, ioim->reqq);
	return BFA_TRUE;
}

/**
 * Call to resume any I/O requests waiting for room in request queue.
 */
static void
bfa_ioim_qresume(void *cbarg)
{
	struct bfa_ioim_s *ioim = cbarg;

	bfa_fcpim_stats(ioim->fcpim, qresumes);
	bfa_sm_send_event(ioim, BFA_IOIM_SM_QRESUME);
}


static void
bfa_ioim_notify_cleanup(struct bfa_ioim_s *ioim)
{
	/**
	 * Move IO from itnim queue to fcpim global queue since itnim will be
	 * freed.
	 */
	list_del(&ioim->qe);
	list_add_tail(&ioim->qe, &ioim->fcpim->ioim_comp_q);

	if (!ioim->iosp->tskim) {
		if (ioim->fcpim->delay_comp && ioim->itnim->iotov_active) {
			bfa_cb_dequeue(&ioim->hcb_qe);
			list_del(&ioim->qe);
			list_add_tail(&ioim->qe, &ioim->itnim->delay_comp_q);
		}
		bfa_itnim_iodone(ioim->itnim);
	} else
		bfa_tskim_iodone(ioim->iosp->tskim);
}

/**
 * Complete IOs that were held while the ITN was offline, either when the
 * path TOV expires or after the link comes back.
 */
void
bfa_ioim_delayed_comp(struct bfa_ioim_s *ioim, bfa_boolean_t iotov)
{
	/**
	 * If path tov timer expired, fail back with PATHTOV status - these
	 * IO requests are not normally retried by the IO stack.
	 *
	 * Otherwise the device came back online; fail with normal failed
	 * status so that the IO stack retries these failed IO requests.
	 */
	if (iotov)
		ioim->io_cbfn = __bfa_cb_ioim_pathtov;
	else
		ioim->io_cbfn = __bfa_cb_ioim_failed;

	bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, ioim->io_cbfn, ioim);

	/**
	 * Move IO to fcpim global queue since itnim will be
	 * freed.
	 */
	list_del(&ioim->qe);
	list_add_tail(&ioim->qe, &ioim->fcpim->ioim_comp_q);
}


/**
 *  bfa_ioim_friend
 */

/**
 * Memory allocation and initialization.
 */
void
bfa_ioim_attach(struct bfa_fcpim_mod_s *fcpim, struct bfa_meminfo_s *minfo)
{
	struct bfa_ioim_s *ioim;
	struct bfa_ioim_sp_s *iosp;
	u16 i;
	u8 *snsinfo;
	u32 snsbufsz;

	/**
	 * claim memory first
	 */
	ioim = (struct bfa_ioim_s *) bfa_meminfo_kva(minfo);
	fcpim->ioim_arr = ioim;
	bfa_meminfo_kva(minfo) = (u8 *) (ioim + fcpim->num_ioim_reqs);

	iosp = (struct bfa_ioim_sp_s *) bfa_meminfo_kva(minfo);
	fcpim->ioim_sp_arr = iosp;
	bfa_meminfo_kva(minfo) = (u8 *) (iosp + fcpim->num_ioim_reqs);

	/**
	 * Claim DMA memory for per IO sense data.
	 */
	snsbufsz = fcpim->num_ioim_reqs * BFI_IOIM_SNSLEN;
	fcpim->snsbase.pa = bfa_meminfo_dma_phys(minfo);
	bfa_meminfo_dma_phys(minfo) += snsbufsz;

	fcpim->snsbase.kva = bfa_meminfo_dma_virt(minfo);
	bfa_meminfo_dma_virt(minfo) += snsbufsz;
	snsinfo = fcpim->snsbase.kva;
	bfa_iocfc_set_snsbase(fcpim->bfa, fcpim->snsbase.pa);

	/**
	 * Initialize ioim free queues
	 */
	INIT_LIST_HEAD(&fcpim->ioim_free_q);
	INIT_LIST_HEAD(&fcpim->ioim_resfree_q);
	INIT_LIST_HEAD(&fcpim->ioim_comp_q);

	for (i = 0; i < fcpim->num_ioim_reqs;
	     i++, ioim++, iosp++, snsinfo += BFI_IOIM_SNSLEN) {
		/*
		 * initialize IOIM
		 */
		bfa_os_memset(ioim, 0, sizeof(struct bfa_ioim_s));
		ioim->iotag = i;
		ioim->bfa = fcpim->bfa;
		ioim->fcpim = fcpim;
		ioim->iosp = iosp;
		iosp->snsinfo = snsinfo;
		INIT_LIST_HEAD(&ioim->sgpg_q);
		bfa_reqq_winit(&ioim->iosp->reqq_wait,
			       bfa_ioim_qresume, ioim);
		bfa_sgpg_winit(&ioim->iosp->sgpg_wqe,
			       bfa_ioim_sgpg_alloced, ioim);
		bfa_sm_set_state(ioim, bfa_ioim_sm_uninit);

		list_add_tail(&ioim->qe, &fcpim->ioim_free_q);
	}
}

/**
 * Driver detach time call.
 */
void
bfa_ioim_detach(struct bfa_fcpim_mod_s *fcpim)
{
}

void
bfa_ioim_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
{
	struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
	struct bfi_ioim_rsp_s *rsp = (struct bfi_ioim_rsp_s *) m;
	struct bfa_ioim_s *ioim;
	u16 iotag;
	enum bfa_ioim_event evt = BFA_IOIM_SM_COMP;

	iotag = bfa_os_ntohs(rsp->io_tag);

	ioim = BFA_IOIM_FROM_TAG(fcpim, iotag);
	bfa_assert(ioim->iotag == iotag);

	bfa_trc(ioim->bfa, ioim->iotag);
	bfa_trc(ioim->bfa, rsp->io_status);
	bfa_trc(ioim->bfa, rsp->reuse_io_tag);

	if (bfa_sm_cmp_state(ioim, bfa_ioim_sm_active))
		bfa_os_assign(ioim->iosp->comp_rspmsg, *m);

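	/*
	 * Translate the firmware completion status into a state machine
	 * event. reuse_io_tag indicates whether the IO tag can be reused
	 * right away (..._COMP) or the IO must wait for an explicit
	 * resource free from firmware (..._DONE).
	 */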
	switch (rsp->io_status) {
	case BFI_IOIM_STS_OK:
		bfa_fcpim_stats(fcpim, iocomp_ok);
		if (rsp->reuse_io_tag == 0)
			evt = BFA_IOIM_SM_DONE;
		else
			evt = BFA_IOIM_SM_COMP;
		break;

	case BFI_IOIM_STS_TIMEDOUT:
	case BFI_IOIM_STS_ABORTED:
		rsp->io_status = BFI_IOIM_STS_ABORTED;
		bfa_fcpim_stats(fcpim, iocomp_aborted);
		if (rsp->reuse_io_tag == 0)
			evt = BFA_IOIM_SM_DONE;
		else
			evt = BFA_IOIM_SM_COMP;
		break;

	case BFI_IOIM_STS_PROTO_ERR:
		bfa_fcpim_stats(fcpim, iocom_proto_err);
		bfa_assert(rsp->reuse_io_tag);
		evt = BFA_IOIM_SM_COMP;
		break;

	case BFI_IOIM_STS_SQER_NEEDED:
		bfa_fcpim_stats(fcpim, iocom_sqer_needed);
		bfa_assert(rsp->reuse_io_tag == 0);
		evt = BFA_IOIM_SM_SQRETRY;
		break;

	case BFI_IOIM_STS_RES_FREE:
		bfa_fcpim_stats(fcpim, iocom_res_free);
		evt = BFA_IOIM_SM_FREE;
		break;

	case BFI_IOIM_STS_HOST_ABORTED:
		bfa_fcpim_stats(fcpim, iocom_hostabrts);
		if (rsp->abort_tag != ioim->abort_tag) {
			bfa_trc(ioim->bfa, rsp->abort_tag);
			bfa_trc(ioim->bfa, ioim->abort_tag);
			return;
		}

		if (rsp->reuse_io_tag)
			evt = BFA_IOIM_SM_ABORT_COMP;
		else
			evt = BFA_IOIM_SM_ABORT_DONE;
		break;

	case BFI_IOIM_STS_UTAG:
		bfa_fcpim_stats(fcpim, iocom_utags);
		evt = BFA_IOIM_SM_COMP_UTAG;
		break;

	default:
		bfa_assert(0);
	}

	bfa_sm_send_event(ioim, evt);
}

void
bfa_ioim_good_comp_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
{
	struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
	struct bfi_ioim_rsp_s *rsp = (struct bfi_ioim_rsp_s *) m;
	struct bfa_ioim_s *ioim;
	u16 iotag;

	iotag = bfa_os_ntohs(rsp->io_tag);

	ioim = BFA_IOIM_FROM_TAG(fcpim, iotag);
	bfa_assert(ioim->iotag == iotag);

	bfa_trc_fp(ioim->bfa, ioim->iotag);
	bfa_sm_send_event(ioim, BFA_IOIM_SM_COMP_GOOD);
}

/**
 * Called by itnim to clean up IO while going offline.
 */
void
bfa_ioim_cleanup(struct bfa_ioim_s *ioim)
{
	bfa_trc(ioim->bfa, ioim->iotag);
	bfa_fcpim_stats(ioim->fcpim, io_cleanups);

	ioim->iosp->tskim = NULL;
	bfa_sm_send_event(ioim, BFA_IOIM_SM_CLEANUP);
}

void
bfa_ioim_cleanup_tm(struct bfa_ioim_s *ioim, struct bfa_tskim_s *tskim)
{
	bfa_trc(ioim->bfa, ioim->iotag);
	bfa_fcpim_stats(ioim->fcpim, io_tmaborts);

	ioim->iosp->tskim = tskim;
	bfa_sm_send_event(ioim, BFA_IOIM_SM_CLEANUP);
}

/**
 * IOC failure handling.
 */
void
bfa_ioim_iocdisable(struct bfa_ioim_s *ioim)
{
	bfa_sm_send_event(ioim, BFA_IOIM_SM_HWFAIL);
}

/**
 * IO offline TOV popped. Fail the pending IO.
 */
void
bfa_ioim_tov(struct bfa_ioim_s *ioim)
{
	bfa_sm_send_event(ioim, BFA_IOIM_SM_IOTOV);
}



/**
 *  bfa_ioim_api
 */

/**
 * Allocate IOIM resource for initiator mode I/O request.
 */
struct bfa_ioim_s *
bfa_ioim_alloc(struct bfa_s *bfa, struct bfad_ioim_s *dio,
	       struct bfa_itnim_s *itnim, u16 nsges)
{
	struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
	struct bfa_ioim_s *ioim;

	/**
	 * allocate IOIM resource
	 */
	bfa_q_deq(&fcpim->ioim_free_q, &ioim);
	if (!ioim) {
		bfa_fcpim_stats(fcpim, no_iotags);
		return NULL;
	}

	ioim->dio = dio;
	ioim->itnim = itnim;
	ioim->nsges = nsges;
	ioim->nsgpgs = 0;

	bfa_stats(fcpim, total_ios);
	bfa_stats(itnim, ios);
	fcpim->ios_active++;

	list_add_tail(&ioim->qe, &itnim->io_q);
	bfa_trc_fp(ioim->bfa, ioim->iotag);

	return ioim;
}

void
bfa_ioim_free(struct bfa_ioim_s *ioim)
{
	struct bfa_fcpim_mod_s *fcpim = ioim->fcpim;

	bfa_trc_fp(ioim->bfa, ioim->iotag);
	bfa_assert_fp(bfa_sm_cmp_state(ioim, bfa_ioim_sm_uninit));

	bfa_assert_fp(list_empty(&ioim->sgpg_q)
		      || (ioim->nsges > BFI_SGE_INLINE));

	if (ioim->nsgpgs > 0)
		bfa_sgpg_mfree(ioim->bfa, &ioim->sgpg_q, ioim->nsgpgs);

	bfa_stats(ioim->itnim, io_comps);
	fcpim->ios_active--;

	list_del(&ioim->qe);
	list_add_tail(&ioim->qe, &fcpim->ioim_free_q);
}

void
bfa_ioim_start(struct bfa_ioim_s *ioim)
{
	bfa_trc_fp(ioim->bfa, ioim->iotag);

	/**
	 * Obtain the queue over which this request has to be issued
	 */
	ioim->reqq = bfa_fcpim_ioredirect_enabled(ioim->bfa) ?
		     bfa_cb_ioim_get_reqq(ioim->dio) :
		     bfa_itnim_get_reqq(ioim);

	bfa_sm_send_event(ioim, BFA_IOIM_SM_START);
}

/**
 * Driver I/O abort request.
 */
void
bfa_ioim_abort(struct bfa_ioim_s *ioim)
{
	bfa_trc(ioim->bfa, ioim->iotag);
	bfa_fcpim_stats(ioim->fcpim, io_aborts);
	bfa_sm_send_event(ioim, BFA_IOIM_SM_ABORT);
}