blob: c9192869c0fe0fa0bcca108477429450b207e174 [file] [log] [blame]
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001/*
2 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18#include "bfa_os_inc.h"
19#include "bfa_plog.h"
20#include "bfa_cs.h"
21#include "bfa_modules.h"
22#include "bfad_drv.h"
23
24BFA_TRC_FILE(HAL, FCXP);
25BFA_MODULE(fcxp);
26BFA_MODULE(sgpg);
27BFA_MODULE(lps);
28BFA_MODULE(fcport);
29BFA_MODULE(rport);
30BFA_MODULE(uf);
31
Jing Huang5fbe25c2010-10-18 17:17:23 -070032/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -070033 * LPS related definitions
34 */
35#define BFA_LPS_MIN_LPORTS (1)
36#define BFA_LPS_MAX_LPORTS (256)
37
38/*
39 * Maximum Vports supported per physical port or vf.
40 */
41#define BFA_LPS_MAX_VPORTS_SUPP_CB 255
42#define BFA_LPS_MAX_VPORTS_SUPP_CT 190
43
Krishna Gudipatia36c61f2010-09-15 11:50:55 -070044
Jing Huang5fbe25c2010-10-18 17:17:23 -070045/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -070046 * FC PORT related definitions
47 */
48/*
49 * The port is considered disabled if corresponding physical port or IOC are
50 * disabled explicitly
51 */
52#define BFA_PORT_IS_DISABLED(bfa) \
53 ((bfa_fcport_is_disabled(bfa) == BFA_TRUE) || \
54 (bfa_ioc_is_disabled(&bfa->ioc) == BFA_TRUE))
55
56
Jing Huang5fbe25c2010-10-18 17:17:23 -070057/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -070058 * BFA port state machine events
59 */
60enum bfa_fcport_sm_event {
61 BFA_FCPORT_SM_START = 1, /* start port state machine */
62 BFA_FCPORT_SM_STOP = 2, /* stop port state machine */
63 BFA_FCPORT_SM_ENABLE = 3, /* enable port */
64 BFA_FCPORT_SM_DISABLE = 4, /* disable port state machine */
65 BFA_FCPORT_SM_FWRSP = 5, /* firmware enable/disable rsp */
66 BFA_FCPORT_SM_LINKUP = 6, /* firmware linkup event */
67 BFA_FCPORT_SM_LINKDOWN = 7, /* firmware linkup down */
68 BFA_FCPORT_SM_QRESUME = 8, /* CQ space available */
69 BFA_FCPORT_SM_HWFAIL = 9, /* IOC h/w failure */
70};
71
Jing Huang5fbe25c2010-10-18 17:17:23 -070072/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -070073 * BFA port link notification state machine events
74 */
75
76enum bfa_fcport_ln_sm_event {
77 BFA_FCPORT_LN_SM_LINKUP = 1, /* linkup event */
78 BFA_FCPORT_LN_SM_LINKDOWN = 2, /* linkdown event */
79 BFA_FCPORT_LN_SM_NOTIFICATION = 3 /* done notification */
80};
81
Jing Huang5fbe25c2010-10-18 17:17:23 -070082/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -070083 * RPORT related definitions
84 */
85#define bfa_rport_offline_cb(__rp) do { \
86 if ((__rp)->bfa->fcs) \
87 bfa_cb_rport_offline((__rp)->rport_drv); \
88 else { \
89 bfa_cb_queue((__rp)->bfa, &(__rp)->hcb_qe, \
90 __bfa_cb_rport_offline, (__rp)); \
91 } \
92} while (0)
93
94#define bfa_rport_online_cb(__rp) do { \
95 if ((__rp)->bfa->fcs) \
96 bfa_cb_rport_online((__rp)->rport_drv); \
97 else { \
98 bfa_cb_queue((__rp)->bfa, &(__rp)->hcb_qe, \
99 __bfa_cb_rport_online, (__rp)); \
100 } \
101} while (0)
102
Jing Huang5fbe25c2010-10-18 17:17:23 -0700103/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700104 * forward declarations FCXP related functions
105 */
106static void __bfa_fcxp_send_cbfn(void *cbarg, bfa_boolean_t complete);
107static void hal_fcxp_rx_plog(struct bfa_s *bfa, struct bfa_fcxp_s *fcxp,
108 struct bfi_fcxp_send_rsp_s *fcxp_rsp);
109static void hal_fcxp_tx_plog(struct bfa_s *bfa, u32 reqlen,
110 struct bfa_fcxp_s *fcxp, struct fchs_s *fchs);
111static void bfa_fcxp_qresume(void *cbarg);
112static void bfa_fcxp_queue(struct bfa_fcxp_s *fcxp,
113 struct bfi_fcxp_send_req_s *send_req);
114
Jing Huang5fbe25c2010-10-18 17:17:23 -0700115/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700116 * forward declarations for LPS functions
117 */
118static void bfa_lps_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *ndm_len,
119 u32 *dm_len);
120static void bfa_lps_attach(struct bfa_s *bfa, void *bfad,
121 struct bfa_iocfc_cfg_s *cfg,
122 struct bfa_meminfo_s *meminfo,
123 struct bfa_pcidev_s *pcidev);
124static void bfa_lps_detach(struct bfa_s *bfa);
125static void bfa_lps_start(struct bfa_s *bfa);
126static void bfa_lps_stop(struct bfa_s *bfa);
127static void bfa_lps_iocdisable(struct bfa_s *bfa);
128static void bfa_lps_login_rsp(struct bfa_s *bfa,
129 struct bfi_lps_login_rsp_s *rsp);
130static void bfa_lps_logout_rsp(struct bfa_s *bfa,
131 struct bfi_lps_logout_rsp_s *rsp);
132static void bfa_lps_reqq_resume(void *lps_arg);
133static void bfa_lps_free(struct bfa_lps_s *lps);
134static void bfa_lps_send_login(struct bfa_lps_s *lps);
135static void bfa_lps_send_logout(struct bfa_lps_s *lps);
136static void bfa_lps_login_comp(struct bfa_lps_s *lps);
137static void bfa_lps_logout_comp(struct bfa_lps_s *lps);
138static void bfa_lps_cvl_event(struct bfa_lps_s *lps);
139
Jing Huang5fbe25c2010-10-18 17:17:23 -0700140/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700141 * forward declaration for LPS state machine
142 */
143static void bfa_lps_sm_init(struct bfa_lps_s *lps, enum bfa_lps_event event);
144static void bfa_lps_sm_login(struct bfa_lps_s *lps, enum bfa_lps_event event);
145static void bfa_lps_sm_loginwait(struct bfa_lps_s *lps, enum bfa_lps_event
146 event);
147static void bfa_lps_sm_online(struct bfa_lps_s *lps, enum bfa_lps_event event);
148static void bfa_lps_sm_logout(struct bfa_lps_s *lps, enum bfa_lps_event event);
149static void bfa_lps_sm_logowait(struct bfa_lps_s *lps, enum bfa_lps_event
150 event);
151
Jing Huang5fbe25c2010-10-18 17:17:23 -0700152/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700153 * forward declaration for FC Port functions
154 */
155static bfa_boolean_t bfa_fcport_send_enable(struct bfa_fcport_s *fcport);
156static bfa_boolean_t bfa_fcport_send_disable(struct bfa_fcport_s *fcport);
157static void bfa_fcport_update_linkinfo(struct bfa_fcport_s *fcport);
158static void bfa_fcport_reset_linkinfo(struct bfa_fcport_s *fcport);
159static void bfa_fcport_set_wwns(struct bfa_fcport_s *fcport);
160static void __bfa_cb_fcport_event(void *cbarg, bfa_boolean_t complete);
161static void bfa_fcport_scn(struct bfa_fcport_s *fcport,
162 enum bfa_port_linkstate event, bfa_boolean_t trunk);
163static void bfa_fcport_queue_cb(struct bfa_fcport_ln_s *ln,
164 enum bfa_port_linkstate event);
165static void __bfa_cb_fcport_stats_clr(void *cbarg, bfa_boolean_t complete);
166static void bfa_fcport_stats_get_timeout(void *cbarg);
167static void bfa_fcport_stats_clr_timeout(void *cbarg);
168static void bfa_trunk_iocdisable(struct bfa_s *bfa);
169
Jing Huang5fbe25c2010-10-18 17:17:23 -0700170/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700171 * forward declaration for FC PORT state machine
172 */
173static void bfa_fcport_sm_uninit(struct bfa_fcport_s *fcport,
174 enum bfa_fcport_sm_event event);
175static void bfa_fcport_sm_enabling_qwait(struct bfa_fcport_s *fcport,
176 enum bfa_fcport_sm_event event);
177static void bfa_fcport_sm_enabling(struct bfa_fcport_s *fcport,
178 enum bfa_fcport_sm_event event);
179static void bfa_fcport_sm_linkdown(struct bfa_fcport_s *fcport,
180 enum bfa_fcport_sm_event event);
181static void bfa_fcport_sm_linkup(struct bfa_fcport_s *fcport,
182 enum bfa_fcport_sm_event event);
183static void bfa_fcport_sm_disabling(struct bfa_fcport_s *fcport,
184 enum bfa_fcport_sm_event event);
185static void bfa_fcport_sm_disabling_qwait(struct bfa_fcport_s *fcport,
186 enum bfa_fcport_sm_event event);
187static void bfa_fcport_sm_toggling_qwait(struct bfa_fcport_s *fcport,
188 enum bfa_fcport_sm_event event);
189static void bfa_fcport_sm_disabled(struct bfa_fcport_s *fcport,
190 enum bfa_fcport_sm_event event);
191static void bfa_fcport_sm_stopped(struct bfa_fcport_s *fcport,
192 enum bfa_fcport_sm_event event);
193static void bfa_fcport_sm_iocdown(struct bfa_fcport_s *fcport,
194 enum bfa_fcport_sm_event event);
195static void bfa_fcport_sm_iocfail(struct bfa_fcport_s *fcport,
196 enum bfa_fcport_sm_event event);
197
198static void bfa_fcport_ln_sm_dn(struct bfa_fcport_ln_s *ln,
199 enum bfa_fcport_ln_sm_event event);
200static void bfa_fcport_ln_sm_dn_nf(struct bfa_fcport_ln_s *ln,
201 enum bfa_fcport_ln_sm_event event);
202static void bfa_fcport_ln_sm_dn_up_nf(struct bfa_fcport_ln_s *ln,
203 enum bfa_fcport_ln_sm_event event);
204static void bfa_fcport_ln_sm_up(struct bfa_fcport_ln_s *ln,
205 enum bfa_fcport_ln_sm_event event);
206static void bfa_fcport_ln_sm_up_nf(struct bfa_fcport_ln_s *ln,
207 enum bfa_fcport_ln_sm_event event);
208static void bfa_fcport_ln_sm_up_dn_nf(struct bfa_fcport_ln_s *ln,
209 enum bfa_fcport_ln_sm_event event);
210static void bfa_fcport_ln_sm_up_dn_up_nf(struct bfa_fcport_ln_s *ln,
211 enum bfa_fcport_ln_sm_event event);
212
213static struct bfa_sm_table_s hal_port_sm_table[] = {
214 {BFA_SM(bfa_fcport_sm_uninit), BFA_PORT_ST_UNINIT},
215 {BFA_SM(bfa_fcport_sm_enabling_qwait), BFA_PORT_ST_ENABLING_QWAIT},
216 {BFA_SM(bfa_fcport_sm_enabling), BFA_PORT_ST_ENABLING},
217 {BFA_SM(bfa_fcport_sm_linkdown), BFA_PORT_ST_LINKDOWN},
218 {BFA_SM(bfa_fcport_sm_linkup), BFA_PORT_ST_LINKUP},
219 {BFA_SM(bfa_fcport_sm_disabling_qwait), BFA_PORT_ST_DISABLING_QWAIT},
220 {BFA_SM(bfa_fcport_sm_toggling_qwait), BFA_PORT_ST_TOGGLING_QWAIT},
221 {BFA_SM(bfa_fcport_sm_disabling), BFA_PORT_ST_DISABLING},
222 {BFA_SM(bfa_fcport_sm_disabled), BFA_PORT_ST_DISABLED},
223 {BFA_SM(bfa_fcport_sm_stopped), BFA_PORT_ST_STOPPED},
224 {BFA_SM(bfa_fcport_sm_iocdown), BFA_PORT_ST_IOCDOWN},
225 {BFA_SM(bfa_fcport_sm_iocfail), BFA_PORT_ST_IOCDOWN},
226};
227
228
Jing Huang5fbe25c2010-10-18 17:17:23 -0700229/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700230 * forward declaration for RPORT related functions
231 */
232static struct bfa_rport_s *bfa_rport_alloc(struct bfa_rport_mod_s *rp_mod);
233static void bfa_rport_free(struct bfa_rport_s *rport);
234static bfa_boolean_t bfa_rport_send_fwcreate(struct bfa_rport_s *rp);
235static bfa_boolean_t bfa_rport_send_fwdelete(struct bfa_rport_s *rp);
236static bfa_boolean_t bfa_rport_send_fwspeed(struct bfa_rport_s *rp);
237static void __bfa_cb_rport_online(void *cbarg,
238 bfa_boolean_t complete);
239static void __bfa_cb_rport_offline(void *cbarg,
240 bfa_boolean_t complete);
241
Jing Huang5fbe25c2010-10-18 17:17:23 -0700242/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700243 * forward declaration for RPORT state machine
244 */
245static void bfa_rport_sm_uninit(struct bfa_rport_s *rp,
246 enum bfa_rport_event event);
247static void bfa_rport_sm_created(struct bfa_rport_s *rp,
248 enum bfa_rport_event event);
249static void bfa_rport_sm_fwcreate(struct bfa_rport_s *rp,
250 enum bfa_rport_event event);
251static void bfa_rport_sm_online(struct bfa_rport_s *rp,
252 enum bfa_rport_event event);
253static void bfa_rport_sm_fwdelete(struct bfa_rport_s *rp,
254 enum bfa_rport_event event);
255static void bfa_rport_sm_offline(struct bfa_rport_s *rp,
256 enum bfa_rport_event event);
257static void bfa_rport_sm_deleting(struct bfa_rport_s *rp,
258 enum bfa_rport_event event);
259static void bfa_rport_sm_offline_pending(struct bfa_rport_s *rp,
260 enum bfa_rport_event event);
261static void bfa_rport_sm_delete_pending(struct bfa_rport_s *rp,
262 enum bfa_rport_event event);
263static void bfa_rport_sm_iocdisable(struct bfa_rport_s *rp,
264 enum bfa_rport_event event);
265static void bfa_rport_sm_fwcreate_qfull(struct bfa_rport_s *rp,
266 enum bfa_rport_event event);
267static void bfa_rport_sm_fwdelete_qfull(struct bfa_rport_s *rp,
268 enum bfa_rport_event event);
269static void bfa_rport_sm_deleting_qfull(struct bfa_rport_s *rp,
270 enum bfa_rport_event event);
271
Jing Huang5fbe25c2010-10-18 17:17:23 -0700272/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700273 * PLOG related definitions
274 */
275static int
276plkd_validate_logrec(struct bfa_plog_rec_s *pl_rec)
277{
278 if ((pl_rec->log_type != BFA_PL_LOG_TYPE_INT) &&
279 (pl_rec->log_type != BFA_PL_LOG_TYPE_STRING))
280 return 1;
281
282 if ((pl_rec->log_type != BFA_PL_LOG_TYPE_INT) &&
283 (pl_rec->log_num_ints > BFA_PL_INT_LOG_SZ))
284 return 1;
285
286 return 0;
287}
288
/*
 * Append a validated record to the port log ring buffer at the tail,
 * timestamp it, and advance the tail.  When the ring is full the head
 * is advanced as well, dropping the oldest record.
 */
static void
bfa_plog_add(struct bfa_plog_s *plog, struct bfa_plog_rec_s *pl_rec)
{
	u16 tail;
	struct bfa_plog_rec_s *pl_recp;

	/* silently drop records while logging is disabled */
	if (plog->plog_enabled == 0)
		return;

	if (plkd_validate_logrec(pl_rec)) {
		bfa_assert(0);
		return;
	}

	tail = plog->tail;

	pl_recp = &(plog->plog_recs[tail]);

	memcpy(pl_recp, pl_rec, sizeof(struct bfa_plog_rec_s));

	/* timestamp after the copy so the caller's record stays untouched */
	pl_recp->tv = bfa_os_get_log_time();
	BFA_PL_LOG_REC_INCR(plog->tail);

	/* ring full: overwrite oldest by pushing the head forward */
	if (plog->head == plog->tail)
		BFA_PL_LOG_REC_INCR(plog->head);
}
315
316void
317bfa_plog_init(struct bfa_plog_s *plog)
318{
Jing Huang6a18b162010-10-18 17:08:54 -0700319 memset((char *)plog, 0, sizeof(struct bfa_plog_s));
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700320
Jing Huang6a18b162010-10-18 17:08:54 -0700321 memcpy(plog->plog_sig, BFA_PL_SIG_STR, BFA_PL_SIG_LEN);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700322 plog->head = plog->tail = 0;
323 plog->plog_enabled = 1;
324}
325
326void
327bfa_plog_str(struct bfa_plog_s *plog, enum bfa_plog_mid mid,
328 enum bfa_plog_eid event,
329 u16 misc, char *log_str)
330{
331 struct bfa_plog_rec_s lp;
332
333 if (plog->plog_enabled) {
Jing Huang6a18b162010-10-18 17:08:54 -0700334 memset(&lp, 0, sizeof(struct bfa_plog_rec_s));
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700335 lp.mid = mid;
336 lp.eid = event;
337 lp.log_type = BFA_PL_LOG_TYPE_STRING;
338 lp.misc = misc;
339 strncpy(lp.log_entry.string_log, log_str,
340 BFA_PL_STRING_LOG_SZ - 1);
341 lp.log_entry.string_log[BFA_PL_STRING_LOG_SZ - 1] = '\0';
342 bfa_plog_add(plog, &lp);
343 }
344}
345
346void
347bfa_plog_intarr(struct bfa_plog_s *plog, enum bfa_plog_mid mid,
348 enum bfa_plog_eid event,
349 u16 misc, u32 *intarr, u32 num_ints)
350{
351 struct bfa_plog_rec_s lp;
352 u32 i;
353
354 if (num_ints > BFA_PL_INT_LOG_SZ)
355 num_ints = BFA_PL_INT_LOG_SZ;
356
357 if (plog->plog_enabled) {
Jing Huang6a18b162010-10-18 17:08:54 -0700358 memset(&lp, 0, sizeof(struct bfa_plog_rec_s));
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700359 lp.mid = mid;
360 lp.eid = event;
361 lp.log_type = BFA_PL_LOG_TYPE_INT;
362 lp.misc = misc;
363
364 for (i = 0; i < num_ints; i++)
Jing Huang6a18b162010-10-18 17:08:54 -0700365 lp.log_entry.int_log[i] = intarr[i];
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700366
367 lp.log_num_ints = (u8) num_ints;
368
369 bfa_plog_add(plog, &lp);
370 }
371}
372
373void
374bfa_plog_fchdr(struct bfa_plog_s *plog, enum bfa_plog_mid mid,
375 enum bfa_plog_eid event,
376 u16 misc, struct fchs_s *fchdr)
377{
378 struct bfa_plog_rec_s lp;
379 u32 *tmp_int = (u32 *) fchdr;
380 u32 ints[BFA_PL_INT_LOG_SZ];
381
382 if (plog->plog_enabled) {
Jing Huang6a18b162010-10-18 17:08:54 -0700383 memset(&lp, 0, sizeof(struct bfa_plog_rec_s));
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700384
385 ints[0] = tmp_int[0];
386 ints[1] = tmp_int[1];
387 ints[2] = tmp_int[4];
388
389 bfa_plog_intarr(plog, mid, event, misc, ints, 3);
390 }
391}
392
393void
394bfa_plog_fchdr_and_pl(struct bfa_plog_s *plog, enum bfa_plog_mid mid,
395 enum bfa_plog_eid event, u16 misc, struct fchs_s *fchdr,
396 u32 pld_w0)
397{
398 struct bfa_plog_rec_s lp;
399 u32 *tmp_int = (u32 *) fchdr;
400 u32 ints[BFA_PL_INT_LOG_SZ];
401
402 if (plog->plog_enabled) {
Jing Huang6a18b162010-10-18 17:08:54 -0700403 memset(&lp, 0, sizeof(struct bfa_plog_rec_s));
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700404
405 ints[0] = tmp_int[0];
406 ints[1] = tmp_int[1];
407 ints[2] = tmp_int[4];
408 ints[3] = pld_w0;
409
410 bfa_plog_intarr(plog, mid, event, misc, ints, 4);
411 }
412}
413
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700414
Jing Huang5fbe25c2010-10-18 17:17:23 -0700415/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700416 * fcxp_pvt BFA FCXP private functions
417 */
418
/*
 * Carve the request and response payload pools for all FCXPs out of
 * the module's DMA-able memory region and advance the meminfo cursors
 * past what was consumed.  Both pools are zeroed.
 */
static void
claim_fcxp_req_rsp_mem(struct bfa_fcxp_mod_s *mod, struct bfa_meminfo_s *mi)
{
	u8 *dm_kva = NULL;
	u64 dm_pa;
	u32 buf_pool_sz;

	/* current DMA allocation cursors (virtual and physical) */
	dm_kva = bfa_meminfo_dma_virt(mi);
	dm_pa = bfa_meminfo_dma_phys(mi);

	buf_pool_sz = mod->req_pld_sz * mod->num_fcxps;

	/*
	 * Initialize the fcxp req payload list
	 */
	mod->req_pld_list_kva = dm_kva;
	mod->req_pld_list_pa = dm_pa;
	dm_kva += buf_pool_sz;
	dm_pa += buf_pool_sz;
	memset(mod->req_pld_list_kva, 0, buf_pool_sz);

	/*
	 * Initialize the fcxp rsp payload list
	 */
	buf_pool_sz = mod->rsp_pld_sz * mod->num_fcxps;
	mod->rsp_pld_list_kva = dm_kva;
	mod->rsp_pld_list_pa = dm_pa;
	dm_kva += buf_pool_sz;
	dm_pa += buf_pool_sz;
	memset(mod->rsp_pld_list_kva, 0, buf_pool_sz);

	/* hand the advanced cursors back to the allocator */
	bfa_meminfo_dma_virt(mi) = dm_kva;
	bfa_meminfo_dma_phys(mi) = dm_pa;
}
453
454static void
455claim_fcxps_mem(struct bfa_fcxp_mod_s *mod, struct bfa_meminfo_s *mi)
456{
457 u16 i;
458 struct bfa_fcxp_s *fcxp;
459
460 fcxp = (struct bfa_fcxp_s *) bfa_meminfo_kva(mi);
Jing Huang6a18b162010-10-18 17:08:54 -0700461 memset(fcxp, 0, sizeof(struct bfa_fcxp_s) * mod->num_fcxps);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700462
463 INIT_LIST_HEAD(&mod->fcxp_free_q);
464 INIT_LIST_HEAD(&mod->fcxp_active_q);
465
466 mod->fcxp_list = fcxp;
467
468 for (i = 0; i < mod->num_fcxps; i++) {
469 fcxp->fcxp_mod = mod;
470 fcxp->fcxp_tag = i;
471
472 list_add_tail(&fcxp->qe, &mod->fcxp_free_q);
473 bfa_reqq_winit(&fcxp->reqq_wqe, bfa_fcxp_qresume, fcxp);
474 fcxp->reqq_waiting = BFA_FALSE;
475
476 fcxp = fcxp + 1;
477 }
478
479 bfa_meminfo_kva(mi) = (void *)fcxp;
480}
481
/*
 * Accumulate the FCXP module's memory needs into the caller's running
 * totals: DMA space for request/response payload pools (*dm_len) and
 * kernel memory for the bfa_fcxp_s array (*ndm_len).
 */
static void
bfa_fcxp_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *ndm_len,
		u32 *dm_len)
{
	u16 num_fcxp_reqs = cfg->fwcfg.num_fcxp_reqs;

	if (num_fcxp_reqs == 0)
		return;

	/*
	 * Account for req/rsp payload
	 */
	*dm_len += BFA_FCXP_MAX_IBUF_SZ * num_fcxp_reqs;
	/* min_cfg keeps the response pool at the small (IBUF) size */
	if (cfg->drvcfg.min_cfg)
		*dm_len += BFA_FCXP_MAX_IBUF_SZ * num_fcxp_reqs;
	else
		*dm_len += BFA_FCXP_MAX_LBUF_SZ * num_fcxp_reqs;

	/*
	 * Account for fcxp structs
	 */
	*ndm_len += sizeof(struct bfa_fcxp_s) * num_fcxp_reqs;
}
505
/*
 * Module attach: initialize the FCXP module state and claim the
 * payload-pool and FCXP-array memory computed by bfa_fcxp_meminfo().
 * The payload sizes chosen here must match that sizing.
 */
static void
bfa_fcxp_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
		struct bfa_meminfo_s *meminfo, struct bfa_pcidev_s *pcidev)
{
	struct bfa_fcxp_mod_s *mod = BFA_FCXP_MOD(bfa);

	memset(mod, 0, sizeof(struct bfa_fcxp_mod_s));
	mod->bfa = bfa;
	mod->num_fcxps = cfg->fwcfg.num_fcxp_reqs;

	/*
	 * Initialize FCXP request and response payload sizes;
	 * response buffers are enlarged unless running minimal config.
	 */
	mod->req_pld_sz = mod->rsp_pld_sz = BFA_FCXP_MAX_IBUF_SZ;
	if (!cfg->drvcfg.min_cfg)
		mod->rsp_pld_sz = BFA_FCXP_MAX_LBUF_SZ;

	INIT_LIST_HEAD(&mod->wait_q);

	claim_fcxp_req_rsp_mem(mod, meminfo);
	claim_fcxps_mem(mod, meminfo);
}
528
/*
 * Module detach hook; the FCXP module has nothing to tear down.
 */
static void
bfa_fcxp_detach(struct bfa_s *bfa)
{
}
533
/*
 * Module start hook; no per-start work is needed for FCXP.
 */
static void
bfa_fcxp_start(struct bfa_s *bfa)
{
}
538
/*
 * Module stop hook; no per-stop work is needed for FCXP.
 */
static void
bfa_fcxp_stop(struct bfa_s *bfa)
{
}
543
/*
 * IOC failure notification.  Every in-flight FCXP is completed with
 * BFA_STATUS_IOC_FAILURE: invoked directly and freed when it has no
 * caller context, otherwise deferred through the callback queue.
 */
static void
bfa_fcxp_iocdisable(struct bfa_s *bfa)
{
	struct bfa_fcxp_mod_s *mod = BFA_FCXP_MOD(bfa);
	struct bfa_fcxp_s *fcxp;
	struct list_head *qe, *qen;

	/* _safe iteration: completion may unlink the current entry */
	list_for_each_safe(qe, qen, &mod->fcxp_active_q) {
		fcxp = (struct bfa_fcxp_s *) qe;
		if (fcxp->caller == NULL) {
			fcxp->send_cbfn(fcxp->caller, fcxp, fcxp->send_cbarg,
					BFA_STATUS_IOC_FAILURE, 0, 0, NULL);
			bfa_fcxp_free(fcxp);
		} else {
			/* stash status; __bfa_fcxp_send_cbfn runs later */
			fcxp->rsp_status = BFA_STATUS_IOC_FAILURE;
			bfa_cb_queue(bfa, &fcxp->hcb_qe,
					__bfa_fcxp_send_cbfn, fcxp);
		}
	}
}
564
565static struct bfa_fcxp_s *
566bfa_fcxp_get(struct bfa_fcxp_mod_s *fm)
567{
568 struct bfa_fcxp_s *fcxp;
569
570 bfa_q_deq(&fm->fcxp_free_q, &fcxp);
571
572 if (fcxp)
573 list_add_tail(&fcxp->qe, &fm->fcxp_active_q);
574
575 return fcxp;
576}
577
578static void
579bfa_fcxp_init_reqrsp(struct bfa_fcxp_s *fcxp,
580 struct bfa_s *bfa,
581 u8 *use_ibuf,
582 u32 *nr_sgles,
583 bfa_fcxp_get_sgaddr_t *r_sga_cbfn,
584 bfa_fcxp_get_sglen_t *r_sglen_cbfn,
585 struct list_head *r_sgpg_q,
586 int n_sgles,
587 bfa_fcxp_get_sgaddr_t sga_cbfn,
588 bfa_fcxp_get_sglen_t sglen_cbfn)
589{
590
591 bfa_assert(bfa != NULL);
592
593 bfa_trc(bfa, fcxp->fcxp_tag);
594
595 if (n_sgles == 0) {
596 *use_ibuf = 1;
597 } else {
598 bfa_assert(*sga_cbfn != NULL);
599 bfa_assert(*sglen_cbfn != NULL);
600
601 *use_ibuf = 0;
602 *r_sga_cbfn = sga_cbfn;
603 *r_sglen_cbfn = sglen_cbfn;
604
605 *nr_sgles = n_sgles;
606
607 /*
608 * alloc required sgpgs
609 */
610 if (n_sgles > BFI_SGE_INLINE)
611 bfa_assert(0);
612 }
613
614}
615
/*
 * Initialize an FCXP's caller context and both of its scatter-gather
 * setups (request side and response side).
 */
static void
bfa_fcxp_init(struct bfa_fcxp_s *fcxp,
	    void *caller, struct bfa_s *bfa, int nreq_sgles,
	    int nrsp_sgles, bfa_fcxp_get_sgaddr_t req_sga_cbfn,
	    bfa_fcxp_get_sglen_t req_sglen_cbfn,
	    bfa_fcxp_get_sgaddr_t rsp_sga_cbfn,
	    bfa_fcxp_get_sglen_t rsp_sglen_cbfn)
{

	bfa_assert(bfa != NULL);

	bfa_trc(bfa, fcxp->fcxp_tag);

	fcxp->caller = caller;

	/* request-side SG setup */
	bfa_fcxp_init_reqrsp(fcxp, bfa,
		&fcxp->use_ireqbuf, &fcxp->nreq_sgles, &fcxp->req_sga_cbfn,
		&fcxp->req_sglen_cbfn, &fcxp->req_sgpg_q,
		nreq_sgles, req_sga_cbfn, req_sglen_cbfn);

	/* response-side SG setup */
	bfa_fcxp_init_reqrsp(fcxp, bfa,
		&fcxp->use_irspbuf, &fcxp->nrsp_sgles, &fcxp->rsp_sga_cbfn,
		&fcxp->rsp_sglen_cbfn, &fcxp->rsp_sgpg_q,
		nrsp_sgles, rsp_sga_cbfn, rsp_sglen_cbfn);

}
642
/*
 * Return an FCXP to the module.  If a caller is blocked on the wait
 * queue, the FCXP is re-initialized with the waiter's SG setup and
 * handed straight to that waiter instead of going back to the free
 * list.
 */
static void
bfa_fcxp_put(struct bfa_fcxp_s *fcxp)
{
	struct bfa_fcxp_mod_s *mod = fcxp->fcxp_mod;
	struct bfa_fcxp_wqe_s *wqe;

	/* the first waiter, if any, gets the fcxp */
	bfa_q_deq(&mod->wait_q, &wqe);
	if (wqe) {
		bfa_trc(mod->bfa, fcxp->fcxp_tag);

		bfa_fcxp_init(fcxp, wqe->caller, wqe->bfa, wqe->nreq_sgles,
			wqe->nrsp_sgles, wqe->req_sga_cbfn,
			wqe->req_sglen_cbfn, wqe->rsp_sga_cbfn,
			wqe->rsp_sglen_cbfn);

		wqe->alloc_cbfn(wqe->alloc_cbarg, fcxp);
		return;
	}

	/* no waiter: move from active back to free */
	bfa_assert(bfa_q_is_on_q(&mod->fcxp_active_q, fcxp));
	list_del(&fcxp->qe);
	list_add_tail(&fcxp->qe, &mod->fcxp_free_q);
}
666
/*
 * No-op completion callback, installed when the original completion
 * has been discarded (e.g. the caller aborted the exchange).
 */
static void
bfa_fcxp_null_comp(void *bfad_fcxp, struct bfa_fcxp_s *fcxp, void *cbarg,
		   bfa_status_t req_status, u32 rsp_len,
		   u32 resid_len, struct fchs_s *rsp_fchs)
{
	/* discarded fcxp completion */
}
674
675static void
676__bfa_fcxp_send_cbfn(void *cbarg, bfa_boolean_t complete)
677{
678 struct bfa_fcxp_s *fcxp = cbarg;
679
680 if (complete) {
681 fcxp->send_cbfn(fcxp->caller, fcxp, fcxp->send_cbarg,
682 fcxp->rsp_status, fcxp->rsp_len,
683 fcxp->residue_len, &fcxp->rsp_fchs);
684 } else {
685 bfa_fcxp_free(fcxp);
686 }
687}
688
/*
 * Firmware completion handler for an FCXP send request.  Converts the
 * big-endian response fields, logs the response, then completes the
 * caller: directly (and frees the FCXP) when there is no caller
 * context, otherwise by stashing the results and deferring through
 * the callback queue.
 */
static void
hal_fcxp_send_comp(struct bfa_s *bfa, struct bfi_fcxp_send_rsp_s *fcxp_rsp)
{
	struct bfa_fcxp_mod_s *mod = BFA_FCXP_MOD(bfa);
	struct bfa_fcxp_s *fcxp;
	u16 fcxp_tag = be16_to_cpu(fcxp_rsp->fcxp_tag);

	bfa_trc(bfa, fcxp_tag);

	/* in-place conversion of firmware (big-endian) fields */
	fcxp_rsp->rsp_len = be32_to_cpu(fcxp_rsp->rsp_len);

	/*
	 * @todo f/w should not set residue to non-0 when everything
	 * is received.
	 */
	if (fcxp_rsp->req_status == BFA_STATUS_OK)
		fcxp_rsp->residue_len = 0;
	else
		fcxp_rsp->residue_len = be32_to_cpu(fcxp_rsp->residue_len);

	fcxp = BFA_FCXP_FROM_TAG(mod, fcxp_tag);

	bfa_assert(fcxp->send_cbfn != NULL);

	hal_fcxp_rx_plog(mod->bfa, fcxp, fcxp_rsp);

	/* defensive re-check of the callback asserted above */
	if (fcxp->send_cbfn != NULL) {
		bfa_trc(mod->bfa, (NULL == fcxp->caller));
		if (fcxp->caller == NULL) {
			fcxp->send_cbfn(fcxp->caller, fcxp, fcxp->send_cbarg,
					fcxp_rsp->req_status, fcxp_rsp->rsp_len,
					fcxp_rsp->residue_len, &fcxp_rsp->fchs);
			/*
			 * fcxp automatically freed on return from the callback
			 */
			bfa_fcxp_free(fcxp);
		} else {
			/* stash results; completion runs from the cb queue */
			fcxp->rsp_status = fcxp_rsp->req_status;
			fcxp->rsp_len = fcxp_rsp->rsp_len;
			fcxp->residue_len = fcxp_rsp->residue_len;
			fcxp->rsp_fchs = fcxp_rsp->fchs;

			bfa_cb_queue(bfa, &fcxp->hcb_qe,
					__bfa_fcxp_send_cbfn, fcxp);
		}
	} else {
		bfa_trc(bfa, (NULL == fcxp->send_cbfn));
	}
}
738
/*
 * Fill the two inline SGEs of a send request with a single local
 * buffer: the first SGE carries the data, the second is the
 * zero-address pagelist terminator.  Each SGE is byte-swapped for the
 * firmware after it is filled in, so field order matters here.
 */
static void
hal_fcxp_set_local_sges(struct bfi_sge_s *sge, u32 reqlen, u64 req_pa)
{
	union bfi_addr_u sga_zero = { {0} };

	sge->sg_len = reqlen;
	sge->flags = BFI_SGE_DATA_LAST;
	bfa_dma_addr_set(sge[0].sga, req_pa);
	bfa_sge_to_be(sge);
	sge++;

	/* terminator SGE */
	sge->sga = sga_zero;
	sge->sg_len = reqlen;
	sge->flags = BFI_SGE_PGDLEN;
	bfa_sge_to_be(sge);
}
755
756static void
757hal_fcxp_tx_plog(struct bfa_s *bfa, u32 reqlen, struct bfa_fcxp_s *fcxp,
758 struct fchs_s *fchs)
759{
760 /*
761 * TODO: TX ox_id
762 */
763 if (reqlen > 0) {
764 if (fcxp->use_ireqbuf) {
765 u32 pld_w0 =
766 *((u32 *) BFA_FCXP_REQ_PLD(fcxp));
767
768 bfa_plog_fchdr_and_pl(bfa->plog, BFA_PL_MID_HAL_FCXP,
769 BFA_PL_EID_TX,
770 reqlen + sizeof(struct fchs_s), fchs,
771 pld_w0);
772 } else {
773 bfa_plog_fchdr(bfa->plog, BFA_PL_MID_HAL_FCXP,
774 BFA_PL_EID_TX,
775 reqlen + sizeof(struct fchs_s),
776 fchs);
777 }
778 } else {
779 bfa_plog_fchdr(bfa->plog, BFA_PL_MID_HAL_FCXP, BFA_PL_EID_TX,
780 reqlen + sizeof(struct fchs_s), fchs);
781 }
782}
783
784static void
785hal_fcxp_rx_plog(struct bfa_s *bfa, struct bfa_fcxp_s *fcxp,
786 struct bfi_fcxp_send_rsp_s *fcxp_rsp)
787{
788 if (fcxp_rsp->rsp_len > 0) {
789 if (fcxp->use_irspbuf) {
790 u32 pld_w0 =
791 *((u32 *) BFA_FCXP_RSP_PLD(fcxp));
792
793 bfa_plog_fchdr_and_pl(bfa->plog, BFA_PL_MID_HAL_FCXP,
794 BFA_PL_EID_RX,
795 (u16) fcxp_rsp->rsp_len,
796 &fcxp_rsp->fchs, pld_w0);
797 } else {
798 bfa_plog_fchdr(bfa->plog, BFA_PL_MID_HAL_FCXP,
799 BFA_PL_EID_RX,
800 (u16) fcxp_rsp->rsp_len,
801 &fcxp_rsp->fchs);
802 }
803 } else {
804 bfa_plog_fchdr(bfa->plog, BFA_PL_MID_HAL_FCXP, BFA_PL_EID_RX,
805 (u16) fcxp_rsp->rsp_len, &fcxp_rsp->fchs);
806 }
807}
808
/*
 * Handler to resume sending fcxp when space is available in the CPE
 * queue; registered via bfa_reqq_winit() and invoked by the request
 * queue when it drains.
 */
static void
bfa_fcxp_qresume(void *cbarg)
{
	struct bfa_fcxp_s *fcxp = cbarg;
	struct bfa_s *bfa = fcxp->fcxp_mod->bfa;
	struct bfi_fcxp_send_req_s *send_req;

	fcxp->reqq_waiting = BFA_FALSE;
	/* space was just signalled, so the next queue entry is available */
	send_req = bfa_reqq_next(bfa, BFA_REQQ_FCXP);
	bfa_fcxp_queue(fcxp, send_req);
}
823
/*
 * Queue fcxp send request to firmware.  Builds the BFI send message
 * (endian-converted for firmware), fills the request and response
 * inline SGEs, logs the TX frame, and produces the request queue
 * entry.
 */
static void
bfa_fcxp_queue(struct bfa_fcxp_s *fcxp, struct bfi_fcxp_send_req_s *send_req)
{
	struct bfa_s *bfa = fcxp->fcxp_mod->bfa;
	struct bfa_fcxp_req_info_s *reqi = &fcxp->req_info;
	struct bfa_fcxp_rsp_info_s *rspi = &fcxp->rsp_info;
	struct bfa_rport_s *rport = reqi->bfa_rport;

	bfi_h2i_set(send_req->mh, BFI_MC_FCXP, BFI_FCXP_H2I_SEND_REQ,
		bfa_lpuid(bfa));

	send_req->fcxp_tag = cpu_to_be16(fcxp->fcxp_tag);
	if (rport) {
		send_req->rport_fw_hndl = rport->fw_handle;
		send_req->max_frmsz = cpu_to_be16(rport->rport_info.max_frmsz);
		/* rport with unset frame size: fall back to FC maximum */
		if (send_req->max_frmsz == 0)
			send_req->max_frmsz = cpu_to_be16(FC_MAX_PDUSZ);
	} else {
		send_req->rport_fw_hndl = 0;
		send_req->max_frmsz = cpu_to_be16(FC_MAX_PDUSZ);
	}

	send_req->vf_id = cpu_to_be16(reqi->vf_id);
	send_req->lp_tag = reqi->lp_tag;
	send_req->class = reqi->class;
	send_req->rsp_timeout = rspi->rsp_timeout;
	send_req->cts = reqi->cts;
	send_req->fchs = reqi->fchs;

	send_req->req_len = cpu_to_be32(reqi->req_tot_len);
	send_req->rsp_maxlen = cpu_to_be32(rspi->rsp_maxlen);

	/*
	 * setup req sgles
	 */
	if (fcxp->use_ireqbuf == 1) {
		hal_fcxp_set_local_sges(send_req->req_sge, reqi->req_tot_len,
					BFA_FCXP_REQ_PLD_PA(fcxp));
	} else {
		if (fcxp->nreq_sgles > 0) {
			/* only a single inline request SGE is supported */
			bfa_assert(fcxp->nreq_sgles == 1);
			hal_fcxp_set_local_sges(send_req->req_sge,
						reqi->req_tot_len,
						fcxp->req_sga_cbfn(fcxp->caller,
							0));
		} else {
			bfa_assert(reqi->req_tot_len == 0);
			/*
			 * NOTE(review): this writes rsp_sge even though we
			 * are in the *request* SGE setup path (and the
			 * response path below overwrites it) -- looks like
			 * it should be req_sge; confirm against the BFI
			 * firmware interface before changing.
			 */
			hal_fcxp_set_local_sges(send_req->rsp_sge, 0, 0);
		}
	}

	/*
	 * setup rsp sgles
	 */
	if (fcxp->use_irspbuf == 1) {
		bfa_assert(rspi->rsp_maxlen <= BFA_FCXP_MAX_LBUF_SZ);

		hal_fcxp_set_local_sges(send_req->rsp_sge, rspi->rsp_maxlen,
					BFA_FCXP_RSP_PLD_PA(fcxp));

	} else {
		if (fcxp->nrsp_sgles > 0) {
			/* only a single inline response SGE is supported */
			bfa_assert(fcxp->nrsp_sgles == 1);
			hal_fcxp_set_local_sges(send_req->rsp_sge,
						rspi->rsp_maxlen,
						fcxp->rsp_sga_cbfn(fcxp->caller,
							0));
		} else {
			bfa_assert(rspi->rsp_maxlen == 0);
			hal_fcxp_set_local_sges(send_req->rsp_sge, 0, 0);
		}
	}

	hal_fcxp_tx_plog(bfa, reqi->req_tot_len, fcxp, &reqi->fchs);

	/* hand the message to firmware */
	bfa_reqq_produce(bfa, BFA_REQQ_FCXP);

	bfa_trc(bfa, bfa_reqq_pi(bfa, BFA_REQQ_FCXP));
	bfa_trc(bfa, bfa_reqq_ci(bfa, BFA_REQQ_FCXP));
}
907
Jing Huang5fbe25c2010-10-18 17:17:23 -0700908/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700909 * hal_fcxp_api BFA FCXP API
910 */
911
Jing Huang5fbe25c2010-10-18 17:17:23 -0700912/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700913 * Allocate an FCXP instance to send a response or to send a request
914 * that has a response. Request/response buffers are allocated by caller.
915 *
916 * @param[in] bfa BFA bfa instance
917 * @param[in] nreq_sgles Number of SG elements required for request
918 * buffer. 0, if fcxp internal buffers are used.
919 * Use bfa_fcxp_get_reqbuf() to get the
920 * internal req buffer.
921 * @param[in] req_sgles SG elements describing request buffer. Will be
922 * copied in by BFA and hence can be freed on
923 * return from this function.
924 * @param[in] get_req_sga function ptr to be called to get a request SG
925 * Address (given the sge index).
926 * @param[in] get_req_sglen function ptr to be called to get a request SG
927 * len (given the sge index).
928 * @param[in] get_rsp_sga function ptr to be called to get a response SG
929 * Address (given the sge index).
930 * @param[in] get_rsp_sglen function ptr to be called to get a response SG
931 * len (given the sge index).
932 *
933 * @return FCXP instance. NULL on failure.
934 */
935struct bfa_fcxp_s *
936bfa_fcxp_alloc(void *caller, struct bfa_s *bfa, int nreq_sgles,
937 int nrsp_sgles, bfa_fcxp_get_sgaddr_t req_sga_cbfn,
938 bfa_fcxp_get_sglen_t req_sglen_cbfn,
939 bfa_fcxp_get_sgaddr_t rsp_sga_cbfn,
940 bfa_fcxp_get_sglen_t rsp_sglen_cbfn)
941{
942 struct bfa_fcxp_s *fcxp = NULL;
943
944 bfa_assert(bfa != NULL);
945
946 fcxp = bfa_fcxp_get(BFA_FCXP_MOD(bfa));
947 if (fcxp == NULL)
948 return NULL;
949
950 bfa_trc(bfa, fcxp->fcxp_tag);
951
952 bfa_fcxp_init(fcxp, caller, bfa, nreq_sgles, nrsp_sgles, req_sga_cbfn,
953 req_sglen_cbfn, rsp_sga_cbfn, rsp_sglen_cbfn);
954
955 return fcxp;
956}
957
Jing Huang5fbe25c2010-10-18 17:17:23 -0700958/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700959 * Get the internal request buffer pointer
960 *
961 * @param[in] fcxp BFA fcxp pointer
962 *
963 * @return pointer to the internal request buffer
964 */
965void *
966bfa_fcxp_get_reqbuf(struct bfa_fcxp_s *fcxp)
967{
968 struct bfa_fcxp_mod_s *mod = fcxp->fcxp_mod;
969 void *reqbuf;
970
971 bfa_assert(fcxp->use_ireqbuf == 1);
972 reqbuf = ((u8 *)mod->req_pld_list_kva) +
973 fcxp->fcxp_tag * mod->req_pld_sz;
974 return reqbuf;
975}
976
977u32
978bfa_fcxp_get_reqbufsz(struct bfa_fcxp_s *fcxp)
979{
980 struct bfa_fcxp_mod_s *mod = fcxp->fcxp_mod;
981
982 return mod->req_pld_sz;
983}
984
Jing Huang5fbe25c2010-10-18 17:17:23 -0700985/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700986 * Get the internal response buffer pointer
987 *
988 * @param[in] fcxp BFA fcxp pointer
989 *
990 * @return pointer to the internal request buffer
991 */
992void *
993bfa_fcxp_get_rspbuf(struct bfa_fcxp_s *fcxp)
994{
995 struct bfa_fcxp_mod_s *mod = fcxp->fcxp_mod;
996 void *rspbuf;
997
998 bfa_assert(fcxp->use_irspbuf == 1);
999
1000 rspbuf = ((u8 *)mod->rsp_pld_list_kva) +
1001 fcxp->fcxp_tag * mod->rsp_pld_sz;
1002 return rspbuf;
1003}
1004
Jing Huang5fbe25c2010-10-18 17:17:23 -07001005/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001006 * Free the BFA FCXP
1007 *
1008 * @param[in] fcxp BFA fcxp pointer
1009 *
1010 * @return void
1011 */
1012void
1013bfa_fcxp_free(struct bfa_fcxp_s *fcxp)
1014{
1015 struct bfa_fcxp_mod_s *mod = fcxp->fcxp_mod;
1016
1017 bfa_assert(fcxp != NULL);
1018 bfa_trc(mod->bfa, fcxp->fcxp_tag);
1019 bfa_fcxp_put(fcxp);
1020}
1021
Jing Huang5fbe25c2010-10-18 17:17:23 -07001022/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001023 * Send a FCXP request
1024 *
1025 * @param[in] fcxp BFA fcxp pointer
1026 * @param[in] rport BFA rport pointer. Could be left NULL for WKA rports
1027 * @param[in] vf_id virtual Fabric ID
1028 * @param[in] lp_tag lport tag
1029 * @param[in] cts use Continous sequence
1030 * @param[in] cos fc Class of Service
1031 * @param[in] reqlen request length, does not include FCHS length
1032 * @param[in] fchs fc Header Pointer. The header content will be copied
1033 * in by BFA.
1034 *
1035 * @param[in] cbfn call back function to be called on receiving
1036 * the response
1037 * @param[in] cbarg arg for cbfn
1038 * @param[in] rsp_timeout
1039 * response timeout
1040 *
1041 * @return bfa_status_t
1042 */
void
bfa_fcxp_send(struct bfa_fcxp_s *fcxp, struct bfa_rport_s *rport,
	      u16 vf_id, u8 lp_tag, bfa_boolean_t cts, enum fc_cos cos,
	      u32 reqlen, struct fchs_s *fchs, bfa_cb_fcxp_send_t cbfn,
	      void *cbarg, u32 rsp_maxlen, u8 rsp_timeout)
{
	struct bfa_s			*bfa  = fcxp->fcxp_mod->bfa;
	struct bfa_fcxp_req_info_s	*reqi = &fcxp->req_info;
	struct bfa_fcxp_rsp_info_s	*rspi = &fcxp->rsp_info;
	struct bfi_fcxp_send_req_s	*send_req;

	bfa_trc(bfa, fcxp->fcxp_tag);

	/*
	 * setup request/response info
	 */
	reqi->bfa_rport = rport;
	reqi->vf_id = vf_id;
	reqi->lp_tag = lp_tag;
	reqi->class = cos;
	rspi->rsp_timeout = rsp_timeout;
	reqi->cts = cts;
	reqi->fchs = *fchs;
	reqi->req_tot_len = reqlen;
	rspi->rsp_maxlen = rsp_maxlen;
	/* A NULL cbfn is replaced by a no-op completion handler. */
	fcxp->send_cbfn = cbfn ? cbfn : bfa_fcxp_null_comp;
	fcxp->send_cbarg = cbarg;

	/*
	 * If no room in CPE queue, wait for space in request queue
	 * (resumed later via fcxp->reqq_wqe).
	 */
	send_req = bfa_reqq_next(bfa, BFA_REQQ_FCXP);
	if (!send_req) {
		bfa_trc(bfa, fcxp->fcxp_tag);
		fcxp->reqq_waiting = BFA_TRUE;
		bfa_reqq_wait(bfa, BFA_REQQ_FCXP, &fcxp->reqq_wqe);
		return;
	}

	/* Queue entry available -- build and post the request now. */
	bfa_fcxp_queue(fcxp, send_req);
}
1084
Jing Huang5fbe25c2010-10-18 17:17:23 -07001085/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001086 * Abort a BFA FCXP
1087 *
1088 * @param[in] fcxp BFA fcxp pointer
1089 *
1090 * @return void
1091 */
bfa_status_t
bfa_fcxp_abort(struct bfa_fcxp_s *fcxp)
{
	bfa_trc(fcxp->fcxp_mod->bfa, fcxp->fcxp_tag);
	/* Abort is not implemented; trap any caller in debug builds. */
	bfa_assert(0);
	return BFA_STATUS_OK;
}
1099
void
bfa_fcxp_alloc_wait(struct bfa_s *bfa, struct bfa_fcxp_wqe_s *wqe,
		    bfa_fcxp_alloc_cbfn_t alloc_cbfn, void *alloc_cbarg,
		    void *caller, int nreq_sgles,
		    int nrsp_sgles, bfa_fcxp_get_sgaddr_t req_sga_cbfn,
		    bfa_fcxp_get_sglen_t req_sglen_cbfn,
		    bfa_fcxp_get_sgaddr_t rsp_sga_cbfn,
		    bfa_fcxp_get_sglen_t rsp_sglen_cbfn)
{
	struct bfa_fcxp_mod_s *mod = BFA_FCXP_MOD(bfa);

	/* Waiting is only legal when the free pool is truly exhausted. */
	bfa_assert(list_empty(&mod->fcxp_free_q));

	/*
	 * Stash all alloc parameters in the wait-queue element so the
	 * allocation can be retried later -- presumably when an fcxp is
	 * returned to the pool (TODO confirm against bfa_fcxp_put).
	 */
	wqe->alloc_cbfn = alloc_cbfn;
	wqe->alloc_cbarg = alloc_cbarg;
	wqe->caller = caller;
	wqe->bfa = bfa;
	wqe->nreq_sgles = nreq_sgles;
	wqe->nrsp_sgles = nrsp_sgles;
	wqe->req_sga_cbfn = req_sga_cbfn;
	wqe->req_sglen_cbfn = req_sglen_cbfn;
	wqe->rsp_sga_cbfn = rsp_sga_cbfn;
	wqe->rsp_sglen_cbfn = rsp_sglen_cbfn;

	list_add_tail(&wqe->qe, &mod->wait_q);
}
1126
void
bfa_fcxp_walloc_cancel(struct bfa_s *bfa, struct bfa_fcxp_wqe_s *wqe)
{
	struct bfa_fcxp_mod_s *mod = BFA_FCXP_MOD(bfa);

	/* The element must still be parked on the module wait queue. */
	bfa_assert(bfa_q_is_on_q(&mod->wait_q, wqe));
	list_del(&wqe->qe);
}
1135
1136void
1137bfa_fcxp_discard(struct bfa_fcxp_s *fcxp)
1138{
Jing Huang5fbe25c2010-10-18 17:17:23 -07001139 /*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001140 * If waiting for room in request queue, cancel reqq wait
1141 * and free fcxp.
1142 */
1143 if (fcxp->reqq_waiting) {
1144 fcxp->reqq_waiting = BFA_FALSE;
1145 bfa_reqq_wcancel(&fcxp->reqq_wqe);
1146 bfa_fcxp_free(fcxp);
1147 return;
1148 }
1149
1150 fcxp->send_cbfn = bfa_fcxp_null_comp;
1151}
1152
1153
1154
Jing Huang5fbe25c2010-10-18 17:17:23 -07001155/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001156 * hal_fcxp_public BFA FCXP public functions
1157 */
1158
1159void
1160bfa_fcxp_isr(struct bfa_s *bfa, struct bfi_msg_s *msg)
1161{
1162 switch (msg->mhdr.msg_id) {
1163 case BFI_FCXP_I2H_SEND_RSP:
1164 hal_fcxp_send_comp(bfa, (struct bfi_fcxp_send_rsp_s *) msg);
1165 break;
1166
1167 default:
1168 bfa_trc(bfa, msg->mhdr.msg_id);
1169 bfa_assert(0);
1170 }
1171}
1172
1173u32
1174bfa_fcxp_get_maxrsp(struct bfa_s *bfa)
1175{
1176 struct bfa_fcxp_mod_s *mod = BFA_FCXP_MOD(bfa);
1177
1178 return mod->rsp_pld_sz;
1179}
1180
1181
Jing Huang5fbe25c2010-10-18 17:17:23 -07001182/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001183 * BFA LPS state machine functions
1184 */
1185
Jing Huang5fbe25c2010-10-18 17:17:23 -07001186/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001187 * Init state -- no login
1188 */
static void
bfa_lps_sm_init(struct bfa_lps_s *lps, enum bfa_lps_event event)
{
	bfa_trc(lps->bfa, lps->lp_tag);
	bfa_trc(lps->bfa, event);

	switch (event) {
	case BFA_LPS_SM_LOGIN:
		/* No reqq space: park until resume, else send right away. */
		if (bfa_reqq_full(lps->bfa, lps->reqq)) {
			bfa_sm_set_state(lps, bfa_lps_sm_loginwait);
			bfa_reqq_wait(lps->bfa, lps->reqq, &lps->wqe);
		} else {
			bfa_sm_set_state(lps, bfa_lps_sm_login);
			bfa_lps_send_login(lps);
		}

		if (lps->fdisc)
			bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
				BFA_PL_EID_LOGIN, 0, "FDISC Request");
		else
			bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
				BFA_PL_EID_LOGIN, 0, "FLOGI Request");
		break;

	case BFA_LPS_SM_LOGOUT:
		/* Not logged in -- complete the logout immediately. */
		bfa_lps_logout_comp(lps);
		break;

	case BFA_LPS_SM_DELETE:
		bfa_lps_free(lps);
		break;

	case BFA_LPS_SM_RX_CVL:
	case BFA_LPS_SM_OFFLINE:
		/* Nothing to undo before a login was attempted. */
		break;

	case BFA_LPS_SM_FWRSP:
		/*
		 * Could happen when fabric detects loopback and discards
		 * the lps request. Fw will eventually sent out the timeout
		 * Just ignore
		 */
		break;

	default:
		bfa_sm_fault(lps->bfa, event);
	}
}
1237
Jing Huang5fbe25c2010-10-18 17:17:23 -07001238/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001239 * login is in progress -- awaiting response from firmware
1240 */
static void
bfa_lps_sm_login(struct bfa_lps_s *lps, enum bfa_lps_event event)
{
	bfa_trc(lps->bfa, lps->lp_tag);
	bfa_trc(lps->bfa, event);

	switch (event) {
	case BFA_LPS_SM_FWRSP:
		/* lps->status was filled in by bfa_lps_login_rsp(). */
		if (lps->status == BFA_STATUS_OK) {
			bfa_sm_set_state(lps, bfa_lps_sm_online);
			if (lps->fdisc)
				bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
					BFA_PL_EID_LOGIN, 0, "FDISC Accept");
			else
				bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
					BFA_PL_EID_LOGIN, 0, "FLOGI Accept");
		} else {
			/* Rejected or timed out -- drop back to init. */
			bfa_sm_set_state(lps, bfa_lps_sm_init);
			if (lps->fdisc)
				bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
					BFA_PL_EID_LOGIN, 0,
					"FDISC Fail (RJT or timeout)");
			else
				bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
					BFA_PL_EID_LOGIN, 0,
					"FLOGI Fail (RJT or timeout)");
		}
		/* Notify the caller regardless of outcome. */
		bfa_lps_login_comp(lps);
		break;

	case BFA_LPS_SM_OFFLINE:
		bfa_sm_set_state(lps, bfa_lps_sm_init);
		break;

	default:
		bfa_sm_fault(lps->bfa, event);
	}
}
1279
Jing Huang5fbe25c2010-10-18 17:17:23 -07001280/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001281 * login pending - awaiting space in request queue
1282 */
static void
bfa_lps_sm_loginwait(struct bfa_lps_s *lps, enum bfa_lps_event event)
{
	bfa_trc(lps->bfa, lps->lp_tag);
	bfa_trc(lps->bfa, event);

	switch (event) {
	case BFA_LPS_SM_RESUME:
		/*
		 * Queue space became available. NOTE(review): the login
		 * request itself appears to be sent by the reqq resume
		 * path, not here -- confirm against bfa_lps_reqq_resume.
		 */
		bfa_sm_set_state(lps, bfa_lps_sm_login);
		break;

	case BFA_LPS_SM_OFFLINE:
		/* Going offline: stop waiting for queue space. */
		bfa_sm_set_state(lps, bfa_lps_sm_init);
		bfa_reqq_wcancel(&lps->wqe);
		break;

	case BFA_LPS_SM_RX_CVL:
		/*
		 * Login was not even sent out; so when getting out
		 * of this state, it will appear like a login retry
		 * after Clear virtual link
		 */
		break;

	default:
		bfa_sm_fault(lps->bfa, event);
	}
}
1311
Jing Huang5fbe25c2010-10-18 17:17:23 -07001312/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001313 * login complete
1314 */
static void
bfa_lps_sm_online(struct bfa_lps_s *lps, enum bfa_lps_event event)
{
	bfa_trc(lps->bfa, lps->lp_tag);
	bfa_trc(lps->bfa, event);

	switch (event) {
	case BFA_LPS_SM_LOGOUT:
		/* Send the logout now, or wait for queue space. */
		if (bfa_reqq_full(lps->bfa, lps->reqq)) {
			bfa_sm_set_state(lps, bfa_lps_sm_logowait);
			bfa_reqq_wait(lps->bfa, lps->reqq, &lps->wqe);
		} else {
			bfa_sm_set_state(lps, bfa_lps_sm_logout);
			bfa_lps_send_logout(lps);
		}
		bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
			BFA_PL_EID_LOGO, 0, "Logout");
		break;

	case BFA_LPS_SM_RX_CVL:
		bfa_sm_set_state(lps, bfa_lps_sm_init);

		/* Let the vport module know about this event */
		bfa_lps_cvl_event(lps);
		bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
			BFA_PL_EID_FIP_FCF_CVL, 0, "FCF Clear Virt. Link Rx");
		break;

	case BFA_LPS_SM_OFFLINE:
	case BFA_LPS_SM_DELETE:
		/* No firmware interaction needed; just reset the state. */
		bfa_sm_set_state(lps, bfa_lps_sm_init);
		break;

	default:
		bfa_sm_fault(lps->bfa, event);
	}
}
1352
Jing Huang5fbe25c2010-10-18 17:17:23 -07001353/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001354 * logout in progress - awaiting firmware response
1355 */
static void
bfa_lps_sm_logout(struct bfa_lps_s *lps, enum bfa_lps_event event)
{
	bfa_trc(lps->bfa, lps->lp_tag);
	bfa_trc(lps->bfa, event);

	switch (event) {
	case BFA_LPS_SM_FWRSP:
		/* Firmware acknowledged the logout; notify the caller. */
		bfa_sm_set_state(lps, bfa_lps_sm_init);
		bfa_lps_logout_comp(lps);
		break;

	case BFA_LPS_SM_OFFLINE:
		/* Link went away; the pending response is moot. */
		bfa_sm_set_state(lps, bfa_lps_sm_init);
		break;

	default:
		bfa_sm_fault(lps->bfa, event);
	}
}
1376
Jing Huang5fbe25c2010-10-18 17:17:23 -07001377/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001378 * logout pending -- awaiting space in request queue
1379 */
static void
bfa_lps_sm_logowait(struct bfa_lps_s *lps, enum bfa_lps_event event)
{
	bfa_trc(lps->bfa, lps->lp_tag);
	bfa_trc(lps->bfa, event);

	switch (event) {
	case BFA_LPS_SM_RESUME:
		/* Queue space available -- send the deferred logout. */
		bfa_sm_set_state(lps, bfa_lps_sm_logout);
		bfa_lps_send_logout(lps);
		break;

	case BFA_LPS_SM_OFFLINE:
		/* Going offline: cancel the wait, nothing was sent. */
		bfa_sm_set_state(lps, bfa_lps_sm_init);
		bfa_reqq_wcancel(&lps->wqe);
		break;

	default:
		bfa_sm_fault(lps->bfa, event);
	}
}
1401
1402
1403
Jing Huang5fbe25c2010-10-18 17:17:23 -07001404/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001405 * lps_pvt BFA LPS private functions
1406 */
1407
Jing Huang5fbe25c2010-10-18 17:17:23 -07001408/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001409 * return memory requirement
1410 */
1411static void
1412bfa_lps_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *ndm_len,
1413 u32 *dm_len)
1414{
1415 if (cfg->drvcfg.min_cfg)
1416 *ndm_len += sizeof(struct bfa_lps_s) * BFA_LPS_MIN_LPORTS;
1417 else
1418 *ndm_len += sizeof(struct bfa_lps_s) * BFA_LPS_MAX_LPORTS;
1419}
1420
Jing Huang5fbe25c2010-10-18 17:17:23 -07001421/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001422 * bfa module attach at initialization time
1423 */
1424static void
1425bfa_lps_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
1426 struct bfa_meminfo_s *meminfo, struct bfa_pcidev_s *pcidev)
1427{
1428 struct bfa_lps_mod_s *mod = BFA_LPS_MOD(bfa);
1429 struct bfa_lps_s *lps;
1430 int i;
1431
Jing Huang6a18b162010-10-18 17:08:54 -07001432 memset(mod, 0, sizeof(struct bfa_lps_mod_s));
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001433 mod->num_lps = BFA_LPS_MAX_LPORTS;
1434 if (cfg->drvcfg.min_cfg)
1435 mod->num_lps = BFA_LPS_MIN_LPORTS;
1436 else
1437 mod->num_lps = BFA_LPS_MAX_LPORTS;
1438 mod->lps_arr = lps = (struct bfa_lps_s *) bfa_meminfo_kva(meminfo);
1439
1440 bfa_meminfo_kva(meminfo) += mod->num_lps * sizeof(struct bfa_lps_s);
1441
1442 INIT_LIST_HEAD(&mod->lps_free_q);
1443 INIT_LIST_HEAD(&mod->lps_active_q);
1444
1445 for (i = 0; i < mod->num_lps; i++, lps++) {
1446 lps->bfa = bfa;
1447 lps->lp_tag = (u8) i;
1448 lps->reqq = BFA_REQQ_LPS;
1449 bfa_reqq_winit(&lps->wqe, bfa_lps_reqq_resume, lps);
1450 list_add_tail(&lps->qe, &mod->lps_free_q);
1451 }
1452}
1453
static void
bfa_lps_detach(struct bfa_s *bfa)
{
	/* No detach-time cleanup required; module framework placeholder. */
}
1458
static void
bfa_lps_start(struct bfa_s *bfa)
{
	/* No start-time work required; module framework placeholder. */
}
1463
static void
bfa_lps_stop(struct bfa_s *bfa)
{
	/* No stop-time work required; module framework placeholder. */
}
1468
Jing Huang5fbe25c2010-10-18 17:17:23 -07001469/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001470 * IOC in disabled state -- consider all lps offline
1471 */
static void
bfa_lps_iocdisable(struct bfa_s *bfa)
{
	struct bfa_lps_mod_s	*mod = BFA_LPS_MOD(bfa);
	struct bfa_lps_s	*lps;
	struct list_head	*qe, *qen;

	/*
	 * Walk with the _safe variant: the OFFLINE handler may unlink
	 * the entry from the active queue while we iterate.
	 */
	list_for_each_safe(qe, qen, &mod->lps_active_q) {
		lps = (struct bfa_lps_s *) qe;
		bfa_sm_send_event(lps, BFA_LPS_SM_OFFLINE);
	}
}
1484
Jing Huang5fbe25c2010-10-18 17:17:23 -07001485/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001486 * Firmware login response
1487 */
static void
bfa_lps_login_rsp(struct bfa_s *bfa, struct bfi_lps_login_rsp_s *rsp)
{
	struct bfa_lps_mod_s	*mod = BFA_LPS_MOD(bfa);
	struct bfa_lps_s	*lps;

	/* Firmware-supplied tag must map to a configured lport. */
	bfa_assert(rsp->lp_tag < mod->num_lps);
	lps = BFA_LPS_FROM_TAG(mod, rsp->lp_tag);

	lps->status = rsp->status;
	switch (rsp->status) {
	case BFA_STATUS_OK:
		/* Login accepted: latch the fabric-assigned parameters. */
		lps->fport	= rsp->f_port;
		lps->npiv_en	= rsp->npiv_en;
		lps->lp_pid	= rsp->lp_pid;
		/* bb_credit arrives big-endian on the wire. */
		lps->pr_bbcred	= be16_to_cpu(rsp->bb_credit);
		lps->pr_pwwn	= rsp->port_name;
		lps->pr_nwwn	= rsp->node_name;
		lps->auth_req	= rsp->auth_req;
		lps->lp_mac	= rsp->lp_mac;
		lps->brcd_switch = rsp->brcd_switch;
		lps->fcf_mac	= rsp->fcf_mac;

		break;

	case BFA_STATUS_FABRIC_RJT:
		/* Fabric sent LS_RJT: keep reason and explanation codes. */
		lps->lsrjt_rsn = rsp->lsrjt_rsn;
		lps->lsrjt_expl = rsp->lsrjt_expl;

		break;

	case BFA_STATUS_EPROTOCOL:
		lps->ext_status = rsp->ext_status;

		break;

	default:
		/* Nothing to do with other status */
		break;
	}

	/* Drive the state machine; it reads lps->status set above. */
	bfa_sm_send_event(lps, BFA_LPS_SM_FWRSP);
}
1531
Jing Huang5fbe25c2010-10-18 17:17:23 -07001532/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001533 * Firmware logout response
1534 */
1535static void
1536bfa_lps_logout_rsp(struct bfa_s *bfa, struct bfi_lps_logout_rsp_s *rsp)
1537{
1538 struct bfa_lps_mod_s *mod = BFA_LPS_MOD(bfa);
1539 struct bfa_lps_s *lps;
1540
1541 bfa_assert(rsp->lp_tag < mod->num_lps);
1542 lps = BFA_LPS_FROM_TAG(mod, rsp->lp_tag);
1543
1544 bfa_sm_send_event(lps, BFA_LPS_SM_FWRSP);
1545}
1546
Jing Huang5fbe25c2010-10-18 17:17:23 -07001547/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001548 * Firmware received a Clear virtual link request (for FCoE)
1549 */
1550static void
1551bfa_lps_rx_cvl_event(struct bfa_s *bfa, struct bfi_lps_cvl_event_s *cvl)
1552{
1553 struct bfa_lps_mod_s *mod = BFA_LPS_MOD(bfa);
1554 struct bfa_lps_s *lps;
1555
1556 lps = BFA_LPS_FROM_TAG(mod, cvl->lp_tag);
1557
1558 bfa_sm_send_event(lps, BFA_LPS_SM_RX_CVL);
1559}
1560
Jing Huang5fbe25c2010-10-18 17:17:23 -07001561/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001562 * Space is available in request queue, resume queueing request to firmware.
1563 */
1564static void
1565bfa_lps_reqq_resume(void *lps_arg)
1566{
1567 struct bfa_lps_s *lps = lps_arg;
1568
1569 bfa_sm_send_event(lps, BFA_LPS_SM_RESUME);
1570}
1571
Jing Huang5fbe25c2010-10-18 17:17:23 -07001572/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001573 * lps is freed -- triggered by vport delete
1574 */
1575static void
1576bfa_lps_free(struct bfa_lps_s *lps)
1577{
1578 struct bfa_lps_mod_s *mod = BFA_LPS_MOD(lps->bfa);
1579
1580 lps->lp_pid = 0;
1581 list_del(&lps->qe);
1582 list_add_tail(&lps->qe, &mod->lps_free_q);
1583}
1584
Jing Huang5fbe25c2010-10-18 17:17:23 -07001585/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001586 * send login request to firmware
1587 */
static void
bfa_lps_send_login(struct bfa_lps_s *lps)
{
	struct bfi_lps_login_req_s	*m;

	m = bfa_reqq_next(lps->bfa, lps->reqq);
	/* The state machine only sends when the reqq is not full. */
	bfa_assert(m);

	bfi_h2i_set(m->mh, BFI_MC_LPS, BFI_LPS_H2I_LOGIN_REQ,
		bfa_lpuid(lps->bfa));

	/* Copy the parameters cached by bfa_lps_flogi()/bfa_lps_fdisc(). */
	m->lp_tag	= lps->lp_tag;
	m->alpa		= lps->alpa;
	m->pdu_size	= cpu_to_be16(lps->pdusz);	/* wire format is BE */
	m->pwwn		= lps->pwwn;
	m->nwwn		= lps->nwwn;
	m->fdisc	= lps->fdisc;
	m->auth_en	= lps->auth_en;

	bfa_reqq_produce(lps->bfa, lps->reqq);
}
1609
Jing Huang5fbe25c2010-10-18 17:17:23 -07001610/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001611 * send logout request to firmware
1612 */
static void
bfa_lps_send_logout(struct bfa_lps_s *lps)
{
	struct bfi_lps_logout_req_s *m;

	m = bfa_reqq_next(lps->bfa, lps->reqq);
	/* The state machine only sends when the reqq is not full. */
	bfa_assert(m);

	bfi_h2i_set(m->mh, BFI_MC_LPS, BFI_LPS_H2I_LOGOUT_REQ,
		bfa_lpuid(lps->bfa));

	m->lp_tag = lps->lp_tag;
	m->port_name = lps->pwwn;
	bfa_reqq_produce(lps->bfa, lps->reqq);
}
1628
Jing Huang5fbe25c2010-10-18 17:17:23 -07001629/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001630 * Indirect login completion handler for non-fcs
1631 */
1632static void
1633bfa_lps_login_comp_cb(void *arg, bfa_boolean_t complete)
1634{
1635 struct bfa_lps_s *lps = arg;
1636
1637 if (!complete)
1638 return;
1639
1640 if (lps->fdisc)
1641 bfa_cb_lps_fdisc_comp(lps->bfa->bfad, lps->uarg, lps->status);
1642 else
1643 bfa_cb_lps_flogi_comp(lps->bfa->bfad, lps->uarg, lps->status);
1644}
1645
Jing Huang5fbe25c2010-10-18 17:17:23 -07001646/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001647 * Login completion handler -- direct call for fcs, queue for others
1648 */
1649static void
1650bfa_lps_login_comp(struct bfa_lps_s *lps)
1651{
1652 if (!lps->bfa->fcs) {
1653 bfa_cb_queue(lps->bfa, &lps->hcb_qe, bfa_lps_login_comp_cb,
1654 lps);
1655 return;
1656 }
1657
1658 if (lps->fdisc)
1659 bfa_cb_lps_fdisc_comp(lps->bfa->bfad, lps->uarg, lps->status);
1660 else
1661 bfa_cb_lps_flogi_comp(lps->bfa->bfad, lps->uarg, lps->status);
1662}
1663
Jing Huang5fbe25c2010-10-18 17:17:23 -07001664/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001665 * Indirect logout completion handler for non-fcs
1666 */
1667static void
1668bfa_lps_logout_comp_cb(void *arg, bfa_boolean_t complete)
1669{
1670 struct bfa_lps_s *lps = arg;
1671
1672 if (!complete)
1673 return;
1674
1675 if (lps->fdisc)
1676 bfa_cb_lps_fdisclogo_comp(lps->bfa->bfad, lps->uarg);
1677}
1678
Jing Huang5fbe25c2010-10-18 17:17:23 -07001679/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001680 * Logout completion handler -- direct call for fcs, queue for others
1681 */
1682static void
1683bfa_lps_logout_comp(struct bfa_lps_s *lps)
1684{
1685 if (!lps->bfa->fcs) {
1686 bfa_cb_queue(lps->bfa, &lps->hcb_qe, bfa_lps_logout_comp_cb,
1687 lps);
1688 return;
1689 }
1690 if (lps->fdisc)
1691 bfa_cb_lps_fdisclogo_comp(lps->bfa->bfad, lps->uarg);
1692}
1693
Jing Huang5fbe25c2010-10-18 17:17:23 -07001694/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001695 * Clear virtual link completion handler for non-fcs
1696 */
1697static void
1698bfa_lps_cvl_event_cb(void *arg, bfa_boolean_t complete)
1699{
1700 struct bfa_lps_s *lps = arg;
1701
1702 if (!complete)
1703 return;
1704
1705 /* Clear virtual link to base port will result in link down */
1706 if (lps->fdisc)
1707 bfa_cb_lps_cvl_event(lps->bfa->bfad, lps->uarg);
1708}
1709
Jing Huang5fbe25c2010-10-18 17:17:23 -07001710/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001711 * Received Clear virtual link event --direct call for fcs,
1712 * queue for others
1713 */
1714static void
1715bfa_lps_cvl_event(struct bfa_lps_s *lps)
1716{
1717 if (!lps->bfa->fcs) {
1718 bfa_cb_queue(lps->bfa, &lps->hcb_qe, bfa_lps_cvl_event_cb,
1719 lps);
1720 return;
1721 }
1722
1723 /* Clear virtual link to base port will result in link down */
1724 if (lps->fdisc)
1725 bfa_cb_lps_cvl_event(lps->bfa->bfad, lps->uarg);
1726}
1727
1728
1729
Jing Huang5fbe25c2010-10-18 17:17:23 -07001730/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001731 * lps_public BFA LPS public functions
1732 */
1733
1734u32
1735bfa_lps_get_max_vport(struct bfa_s *bfa)
1736{
1737 if (bfa_ioc_devid(&bfa->ioc) == BFA_PCI_DEVICE_ID_CT)
1738 return BFA_LPS_MAX_VPORTS_SUPP_CT;
1739 else
1740 return BFA_LPS_MAX_VPORTS_SUPP_CB;
1741}
1742
Jing Huang5fbe25c2010-10-18 17:17:23 -07001743/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001744 * Allocate a lport srvice tag.
1745 */
1746struct bfa_lps_s *
1747bfa_lps_alloc(struct bfa_s *bfa)
1748{
1749 struct bfa_lps_mod_s *mod = BFA_LPS_MOD(bfa);
1750 struct bfa_lps_s *lps = NULL;
1751
1752 bfa_q_deq(&mod->lps_free_q, &lps);
1753
1754 if (lps == NULL)
1755 return NULL;
1756
1757 list_add_tail(&lps->qe, &mod->lps_active_q);
1758
1759 bfa_sm_set_state(lps, bfa_lps_sm_init);
1760 return lps;
1761}
1762
Jing Huang5fbe25c2010-10-18 17:17:23 -07001763/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001764 * Free lport service tag. This can be called anytime after an alloc.
1765 * No need to wait for any pending login/logout completions.
1766 */
void
bfa_lps_delete(struct bfa_lps_s *lps)
{
	/* The state machine frees the lps back to the pool when legal. */
	bfa_sm_send_event(lps, BFA_LPS_SM_DELETE);
}
1772
Jing Huang5fbe25c2010-10-18 17:17:23 -07001773/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001774 * Initiate a lport login.
1775 */
1776void
1777bfa_lps_flogi(struct bfa_lps_s *lps, void *uarg, u8 alpa, u16 pdusz,
1778 wwn_t pwwn, wwn_t nwwn, bfa_boolean_t auth_en)
1779{
1780 lps->uarg = uarg;
1781 lps->alpa = alpa;
1782 lps->pdusz = pdusz;
1783 lps->pwwn = pwwn;
1784 lps->nwwn = nwwn;
1785 lps->fdisc = BFA_FALSE;
1786 lps->auth_en = auth_en;
1787 bfa_sm_send_event(lps, BFA_LPS_SM_LOGIN);
1788}
1789
Jing Huang5fbe25c2010-10-18 17:17:23 -07001790/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001791 * Initiate a lport fdisc login.
1792 */
1793void
1794bfa_lps_fdisc(struct bfa_lps_s *lps, void *uarg, u16 pdusz, wwn_t pwwn,
1795 wwn_t nwwn)
1796{
1797 lps->uarg = uarg;
1798 lps->alpa = 0;
1799 lps->pdusz = pdusz;
1800 lps->pwwn = pwwn;
1801 lps->nwwn = nwwn;
1802 lps->fdisc = BFA_TRUE;
1803 lps->auth_en = BFA_FALSE;
1804 bfa_sm_send_event(lps, BFA_LPS_SM_LOGIN);
1805}
1806
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001807
Jing Huang5fbe25c2010-10-18 17:17:23 -07001808/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001809 * Initiate a lport FDSIC logout.
1810 */
void
bfa_lps_fdisclogo(struct bfa_lps_s *lps)
{
	/* The state machine sends the logout or completes it directly. */
	bfa_sm_send_event(lps, BFA_LPS_SM_LOGOUT);
}
1816
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001817
Jing Huang5fbe25c2010-10-18 17:17:23 -07001818/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001819 * Return lport services tag given the pid
1820 */
1821u8
1822bfa_lps_get_tag_from_pid(struct bfa_s *bfa, u32 pid)
1823{
1824 struct bfa_lps_mod_s *mod = BFA_LPS_MOD(bfa);
1825 struct bfa_lps_s *lps;
1826 int i;
1827
1828 for (i = 0, lps = mod->lps_arr; i < mod->num_lps; i++, lps++) {
1829 if (lps->lp_pid == pid)
1830 return lps->lp_tag;
1831 }
1832
1833 /* Return base port tag anyway */
1834 return 0;
1835}
1836
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001837
Jing Huang5fbe25c2010-10-18 17:17:23 -07001838/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001839 * return port id assigned to the base lport
1840 */
1841u32
1842bfa_lps_get_base_pid(struct bfa_s *bfa)
1843{
1844 struct bfa_lps_mod_s *mod = BFA_LPS_MOD(bfa);
1845
1846 return BFA_LPS_FROM_TAG(mod, 0)->lp_pid;
1847}
1848
Jing Huang5fbe25c2010-10-18 17:17:23 -07001849/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001850 * LPS firmware message class handler.
1851 */
void
bfa_lps_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
{
	union bfi_lps_i2h_msg_u	msg;

	bfa_trc(bfa, m->mhdr.msg_id);
	msg.msg = m;

	/*
	 * NOTE(review): these firmware-to-host message IDs carry
	 * H2I-style names -- confirm against the bfi_lps i2h enum.
	 */
	switch (m->mhdr.msg_id) {
	case BFI_LPS_H2I_LOGIN_RSP:
		bfa_lps_login_rsp(bfa, msg.login_rsp);
		break;

	case BFI_LPS_H2I_LOGOUT_RSP:
		bfa_lps_logout_rsp(bfa, msg.logout_rsp);
		break;

	case BFI_LPS_H2I_CVL_EVENT:
		bfa_lps_rx_cvl_event(bfa, msg.cvl_event);
		break;

	default:
		/* Unexpected message class event. */
		bfa_trc(bfa, m->mhdr.msg_id);
		bfa_assert(0);
	}
}
1878
Jing Huang5fbe25c2010-10-18 17:17:23 -07001879/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001880 * FC PORT state machine functions
1881 */
static void
bfa_fcport_sm_uninit(struct bfa_fcport_s *fcport,
			enum bfa_fcport_sm_event event)
{
	bfa_trc(fcport->bfa, event);

	switch (event) {
	case BFA_FCPORT_SM_START:
		/*
		 * Start event after IOC is configured and BFA is started.
		 */
		if (bfa_fcport_send_enable(fcport)) {
			bfa_trc(fcport->bfa, BFA_TRUE);
			bfa_sm_set_state(fcport, bfa_fcport_sm_enabling);
		} else {
			/* No reqq space; wait in the qwait state. */
			bfa_trc(fcport->bfa, BFA_FALSE);
			bfa_sm_set_state(fcport,
					bfa_fcport_sm_enabling_qwait);
		}
		break;

	case BFA_FCPORT_SM_ENABLE:
		/*
		 * Port is persistently configured to be in enabled state. Do
		 * not change state. Port enabling is done when START event is
		 * received.
		 */
		break;

	case BFA_FCPORT_SM_DISABLE:
		/*
		 * If a port is persistently configured to be disabled, the
		 * first event will a port disable request.
		 */
		bfa_sm_set_state(fcport, bfa_fcport_sm_disabled);
		break;

	case BFA_FCPORT_SM_HWFAIL:
		bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown);
		break;

	default:
		bfa_sm_fault(fcport->bfa, event);
	}
}
1927
static void
bfa_fcport_sm_enabling_qwait(struct bfa_fcport_s *fcport,
				enum bfa_fcport_sm_event event)
{
	char pwwn_buf[BFA_STRING_32];
	struct bfad_s *bfad = (struct bfad_s *)fcport->bfa->bfad;
	bfa_trc(fcport->bfa, event);

	switch (event) {
	case BFA_FCPORT_SM_QRESUME:
		/* Request queue has room -- send the deferred enable. */
		bfa_sm_set_state(fcport, bfa_fcport_sm_enabling);
		bfa_fcport_send_enable(fcport);
		break;

	case BFA_FCPORT_SM_STOP:
		bfa_reqq_wcancel(&fcport->reqq_wait);
		bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
		break;

	case BFA_FCPORT_SM_ENABLE:
		/*
		 * Already enable is in progress.
		 */
		break;

	case BFA_FCPORT_SM_DISABLE:
		/*
		 * Just send disable request to firmware when room becomes
		 * available in request queue.
		 */
		bfa_sm_set_state(fcport, bfa_fcport_sm_disabled);
		bfa_reqq_wcancel(&fcport->reqq_wait);
		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
				BFA_PL_EID_PORT_DISABLE, 0, "Port Disable");
		wwn2str(pwwn_buf, fcport->pwwn);
		BFA_LOG(KERN_INFO, bfad, bfa_log_level,
			"Base port disabled: WWN = %s\n", pwwn_buf);
		break;

	case BFA_FCPORT_SM_LINKUP:
	case BFA_FCPORT_SM_LINKDOWN:
		/*
		 * Possible to get link events when doing back-to-back
		 * enable/disables.
		 */
		break;

	case BFA_FCPORT_SM_HWFAIL:
		bfa_reqq_wcancel(&fcport->reqq_wait);
		bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown);
		break;

	default:
		bfa_sm_fault(fcport->bfa, event);
	}
}
1984
static void
bfa_fcport_sm_enabling(struct bfa_fcport_s *fcport,
		enum bfa_fcport_sm_event event)
{
	char pwwn_buf[BFA_STRING_32];
	struct bfad_s *bfad = (struct bfad_s *)fcport->bfa->bfad;
	bfa_trc(fcport->bfa, event);

	switch (event) {
	case BFA_FCPORT_SM_FWRSP:
	case BFA_FCPORT_SM_LINKDOWN:
		/* Enable acknowledged but link not yet up. */
		bfa_sm_set_state(fcport, bfa_fcport_sm_linkdown);
		break;

	case BFA_FCPORT_SM_LINKUP:
		/* Link came up while enable was still outstanding. */
		bfa_fcport_update_linkinfo(fcport);
		bfa_sm_set_state(fcport, bfa_fcport_sm_linkup);

		bfa_assert(fcport->event_cbfn);
		bfa_fcport_scn(fcport, BFA_PORT_LINKUP, BFA_FALSE);
		break;

	case BFA_FCPORT_SM_ENABLE:
		/*
		 * Already being enabled.
		 */
		break;

	case BFA_FCPORT_SM_DISABLE:
		/* Send disable now, or wait for queue space. */
		if (bfa_fcport_send_disable(fcport))
			bfa_sm_set_state(fcport, bfa_fcport_sm_disabling);
		else
			bfa_sm_set_state(fcport,
					bfa_fcport_sm_disabling_qwait);

		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
				BFA_PL_EID_PORT_DISABLE, 0, "Port Disable");
		wwn2str(pwwn_buf, fcport->pwwn);
		BFA_LOG(KERN_INFO, bfad, bfa_log_level,
			"Base port disabled: WWN = %s\n", pwwn_buf);
		break;

	case BFA_FCPORT_SM_STOP:
		bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
		break;

	case BFA_FCPORT_SM_HWFAIL:
		bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown);
		break;

	default:
		bfa_sm_fault(fcport->bfa, event);
	}
}
2039
/*
 * Port is enabled and the link is down; waiting for a link-up event
 * from firmware.
 */
static void
bfa_fcport_sm_linkdown(struct bfa_fcport_s *fcport,
	enum bfa_fcport_sm_event event)
{
	struct bfi_fcport_event_s *pevent = fcport->event_arg.i2hmsg.event;
	char pwwn_buf[BFA_STRING_32];
	struct bfad_s *bfad = (struct bfad_s *)fcport->bfa->bfad;

	bfa_trc(fcport->bfa, event);

	switch (event) {
	case BFA_FCPORT_SM_LINKUP:
		/* cache speed/topology/QoS info from the firmware event */
		bfa_fcport_update_linkinfo(fcport);
		bfa_sm_set_state(fcport, bfa_fcport_sm_linkup);
		bfa_assert(fcport->event_cbfn);
		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
			BFA_PL_EID_PORT_ST_CHANGE, 0, "Port Linkup");
		if (!bfa_ioc_get_fcmode(&fcport->bfa->ioc)) {
			/* FCoE mode: record FIP FCF discovery outcome */
			bfa_trc(fcport->bfa,
				pevent->link_state.vc_fcf.fcf.fipenabled);
			bfa_trc(fcport->bfa,
				pevent->link_state.vc_fcf.fcf.fipfailed);

			if (pevent->link_state.vc_fcf.fcf.fipfailed)
				bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
					BFA_PL_EID_FIP_FCF_DISC, 0,
					"FIP FCF Discovery Failed");
			else
				bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
					BFA_PL_EID_FIP_FCF_DISC, 0,
					"FIP FCF Discovered");
		}

		bfa_fcport_scn(fcport, BFA_PORT_LINKUP, BFA_FALSE);
		wwn2str(pwwn_buf, fcport->pwwn);
		BFA_LOG(KERN_INFO, bfad, bfa_log_level,
			"Base port online: WWN = %s\n", pwwn_buf);
		break;

	case BFA_FCPORT_SM_LINKDOWN:
		/*
		 * Possible to get link down event.
		 */
		break;

	case BFA_FCPORT_SM_ENABLE:
		/*
		 * Already enabled.
		 */
		break;

	case BFA_FCPORT_SM_DISABLE:
		if (bfa_fcport_send_disable(fcport))
			bfa_sm_set_state(fcport, bfa_fcport_sm_disabling);
		else
			bfa_sm_set_state(fcport,
				bfa_fcport_sm_disabling_qwait);

		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
				BFA_PL_EID_PORT_DISABLE, 0, "Port Disable");
		wwn2str(pwwn_buf, fcport->pwwn);
		BFA_LOG(KERN_INFO, bfad, bfa_log_level,
			"Base port disabled: WWN = %s\n", pwwn_buf);
		break;

	case BFA_FCPORT_SM_STOP:
		bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
		break;

	case BFA_FCPORT_SM_HWFAIL:
		bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown);
		break;

	default:
		bfa_sm_fault(fcport->bfa, event);
	}
}
2118
/*
 * Port is enabled and the link is up (operational state). Any event that
 * takes the link away also resets cached link info and notifies upper
 * layers with a LINKDOWN SCN.
 */
static void
bfa_fcport_sm_linkup(struct bfa_fcport_s *fcport,
	enum bfa_fcport_sm_event event)
{
	char pwwn_buf[BFA_STRING_32];
	struct bfad_s *bfad = (struct bfad_s *)fcport->bfa->bfad;

	bfa_trc(fcport->bfa, event);

	switch (event) {
	case BFA_FCPORT_SM_ENABLE:
		/*
		 * Already enabled.
		 */
		break;

	case BFA_FCPORT_SM_DISABLE:
		/* explicit disable: bring link down first, then log */
		if (bfa_fcport_send_disable(fcport))
			bfa_sm_set_state(fcport, bfa_fcport_sm_disabling);
		else
			bfa_sm_set_state(fcport,
				bfa_fcport_sm_disabling_qwait);

		bfa_fcport_reset_linkinfo(fcport);
		bfa_fcport_scn(fcport, BFA_PORT_LINKDOWN, BFA_FALSE);
		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
				BFA_PL_EID_PORT_DISABLE, 0, "Port Disable");
		wwn2str(pwwn_buf, fcport->pwwn);
		BFA_LOG(KERN_INFO, bfad, bfa_log_level,
			"Base port offline: WWN = %s\n", pwwn_buf);
		BFA_LOG(KERN_INFO, bfad, bfa_log_level,
			"Base port disabled: WWN = %s\n", pwwn_buf);
		break;

	case BFA_FCPORT_SM_LINKDOWN:
		/* unsolicited link loss: ERR-level log unless port disabled */
		bfa_sm_set_state(fcport, bfa_fcport_sm_linkdown);
		bfa_fcport_reset_linkinfo(fcport);
		bfa_fcport_scn(fcport, BFA_PORT_LINKDOWN, BFA_FALSE);
		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
			BFA_PL_EID_PORT_ST_CHANGE, 0, "Port Linkdown");
		wwn2str(pwwn_buf, fcport->pwwn);
		if (BFA_PORT_IS_DISABLED(fcport->bfa))
			BFA_LOG(KERN_INFO, bfad, bfa_log_level,
				"Base port offline: WWN = %s\n", pwwn_buf);
		else
			BFA_LOG(KERN_ERR, bfad, bfa_log_level,
				"Base port (WWN = %s) "
				"lost fabric connectivity\n", pwwn_buf);
		break;

	case BFA_FCPORT_SM_STOP:
		bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
		bfa_fcport_reset_linkinfo(fcport);
		wwn2str(pwwn_buf, fcport->pwwn);
		if (BFA_PORT_IS_DISABLED(fcport->bfa))
			BFA_LOG(KERN_INFO, bfad, bfa_log_level,
				"Base port offline: WWN = %s\n", pwwn_buf);
		else
			BFA_LOG(KERN_ERR, bfad, bfa_log_level,
				"Base port (WWN = %s) "
				"lost fabric connectivity\n", pwwn_buf);
		break;

	case BFA_FCPORT_SM_HWFAIL:
		bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown);
		bfa_fcport_reset_linkinfo(fcport);
		bfa_fcport_scn(fcport, BFA_PORT_LINKDOWN, BFA_FALSE);
		wwn2str(pwwn_buf, fcport->pwwn);
		if (BFA_PORT_IS_DISABLED(fcport->bfa))
			BFA_LOG(KERN_INFO, bfad, bfa_log_level,
				"Base port offline: WWN = %s\n", pwwn_buf);
		else
			BFA_LOG(KERN_ERR, bfad, bfa_log_level,
				"Base port (WWN = %s) "
				"lost fabric connectivity\n", pwwn_buf);
		break;

	default:
		bfa_sm_fault(fcport->bfa, event);
	}
}
2200
/*
 * A disable is wanted but the request queue was full; parked on the
 * request queue waiting for space (QRESUME) to send the disable.
 */
static void
bfa_fcport_sm_disabling_qwait(struct bfa_fcport_s *fcport,
	enum bfa_fcport_sm_event event)
{
	bfa_trc(fcport->bfa, event);

	switch (event) {
	case BFA_FCPORT_SM_QRESUME:
		/* queue space available: actually send the disable now */
		bfa_sm_set_state(fcport, bfa_fcport_sm_disabling);
		bfa_fcport_send_disable(fcport);
		break;

	case BFA_FCPORT_SM_STOP:
		bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
		bfa_reqq_wcancel(&fcport->reqq_wait);
		break;

	case BFA_FCPORT_SM_ENABLE:
		/* enable before the disable went out: toggle when queue opens */
		bfa_sm_set_state(fcport, bfa_fcport_sm_toggling_qwait);
		break;

	case BFA_FCPORT_SM_DISABLE:
		/*
		 * Already being disabled.
		 */
		break;

	case BFA_FCPORT_SM_LINKUP:
	case BFA_FCPORT_SM_LINKDOWN:
		/*
		 * Possible to get link events when doing back-to-back
		 * enable/disables.
		 */
		break;

	case BFA_FCPORT_SM_HWFAIL:
		bfa_sm_set_state(fcport, bfa_fcport_sm_iocfail);
		bfa_reqq_wcancel(&fcport->reqq_wait);
		break;

	default:
		bfa_sm_fault(fcport->bfa, event);
	}
}
2245
/*
 * A disable immediately followed by an enable arrived while waiting for
 * request-queue space; on resume both messages must go out back-to-back.
 */
static void
bfa_fcport_sm_toggling_qwait(struct bfa_fcport_s *fcport,
	enum bfa_fcport_sm_event event)
{
	bfa_trc(fcport->bfa, event);

	switch (event) {
	case BFA_FCPORT_SM_QRESUME:
		/* send the pending disable, then the enable */
		bfa_sm_set_state(fcport, bfa_fcport_sm_disabling);
		bfa_fcport_send_disable(fcport);
		if (bfa_fcport_send_enable(fcport))
			bfa_sm_set_state(fcport, bfa_fcport_sm_enabling);
		else
			bfa_sm_set_state(fcport,
				bfa_fcport_sm_enabling_qwait);
		break;

	case BFA_FCPORT_SM_STOP:
		bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
		bfa_reqq_wcancel(&fcport->reqq_wait);
		break;

	case BFA_FCPORT_SM_ENABLE:
		/* enable is already pending; nothing more to record */
		break;

	case BFA_FCPORT_SM_DISABLE:
		/* drop the pending enable; fall back to a plain disable wait */
		bfa_sm_set_state(fcport, bfa_fcport_sm_disabling_qwait);
		break;

	case BFA_FCPORT_SM_LINKUP:
	case BFA_FCPORT_SM_LINKDOWN:
		/*
		 * Possible to get link events when doing back-to-back
		 * enable/disables.
		 */
		break;

	case BFA_FCPORT_SM_HWFAIL:
		bfa_sm_set_state(fcport, bfa_fcport_sm_iocfail);
		bfa_reqq_wcancel(&fcport->reqq_wait);
		break;

	default:
		bfa_sm_fault(fcport->bfa, event);
	}
}
2292
/*
 * Port disable request has been sent to firmware; waiting for the
 * disable response.
 */
static void
bfa_fcport_sm_disabling(struct bfa_fcport_s *fcport,
	enum bfa_fcport_sm_event event)
{
	char pwwn_buf[BFA_STRING_32];
	struct bfad_s *bfad = (struct bfad_s *)fcport->bfa->bfad;
	bfa_trc(fcport->bfa, event);

	switch (event) {
	case BFA_FCPORT_SM_FWRSP:
		/* firmware acknowledged the disable */
		bfa_sm_set_state(fcport, bfa_fcport_sm_disabled);
		break;

	case BFA_FCPORT_SM_DISABLE:
		/*
		 * Already being disabled.
		 */
		break;

	case BFA_FCPORT_SM_ENABLE:
		/* re-enable requested while disable in flight */
		if (bfa_fcport_send_enable(fcport))
			bfa_sm_set_state(fcport, bfa_fcport_sm_enabling);
		else
			bfa_sm_set_state(fcport,
				bfa_fcport_sm_enabling_qwait);

		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
				BFA_PL_EID_PORT_ENABLE, 0, "Port Enable");
		wwn2str(pwwn_buf, fcport->pwwn);
		BFA_LOG(KERN_INFO, bfad, bfa_log_level,
			"Base port enabled: WWN = %s\n", pwwn_buf);
		break;

	case BFA_FCPORT_SM_STOP:
		bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
		break;

	case BFA_FCPORT_SM_LINKUP:
	case BFA_FCPORT_SM_LINKDOWN:
		/*
		 * Possible to get link events when doing back-to-back
		 * enable/disables.
		 */
		break;

	case BFA_FCPORT_SM_HWFAIL:
		bfa_sm_set_state(fcport, bfa_fcport_sm_iocfail);
		break;

	default:
		bfa_sm_fault(fcport->bfa, event);
	}
}
2346
/*
 * Port is administratively disabled (firmware confirmed); only an enable
 * or a stop changes state.
 */
static void
bfa_fcport_sm_disabled(struct bfa_fcport_s *fcport,
	enum bfa_fcport_sm_event event)
{
	char pwwn_buf[BFA_STRING_32];
	struct bfad_s *bfad = (struct bfad_s *)fcport->bfa->bfad;
	bfa_trc(fcport->bfa, event);

	switch (event) {
	case BFA_FCPORT_SM_START:
		/*
		 * Ignore start event for a port that is disabled.
		 */
		break;

	case BFA_FCPORT_SM_STOP:
		bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
		break;

	case BFA_FCPORT_SM_ENABLE:
		if (bfa_fcport_send_enable(fcport))
			bfa_sm_set_state(fcport, bfa_fcport_sm_enabling);
		else
			bfa_sm_set_state(fcport,
				bfa_fcport_sm_enabling_qwait);

		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
				BFA_PL_EID_PORT_ENABLE, 0, "Port Enable");
		wwn2str(pwwn_buf, fcport->pwwn);
		BFA_LOG(KERN_INFO, bfad, bfa_log_level,
			"Base port enabled: WWN = %s\n", pwwn_buf);
		break;

	case BFA_FCPORT_SM_DISABLE:
		/*
		 * Already disabled.
		 */
		break;

	case BFA_FCPORT_SM_HWFAIL:
		/* IOC failure with the port disabled */
		bfa_sm_set_state(fcport, bfa_fcport_sm_iocfail);
		break;

	default:
		bfa_sm_fault(fcport->bfa, event);
	}
}
2394
2395static void
2396bfa_fcport_sm_stopped(struct bfa_fcport_s *fcport,
2397 enum bfa_fcport_sm_event event)
2398{
2399 bfa_trc(fcport->bfa, event);
2400
2401 switch (event) {
2402 case BFA_FCPORT_SM_START:
2403 if (bfa_fcport_send_enable(fcport))
2404 bfa_sm_set_state(fcport, bfa_fcport_sm_enabling);
2405 else
2406 bfa_sm_set_state(fcport,
2407 bfa_fcport_sm_enabling_qwait);
2408 break;
2409
2410 default:
Jing Huang5fbe25c2010-10-18 17:17:23 -07002411 /*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002412 * Ignore all other events.
2413 */
2414 ;
2415 }
2416}
2417
Jing Huang5fbe25c2010-10-18 17:17:23 -07002418/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002419 * Port is enabled. IOC is down/failed.
2420 */
2421static void
2422bfa_fcport_sm_iocdown(struct bfa_fcport_s *fcport,
2423 enum bfa_fcport_sm_event event)
2424{
2425 bfa_trc(fcport->bfa, event);
2426
2427 switch (event) {
2428 case BFA_FCPORT_SM_START:
2429 if (bfa_fcport_send_enable(fcport))
2430 bfa_sm_set_state(fcport, bfa_fcport_sm_enabling);
2431 else
2432 bfa_sm_set_state(fcport,
2433 bfa_fcport_sm_enabling_qwait);
2434 break;
2435
2436 default:
Jing Huang5fbe25c2010-10-18 17:17:23 -07002437 /*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002438 * Ignore all events.
2439 */
2440 ;
2441 }
2442}
2443
Jing Huang5fbe25c2010-10-18 17:17:23 -07002444/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002445 * Port is disabled. IOC is down/failed.
2446 */
2447static void
2448bfa_fcport_sm_iocfail(struct bfa_fcport_s *fcport,
2449 enum bfa_fcport_sm_event event)
2450{
2451 bfa_trc(fcport->bfa, event);
2452
2453 switch (event) {
2454 case BFA_FCPORT_SM_START:
2455 bfa_sm_set_state(fcport, bfa_fcport_sm_disabled);
2456 break;
2457
2458 case BFA_FCPORT_SM_ENABLE:
2459 bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown);
2460 break;
2461
2462 default:
Jing Huang5fbe25c2010-10-18 17:17:23 -07002463 /*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002464 * Ignore all events.
2465 */
2466 ;
2467 }
2468}
2469
Jing Huang5fbe25c2010-10-18 17:17:23 -07002470/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002471 * Link state is down
2472 */
2473static void
2474bfa_fcport_ln_sm_dn(struct bfa_fcport_ln_s *ln,
2475 enum bfa_fcport_ln_sm_event event)
2476{
2477 bfa_trc(ln->fcport->bfa, event);
2478
2479 switch (event) {
2480 case BFA_FCPORT_LN_SM_LINKUP:
2481 bfa_sm_set_state(ln, bfa_fcport_ln_sm_up_nf);
2482 bfa_fcport_queue_cb(ln, BFA_PORT_LINKUP);
2483 break;
2484
2485 default:
2486 bfa_sm_fault(ln->fcport->bfa, event);
2487 }
2488}
2489
Jing Huang5fbe25c2010-10-18 17:17:23 -07002490/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002491 * Link state is waiting for down notification
2492 */
2493static void
2494bfa_fcport_ln_sm_dn_nf(struct bfa_fcport_ln_s *ln,
2495 enum bfa_fcport_ln_sm_event event)
2496{
2497 bfa_trc(ln->fcport->bfa, event);
2498
2499 switch (event) {
2500 case BFA_FCPORT_LN_SM_LINKUP:
2501 bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn_up_nf);
2502 break;
2503
2504 case BFA_FCPORT_LN_SM_NOTIFICATION:
2505 bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn);
2506 break;
2507
2508 default:
2509 bfa_sm_fault(ln->fcport->bfa, event);
2510 }
2511}
2512
Jing Huang5fbe25c2010-10-18 17:17:23 -07002513/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002514 * Link state is waiting for down notification and there is a pending up
2515 */
2516static void
2517bfa_fcport_ln_sm_dn_up_nf(struct bfa_fcport_ln_s *ln,
2518 enum bfa_fcport_ln_sm_event event)
2519{
2520 bfa_trc(ln->fcport->bfa, event);
2521
2522 switch (event) {
2523 case BFA_FCPORT_LN_SM_LINKDOWN:
2524 bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn_nf);
2525 break;
2526
2527 case BFA_FCPORT_LN_SM_NOTIFICATION:
2528 bfa_sm_set_state(ln, bfa_fcport_ln_sm_up_nf);
2529 bfa_fcport_queue_cb(ln, BFA_PORT_LINKUP);
2530 break;
2531
2532 default:
2533 bfa_sm_fault(ln->fcport->bfa, event);
2534 }
2535}
2536
Jing Huang5fbe25c2010-10-18 17:17:23 -07002537/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002538 * Link state is up
2539 */
2540static void
2541bfa_fcport_ln_sm_up(struct bfa_fcport_ln_s *ln,
2542 enum bfa_fcport_ln_sm_event event)
2543{
2544 bfa_trc(ln->fcport->bfa, event);
2545
2546 switch (event) {
2547 case BFA_FCPORT_LN_SM_LINKDOWN:
2548 bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn_nf);
2549 bfa_fcport_queue_cb(ln, BFA_PORT_LINKDOWN);
2550 break;
2551
2552 default:
2553 bfa_sm_fault(ln->fcport->bfa, event);
2554 }
2555}
2556
Jing Huang5fbe25c2010-10-18 17:17:23 -07002557/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002558 * Link state is waiting for up notification
2559 */
2560static void
2561bfa_fcport_ln_sm_up_nf(struct bfa_fcport_ln_s *ln,
2562 enum bfa_fcport_ln_sm_event event)
2563{
2564 bfa_trc(ln->fcport->bfa, event);
2565
2566 switch (event) {
2567 case BFA_FCPORT_LN_SM_LINKDOWN:
2568 bfa_sm_set_state(ln, bfa_fcport_ln_sm_up_dn_nf);
2569 break;
2570
2571 case BFA_FCPORT_LN_SM_NOTIFICATION:
2572 bfa_sm_set_state(ln, bfa_fcport_ln_sm_up);
2573 break;
2574
2575 default:
2576 bfa_sm_fault(ln->fcport->bfa, event);
2577 }
2578}
2579
Jing Huang5fbe25c2010-10-18 17:17:23 -07002580/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002581 * Link state is waiting for up notification and there is a pending down
2582 */
2583static void
2584bfa_fcport_ln_sm_up_dn_nf(struct bfa_fcport_ln_s *ln,
2585 enum bfa_fcport_ln_sm_event event)
2586{
2587 bfa_trc(ln->fcport->bfa, event);
2588
2589 switch (event) {
2590 case BFA_FCPORT_LN_SM_LINKUP:
2591 bfa_sm_set_state(ln, bfa_fcport_ln_sm_up_dn_up_nf);
2592 break;
2593
2594 case BFA_FCPORT_LN_SM_NOTIFICATION:
2595 bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn_nf);
2596 bfa_fcport_queue_cb(ln, BFA_PORT_LINKDOWN);
2597 break;
2598
2599 default:
2600 bfa_sm_fault(ln->fcport->bfa, event);
2601 }
2602}
2603
Jing Huang5fbe25c2010-10-18 17:17:23 -07002604/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002605 * Link state is waiting for up notification and there are pending down and up
2606 */
2607static void
2608bfa_fcport_ln_sm_up_dn_up_nf(struct bfa_fcport_ln_s *ln,
2609 enum bfa_fcport_ln_sm_event event)
2610{
2611 bfa_trc(ln->fcport->bfa, event);
2612
2613 switch (event) {
2614 case BFA_FCPORT_LN_SM_LINKDOWN:
2615 bfa_sm_set_state(ln, bfa_fcport_ln_sm_up_dn_nf);
2616 break;
2617
2618 case BFA_FCPORT_LN_SM_NOTIFICATION:
2619 bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn_up_nf);
2620 bfa_fcport_queue_cb(ln, BFA_PORT_LINKDOWN);
2621 break;
2622
2623 default:
2624 bfa_sm_fault(ln->fcport->bfa, event);
2625 }
2626}
2627
2628
2629
Jing Huang5fbe25c2010-10-18 17:17:23 -07002630/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002631 * hal_port_private
2632 */
2633
2634static void
2635__bfa_cb_fcport_event(void *cbarg, bfa_boolean_t complete)
2636{
2637 struct bfa_fcport_ln_s *ln = cbarg;
2638
2639 if (complete)
2640 ln->fcport->event_cbfn(ln->fcport->event_cbarg, ln->ln_event);
2641 else
2642 bfa_sm_send_event(ln, BFA_FCPORT_LN_SM_NOTIFICATION);
2643}
2644
Jing Huang5fbe25c2010-10-18 17:17:23 -07002645/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002646 * Send SCN notification to upper layers.
2647 * trunk - false if caller is fcport to ignore fcport event in trunked mode
2648 */
2649static void
2650bfa_fcport_scn(struct bfa_fcport_s *fcport, enum bfa_port_linkstate event,
2651 bfa_boolean_t trunk)
2652{
2653 if (fcport->cfg.trunked && !trunk)
2654 return;
2655
2656 switch (event) {
2657 case BFA_PORT_LINKUP:
2658 bfa_sm_send_event(&fcport->ln, BFA_FCPORT_LN_SM_LINKUP);
2659 break;
2660 case BFA_PORT_LINKDOWN:
2661 bfa_sm_send_event(&fcport->ln, BFA_FCPORT_LN_SM_LINKDOWN);
2662 break;
2663 default:
2664 bfa_assert(0);
2665 }
2666}
2667
2668static void
2669bfa_fcport_queue_cb(struct bfa_fcport_ln_s *ln, enum bfa_port_linkstate event)
2670{
2671 struct bfa_fcport_s *fcport = ln->fcport;
2672
2673 if (fcport->bfa->fcs) {
2674 fcport->event_cbfn(fcport->event_cbarg, event);
2675 bfa_sm_send_event(ln, BFA_FCPORT_LN_SM_NOTIFICATION);
2676 } else {
2677 ln->ln_event = event;
2678 bfa_cb_queue(fcport->bfa, &ln->ln_qe,
2679 __bfa_cb_fcport_event, ln);
2680 }
2681}
2682
/* DMA area needed for the fcport stats buffer, rounded up to a cacheline. */
#define FCPORT_STATS_DMA_SZ (BFA_ROUNDUP(sizeof(union bfa_fcport_stats_u), \
				BFA_CACHELINE_SZ))

/*
 * Report the DMA memory this module needs (stats buffer only).
 * Only *dm_len is updated; the non-DMA length ndm_len is left untouched.
 */
static void
bfa_fcport_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *ndm_len,
		u32 *dm_len)
{
	*dm_len += FCPORT_STATS_DMA_SZ;
}
2692
2693static void
2694bfa_fcport_qresume(void *cbarg)
2695{
2696 struct bfa_fcport_s *fcport = cbarg;
2697
2698 bfa_sm_send_event(fcport, BFA_FCPORT_SM_QRESUME);
2699}
2700
2701static void
2702bfa_fcport_mem_claim(struct bfa_fcport_s *fcport, struct bfa_meminfo_s *meminfo)
2703{
2704 u8 *dm_kva;
2705 u64 dm_pa;
2706
2707 dm_kva = bfa_meminfo_dma_virt(meminfo);
2708 dm_pa = bfa_meminfo_dma_phys(meminfo);
2709
2710 fcport->stats_kva = dm_kva;
2711 fcport->stats_pa = dm_pa;
2712 fcport->stats = (union bfa_fcport_stats_u *) dm_kva;
2713
2714 dm_kva += FCPORT_STATS_DMA_SZ;
2715 dm_pa += FCPORT_STATS_DMA_SZ;
2716
2717 bfa_meminfo_dma_virt(meminfo) = dm_kva;
2718 bfa_meminfo_dma_phys(meminfo) = dm_pa;
2719}
2720
/*
 * Memory initialization: zero the fcport module state, claim the DMA
 * stats buffer, start both state machines and install defaults.
 */
static void
bfa_fcport_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
		struct bfa_meminfo_s *meminfo, struct bfa_pcidev_s *pcidev)
{
	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
	struct bfa_port_cfg_s *port_cfg = &fcport->cfg;
	struct bfa_fcport_ln_s *ln = &fcport->ln;
	struct bfa_timeval_s tv;

	memset(fcport, 0, sizeof(struct bfa_fcport_s));
	fcport->bfa = bfa;
	ln->fcport = fcport;

	bfa_fcport_mem_claim(fcport, meminfo);

	/* port SM starts uninitialized, link-notify SM starts down */
	bfa_sm_set_state(fcport, bfa_fcport_sm_uninit);
	bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn);

	/*
	 * initialize time stamp for stats reset
	 */
	bfa_os_gettimeofday(&tv);
	fcport->stats_reset_time = tv.tv_sec;

	/*
	 * initialize and set default configuration
	 */
	port_cfg->topology = BFA_PORT_TOPOLOGY_P2P;
	port_cfg->speed = BFA_PORT_SPEED_AUTO;
	port_cfg->trunked = BFA_FALSE;
	port_cfg->maxfrsize = 0;

	port_cfg->trl_def_speed = BFA_PORT_SPEED_1GBPS;

	/* re-issue enable/disable when request-queue space frees up */
	bfa_reqq_winit(&fcport->reqq_wait, bfa_fcport_qresume, fcport);
}
2760
/*
 * Module detach entry point. The fcport module owns no resources that
 * need explicit teardown, so this is intentionally a no-op.
 */
static void
bfa_fcport_detach(struct bfa_s *bfa)
{
}
2765
/*
 * Called when IOC is ready. Kicks the port state machine so a pending
 * enable can be issued to firmware.
 */
static void
bfa_fcport_start(struct bfa_s *bfa)
{
	bfa_sm_send_event(BFA_FCPORT_MOD(bfa), BFA_FCPORT_SM_START);
}
2774
/*
 * Called before IOC is stopped. Stops the port state machine and tears
 * down trunking state along with it.
 */
static void
bfa_fcport_stop(struct bfa_s *bfa)
{
	bfa_sm_send_event(BFA_FCPORT_MOD(bfa), BFA_FCPORT_SM_STOP);
	bfa_trunk_iocdisable(bfa);
}
2784
Jing Huang5fbe25c2010-10-18 17:17:23 -07002785/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002786 * Called when IOC failure is detected.
2787 */
2788static void
2789bfa_fcport_iocdisable(struct bfa_s *bfa)
2790{
2791 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
2792
2793 bfa_sm_send_event(fcport, BFA_FCPORT_SM_HWFAIL);
2794 bfa_trunk_iocdisable(bfa);
2795}
2796
/*
 * Cache link attributes from the firmware link-state event into the
 * fcport (speed, topology, QoS, trunk and FCoE details).
 */
static void
bfa_fcport_update_linkinfo(struct bfa_fcport_s *fcport)
{
	struct bfi_fcport_event_s *pevent = fcport->event_arg.i2hmsg.event;
	struct bfa_fcport_trunk_s *trunk = &fcport->trunk;

	fcport->speed = pevent->link_state.speed;
	fcport->topology = pevent->link_state.topology;

	/* in loop topology the ALPA starts out unknown */
	if (fcport->topology == BFA_PORT_TOPOLOGY_LOOP)
		fcport->myalpa = 0;

	/* QoS Details */
	fcport->qos_attr = pevent->link_state.qos_attr;
	fcport->qos_vc_attr = pevent->link_state.vc_fcf.qos_vc_attr;

	/*
	 * update trunk state if applicable
	 */
	if (!fcport->cfg.trunked)
		trunk->attr.state = BFA_TRUNK_DISABLED;

	/* update FCoE specific; VLAN arrives big-endian from firmware */
	fcport->fcoe_vlan = be16_to_cpu(pevent->link_state.vc_fcf.fcf.vlan);

	bfa_trc(fcport->bfa, fcport->speed);
	bfa_trc(fcport->bfa, fcport->topology);
}
2825
2826static void
2827bfa_fcport_reset_linkinfo(struct bfa_fcport_s *fcport)
2828{
2829 fcport->speed = BFA_PORT_SPEED_UNKNOWN;
2830 fcport->topology = BFA_PORT_TOPOLOGY_NONE;
2831}
2832
/*
 * Send port enable message to firmware.
 * Returns BFA_TRUE if the message was queued, BFA_FALSE if the request
 * queue was full and the caller was parked on reqq_wait instead.
 */
static bfa_boolean_t
bfa_fcport_send_enable(struct bfa_fcport_s *fcport)
{
	struct bfi_fcport_enable_req_s *m;

	/*
	 * Increment message tag before queue check, so that responses to old
	 * requests are discarded.
	 */
	fcport->msgtag++;

	/*
	 * check for room in queue to send request now
	 */
	m = bfa_reqq_next(fcport->bfa, BFA_REQQ_PORT);
	if (!m) {
		/* no room: wait for space, qresume will retry */
		bfa_reqq_wait(fcport->bfa, BFA_REQQ_PORT,
			&fcport->reqq_wait);
		return BFA_FALSE;
	}

	bfi_h2i_set(m->mh, BFI_MC_FCPORT, BFI_FCPORT_H2I_ENABLE_REQ,
			bfa_lpuid(fcport->bfa));
	m->nwwn = fcport->nwwn;
	m->pwwn = fcport->pwwn;
	m->port_cfg = fcport->cfg;
	m->msgtag = fcport->msgtag;
	/* maxfrsize goes out on the wire big-endian */
	m->port_cfg.maxfrsize = cpu_to_be16(fcport->cfg.maxfrsize);
	bfa_dma_be_addr_set(m->stats_dma_addr, fcport->stats_pa);
	bfa_trc(fcport->bfa, m->stats_dma_addr.a32.addr_lo);
	bfa_trc(fcport->bfa, m->stats_dma_addr.a32.addr_hi);

	/*
	 * queue I/O message to firmware
	 */
	bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT);
	return BFA_TRUE;
}
2874
/*
 * Send port disable message to firmware.
 * Returns BFA_TRUE if the message was queued, BFA_FALSE if the request
 * queue was full and the caller was parked on reqq_wait instead.
 */
static bfa_boolean_t
bfa_fcport_send_disable(struct bfa_fcport_s *fcport)
{
	struct bfi_fcport_req_s *m;

	/*
	 * Increment message tag before queue check, so that responses to old
	 * requests are discarded.
	 */
	fcport->msgtag++;

	/*
	 * check for room in queue to send request now
	 */
	m = bfa_reqq_next(fcport->bfa, BFA_REQQ_PORT);
	if (!m) {
		/* no room: wait for space, qresume will retry */
		bfa_reqq_wait(fcport->bfa, BFA_REQQ_PORT,
			&fcport->reqq_wait);
		return BFA_FALSE;
	}

	bfi_h2i_set(m->mh, BFI_MC_FCPORT, BFI_FCPORT_H2I_DISABLE_REQ,
			bfa_lpuid(fcport->bfa));
	m->msgtag = fcport->msgtag;

	/*
	 * queue I/O message to firmware
	 */
	bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT);

	return BFA_TRUE;
}
2910
2911static void
2912bfa_fcport_set_wwns(struct bfa_fcport_s *fcport)
2913{
Maggie Zhangf7f73812010-12-09 19:08:43 -08002914 fcport->pwwn = fcport->bfa->ioc.attr->pwwn;
2915 fcport->nwwn = fcport->bfa->ioc.attr->nwwn;
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002916
2917 bfa_trc(fcport->bfa, fcport->pwwn);
2918 bfa_trc(fcport->bfa, fcport->nwwn);
2919}
2920
/*
 * Send the configured transmit BB-credit to firmware. Best-effort: if
 * the request queue is full the update is dropped (only traced), unlike
 * enable/disable which park on the queue.
 */
static void
bfa_fcport_send_txcredit(void *port_cbarg)
{

	struct bfa_fcport_s *fcport = port_cbarg;
	struct bfi_fcport_set_svc_params_req_s *m;

	/*
	 * check for room in queue to send request now
	 */
	m = bfa_reqq_next(fcport->bfa, BFA_REQQ_PORT);
	if (!m) {
		bfa_trc(fcport->bfa, fcport->cfg.tx_bbcredit);
		return;
	}

	bfi_h2i_set(m->mh, BFI_MC_FCPORT, BFI_FCPORT_H2I_SET_SVC_PARAMS_REQ,
			bfa_lpuid(fcport->bfa));
	/* credit goes out on the wire big-endian */
	m->tx_bbcredit = cpu_to_be16((u16)fcport->cfg.tx_bbcredit);

	/*
	 * queue I/O message to firmware
	 */
	bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT);
}
2946
2947static void
2948bfa_fcport_qos_stats_swap(struct bfa_qos_stats_s *d,
2949 struct bfa_qos_stats_s *s)
2950{
2951 u32 *dip = (u32 *) d;
Maggie50444a32010-11-29 18:26:32 -08002952 __be32 *sip = (__be32 *) s;
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002953 int i;
2954
2955 /* Now swap the 32 bit fields */
2956 for (i = 0; i < (sizeof(struct bfa_qos_stats_s)/sizeof(u32)); ++i)
Jing Huangba816ea2010-10-18 17:10:50 -07002957 dip[i] = be32_to_cpu(sip[i]);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002958}
2959
/*
 * Convert a FCoE stats block from firmware byte order into host order.
 * Fields are 64-bit: on big-endian hosts each 32-bit half is swapped in
 * place; on little-endian hosts the two halves are also exchanged so the
 * 64-bit value lands correctly.
 */
static void
bfa_fcport_fcoe_stats_swap(struct bfa_fcoe_stats_s *d,
	struct bfa_fcoe_stats_s *s)
{
	u32 *dip = (u32 *) d;
	__be32 *sip = (__be32 *) s;
	int i;

	/* step two 32-bit words (one 64-bit counter) per iteration */
	for (i = 0; i < ((sizeof(struct bfa_fcoe_stats_s))/sizeof(u32));
		i = i + 2) {
#ifdef __BIGENDIAN
		dip[i] = be32_to_cpu(sip[i]);
		dip[i + 1] = be32_to_cpu(sip[i + 1]);
#else
		dip[i] = be32_to_cpu(sip[i + 1]);
		dip[i + 1] = be32_to_cpu(sip[i]);
#endif
	}
}
2979
/*
 * Deferred completion for a stats-get request. On completion, byte-swap
 * the DMA'd stats into the caller's buffer (FC QoS or FCoE layout based
 * on IOC mode) and invoke the caller's callback; on cancel just clear
 * the busy flag.
 */
static void
__bfa_cb_fcport_stats_get(void *cbarg, bfa_boolean_t complete)
{
	struct bfa_fcport_s *fcport = cbarg;

	if (complete) {
		if (fcport->stats_status == BFA_STATUS_OK) {
			struct bfa_timeval_s tv;

			/* Swap FC QoS or FCoE stats */
			if (bfa_ioc_get_fcmode(&fcport->bfa->ioc)) {
				bfa_fcport_qos_stats_swap(
					&fcport->stats_ret->fcqos,
					&fcport->stats->fcqos);
			} else {
				bfa_fcport_fcoe_stats_swap(
					&fcport->stats_ret->fcoe,
					&fcport->stats->fcoe);

				/* report seconds since the last stats reset */
				bfa_os_gettimeofday(&tv);
				fcport->stats_ret->fcoe.secs_reset =
					tv.tv_sec - fcport->stats_reset_time;
			}
		}
		fcport->stats_cbfn(fcport->stats_cbarg, fcport->stats_status);
	} else {
		/* callback cancelled: release the stats request */
		fcport->stats_busy = BFA_FALSE;
		fcport->stats_status = BFA_STATUS_OK;
	}
}
3010
3011static void
3012bfa_fcport_stats_get_timeout(void *cbarg)
3013{
3014 struct bfa_fcport_s *fcport = (struct bfa_fcport_s *) cbarg;
3015
3016 bfa_trc(fcport->bfa, fcport->stats_qfull);
3017
3018 if (fcport->stats_qfull) {
3019 bfa_reqq_wcancel(&fcport->stats_reqq_wait);
3020 fcport->stats_qfull = BFA_FALSE;
3021 }
3022
3023 fcport->stats_status = BFA_STATUS_ETIMER;
3024 bfa_cb_queue(fcport->bfa, &fcport->hcb_qe, __bfa_cb_fcport_stats_get,
3025 fcport);
3026}
3027
/*
 * Issue the stats-get request to firmware. If the request queue is full,
 * park on it and retry from this same function when space frees up.
 */
static void
bfa_fcport_send_stats_get(void *cbarg)
{
	struct bfa_fcport_s *fcport = (struct bfa_fcport_s *) cbarg;
	struct bfi_fcport_req_s *msg;

	msg = bfa_reqq_next(fcport->bfa, BFA_REQQ_PORT);

	if (!msg) {
		/* queue full: re-arm ourselves as the resume callback */
		fcport->stats_qfull = BFA_TRUE;
		bfa_reqq_winit(&fcport->stats_reqq_wait,
				bfa_fcport_send_stats_get, fcport);
		bfa_reqq_wait(fcport->bfa, BFA_REQQ_PORT,
				&fcport->stats_reqq_wait);
		return;
	}
	fcport->stats_qfull = BFA_FALSE;

	memset(msg, 0, sizeof(struct bfi_fcport_req_s));
	bfi_h2i_set(msg->mh, BFI_MC_FCPORT, BFI_FCPORT_H2I_STATS_GET_REQ,
			bfa_lpuid(fcport->bfa));
	bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT);
}
3051
/*
 * Completion callback for a firmware stats-clear request.
 *
 * complete == BFA_TRUE: re-stamp the stats reset time and notify the
 * requester with the final status.
 *
 * complete == BFA_FALSE: the queued callback is being flushed; clear the
 * busy flag so a new stats request can be issued.
 */
static void
__bfa_cb_fcport_stats_clr(void *cbarg, bfa_boolean_t complete)
{
	struct bfa_fcport_s *fcport = cbarg;

	if (complete) {
		struct bfa_timeval_s tv;

		/*
		 * re-initialize time stamp for stats reset
		 */
		bfa_os_gettimeofday(&tv);
		fcport->stats_reset_time = tv.tv_sec;

		fcport->stats_cbfn(fcport->stats_cbarg, fcport->stats_status);
	} else {
		fcport->stats_busy = BFA_FALSE;
		fcport->stats_status = BFA_STATUS_OK;
	}
}
3072
3073static void
3074bfa_fcport_stats_clr_timeout(void *cbarg)
3075{
3076 struct bfa_fcport_s *fcport = (struct bfa_fcport_s *) cbarg;
3077
3078 bfa_trc(fcport->bfa, fcport->stats_qfull);
3079
3080 if (fcport->stats_qfull) {
3081 bfa_reqq_wcancel(&fcport->stats_reqq_wait);
3082 fcport->stats_qfull = BFA_FALSE;
3083 }
3084
3085 fcport->stats_status = BFA_STATUS_ETIMER;
3086 bfa_cb_queue(fcport->bfa, &fcport->hcb_qe,
3087 __bfa_cb_fcport_stats_clr, fcport);
3088}
3089
/*
 * Build and post a BFI_FCPORT_H2I_STATS_CLEAR_REQ message to firmware.
 * If no request-queue entry is available, mark the port queue-full and
 * park on the request queue; this function re-runs from the queue-resume
 * callback.
 */
static void
bfa_fcport_send_stats_clear(void *cbarg)
{
	struct bfa_fcport_s *fcport = (struct bfa_fcport_s *) cbarg;
	struct bfi_fcport_req_s *msg;

	msg = bfa_reqq_next(fcport->bfa, BFA_REQQ_PORT);

	if (!msg) {
		/* no room: retry when the request queue drains */
		fcport->stats_qfull = BFA_TRUE;
		bfa_reqq_winit(&fcport->stats_reqq_wait,
				bfa_fcport_send_stats_clear, fcport);
		bfa_reqq_wait(fcport->bfa, BFA_REQQ_PORT,
				&fcport->stats_reqq_wait);
		return;
	}
	fcport->stats_qfull = BFA_FALSE;

	memset(msg, 0, sizeof(struct bfi_fcport_req_s));
	bfi_h2i_set(msg->mh, BFI_MC_FCPORT, BFI_FCPORT_H2I_STATS_CLEAR_REQ,
			bfa_lpuid(fcport->bfa));
	bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT);
}
3113
Jing Huang5fbe25c2010-10-18 17:17:23 -07003114/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07003115 * Handle trunk SCN event from firmware.
3116 */
static void
bfa_trunk_scn(struct bfa_fcport_s *fcport, struct bfi_fcport_trunk_scn_s *scn)
{
	struct bfa_fcport_trunk_s *trunk = &fcport->trunk;
	struct bfi_fcport_trunk_link_s *tlink;
	struct bfa_trunk_link_attr_s *lattr;
	enum bfa_trunk_state state_prev;
	int i;
	int link_bm = 0;	/* bit i set => trunk link i is up */

	bfa_trc(fcport->bfa, fcport->cfg.trunked);
	bfa_assert(scn->trunk_state == BFA_TRUNK_ONLINE ||
		   scn->trunk_state == BFA_TRUNK_OFFLINE);

	bfa_trc(fcport->bfa, trunk->attr.state);
	bfa_trc(fcport->bfa, scn->trunk_state);
	bfa_trc(fcport->bfa, scn->trunk_speed);

	/*
	 * Save off new state for trunk attribute query
	 */
	state_prev = trunk->attr.state;
	if (fcport->cfg.trunked && (trunk->attr.state != BFA_TRUNK_DISABLED))
		trunk->attr.state = scn->trunk_state;
	trunk->attr.speed = scn->trunk_speed;
	/* copy per-link attributes reported by firmware */
	for (i = 0; i < BFA_TRUNK_MAX_PORTS; i++) {
		lattr = &trunk->attr.link_attr[i];
		tlink = &scn->tlink[i];

		lattr->link_state = tlink->state;
		lattr->trunk_wwn = tlink->trunk_wwn;
		lattr->fctl = tlink->fctl;
		lattr->speed = tlink->speed;
		lattr->deskew = be32_to_cpu(tlink->deskew);

		if (tlink->state == BFA_TRUNK_LINK_STATE_UP) {
			fcport->speed = tlink->speed;
			fcport->topology = BFA_PORT_TOPOLOGY_P2P;
			link_bm |= 1 << i;
		}

		bfa_trc(fcport->bfa, lattr->link_state);
		bfa_trc(fcport->bfa, lattr->trunk_wwn);
		bfa_trc(fcport->bfa, lattr->fctl);
		bfa_trc(fcport->bfa, lattr->speed);
		bfa_trc(fcport->bfa, lattr->deskew);
	}

	/* record which trunk links are up in the port log */
	switch (link_bm) {
	case 3:
		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
			BFA_PL_EID_TRUNK_SCN, 0, "Trunk up(0,1)");
		break;
	case 2:
		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
			BFA_PL_EID_TRUNK_SCN, 0, "Trunk up(-,1)");
		break;
	case 1:
		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
			BFA_PL_EID_TRUNK_SCN, 0, "Trunk up(0,-)");
		break;
	default:
		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
			BFA_PL_EID_TRUNK_SCN, 0, "Trunk down");
	}

	/*
	 * Notify upper layers if trunk state changed.
	 */
	if ((state_prev != trunk->attr.state) ||
	    (scn->trunk_state == BFA_TRUNK_OFFLINE)) {
		bfa_fcport_scn(fcport, (scn->trunk_state == BFA_TRUNK_ONLINE) ?
			       BFA_PORT_LINKUP : BFA_PORT_LINKDOWN, BFA_TRUE);
	}
}
3192
3193static void
3194bfa_trunk_iocdisable(struct bfa_s *bfa)
3195{
3196 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3197 int i = 0;
3198
Jing Huang5fbe25c2010-10-18 17:17:23 -07003199 /*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07003200 * In trunked mode, notify upper layers that link is down
3201 */
3202 if (fcport->cfg.trunked) {
3203 if (fcport->trunk.attr.state == BFA_TRUNK_ONLINE)
3204 bfa_fcport_scn(fcport, BFA_PORT_LINKDOWN, BFA_TRUE);
3205
3206 fcport->trunk.attr.state = BFA_TRUNK_OFFLINE;
3207 fcport->trunk.attr.speed = BFA_PORT_SPEED_UNKNOWN;
3208 for (i = 0; i < BFA_TRUNK_MAX_PORTS; i++) {
3209 fcport->trunk.attr.link_attr[i].trunk_wwn = 0;
3210 fcport->trunk.attr.link_attr[i].fctl =
3211 BFA_TRUNK_LINK_FCTL_NORMAL;
3212 fcport->trunk.attr.link_attr[i].link_state =
3213 BFA_TRUNK_LINK_STATE_DN_LINKDN;
3214 fcport->trunk.attr.link_attr[i].speed =
3215 BFA_PORT_SPEED_UNKNOWN;
3216 fcport->trunk.attr.link_attr[i].deskew = 0;
3217 }
3218 }
3219}
3220
3221
3222
Jing Huang5fbe25c2010-10-18 17:17:23 -07003223/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07003224 * hal_port_public
3225 */
3226
Jing Huang5fbe25c2010-10-18 17:17:23 -07003227/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07003228 * Called to initialize port attributes
3229 */
void
bfa_fcport_init(struct bfa_s *bfa)
{
	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);

	/*
	 * Initialize port attributes from IOC hardware data.
	 */
	bfa_fcport_set_wwns(fcport);
	/* keep a user-configured max frame size if one was already set */
	if (fcport->cfg.maxfrsize == 0)
		fcport->cfg.maxfrsize = bfa_ioc_maxfrsize(&bfa->ioc);
	fcport->cfg.rx_bbcredit = bfa_ioc_rx_bbcredit(&bfa->ioc);
	fcport->speed_sup = bfa_ioc_speed_sup(&bfa->ioc);

	/* the IOC must have reported non-zero values for these */
	bfa_assert(fcport->cfg.maxfrsize);
	bfa_assert(fcport->cfg.rx_bbcredit);
	bfa_assert(fcport->speed_sup);
}
3248
Jing Huang5fbe25c2010-10-18 17:17:23 -07003249/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07003250 * Firmware message handler.
3251 */
void
bfa_fcport_isr(struct bfa_s *bfa, struct bfi_msg_s *msg)
{
	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
	union bfi_fcport_i2h_msg_u i2hmsg;

	/* stash the raw message for the state machine handlers */
	i2hmsg.msg = msg;
	fcport->event_arg.i2hmsg = i2hmsg;

	bfa_trc(bfa, msg->mhdr.msg_id);
	bfa_trc(bfa, bfa_sm_to_state(hal_port_sm_table, fcport->sm));

	switch (msg->mhdr.msg_id) {
	case BFI_FCPORT_I2H_ENABLE_RSP:
		/* msgtag match filters out stale responses */
		if (fcport->msgtag == i2hmsg.penable_rsp->msgtag)
			bfa_sm_send_event(fcport, BFA_FCPORT_SM_FWRSP);
		break;

	case BFI_FCPORT_I2H_DISABLE_RSP:
		/*
		 * NOTE(review): disable rsp is read via penable_rsp here;
		 * presumably both responses share the msgtag layout - confirm.
		 */
		if (fcport->msgtag == i2hmsg.penable_rsp->msgtag)
			bfa_sm_send_event(fcport, BFA_FCPORT_SM_FWRSP);
		break;

	case BFI_FCPORT_I2H_EVENT:
		if (i2hmsg.event->link_state.linkstate == BFA_PORT_LINKUP)
			bfa_sm_send_event(fcport, BFA_FCPORT_SM_LINKUP);
		else
			bfa_sm_send_event(fcport, BFA_FCPORT_SM_LINKDOWN);
		break;

	case BFI_FCPORT_I2H_TRUNK_SCN:
		bfa_trunk_scn(fcport, i2hmsg.trunk_scn);
		break;

	case BFI_FCPORT_I2H_STATS_GET_RSP:
		/*
		 * check for timer pop before processing the rsp
		 */
		if (fcport->stats_busy == BFA_FALSE ||
		    fcport->stats_status == BFA_STATUS_ETIMER)
			break;

		bfa_timer_stop(&fcport->timer);
		fcport->stats_status = i2hmsg.pstatsget_rsp->status;
		bfa_cb_queue(fcport->bfa, &fcport->hcb_qe,
			__bfa_cb_fcport_stats_get, fcport);
		break;

	case BFI_FCPORT_I2H_STATS_CLEAR_RSP:
		/*
		 * check for timer pop before processing the rsp
		 */
		if (fcport->stats_busy == BFA_FALSE ||
		    fcport->stats_status == BFA_STATUS_ETIMER)
			break;

		bfa_timer_stop(&fcport->timer);
		fcport->stats_status = BFA_STATUS_OK;
		bfa_cb_queue(fcport->bfa, &fcport->hcb_qe,
			__bfa_cb_fcport_stats_clr, fcport);
		break;

	case BFI_FCPORT_I2H_ENABLE_AEN:
		bfa_sm_send_event(fcport, BFA_FCPORT_SM_ENABLE);
		break;

	case BFI_FCPORT_I2H_DISABLE_AEN:
		bfa_sm_send_event(fcport, BFA_FCPORT_SM_DISABLE);
		break;

	default:
		bfa_assert(0);
		break;
	}
}
3327
3328
3329
Jing Huang5fbe25c2010-10-18 17:17:23 -07003330/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07003331 * hal_port_api
3332 */
3333
Jing Huang5fbe25c2010-10-18 17:17:23 -07003334/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07003335 * Registered callback for port events.
3336 */
3337void
3338bfa_fcport_event_register(struct bfa_s *bfa,
3339 void (*cbfn) (void *cbarg,
3340 enum bfa_port_linkstate event),
3341 void *cbarg)
3342{
3343 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3344
3345 fcport->event_cbfn = cbfn;
3346 fcport->event_cbarg = cbarg;
3347}
3348
3349bfa_status_t
3350bfa_fcport_enable(struct bfa_s *bfa)
3351{
3352 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3353
3354 if (bfa_ioc_is_disabled(&bfa->ioc))
3355 return BFA_STATUS_IOC_DISABLED;
3356
3357 if (fcport->diag_busy)
3358 return BFA_STATUS_DIAG_BUSY;
3359
3360 bfa_sm_send_event(BFA_FCPORT_MOD(bfa), BFA_FCPORT_SM_ENABLE);
3361 return BFA_STATUS_OK;
3362}
3363
3364bfa_status_t
3365bfa_fcport_disable(struct bfa_s *bfa)
3366{
3367
3368 if (bfa_ioc_is_disabled(&bfa->ioc))
3369 return BFA_STATUS_IOC_DISABLED;
3370
3371 bfa_sm_send_event(BFA_FCPORT_MOD(bfa), BFA_FCPORT_SM_DISABLE);
3372 return BFA_STATUS_OK;
3373}
3374
Jing Huang5fbe25c2010-10-18 17:17:23 -07003375/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07003376 * Configure port speed.
3377 */
3378bfa_status_t
3379bfa_fcport_cfg_speed(struct bfa_s *bfa, enum bfa_port_speed speed)
3380{
3381 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3382
3383 bfa_trc(bfa, speed);
3384
3385 if (fcport->cfg.trunked == BFA_TRUE)
3386 return BFA_STATUS_TRUNK_ENABLED;
3387 if ((speed != BFA_PORT_SPEED_AUTO) && (speed > fcport->speed_sup)) {
3388 bfa_trc(bfa, fcport->speed_sup);
3389 return BFA_STATUS_UNSUPP_SPEED;
3390 }
3391
3392 fcport->cfg.speed = speed;
3393
3394 return BFA_STATUS_OK;
3395}
3396
Jing Huang5fbe25c2010-10-18 17:17:23 -07003397/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07003398 * Get current speed.
3399 */
3400enum bfa_port_speed
3401bfa_fcport_get_speed(struct bfa_s *bfa)
3402{
3403 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3404
3405 return fcport->speed;
3406}
3407
Jing Huang5fbe25c2010-10-18 17:17:23 -07003408/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07003409 * Configure port topology.
3410 */
3411bfa_status_t
3412bfa_fcport_cfg_topology(struct bfa_s *bfa, enum bfa_port_topology topology)
3413{
3414 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3415
3416 bfa_trc(bfa, topology);
3417 bfa_trc(bfa, fcport->cfg.topology);
3418
3419 switch (topology) {
3420 case BFA_PORT_TOPOLOGY_P2P:
3421 case BFA_PORT_TOPOLOGY_LOOP:
3422 case BFA_PORT_TOPOLOGY_AUTO:
3423 break;
3424
3425 default:
3426 return BFA_STATUS_EINVAL;
3427 }
3428
3429 fcport->cfg.topology = topology;
3430 return BFA_STATUS_OK;
3431}
3432
Jing Huang5fbe25c2010-10-18 17:17:23 -07003433/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07003434 * Get current topology.
3435 */
3436enum bfa_port_topology
3437bfa_fcport_get_topology(struct bfa_s *bfa)
3438{
3439 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3440
3441 return fcport->topology;
3442}
3443
3444bfa_status_t
3445bfa_fcport_cfg_hardalpa(struct bfa_s *bfa, u8 alpa)
3446{
3447 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3448
3449 bfa_trc(bfa, alpa);
3450 bfa_trc(bfa, fcport->cfg.cfg_hardalpa);
3451 bfa_trc(bfa, fcport->cfg.hardalpa);
3452
3453 fcport->cfg.cfg_hardalpa = BFA_TRUE;
3454 fcport->cfg.hardalpa = alpa;
3455
3456 return BFA_STATUS_OK;
3457}
3458
3459bfa_status_t
3460bfa_fcport_clr_hardalpa(struct bfa_s *bfa)
3461{
3462 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3463
3464 bfa_trc(bfa, fcport->cfg.cfg_hardalpa);
3465 bfa_trc(bfa, fcport->cfg.hardalpa);
3466
3467 fcport->cfg.cfg_hardalpa = BFA_FALSE;
3468 return BFA_STATUS_OK;
3469}
3470
3471bfa_boolean_t
3472bfa_fcport_get_hardalpa(struct bfa_s *bfa, u8 *alpa)
3473{
3474 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3475
3476 *alpa = fcport->cfg.hardalpa;
3477 return fcport->cfg.cfg_hardalpa;
3478}
3479
3480u8
3481bfa_fcport_get_myalpa(struct bfa_s *bfa)
3482{
3483 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3484
3485 return fcport->myalpa;
3486}
3487
3488bfa_status_t
3489bfa_fcport_cfg_maxfrsize(struct bfa_s *bfa, u16 maxfrsize)
3490{
3491 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3492
3493 bfa_trc(bfa, maxfrsize);
3494 bfa_trc(bfa, fcport->cfg.maxfrsize);
3495
3496 /* with in range */
3497 if ((maxfrsize > FC_MAX_PDUSZ) || (maxfrsize < FC_MIN_PDUSZ))
3498 return BFA_STATUS_INVLD_DFSZ;
3499
3500 /* power of 2, if not the max frame size of 2112 */
3501 if ((maxfrsize != FC_MAX_PDUSZ) && (maxfrsize & (maxfrsize - 1)))
3502 return BFA_STATUS_INVLD_DFSZ;
3503
3504 fcport->cfg.maxfrsize = maxfrsize;
3505 return BFA_STATUS_OK;
3506}
3507
3508u16
3509bfa_fcport_get_maxfrsize(struct bfa_s *bfa)
3510{
3511 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3512
3513 return fcport->cfg.maxfrsize;
3514}
3515
3516u8
3517bfa_fcport_get_rx_bbcredit(struct bfa_s *bfa)
3518{
3519 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3520
3521 return fcport->cfg.rx_bbcredit;
3522}
3523
3524void
3525bfa_fcport_set_tx_bbcredit(struct bfa_s *bfa, u16 tx_bbcredit)
3526{
3527 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3528
3529 fcport->cfg.tx_bbcredit = (u8)tx_bbcredit;
3530 bfa_fcport_send_txcredit(fcport);
3531}
3532
Jing Huang5fbe25c2010-10-18 17:17:23 -07003533/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07003534 * Get port attributes.
3535 */
3536
3537wwn_t
3538bfa_fcport_get_wwn(struct bfa_s *bfa, bfa_boolean_t node)
3539{
3540 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3541 if (node)
3542 return fcport->nwwn;
3543 else
3544 return fcport->pwwn;
3545}
3546
/*
 * Fill @attr with a snapshot of the port's configured and operational
 * attributes: WWNs, speed, topology, beacon, port log state and port
 * state (adjusted when the IOC itself is down or mismatched).
 */
void
bfa_fcport_get_attr(struct bfa_s *bfa, struct bfa_port_attr_s *attr)
{
	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);

	memset(attr, 0, sizeof(struct bfa_port_attr_s));

	attr->nwwn = fcport->nwwn;
	attr->pwwn = fcport->pwwn;

	/* factory-programmed WWNs from the IOC manufacturing attributes */
	attr->factorypwwn = bfa->ioc.attr->mfg_pwwn;
	attr->factorynwwn = bfa->ioc.attr->mfg_nwwn;

	memcpy(&attr->pport_cfg, &fcport->cfg,
		sizeof(struct bfa_port_cfg_s));
	/* speed attributes */
	attr->pport_cfg.speed = fcport->cfg.speed;
	attr->speed_supported = fcport->speed_sup;
	attr->speed = fcport->speed;
	attr->cos_supported = FC_CLASS_3;

	/* topology attributes */
	attr->pport_cfg.topology = fcport->cfg.topology;
	attr->topology = fcport->topology;
	attr->pport_cfg.trunked = fcport->cfg.trunked;

	/* beacon attributes */
	attr->beacon = fcport->beacon;
	attr->link_e2e_beacon = fcport->link_e2e_beacon;
	attr->plog_enabled = (bfa_boolean_t)fcport->bfa->plog->plog_enabled;
	attr->io_profile = bfa_fcpim_get_io_profile(fcport->bfa);

	attr->pport_cfg.path_tov = bfa_fcpim_path_tov_get(bfa);
	attr->pport_cfg.q_depth = bfa_fcpim_qdepth_get(bfa);
	/* IOC disabled / firmware mismatch overrides the port SM state */
	attr->port_state = bfa_sm_to_state(hal_port_sm_table, fcport->sm);
	if (bfa_ioc_is_disabled(&fcport->bfa->ioc))
		attr->port_state = BFA_PORT_ST_IOCDIS;
	else if (bfa_ioc_fw_mismatch(&fcport->bfa->ioc))
		attr->port_state = BFA_PORT_ST_FWMISMATCH;

	/* FCoE vlan */
	attr->fcoe_vlan = fcport->fcoe_vlan;
}
3590
3591#define BFA_FCPORT_STATS_TOV 1000
3592
Jing Huang5fbe25c2010-10-18 17:17:23 -07003593/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07003594 * Fetch port statistics (FCQoS or FCoE).
3595 */
bfa_status_t
bfa_fcport_get_stats(struct bfa_s *bfa, union bfa_fcport_stats_u *stats,
	bfa_cb_port_t cbfn, void *cbarg)
{
	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);

	/* only one stats request (get or clear) may be outstanding */
	if (fcport->stats_busy) {
		bfa_trc(bfa, fcport->stats_busy);
		return BFA_STATUS_DEVBUSY;
	}

	fcport->stats_busy = BFA_TRUE;
	fcport->stats_ret = stats;	/* caller's buffer; filled on completion */
	fcport->stats_cbfn = cbfn;
	fcport->stats_cbarg = cbarg;

	bfa_fcport_send_stats_get(fcport);

	/* guard against firmware never responding */
	bfa_timer_start(bfa, &fcport->timer, bfa_fcport_stats_get_timeout,
		fcport, BFA_FCPORT_STATS_TOV);
	return BFA_STATUS_OK;
}
3618
Jing Huang5fbe25c2010-10-18 17:17:23 -07003619/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07003620 * Reset port statistics (FCQoS or FCoE).
3621 */
bfa_status_t
bfa_fcport_clear_stats(struct bfa_s *bfa, bfa_cb_port_t cbfn, void *cbarg)
{
	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);

	/* only one stats request (get or clear) may be outstanding */
	if (fcport->stats_busy) {
		bfa_trc(bfa, fcport->stats_busy);
		return BFA_STATUS_DEVBUSY;
	}

	fcport->stats_busy = BFA_TRUE;
	fcport->stats_cbfn = cbfn;
	fcport->stats_cbarg = cbarg;

	bfa_fcport_send_stats_clear(fcport);

	/* guard against firmware never responding */
	bfa_timer_start(bfa, &fcport->timer, bfa_fcport_stats_clr_timeout,
		fcport, BFA_FCPORT_STATS_TOV);
	return BFA_STATUS_OK;
}
3642
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07003643
Jing Huang5fbe25c2010-10-18 17:17:23 -07003644/*
 * Check whether the port is in a disabled state.
3646 */
3647bfa_boolean_t
3648bfa_fcport_is_disabled(struct bfa_s *bfa)
3649{
3650 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3651
3652 return bfa_sm_to_state(hal_port_sm_table, fcport->sm) ==
3653 BFA_PORT_ST_DISABLED;
3654
3655}
3656
3657bfa_boolean_t
3658bfa_fcport_is_ratelim(struct bfa_s *bfa)
3659{
3660 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3661
3662 return fcport->cfg.ratelimit ? BFA_TRUE : BFA_FALSE;
3663
3664}
3665
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07003666
Jing Huang5fbe25c2010-10-18 17:17:23 -07003667/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07003668 * Get default minimum ratelim speed
3669 */
3670enum bfa_port_speed
3671bfa_fcport_get_ratelim_speed(struct bfa_s *bfa)
3672{
3673 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3674
3675 bfa_trc(bfa, fcport->cfg.trl_def_speed);
3676 return fcport->cfg.trl_def_speed;
3677
3678}
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07003679
3680bfa_boolean_t
3681bfa_fcport_is_linkup(struct bfa_s *bfa)
3682{
3683 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3684
3685 return (!fcport->cfg.trunked &&
3686 bfa_sm_cmp_state(fcport, bfa_fcport_sm_linkup)) ||
3687 (fcport->cfg.trunked &&
3688 fcport->trunk.attr.state == BFA_TRUNK_ONLINE);
3689}
3690
3691bfa_boolean_t
3692bfa_fcport_is_qos_enabled(struct bfa_s *bfa)
3693{
3694 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3695
3696 return fcport->cfg.qos_enabled;
3697}
3698
Jing Huang5fbe25c2010-10-18 17:17:23 -07003699/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07003700 * Rport State machine functions
3701 */
Jing Huang5fbe25c2010-10-18 17:17:23 -07003702/*
 * Beginning state, only rport create event expected.
3704 */
static void
bfa_rport_sm_uninit(struct bfa_rport_s *rp, enum bfa_rport_event event)
{
	bfa_trc(rp->bfa, rp->rport_tag);
	bfa_trc(rp->bfa, event);

	switch (event) {
	case BFA_RPORT_SM_CREATE:
		/* the only legal event here: move to the created state */
		bfa_stats(rp, sm_un_cr);
		bfa_sm_set_state(rp, bfa_rport_sm_created);
		break;

	default:
		bfa_stats(rp, sm_un_unexp);
		bfa_sm_fault(rp->bfa, event);
	}
}
3722
/*
 * Rport has been created; waiting to be brought online or deleted.
 */
static void
bfa_rport_sm_created(struct bfa_rport_s *rp, enum bfa_rport_event event)
{
	bfa_trc(rp->bfa, rp->rport_tag);
	bfa_trc(rp->bfa, event);

	switch (event) {
	case BFA_RPORT_SM_ONLINE:
		bfa_stats(rp, sm_cr_on);
		/* qfull state is used when the request queue has no room */
		if (bfa_rport_send_fwcreate(rp))
			bfa_sm_set_state(rp, bfa_rport_sm_fwcreate);
		else
			bfa_sm_set_state(rp, bfa_rport_sm_fwcreate_qfull);
		break;

	case BFA_RPORT_SM_DELETE:
		bfa_stats(rp, sm_cr_del);
		bfa_sm_set_state(rp, bfa_rport_sm_uninit);
		bfa_rport_free(rp);
		break;

	case BFA_RPORT_SM_HWFAIL:
		bfa_stats(rp, sm_cr_hwf);
		bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
		break;

	default:
		bfa_stats(rp, sm_cr_unexp);
		bfa_sm_fault(rp->bfa, event);
	}
}
3754
Jing Huang5fbe25c2010-10-18 17:17:23 -07003755/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07003756 * Waiting for rport create response from firmware.
3757 */
static void
bfa_rport_sm_fwcreate(struct bfa_rport_s *rp, enum bfa_rport_event event)
{
	bfa_trc(rp->bfa, rp->rport_tag);
	bfa_trc(rp->bfa, event);

	switch (event) {
	case BFA_RPORT_SM_FWRSP:
		/* firmware acknowledged the create: rport is online */
		bfa_stats(rp, sm_fwc_rsp);
		bfa_sm_set_state(rp, bfa_rport_sm_online);
		bfa_rport_online_cb(rp);
		break;

	case BFA_RPORT_SM_DELETE:
		/* defer the delete until the create response arrives */
		bfa_stats(rp, sm_fwc_del);
		bfa_sm_set_state(rp, bfa_rport_sm_delete_pending);
		break;

	case BFA_RPORT_SM_OFFLINE:
		/* defer the offline until the create response arrives */
		bfa_stats(rp, sm_fwc_off);
		bfa_sm_set_state(rp, bfa_rport_sm_offline_pending);
		break;

	case BFA_RPORT_SM_HWFAIL:
		bfa_stats(rp, sm_fwc_hwf);
		bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
		break;

	default:
		bfa_stats(rp, sm_fwc_unexp);
		bfa_sm_fault(rp->bfa, event);
	}
}
3791
Jing Huang5fbe25c2010-10-18 17:17:23 -07003792/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07003793 * Request queue is full, awaiting queue resume to send create request.
3794 */
static void
bfa_rport_sm_fwcreate_qfull(struct bfa_rport_s *rp, enum bfa_rport_event event)
{
	bfa_trc(rp->bfa, rp->rport_tag);
	bfa_trc(rp->bfa, event);

	switch (event) {
	case BFA_RPORT_SM_QRESUME:
		/* queue drained: retry the firmware create */
		bfa_sm_set_state(rp, bfa_rport_sm_fwcreate);
		bfa_rport_send_fwcreate(rp);
		break;

	case BFA_RPORT_SM_DELETE:
		bfa_stats(rp, sm_fwc_del);
		bfa_sm_set_state(rp, bfa_rport_sm_uninit);
		/* drop the queued wait before freeing the rport */
		bfa_reqq_wcancel(&rp->reqq_wait);
		bfa_rport_free(rp);
		break;

	case BFA_RPORT_SM_OFFLINE:
		bfa_stats(rp, sm_fwc_off);
		bfa_sm_set_state(rp, bfa_rport_sm_offline);
		bfa_reqq_wcancel(&rp->reqq_wait);
		bfa_rport_offline_cb(rp);
		break;

	case BFA_RPORT_SM_HWFAIL:
		bfa_stats(rp, sm_fwc_hwf);
		bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
		bfa_reqq_wcancel(&rp->reqq_wait);
		break;

	default:
		bfa_stats(rp, sm_fwc_unexp);
		bfa_sm_fault(rp->bfa, event);
	}
}
3832
Jing Huang5fbe25c2010-10-18 17:17:23 -07003833/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07003834 * Online state - normal parking state.
3835 */
static void
bfa_rport_sm_online(struct bfa_rport_s *rp, enum bfa_rport_event event)
{
	struct bfi_rport_qos_scn_s *qos_scn;

	bfa_trc(rp->bfa, rp->rport_tag);
	bfa_trc(rp->bfa, event);

	switch (event) {
	case BFA_RPORT_SM_OFFLINE:
		bfa_stats(rp, sm_on_off);
		/* qfull variants are used when the request queue is full */
		if (bfa_rport_send_fwdelete(rp))
			bfa_sm_set_state(rp, bfa_rport_sm_fwdelete);
		else
			bfa_sm_set_state(rp, bfa_rport_sm_fwdelete_qfull);
		break;

	case BFA_RPORT_SM_DELETE:
		bfa_stats(rp, sm_on_del);
		if (bfa_rport_send_fwdelete(rp))
			bfa_sm_set_state(rp, bfa_rport_sm_deleting);
		else
			bfa_sm_set_state(rp, bfa_rport_sm_deleting_qfull);
		break;

	case BFA_RPORT_SM_HWFAIL:
		bfa_stats(rp, sm_on_hwf);
		bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
		break;

	case BFA_RPORT_SM_SET_SPEED:
		bfa_rport_send_fwspeed(rp);
		break;

	case BFA_RPORT_SM_QOS_SCN:
		/* QoS state change notification from firmware */
		qos_scn = (struct bfi_rport_qos_scn_s *) rp->event_arg.fw_msg;
		rp->qos_attr = qos_scn->new_qos_attr;
		bfa_trc(rp->bfa, qos_scn->old_qos_attr.qos_flow_id);
		bfa_trc(rp->bfa, qos_scn->new_qos_attr.qos_flow_id);
		bfa_trc(rp->bfa, qos_scn->old_qos_attr.qos_priority);
		bfa_trc(rp->bfa, qos_scn->new_qos_attr.qos_priority);

		/* flow ids arrive big-endian; convert before comparing */
		qos_scn->old_qos_attr.qos_flow_id =
			be32_to_cpu(qos_scn->old_qos_attr.qos_flow_id);
		qos_scn->new_qos_attr.qos_flow_id =
			be32_to_cpu(qos_scn->new_qos_attr.qos_flow_id);

		/* notify the driver only on an actual change */
		if (qos_scn->old_qos_attr.qos_flow_id !=
			qos_scn->new_qos_attr.qos_flow_id)
			bfa_cb_rport_qos_scn_flowid(rp->rport_drv,
						    qos_scn->old_qos_attr,
						    qos_scn->new_qos_attr);
		if (qos_scn->old_qos_attr.qos_priority !=
			qos_scn->new_qos_attr.qos_priority)
			bfa_cb_rport_qos_scn_prio(rp->rport_drv,
						  qos_scn->old_qos_attr,
						  qos_scn->new_qos_attr);
		break;

	default:
		bfa_stats(rp, sm_on_unexp);
		bfa_sm_fault(rp->bfa, event);
	}
}
3900
Jing Huang5fbe25c2010-10-18 17:17:23 -07003901/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07003902 * Firmware rport is being deleted - awaiting f/w response.
3903 */
static void
bfa_rport_sm_fwdelete(struct bfa_rport_s *rp, enum bfa_rport_event event)
{
	bfa_trc(rp->bfa, rp->rport_tag);
	bfa_trc(rp->bfa, event);

	switch (event) {
	case BFA_RPORT_SM_FWRSP:
		/* firmware acknowledged the delete: rport goes offline */
		bfa_stats(rp, sm_fwd_rsp);
		bfa_sm_set_state(rp, bfa_rport_sm_offline);
		bfa_rport_offline_cb(rp);
		break;

	case BFA_RPORT_SM_DELETE:
		/* upgrade the pending offline to a full delete */
		bfa_stats(rp, sm_fwd_del);
		bfa_sm_set_state(rp, bfa_rport_sm_deleting);
		break;

	case BFA_RPORT_SM_HWFAIL:
		bfa_stats(rp, sm_fwd_hwf);
		bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
		bfa_rport_offline_cb(rp);
		break;

	default:
		bfa_stats(rp, sm_fwd_unexp);
		bfa_sm_fault(rp->bfa, event);
	}
}
3933
/*
 * Request queue is full, awaiting queue resume to send the firmware
 * delete for the offlining rport.
 */
static void
bfa_rport_sm_fwdelete_qfull(struct bfa_rport_s *rp, enum bfa_rport_event event)
{
	bfa_trc(rp->bfa, rp->rport_tag);
	bfa_trc(rp->bfa, event);

	switch (event) {
	case BFA_RPORT_SM_QRESUME:
		/* queue drained: retry the firmware delete */
		bfa_sm_set_state(rp, bfa_rport_sm_fwdelete);
		bfa_rport_send_fwdelete(rp);
		break;

	case BFA_RPORT_SM_DELETE:
		bfa_stats(rp, sm_fwd_del);
		bfa_sm_set_state(rp, bfa_rport_sm_deleting_qfull);
		break;

	case BFA_RPORT_SM_HWFAIL:
		bfa_stats(rp, sm_fwd_hwf);
		bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
		/* drop the queued wait before reporting offline */
		bfa_reqq_wcancel(&rp->reqq_wait);
		bfa_rport_offline_cb(rp);
		break;

	default:
		bfa_stats(rp, sm_fwd_unexp);
		bfa_sm_fault(rp->bfa, event);
	}
}
3963
Jing Huang5fbe25c2010-10-18 17:17:23 -07003964/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07003965 * Offline state.
3966 */
static void
bfa_rport_sm_offline(struct bfa_rport_s *rp, enum bfa_rport_event event)
{
	bfa_trc(rp->bfa, rp->rport_tag);
	bfa_trc(rp->bfa, event);

	switch (event) {
	case BFA_RPORT_SM_DELETE:
		bfa_stats(rp, sm_off_del);
		bfa_sm_set_state(rp, bfa_rport_sm_uninit);
		bfa_rport_free(rp);
		break;

	case BFA_RPORT_SM_ONLINE:
		bfa_stats(rp, sm_off_on);
		/* qfull state is used when the request queue has no room */
		if (bfa_rport_send_fwcreate(rp))
			bfa_sm_set_state(rp, bfa_rport_sm_fwcreate);
		else
			bfa_sm_set_state(rp, bfa_rport_sm_fwcreate_qfull);
		break;

	case BFA_RPORT_SM_HWFAIL:
		bfa_stats(rp, sm_off_hwf);
		bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
		break;

	default:
		bfa_stats(rp, sm_off_unexp);
		bfa_sm_fault(rp->bfa, event);
	}
}
3998
Jing Huang5fbe25c2010-10-18 17:17:23 -07003999/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07004000 * Rport is deleted, waiting for firmware response to delete.
4001 */
static void
bfa_rport_sm_deleting(struct bfa_rport_s *rp, enum bfa_rport_event event)
{
	bfa_trc(rp->bfa, rp->rport_tag);
	bfa_trc(rp->bfa, event);

	switch (event) {
	case BFA_RPORT_SM_FWRSP:
		/* firmware delete completed: release the rport */
		bfa_stats(rp, sm_del_fwrsp);
		bfa_sm_set_state(rp, bfa_rport_sm_uninit);
		bfa_rport_free(rp);
		break;

	case BFA_RPORT_SM_HWFAIL:
		/* IOC is gone; no response will come, free immediately */
		bfa_stats(rp, sm_del_hwf);
		bfa_sm_set_state(rp, bfa_rport_sm_uninit);
		bfa_rport_free(rp);
		break;

	default:
		bfa_sm_fault(rp->bfa, event);
	}
}
4025
/*
 * Rport is being deleted; waiting for request-queue space to send the
 * firmware delete.
 */
static void
bfa_rport_sm_deleting_qfull(struct bfa_rport_s *rp, enum bfa_rport_event event)
{
	bfa_trc(rp->bfa, rp->rport_tag);
	bfa_trc(rp->bfa, event);

	switch (event) {
	case BFA_RPORT_SM_QRESUME:
		/* queue drained: send the firmware delete now */
		bfa_stats(rp, sm_del_fwrsp);
		bfa_sm_set_state(rp, bfa_rport_sm_deleting);
		bfa_rport_send_fwdelete(rp);
		break;

	case BFA_RPORT_SM_HWFAIL:
		bfa_stats(rp, sm_del_hwf);
		bfa_sm_set_state(rp, bfa_rport_sm_uninit);
		/* drop the queued wait before freeing the rport */
		bfa_reqq_wcancel(&rp->reqq_wait);
		bfa_rport_free(rp);
		break;

	default:
		bfa_sm_fault(rp->bfa, event);
	}
}
4050
Jing Huang5fbe25c2010-10-18 17:17:23 -07004051/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07004052 * Waiting for rport create response from firmware. A delete is pending.
4053 */
static void
bfa_rport_sm_delete_pending(struct bfa_rport_s *rp,
				enum bfa_rport_event event)
{
	bfa_trc(rp->bfa, rp->rport_tag);
	bfa_trc(rp->bfa, event);

	switch (event) {
	case BFA_RPORT_SM_FWRSP:
		/* create finished: immediately issue the deferred delete */
		bfa_stats(rp, sm_delp_fwrsp);
		if (bfa_rport_send_fwdelete(rp))
			bfa_sm_set_state(rp, bfa_rport_sm_deleting);
		else
			bfa_sm_set_state(rp, bfa_rport_sm_deleting_qfull);
		break;

	case BFA_RPORT_SM_HWFAIL:
		bfa_stats(rp, sm_delp_hwf);
		bfa_sm_set_state(rp, bfa_rport_sm_uninit);
		bfa_rport_free(rp);
		break;

	default:
		bfa_stats(rp, sm_delp_unexp);
		bfa_sm_fault(rp->bfa, event);
	}
}
4081
Jing Huang5fbe25c2010-10-18 17:17:23 -07004082/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07004083 * Waiting for rport create response from firmware. Rport offline is pending.
4084 */
static void
bfa_rport_sm_offline_pending(struct bfa_rport_s *rp,
		enum bfa_rport_event event)
{
	bfa_trc(rp->bfa, rp->rport_tag);
	bfa_trc(rp->bfa, event);

	switch (event) {
	case BFA_RPORT_SM_FWRSP:
		/* create finished: immediately issue the deferred offline */
		bfa_stats(rp, sm_offp_fwrsp);
		if (bfa_rport_send_fwdelete(rp))
			bfa_sm_set_state(rp, bfa_rport_sm_fwdelete);
		else
			bfa_sm_set_state(rp, bfa_rport_sm_fwdelete_qfull);
		break;

	case BFA_RPORT_SM_DELETE:
		/* upgrade the pending offline to a pending delete */
		bfa_stats(rp, sm_offp_del);
		bfa_sm_set_state(rp, bfa_rport_sm_delete_pending);
		break;

	case BFA_RPORT_SM_HWFAIL:
		bfa_stats(rp, sm_offp_hwf);
		bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
		break;

	default:
		bfa_stats(rp, sm_offp_unexp);
		bfa_sm_fault(rp->bfa, event);
	}
}
4116
Jing Huang5fbe25c2010-10-18 17:17:23 -07004117/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07004118 * IOC h/w failed.
4119 */
static void
bfa_rport_sm_iocdisable(struct bfa_rport_s *rp, enum bfa_rport_event event)
{
	bfa_trc(rp->bfa, rp->rport_tag);
	bfa_trc(rp->bfa, event);

	switch (event) {
	case BFA_RPORT_SM_OFFLINE:
		/* Complete the offline notification to the driver. */
		bfa_stats(rp, sm_iocd_off);
		bfa_rport_offline_cb(rp);
		break;

	case BFA_RPORT_SM_DELETE:
		bfa_stats(rp, sm_iocd_del);
		bfa_sm_set_state(rp, bfa_rport_sm_uninit);
		bfa_rport_free(rp);
		break;

	case BFA_RPORT_SM_ONLINE:
		/*
		 * IOC came back; recreate the rport in firmware, waiting on
		 * the request queue (qfull state) if it is currently full.
		 */
		bfa_stats(rp, sm_iocd_on);
		if (bfa_rport_send_fwcreate(rp))
			bfa_sm_set_state(rp, bfa_rport_sm_fwcreate);
		else
			bfa_sm_set_state(rp, bfa_rport_sm_fwcreate_qfull);
		break;

	case BFA_RPORT_SM_HWFAIL:
		/* Already in the IOC-failed state; nothing more to do. */
		break;

	default:
		bfa_stats(rp, sm_iocd_unexp);
		bfa_sm_fault(rp->bfa, event);
	}
}
4154
4155
4156
Jing Huang5fbe25c2010-10-18 17:17:23 -07004157/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07004158 * bfa_rport_private BFA rport private functions
4159 */
4160
4161static void
4162__bfa_cb_rport_online(void *cbarg, bfa_boolean_t complete)
4163{
4164 struct bfa_rport_s *rp = cbarg;
4165
4166 if (complete)
4167 bfa_cb_rport_online(rp->rport_drv);
4168}
4169
4170static void
4171__bfa_cb_rport_offline(void *cbarg, bfa_boolean_t complete)
4172{
4173 struct bfa_rport_s *rp = cbarg;
4174
4175 if (complete)
4176 bfa_cb_rport_offline(rp->rport_drv);
4177}
4178
4179static void
4180bfa_rport_qresume(void *cbarg)
4181{
4182 struct bfa_rport_s *rp = cbarg;
4183
4184 bfa_sm_send_event(rp, BFA_RPORT_SM_QRESUME);
4185}
4186
4187static void
4188bfa_rport_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *km_len,
4189 u32 *dm_len)
4190{
4191 if (cfg->fwcfg.num_rports < BFA_RPORT_MIN)
4192 cfg->fwcfg.num_rports = BFA_RPORT_MIN;
4193
4194 *km_len += cfg->fwcfg.num_rports * sizeof(struct bfa_rport_s);
4195}
4196
/*
 * Module attach: carve the rport array out of pre-claimed kernel memory
 * and put every rport (except tag 0) on the free queue.
 */
static void
bfa_rport_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
		struct bfa_meminfo_s *meminfo, struct bfa_pcidev_s *pcidev)
{
	struct bfa_rport_mod_s *mod = BFA_RPORT_MOD(bfa);
	struct bfa_rport_s *rp;
	u16 i;

	INIT_LIST_HEAD(&mod->rp_free_q);
	INIT_LIST_HEAD(&mod->rp_active_q);

	rp = (struct bfa_rport_s *) bfa_meminfo_kva(meminfo);
	mod->rps_list = rp;
	mod->num_rports = cfg->fwcfg.num_rports;

	/* num_rports must be a non-zero power of two. */
	bfa_assert(mod->num_rports &&
		   !(mod->num_rports & (mod->num_rports - 1)));

	for (i = 0; i < mod->num_rports; i++, rp++) {
		memset(rp, 0, sizeof(struct bfa_rport_s));
		rp->bfa = bfa;
		rp->rport_tag = i;
		bfa_sm_set_state(rp, bfa_rport_sm_uninit);

		/*
		 * Tag 0 is kept off the free queue -- handle 0 is
		 * reserved/unused and is never allocated.
		 */
		if (i)
			list_add_tail(&rp->qe, &mod->rp_free_q);

		bfa_reqq_winit(&rp->reqq_wait, bfa_rport_qresume, rp);
	}

	/*
	 * consume memory: advance the kva cursor past the rport array
	 */
	bfa_meminfo_kva(meminfo) = (u8 *) rp;
}
4235
static void
bfa_rport_detach(struct bfa_s *bfa)
{
	/* Intentionally empty -- no detach-time work for the rport module. */
}
4240
static void
bfa_rport_start(struct bfa_s *bfa)
{
	/* Intentionally empty -- no start-time work for the rport module. */
}
4245
static void
bfa_rport_stop(struct bfa_s *bfa)
{
	/* Intentionally empty -- no stop-time work for the rport module. */
}
4250
4251static void
4252bfa_rport_iocdisable(struct bfa_s *bfa)
4253{
4254 struct bfa_rport_mod_s *mod = BFA_RPORT_MOD(bfa);
4255 struct bfa_rport_s *rport;
4256 struct list_head *qe, *qen;
4257
4258 list_for_each_safe(qe, qen, &mod->rp_active_q) {
4259 rport = (struct bfa_rport_s *) qe;
4260 bfa_sm_send_event(rport, BFA_RPORT_SM_HWFAIL);
4261 }
4262}
4263
4264static struct bfa_rport_s *
4265bfa_rport_alloc(struct bfa_rport_mod_s *mod)
4266{
4267 struct bfa_rport_s *rport;
4268
4269 bfa_q_deq(&mod->rp_free_q, &rport);
4270 if (rport)
4271 list_add_tail(&rport->qe, &mod->rp_active_q);
4272
4273 return rport;
4274}
4275
4276static void
4277bfa_rport_free(struct bfa_rport_s *rport)
4278{
4279 struct bfa_rport_mod_s *mod = BFA_RPORT_MOD(rport->bfa);
4280
4281 bfa_assert(bfa_q_is_on_q(&mod->rp_active_q, rport));
4282 list_del(&rport->qe);
4283 list_add_tail(&rport->qe, &mod->rp_free_q);
4284}
4285
/*
 * Post an rport create request to firmware. Returns BFA_TRUE if the
 * message was queued; BFA_FALSE when the request queue is full, in which
 * case a wait element is armed so qresume re-drives the state machine.
 */
static bfa_boolean_t
bfa_rport_send_fwcreate(struct bfa_rport_s *rp)
{
	struct bfi_rport_create_req_s *m;

	/*
	 * check for room in queue to send request now
	 */
	m = bfa_reqq_next(rp->bfa, BFA_REQQ_RPORT);
	if (!m) {
		bfa_reqq_wait(rp->bfa, BFA_REQQ_RPORT, &rp->reqq_wait);
		return BFA_FALSE;
	}

	/* Build the create request from the cached rport_info. */
	bfi_h2i_set(m->mh, BFI_MC_RPORT, BFI_RPORT_H2I_CREATE_REQ,
			bfa_lpuid(rp->bfa));
	m->bfa_handle = rp->rport_tag;
	/* max_frmsz goes out big-endian; other fields are sent as cached. */
	m->max_frmsz = cpu_to_be16(rp->rport_info.max_frmsz);
	m->pid = rp->rport_info.pid;
	m->lp_tag = rp->rport_info.lp_tag;
	m->local_pid = rp->rport_info.local_pid;
	m->fc_class = rp->rport_info.fc_class;
	m->vf_en = rp->rport_info.vf_en;
	m->vf_id = rp->rport_info.vf_id;
	m->cisc = rp->rport_info.cisc;

	/*
	 * queue I/O message to firmware
	 */
	bfa_reqq_produce(rp->bfa, BFA_REQQ_RPORT);
	return BFA_TRUE;
}
4318
/*
 * Post an rport delete request to firmware. Returns BFA_TRUE if the
 * message was queued; BFA_FALSE when the request queue is full, in which
 * case a wait element is armed to resume the state machine later.
 */
static bfa_boolean_t
bfa_rport_send_fwdelete(struct bfa_rport_s *rp)
{
	struct bfi_rport_delete_req_s *m;

	/*
	 * check for room in queue to send request now
	 */
	m = bfa_reqq_next(rp->bfa, BFA_REQQ_RPORT);
	if (!m) {
		bfa_reqq_wait(rp->bfa, BFA_REQQ_RPORT, &rp->reqq_wait);
		return BFA_FALSE;
	}

	bfi_h2i_set(m->mh, BFI_MC_RPORT, BFI_RPORT_H2I_DELETE_REQ,
			bfa_lpuid(rp->bfa));
	/* Firmware identifies the rport by the handle it returned at create. */
	m->fw_handle = rp->fw_handle;

	/*
	 * queue I/O message to firmware
	 */
	bfa_reqq_produce(rp->bfa, BFA_REQQ_RPORT);
	return BFA_TRUE;
}
4343
/*
 * Post a set-speed request to firmware. Unlike fwcreate/fwdelete, no
 * wait element is armed on queue-full -- the request just fails with
 * BFA_FALSE (presumably retried by the caller's state machine; confirm).
 */
static bfa_boolean_t
bfa_rport_send_fwspeed(struct bfa_rport_s *rp)
{
	struct bfa_rport_speed_req_s *m;

	/*
	 * check for room in queue to send request now
	 */
	m = bfa_reqq_next(rp->bfa, BFA_REQQ_RPORT);
	if (!m) {
		bfa_trc(rp->bfa, rp->rport_info.speed);
		return BFA_FALSE;
	}

	bfi_h2i_set(m->mh, BFI_MC_RPORT, BFI_RPORT_H2I_SET_SPEED_REQ,
			bfa_lpuid(rp->bfa));
	m->fw_handle = rp->fw_handle;
	m->speed = (u8)rp->rport_info.speed;

	/*
	 * queue I/O message to firmware
	 */
	bfa_reqq_produce(rp->bfa, BFA_REQQ_RPORT);
	return BFA_TRUE;
}
4369
4370
4371
Jing Huang5fbe25c2010-10-18 17:17:23 -07004372/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07004373 * bfa_rport_public
4374 */
4375
Jing Huang5fbe25c2010-10-18 17:17:23 -07004376/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07004377 * Rport interrupt processing.
4378 */
/*
 * Rport interrupt processing: dispatch firmware-to-host rport messages
 * to the owning rport's state machine.
 */
void
bfa_rport_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
{
	union bfi_rport_i2h_msg_u msg;
	struct bfa_rport_s *rp;

	bfa_trc(bfa, m->mhdr.msg_id);

	msg.msg = m;

	switch (m->mhdr.msg_id) {
	case BFI_RPORT_I2H_CREATE_RSP:
		/* Record the firmware handle and QoS attributes first. */
		rp = BFA_RPORT_FROM_TAG(bfa, msg.create_rsp->bfa_handle);
		rp->fw_handle = msg.create_rsp->fw_handle;
		rp->qos_attr = msg.create_rsp->qos_attr;
		bfa_assert(msg.create_rsp->status == BFA_STATUS_OK);
		bfa_sm_send_event(rp, BFA_RPORT_SM_FWRSP);
		break;

	case BFI_RPORT_I2H_DELETE_RSP:
		rp = BFA_RPORT_FROM_TAG(bfa, msg.delete_rsp->bfa_handle);
		bfa_assert(msg.delete_rsp->status == BFA_STATUS_OK);
		bfa_sm_send_event(rp, BFA_RPORT_SM_FWRSP);
		break;

	case BFI_RPORT_I2H_QOS_SCN:
		/* QoS state change: stash the raw message for the SM. */
		rp = BFA_RPORT_FROM_TAG(bfa, msg.qos_scn_evt->bfa_handle);
		rp->event_arg.fw_msg = msg.qos_scn_evt;
		bfa_sm_send_event(rp, BFA_RPORT_SM_QOS_SCN);
		break;

	default:
		/* Unknown message id from firmware -- trace and assert. */
		bfa_trc(bfa, m->mhdr.msg_id);
		bfa_assert(0);
	}
}
4415
4416
4417
Jing Huang5fbe25c2010-10-18 17:17:23 -07004418/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07004419 * bfa_rport_api
4420 */
4421
4422struct bfa_rport_s *
4423bfa_rport_create(struct bfa_s *bfa, void *rport_drv)
4424{
4425 struct bfa_rport_s *rp;
4426
4427 rp = bfa_rport_alloc(BFA_RPORT_MOD(bfa));
4428
4429 if (rp == NULL)
4430 return NULL;
4431
4432 rp->bfa = bfa;
4433 rp->rport_drv = rport_drv;
Maggie Zhangf7f73812010-12-09 19:08:43 -08004434 memset(&rp->stats, 0, sizeof(rp->stats));
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07004435
4436 bfa_assert(bfa_sm_cmp_state(rp, bfa_rport_sm_uninit));
4437 bfa_sm_send_event(rp, BFA_RPORT_SM_CREATE);
4438
4439 return rp;
4440}
4441
4442void
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07004443bfa_rport_online(struct bfa_rport_s *rport, struct bfa_rport_info_s *rport_info)
4444{
4445 bfa_assert(rport_info->max_frmsz != 0);
4446
Jing Huang5fbe25c2010-10-18 17:17:23 -07004447 /*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07004448 * Some JBODs are seen to be not setting PDU size correctly in PLOGI
4449 * responses. Default to minimum size.
4450 */
4451 if (rport_info->max_frmsz == 0) {
4452 bfa_trc(rport->bfa, rport->rport_tag);
4453 rport_info->max_frmsz = FC_MIN_PDUSZ;
4454 }
4455
Jing Huang6a18b162010-10-18 17:08:54 -07004456 rport->rport_info = *rport_info;
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07004457 bfa_sm_send_event(rport, BFA_RPORT_SM_ONLINE);
4458}
4459
/* Set the rport's operating speed and notify the state machine. */
void
bfa_rport_speed(struct bfa_rport_s *rport, enum bfa_port_speed speed)
{
	/* Caller must pass a resolved speed: non-zero and not AUTO. */
	bfa_assert(speed != 0);
	bfa_assert(speed != BFA_PORT_SPEED_AUTO);

	rport->rport_info.speed = speed;
	bfa_sm_send_event(rport, BFA_RPORT_SM_SET_SPEED);
}
4469
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07004470
Jing Huang5fbe25c2010-10-18 17:17:23 -07004471/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07004472 * SGPG related functions
4473 */
4474
Jing Huang5fbe25c2010-10-18 17:17:23 -07004475/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07004476 * Compute and return memory needed by FCP(im) module.
4477 */
4478static void
4479bfa_sgpg_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *km_len,
4480 u32 *dm_len)
4481{
4482 if (cfg->drvcfg.num_sgpgs < BFA_SGPG_MIN)
4483 cfg->drvcfg.num_sgpgs = BFA_SGPG_MIN;
4484
4485 *km_len += (cfg->drvcfg.num_sgpgs + 1) * sizeof(struct bfa_sgpg_s);
4486 *dm_len += (cfg->drvcfg.num_sgpgs + 1) * sizeof(struct bfi_sgpg_s);
4487}
4488
4489
/*
 * Module attach: align the SGPG DMA region, build the host-side SGPG
 * array, and link every page onto the free queue. The kernel-virtual,
 * DMA-virtual and DMA-physical cursors are advanced in lockstep.
 */
static void
bfa_sgpg_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
		struct bfa_meminfo_s *minfo, struct bfa_pcidev_s *pcidev)
{
	struct bfa_sgpg_mod_s *mod = BFA_SGPG_MOD(bfa);
	int i;
	struct bfa_sgpg_s *hsgpg;
	struct bfi_sgpg_s *sgpg;
	u64 align_len;

	/* Union lets the same 64-bit PA be viewed as a bfi address. */
	union {
		u64 pa;
		union bfi_addr_u addr;
	} sgpg_pa, sgpg_pa_tmp;

	INIT_LIST_HEAD(&mod->sgpg_q);
	INIT_LIST_HEAD(&mod->sgpg_wait_q);

	bfa_trc(bfa, cfg->drvcfg.num_sgpgs);

	mod->num_sgpgs = cfg->drvcfg.num_sgpgs;
	/*
	 * Round the DMA base up to an SGPG boundary and advance the kva
	 * and dma-virtual views by the same padding so all three stay
	 * in step.
	 */
	mod->sgpg_arr_pa = bfa_meminfo_dma_phys(minfo);
	align_len = (BFA_SGPG_ROUNDUP(mod->sgpg_arr_pa) - mod->sgpg_arr_pa);
	mod->sgpg_arr_pa += align_len;
	mod->hsgpg_arr = (struct bfa_sgpg_s *) (bfa_meminfo_kva(minfo) +
						align_len);
	mod->sgpg_arr = (struct bfi_sgpg_s *) (bfa_meminfo_dma_virt(minfo) +
						align_len);

	hsgpg = mod->hsgpg_arr;
	sgpg = mod->sgpg_arr;
	sgpg_pa.pa = mod->sgpg_arr_pa;
	mod->free_sgpgs = mod->num_sgpgs;

	/* Base must be SGPG-size aligned after the fixup above. */
	bfa_assert(!(sgpg_pa.pa & (sizeof(struct bfi_sgpg_s) - 1)));

	for (i = 0; i < mod->num_sgpgs; i++) {
		memset(hsgpg, 0, sizeof(*hsgpg));
		memset(sgpg, 0, sizeof(*sgpg));

		hsgpg->sgpg = sgpg;
		/*
		 * bfa_sgaddr_le presumably converts the PA to the SG
		 * address byte order expected by hardware -- confirm.
		 */
		sgpg_pa_tmp.pa = bfa_sgaddr_le(sgpg_pa.pa);
		hsgpg->sgpg_pa = sgpg_pa_tmp.addr;
		list_add_tail(&hsgpg->qe, &mod->sgpg_q);

		hsgpg++;
		sgpg++;
		sgpg_pa.pa += sizeof(struct bfi_sgpg_s);
	}

	/* Consume the claimed memory from all three meminfo cursors. */
	bfa_meminfo_kva(minfo) = (u8 *) hsgpg;
	bfa_meminfo_dma_virt(minfo) = (u8 *) sgpg;
	bfa_meminfo_dma_phys(minfo) = sgpg_pa.pa;
}
4544
static void
bfa_sgpg_detach(struct bfa_s *bfa)
{
	/* Intentionally empty -- no detach-time work for the SGPG module. */
}
4549
static void
bfa_sgpg_start(struct bfa_s *bfa)
{
	/* Intentionally empty -- no start-time work for the SGPG module. */
}
4554
static void
bfa_sgpg_stop(struct bfa_s *bfa)
{
	/* Intentionally empty -- no stop-time work for the SGPG module. */
}
4559
static void
bfa_sgpg_iocdisable(struct bfa_s *bfa)
{
	/* Intentionally empty -- SGPGs need no cleanup on IOC disable. */
}
4564
4565
4566
Jing Huang5fbe25c2010-10-18 17:17:23 -07004567/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07004568 * hal_sgpg_public BFA SGPG public functions
4569 */
4570
4571bfa_status_t
4572bfa_sgpg_malloc(struct bfa_s *bfa, struct list_head *sgpg_q, int nsgpgs)
4573{
4574 struct bfa_sgpg_mod_s *mod = BFA_SGPG_MOD(bfa);
4575 struct bfa_sgpg_s *hsgpg;
4576 int i;
4577
4578 bfa_trc_fp(bfa, nsgpgs);
4579
4580 if (mod->free_sgpgs < nsgpgs)
4581 return BFA_STATUS_ENOMEM;
4582
4583 for (i = 0; i < nsgpgs; i++) {
4584 bfa_q_deq(&mod->sgpg_q, &hsgpg);
4585 bfa_assert(hsgpg);
4586 list_add_tail(&hsgpg->qe, sgpg_q);
4587 }
4588
4589 mod->free_sgpgs -= nsgpgs;
4590 return BFA_STATUS_OK;
4591}
4592
/*
 * Return nsgpg SG pages to the pool, then service as many queued
 * waiters as the replenished pool allows (possibly partially).
 */
void
bfa_sgpg_mfree(struct bfa_s *bfa, struct list_head *sgpg_q, int nsgpg)
{
	struct bfa_sgpg_mod_s *mod = BFA_SGPG_MOD(bfa);
	struct bfa_sgpg_wqe_s *wqe;

	bfa_trc_fp(bfa, nsgpg);

	mod->free_sgpgs += nsgpg;
	bfa_assert(mod->free_sgpgs <= mod->num_sgpgs);

	/* Splice the caller's pages back onto the global free queue. */
	list_splice_tail_init(sgpg_q, &mod->sgpg_q);

	if (list_empty(&mod->sgpg_wait_q))
		return;

	/*
	 * satisfy as many waiting requests as possible
	 */
	do {
		wqe = bfa_q_first(&mod->sgpg_wait_q);
		/* Grant a partial batch when fewer pages remain than asked. */
		if (mod->free_sgpgs < wqe->nsgpg)
			nsgpg = mod->free_sgpgs;
		else
			nsgpg = wqe->nsgpg;
		bfa_sgpg_malloc(bfa, &wqe->sgpg_q, nsgpg);
		wqe->nsgpg -= nsgpg;
		if (wqe->nsgpg == 0) {
			/* Request fully satisfied: dequeue and notify. */
			list_del(&wqe->qe);
			wqe->cbfn(wqe->cbarg);
		}
	} while (mod->free_sgpgs && !list_empty(&mod->sgpg_wait_q));
}
4626
/*
 * Queue a wait element for nsgpg SG pages. Only valid when the pool
 * cannot satisfy the request now; any pages currently free are handed
 * to this waiter immediately and the remainder arrives via mfree.
 */
void
bfa_sgpg_wait(struct bfa_s *bfa, struct bfa_sgpg_wqe_s *wqe, int nsgpg)
{
	struct bfa_sgpg_mod_s *mod = BFA_SGPG_MOD(bfa);

	bfa_assert(nsgpg > 0);
	/* The caller must only wait when the request cannot be met. */
	bfa_assert(nsgpg > mod->free_sgpgs);

	wqe->nsgpg_total = wqe->nsgpg = nsgpg;

	/*
	 * allocate any left to this one first
	 */
	if (mod->free_sgpgs) {
		/*
		 * no one else is waiting for SGPG
		 */
		bfa_assert(list_empty(&mod->sgpg_wait_q));
		list_splice_tail_init(&mod->sgpg_q, &wqe->sgpg_q);
		wqe->nsgpg -= mod->free_sgpgs;
		mod->free_sgpgs = 0;
	}

	list_add_tail(&wqe->qe, &mod->sgpg_wait_q);
}
4652
4653void
4654bfa_sgpg_wcancel(struct bfa_s *bfa, struct bfa_sgpg_wqe_s *wqe)
4655{
4656 struct bfa_sgpg_mod_s *mod = BFA_SGPG_MOD(bfa);
4657
4658 bfa_assert(bfa_q_is_on_q(&mod->sgpg_wait_q, wqe));
4659 list_del(&wqe->qe);
4660
4661 if (wqe->nsgpg_total != wqe->nsgpg)
4662 bfa_sgpg_mfree(bfa, &wqe->sgpg_q,
4663 wqe->nsgpg_total - wqe->nsgpg);
4664}
4665
4666void
4667bfa_sgpg_winit(struct bfa_sgpg_wqe_s *wqe, void (*cbfn) (void *cbarg),
4668 void *cbarg)
4669{
4670 INIT_LIST_HEAD(&wqe->sgpg_q);
4671 wqe->cbfn = cbfn;
4672 wqe->cbarg = cbarg;
4673}
4674
Jing Huang5fbe25c2010-10-18 17:17:23 -07004675/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07004676 * UF related functions
4677 */
4678/*
4679 *****************************************************************************
4680 * Internal functions
4681 *****************************************************************************
4682 */
4683static void
4684__bfa_cb_uf_recv(void *cbarg, bfa_boolean_t complete)
4685{
4686 struct bfa_uf_s *uf = cbarg;
4687 struct bfa_uf_mod_s *ufm = BFA_UF_MOD(uf->bfa);
4688
4689 if (complete)
4690 ufm->ufrecv(ufm->cbarg, uf);
4691}
4692
/*
 * Carve the DMA region for the posted UF buffers, advance the DMA
 * cursors past it, and zero the whole area.
 */
static void
claim_uf_pbs(struct bfa_uf_mod_s *ufm, struct bfa_meminfo_s *mi)
{
	u32 uf_pb_tot_sz;

	ufm->uf_pbs_kva = (struct bfa_uf_buf_s *) bfa_meminfo_dma_virt(mi);
	ufm->uf_pbs_pa = bfa_meminfo_dma_phys(mi);
	/* Total size rounded up for DMA alignment of the next claimer. */
	uf_pb_tot_sz = BFA_ROUNDUP((sizeof(struct bfa_uf_buf_s) * ufm->num_ufs),
							BFA_DMA_ALIGN_SZ);

	bfa_meminfo_dma_virt(mi) += uf_pb_tot_sz;
	bfa_meminfo_dma_phys(mi) += uf_pb_tot_sz;

	memset((void *)ufm->uf_pbs_kva, 0, uf_pb_tot_sz);
}
4708
/*
 * Pre-build one buf-post firmware message per UF so that posting later
 * is a plain memcpy (see bfa_uf_post()).
 */
static void
claim_uf_post_msgs(struct bfa_uf_mod_s *ufm, struct bfa_meminfo_s *mi)
{
	struct bfi_uf_buf_post_s *uf_bp_msg;
	struct bfi_sge_s *sge;
	union bfi_addr_u sga_zero = { {0} };
	u16 i;
	u16 buf_len;

	ufm->uf_buf_posts = (struct bfi_uf_buf_post_s *) bfa_meminfo_kva(mi);
	uf_bp_msg = ufm->uf_buf_posts;

	for (i = 0, uf_bp_msg = ufm->uf_buf_posts; i < ufm->num_ufs;
	     i++, uf_bp_msg++) {
		memset(uf_bp_msg, 0, sizeof(struct bfi_uf_buf_post_s));

		uf_bp_msg->buf_tag = i;
		buf_len = sizeof(struct bfa_uf_buf_s);
		/* Wire format is big-endian. */
		uf_bp_msg->buf_len = cpu_to_be16(buf_len);
		bfi_h2i_set(uf_bp_msg->mh, BFI_MC_UF, BFI_UF_H2I_BUF_POST,
			    bfa_lpuid(ufm->bfa));

		/* SGE 0: the i-th posted data buffer, byte-swapped for hw. */
		sge = uf_bp_msg->sge;
		sge[0].sg_len = buf_len;
		sge[0].flags = BFI_SGE_DATA_LAST;
		bfa_dma_addr_set(sge[0].sga, ufm_pbs_pa(ufm, i));
		bfa_sge_to_be(sge);

		/*
		 * SGE 1: zero-address PGDLEN entry -- presumably a page
		 * descriptor terminator; confirm against BFI spec.
		 */
		sge[1].sg_len = buf_len;
		sge[1].flags = BFI_SGE_PGDLEN;
		sge[1].sga = sga_zero;
		bfa_sge_to_be(&sge[1]);
	}

	/*
	 * advance pointer beyond consumed memory
	 */
	bfa_meminfo_kva(mi) = (u8 *) uf_bp_msg;
}
4748
/*
 * Claim kernel memory for the UF descriptor array; each UF is bound to
 * the matching posted buffer (kva and dma address) and queued free.
 */
static void
claim_ufs(struct bfa_uf_mod_s *ufm, struct bfa_meminfo_s *mi)
{
	u16 i;
	struct bfa_uf_s *uf;

	/*
	 * Claim block of memory for UF list
	 */
	ufm->uf_list = (struct bfa_uf_s *) bfa_meminfo_kva(mi);

	/*
	 * Initialize UFs and queue it in UF free queue
	 */
	for (i = 0, uf = ufm->uf_list; i < ufm->num_ufs; i++, uf++) {
		memset(uf, 0, sizeof(struct bfa_uf_s));
		uf->bfa = ufm->bfa;
		uf->uf_tag = i;
		uf->pb_len = sizeof(struct bfa_uf_buf_s);
		/* The i-th UF owns the i-th posted buffer. */
		uf->buf_kva = (void *)&ufm->uf_pbs_kva[i];
		uf->buf_pa = ufm_pbs_pa(ufm, i);
		list_add_tail(&uf->qe, &ufm->uf_free_q);
	}

	/*
	 * advance memory pointer
	 */
	bfa_meminfo_kva(mi) = (u8 *) uf;
}
4778
static void
uf_mem_claim(struct bfa_uf_mod_s *ufm, struct bfa_meminfo_s *mi)
{
	/*
	 * Claim order matters: each helper advances the meminfo cursors,
	 * so the layout here must match the sizing in bfa_uf_meminfo().
	 */
	claim_uf_pbs(ufm, mi);
	claim_ufs(ufm, mi);
	claim_uf_post_msgs(ufm, mi);
}
4786
4787static void
4788bfa_uf_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *ndm_len, u32 *dm_len)
4789{
4790 u32 num_ufs = cfg->fwcfg.num_uf_bufs;
4791
4792 /*
4793 * dma-able memory for UF posted bufs
4794 */
4795 *dm_len += BFA_ROUNDUP((sizeof(struct bfa_uf_buf_s) * num_ufs),
4796 BFA_DMA_ALIGN_SZ);
4797
4798 /*
4799 * kernel Virtual memory for UFs and UF buf post msg copies
4800 */
4801 *ndm_len += sizeof(struct bfa_uf_s) * num_ufs;
4802 *ndm_len += sizeof(struct bfi_uf_buf_post_s) * num_ufs;
4803}
4804
/*
 * Module attach: reset module state, initialize the free/posted queues,
 * and claim all UF memory regions.
 */
static void
bfa_uf_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
		struct bfa_meminfo_s *meminfo, struct bfa_pcidev_s *pcidev)
{
	struct bfa_uf_mod_s *ufm = BFA_UF_MOD(bfa);

	memset(ufm, 0, sizeof(struct bfa_uf_mod_s));
	ufm->bfa = bfa;
	ufm->num_ufs = cfg->fwcfg.num_uf_bufs;
	INIT_LIST_HEAD(&ufm->uf_free_q);
	INIT_LIST_HEAD(&ufm->uf_posted_q);

	uf_mem_claim(ufm, meminfo);
}
4819
static void
bfa_uf_detach(struct bfa_s *bfa)
{
	/* Intentionally empty -- no detach-time work for the UF module. */
}
4824
4825static struct bfa_uf_s *
4826bfa_uf_get(struct bfa_uf_mod_s *uf_mod)
4827{
4828 struct bfa_uf_s *uf;
4829
4830 bfa_q_deq(&uf_mod->uf_free_q, &uf);
4831 return uf;
4832}
4833
/* Return a UF to the free queue. */
static void
bfa_uf_put(struct bfa_uf_mod_s *uf_mod, struct bfa_uf_s *uf)
{
	list_add_tail(&uf->qe, &uf_mod->uf_free_q);
}
4839
/*
 * Post a single UF buffer to firmware by copying its pre-built buf-post
 * message into the request queue. Fails (no wait element) when the
 * queue is full.
 *
 * NOTE(review): on BFA_STATUS_FAILED the uf was already dequeued by the
 * caller and is left on neither queue here -- verify the caller path
 * (bfa_uf_post_all) recovers it.
 */
static bfa_status_t
bfa_uf_post(struct bfa_uf_mod_s *ufm, struct bfa_uf_s *uf)
{
	struct bfi_uf_buf_post_s *uf_post_msg;

	uf_post_msg = bfa_reqq_next(ufm->bfa, BFA_REQQ_FCXP);
	if (!uf_post_msg)
		return BFA_STATUS_FAILED;

	/* The message was pre-built in claim_uf_post_msgs(). */
	memcpy(uf_post_msg, &ufm->uf_buf_posts[uf->uf_tag],
		      sizeof(struct bfi_uf_buf_post_s));
	bfa_reqq_produce(ufm->bfa, BFA_REQQ_FCXP);

	bfa_trc(ufm->bfa, uf->uf_tag);

	/* Track the buffer until firmware hands it back in uf_recv(). */
	list_add_tail(&uf->qe, &ufm->uf_posted_q);
	return BFA_STATUS_OK;
}
4858
4859static void
4860bfa_uf_post_all(struct bfa_uf_mod_s *uf_mod)
4861{
4862 struct bfa_uf_s *uf;
4863
4864 while ((uf = bfa_uf_get(uf_mod)) != NULL) {
4865 if (bfa_uf_post(uf_mod, uf) != BFA_STATUS_OK)
4866 break;
4867 }
4868}
4869
/*
 * Handle a frame-received notification: fix up lengths, log the frame,
 * and deliver the UF to the registered receive handler.
 */
static void
uf_recv(struct bfa_s *bfa, struct bfi_uf_frm_rcvd_s *m)
{
	struct bfa_uf_mod_s *ufm = BFA_UF_MOD(bfa);
	u16 uf_tag = m->buf_tag;
	struct bfa_uf_buf_s *uf_buf = &ufm->uf_pbs_kva[uf_tag];
	struct bfa_uf_s *uf = &ufm->uf_list[uf_tag];
	u8 *buf = &uf_buf->d[0];
	struct fchs_s *fchs;

	/* Firmware reports the lengths big-endian; convert in place. */
	m->frm_len = be16_to_cpu(m->frm_len);
	m->xfr_len = be16_to_cpu(m->xfr_len);

	fchs = (struct fchs_s *)uf_buf;

	list_del(&uf->qe);	/* dequeue from posted queue */

	uf->data_ptr = buf;
	uf->data_len = m->xfr_len;

	/* A valid frame carries at least a full FC header. */
	bfa_assert(uf->data_len >= sizeof(struct fchs_s));

	if (uf->data_len == sizeof(struct fchs_s)) {
		/* Header-only frame: log just the FC header. */
		bfa_plog_fchdr(bfa->plog, BFA_PL_MID_HAL_UF, BFA_PL_EID_RX,
				uf->data_len, (struct fchs_s *)buf);
	} else {
		/* Log the FC header plus the first payload word. */
		u32 pld_w0 = *((u32 *) (buf + sizeof(struct fchs_s)));
		bfa_plog_fchdr_and_pl(bfa->plog, BFA_PL_MID_HAL_UF,
					BFA_PL_EID_RX, uf->data_len,
					(struct fchs_s *)buf, pld_w0);
	}

	/*
	 * Deliver inline when bfa->fcs is set (presumably FCS runs in
	 * this context -- confirm); otherwise defer via callback queue.
	 */
	if (bfa->fcs)
		__bfa_cb_uf_recv(uf, BFA_TRUE);
	else
		bfa_cb_queue(bfa, &uf->hcb_qe, __bfa_cb_uf_recv, uf);
}
4907
static void
bfa_uf_stop(struct bfa_s *bfa)
{
	/* Intentionally empty -- no stop-time work for the UF module. */
}
4912
4913static void
4914bfa_uf_iocdisable(struct bfa_s *bfa)
4915{
4916 struct bfa_uf_mod_s *ufm = BFA_UF_MOD(bfa);
4917 struct bfa_uf_s *uf;
4918 struct list_head *qe, *qen;
4919
4920 list_for_each_safe(qe, qen, &ufm->uf_posted_q) {
4921 uf = (struct bfa_uf_s *) qe;
4922 list_del(&uf->qe);
4923 bfa_uf_put(ufm, uf);
4924 }
4925}
4926
/* Module start: hand all free UF buffers to firmware. */
static void
bfa_uf_start(struct bfa_s *bfa)
{
	bfa_uf_post_all(BFA_UF_MOD(bfa));
}
4932
Jing Huang5fbe25c2010-10-18 17:17:23 -07004933/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07004934 * hal_uf_api
4935 */
4936
Jing Huang5fbe25c2010-10-18 17:17:23 -07004937/*
 * Register handler for all unsolicited receive frames.
4939 *
4940 * @param[in] bfa BFA instance
4941 * @param[in] ufrecv receive handler function
4942 * @param[in] cbarg receive handler arg
4943 */
4944void
4945bfa_uf_recv_register(struct bfa_s *bfa, bfa_cb_uf_recv_t ufrecv, void *cbarg)
4946{
4947 struct bfa_uf_mod_s *ufm = BFA_UF_MOD(bfa);
4948
4949 ufm->ufrecv = ufrecv;
4950 ufm->cbarg = cbarg;
4951}
4952
Jing Huang5fbe25c2010-10-18 17:17:23 -07004953/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07004954 * Free an unsolicited frame back to BFA.
4955 *
4956 * @param[in] uf unsolicited frame to be freed
4957 *
4958 * @return None
4959 */
void
bfa_uf_free(struct bfa_uf_s *uf)
{
	/* Recycle the buffer, then immediately repost free buffers to fw. */
	bfa_uf_put(BFA_UF_MOD(uf->bfa), uf);
	bfa_uf_post_all(BFA_UF_MOD(uf->bfa));
}
4966
4967
4968
Jing Huang5fbe25c2010-10-18 17:17:23 -07004969/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07004970 * uf_pub BFA uf module public functions
4971 */
/* Dispatch firmware-to-host UF messages. */
void
bfa_uf_isr(struct bfa_s *bfa, struct bfi_msg_s *msg)
{
	bfa_trc(bfa, msg->mhdr.msg_id);

	switch (msg->mhdr.msg_id) {
	case BFI_UF_I2H_FRM_RCVD:
		/* An unsolicited frame arrived from the wire. */
		uf_recv(bfa, (struct bfi_uf_frm_rcvd_s *) msg);
		break;

	default:
		/* Unknown message id from firmware -- trace and assert. */
		bfa_trc(bfa, msg->mhdr.msg_id);
		bfa_assert(0);
	}
}
4988