Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001/*
2 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
Maggie Zhangf16a1752010-12-09 19:12:32 -080018#include "bfad_drv.h"
Krishna Gudipati7826f302011-07-20 16:59:13 -070019#include "bfad_im.h"
Krishna Gudipatia36c61f2010-09-15 11:50:55 -070020#include "bfa_plog.h"
21#include "bfa_cs.h"
22#include "bfa_modules.h"
Krishna Gudipatia36c61f2010-09-15 11:50:55 -070023
24BFA_TRC_FILE(HAL, FCXP);
Krishna Gudipati3d7fc662011-06-24 20:28:17 -070025BFA_MODULE(fcdiag);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -070026BFA_MODULE(fcxp);
27BFA_MODULE(sgpg);
28BFA_MODULE(lps);
29BFA_MODULE(fcport);
30BFA_MODULE(rport);
31BFA_MODULE(uf);
32
Jing Huang5fbe25c2010-10-18 17:17:23 -070033/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -070034 * LPS related definitions
35 */
36#define BFA_LPS_MIN_LPORTS (1)
37#define BFA_LPS_MAX_LPORTS (256)
38
39/*
40 * Maximum Vports supported per physical port or vf.
41 */
42#define BFA_LPS_MAX_VPORTS_SUPP_CB 255
43#define BFA_LPS_MAX_VPORTS_SUPP_CT 190
44
Krishna Gudipatia36c61f2010-09-15 11:50:55 -070045
Jing Huang5fbe25c2010-10-18 17:17:23 -070046/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -070047 * FC PORT related definitions
48 */
49/*
 50 * The port is considered disabled if the corresponding physical port or IOC
 51 * is disabled explicitly
52 */
53#define BFA_PORT_IS_DISABLED(bfa) \
54 ((bfa_fcport_is_disabled(bfa) == BFA_TRUE) || \
55 (bfa_ioc_is_disabled(&bfa->ioc) == BFA_TRUE))
56
Jing Huang5fbe25c2010-10-18 17:17:23 -070057/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -070058 * BFA port state machine events
59 */
60enum bfa_fcport_sm_event {
61 BFA_FCPORT_SM_START = 1, /* start port state machine */
62 BFA_FCPORT_SM_STOP = 2, /* stop port state machine */
63 BFA_FCPORT_SM_ENABLE = 3, /* enable port */
64 BFA_FCPORT_SM_DISABLE = 4, /* disable port state machine */
65 BFA_FCPORT_SM_FWRSP = 5, /* firmware enable/disable rsp */
66 BFA_FCPORT_SM_LINKUP = 6, /* firmware linkup event */
 67	BFA_FCPORT_SM_LINKDOWN	= 7,	/*  firmware linkdown event */
68 BFA_FCPORT_SM_QRESUME = 8, /* CQ space available */
69 BFA_FCPORT_SM_HWFAIL = 9, /* IOC h/w failure */
Krishna Gudipatie3535462012-09-21 17:26:07 -070070 BFA_FCPORT_SM_DPORTENABLE = 10, /* enable dport */
71 BFA_FCPORT_SM_DPORTDISABLE = 11,/* disable dport */
Krishna Gudipatia36c61f2010-09-15 11:50:55 -070072};
73
Jing Huang5fbe25c2010-10-18 17:17:23 -070074/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -070075 * BFA port link notification state machine events
76 */
77
78enum bfa_fcport_ln_sm_event {
79 BFA_FCPORT_LN_SM_LINKUP = 1, /* linkup event */
80 BFA_FCPORT_LN_SM_LINKDOWN = 2, /* linkdown event */
81 BFA_FCPORT_LN_SM_NOTIFICATION = 3 /* done notification */
82};
83
Jing Huang5fbe25c2010-10-18 17:17:23 -070084/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -070085 * RPORT related definitions
86 */
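/*
 * Note on the callback macros below: when the FCS layer is attached
 * ((__rp)->bfa->fcs is set) the rport online/offline callback is invoked
 * synchronously; otherwise the completion is deferred through the hcb
 * callback queue and delivered later via __bfa_cb_rport_online() /
 * __bfa_cb_rport_offline().
 */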
87#define bfa_rport_offline_cb(__rp) do { \
88 if ((__rp)->bfa->fcs) \
89 bfa_cb_rport_offline((__rp)->rport_drv); \
90 else { \
91 bfa_cb_queue((__rp)->bfa, &(__rp)->hcb_qe, \
92 __bfa_cb_rport_offline, (__rp)); \
93 } \
94} while (0)
95
96#define bfa_rport_online_cb(__rp) do { \
97 if ((__rp)->bfa->fcs) \
98 bfa_cb_rport_online((__rp)->rport_drv); \
99 else { \
100 bfa_cb_queue((__rp)->bfa, &(__rp)->hcb_qe, \
101 __bfa_cb_rport_online, (__rp)); \
102 } \
103} while (0)
104
Jing Huang5fbe25c2010-10-18 17:17:23 -0700105/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700106 * forward declarations FCXP related functions
107 */
108static void __bfa_fcxp_send_cbfn(void *cbarg, bfa_boolean_t complete);
109static void hal_fcxp_rx_plog(struct bfa_s *bfa, struct bfa_fcxp_s *fcxp,
110 struct bfi_fcxp_send_rsp_s *fcxp_rsp);
111static void hal_fcxp_tx_plog(struct bfa_s *bfa, u32 reqlen,
112 struct bfa_fcxp_s *fcxp, struct fchs_s *fchs);
113static void bfa_fcxp_qresume(void *cbarg);
114static void bfa_fcxp_queue(struct bfa_fcxp_s *fcxp,
115 struct bfi_fcxp_send_req_s *send_req);
116
Jing Huang5fbe25c2010-10-18 17:17:23 -0700117/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700118 * forward declarations for LPS functions
119 */
Krishna Gudipati45070252011-06-24 20:24:29 -0700120static void bfa_lps_meminfo(struct bfa_iocfc_cfg_s *cfg,
121 struct bfa_meminfo_s *minfo, struct bfa_s *bfa);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700122static void bfa_lps_attach(struct bfa_s *bfa, void *bfad,
123 struct bfa_iocfc_cfg_s *cfg,
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700124 struct bfa_pcidev_s *pcidev);
125static void bfa_lps_detach(struct bfa_s *bfa);
126static void bfa_lps_start(struct bfa_s *bfa);
127static void bfa_lps_stop(struct bfa_s *bfa);
128static void bfa_lps_iocdisable(struct bfa_s *bfa);
129static void bfa_lps_login_rsp(struct bfa_s *bfa,
130 struct bfi_lps_login_rsp_s *rsp);
Krishna Gudipati3fd45982011-06-24 20:24:08 -0700131static void bfa_lps_no_res(struct bfa_lps_s *first_lps, u8 count);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700132static void bfa_lps_logout_rsp(struct bfa_s *bfa,
133 struct bfi_lps_logout_rsp_s *rsp);
134static void bfa_lps_reqq_resume(void *lps_arg);
135static void bfa_lps_free(struct bfa_lps_s *lps);
136static void bfa_lps_send_login(struct bfa_lps_s *lps);
137static void bfa_lps_send_logout(struct bfa_lps_s *lps);
Krishna Gudipatib7044952010-12-13 16:17:42 -0800138static void bfa_lps_send_set_n2n_pid(struct bfa_lps_s *lps);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700139static void bfa_lps_login_comp(struct bfa_lps_s *lps);
140static void bfa_lps_logout_comp(struct bfa_lps_s *lps);
141static void bfa_lps_cvl_event(struct bfa_lps_s *lps);
142
Jing Huang5fbe25c2010-10-18 17:17:23 -0700143/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700144 * forward declaration for LPS state machine
145 */
146static void bfa_lps_sm_init(struct bfa_lps_s *lps, enum bfa_lps_event event);
147static void bfa_lps_sm_login(struct bfa_lps_s *lps, enum bfa_lps_event event);
148static void bfa_lps_sm_loginwait(struct bfa_lps_s *lps, enum bfa_lps_event
149 event);
150static void bfa_lps_sm_online(struct bfa_lps_s *lps, enum bfa_lps_event event);
Krishna Gudipatib7044952010-12-13 16:17:42 -0800151static void bfa_lps_sm_online_n2n_pid_wait(struct bfa_lps_s *lps,
152 enum bfa_lps_event event);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700153static void bfa_lps_sm_logout(struct bfa_lps_s *lps, enum bfa_lps_event event);
154static void bfa_lps_sm_logowait(struct bfa_lps_s *lps, enum bfa_lps_event
155 event);
156
Jing Huang5fbe25c2010-10-18 17:17:23 -0700157/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700158 * forward declaration for FC Port functions
159 */
160static bfa_boolean_t bfa_fcport_send_enable(struct bfa_fcport_s *fcport);
161static bfa_boolean_t bfa_fcport_send_disable(struct bfa_fcport_s *fcport);
162static void bfa_fcport_update_linkinfo(struct bfa_fcport_s *fcport);
163static void bfa_fcport_reset_linkinfo(struct bfa_fcport_s *fcport);
164static void bfa_fcport_set_wwns(struct bfa_fcport_s *fcport);
165static void __bfa_cb_fcport_event(void *cbarg, bfa_boolean_t complete);
166static void bfa_fcport_scn(struct bfa_fcport_s *fcport,
167 enum bfa_port_linkstate event, bfa_boolean_t trunk);
168static void bfa_fcport_queue_cb(struct bfa_fcport_ln_s *ln,
169 enum bfa_port_linkstate event);
170static void __bfa_cb_fcport_stats_clr(void *cbarg, bfa_boolean_t complete);
171static void bfa_fcport_stats_get_timeout(void *cbarg);
172static void bfa_fcport_stats_clr_timeout(void *cbarg);
173static void bfa_trunk_iocdisable(struct bfa_s *bfa);
174
Jing Huang5fbe25c2010-10-18 17:17:23 -0700175/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700176 * forward declaration for FC PORT state machine
177 */
178static void bfa_fcport_sm_uninit(struct bfa_fcport_s *fcport,
179 enum bfa_fcport_sm_event event);
180static void bfa_fcport_sm_enabling_qwait(struct bfa_fcport_s *fcport,
181 enum bfa_fcport_sm_event event);
182static void bfa_fcport_sm_enabling(struct bfa_fcport_s *fcport,
183 enum bfa_fcport_sm_event event);
184static void bfa_fcport_sm_linkdown(struct bfa_fcport_s *fcport,
185 enum bfa_fcport_sm_event event);
186static void bfa_fcport_sm_linkup(struct bfa_fcport_s *fcport,
187 enum bfa_fcport_sm_event event);
188static void bfa_fcport_sm_disabling(struct bfa_fcport_s *fcport,
189 enum bfa_fcport_sm_event event);
190static void bfa_fcport_sm_disabling_qwait(struct bfa_fcport_s *fcport,
191 enum bfa_fcport_sm_event event);
192static void bfa_fcport_sm_toggling_qwait(struct bfa_fcport_s *fcport,
193 enum bfa_fcport_sm_event event);
194static void bfa_fcport_sm_disabled(struct bfa_fcport_s *fcport,
195 enum bfa_fcport_sm_event event);
196static void bfa_fcport_sm_stopped(struct bfa_fcport_s *fcport,
197 enum bfa_fcport_sm_event event);
198static void bfa_fcport_sm_iocdown(struct bfa_fcport_s *fcport,
199 enum bfa_fcport_sm_event event);
200static void bfa_fcport_sm_iocfail(struct bfa_fcport_s *fcport,
201 enum bfa_fcport_sm_event event);
Krishna Gudipatie3535462012-09-21 17:26:07 -0700202static void bfa_fcport_sm_dport(struct bfa_fcport_s *fcport,
203 enum bfa_fcport_sm_event event);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700204
205static void bfa_fcport_ln_sm_dn(struct bfa_fcport_ln_s *ln,
206 enum bfa_fcport_ln_sm_event event);
207static void bfa_fcport_ln_sm_dn_nf(struct bfa_fcport_ln_s *ln,
208 enum bfa_fcport_ln_sm_event event);
209static void bfa_fcport_ln_sm_dn_up_nf(struct bfa_fcport_ln_s *ln,
210 enum bfa_fcport_ln_sm_event event);
211static void bfa_fcport_ln_sm_up(struct bfa_fcport_ln_s *ln,
212 enum bfa_fcport_ln_sm_event event);
213static void bfa_fcport_ln_sm_up_nf(struct bfa_fcport_ln_s *ln,
214 enum bfa_fcport_ln_sm_event event);
215static void bfa_fcport_ln_sm_up_dn_nf(struct bfa_fcport_ln_s *ln,
216 enum bfa_fcport_ln_sm_event event);
217static void bfa_fcport_ln_sm_up_dn_up_nf(struct bfa_fcport_ln_s *ln,
218 enum bfa_fcport_ln_sm_event event);
219
220static struct bfa_sm_table_s hal_port_sm_table[] = {
221 {BFA_SM(bfa_fcport_sm_uninit), BFA_PORT_ST_UNINIT},
222 {BFA_SM(bfa_fcport_sm_enabling_qwait), BFA_PORT_ST_ENABLING_QWAIT},
223 {BFA_SM(bfa_fcport_sm_enabling), BFA_PORT_ST_ENABLING},
224 {BFA_SM(bfa_fcport_sm_linkdown), BFA_PORT_ST_LINKDOWN},
225 {BFA_SM(bfa_fcport_sm_linkup), BFA_PORT_ST_LINKUP},
226 {BFA_SM(bfa_fcport_sm_disabling_qwait), BFA_PORT_ST_DISABLING_QWAIT},
227 {BFA_SM(bfa_fcport_sm_toggling_qwait), BFA_PORT_ST_TOGGLING_QWAIT},
228 {BFA_SM(bfa_fcport_sm_disabling), BFA_PORT_ST_DISABLING},
229 {BFA_SM(bfa_fcport_sm_disabled), BFA_PORT_ST_DISABLED},
230 {BFA_SM(bfa_fcport_sm_stopped), BFA_PORT_ST_STOPPED},
231 {BFA_SM(bfa_fcport_sm_iocdown), BFA_PORT_ST_IOCDOWN},
232 {BFA_SM(bfa_fcport_sm_iocfail), BFA_PORT_ST_IOCDOWN},
Krishna Gudipatie3535462012-09-21 17:26:07 -0700233 {BFA_SM(bfa_fcport_sm_dport), BFA_PORT_ST_DPORT},
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700234};
235
236
Jing Huang5fbe25c2010-10-18 17:17:23 -0700237/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700238 * forward declaration for RPORT related functions
239 */
240static struct bfa_rport_s *bfa_rport_alloc(struct bfa_rport_mod_s *rp_mod);
241static void bfa_rport_free(struct bfa_rport_s *rport);
242static bfa_boolean_t bfa_rport_send_fwcreate(struct bfa_rport_s *rp);
243static bfa_boolean_t bfa_rport_send_fwdelete(struct bfa_rport_s *rp);
244static bfa_boolean_t bfa_rport_send_fwspeed(struct bfa_rport_s *rp);
245static void __bfa_cb_rport_online(void *cbarg,
246 bfa_boolean_t complete);
247static void __bfa_cb_rport_offline(void *cbarg,
248 bfa_boolean_t complete);
249
Jing Huang5fbe25c2010-10-18 17:17:23 -0700250/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700251 * forward declaration for RPORT state machine
252 */
253static void bfa_rport_sm_uninit(struct bfa_rport_s *rp,
254 enum bfa_rport_event event);
255static void bfa_rport_sm_created(struct bfa_rport_s *rp,
256 enum bfa_rport_event event);
257static void bfa_rport_sm_fwcreate(struct bfa_rport_s *rp,
258 enum bfa_rport_event event);
259static void bfa_rport_sm_online(struct bfa_rport_s *rp,
260 enum bfa_rport_event event);
261static void bfa_rport_sm_fwdelete(struct bfa_rport_s *rp,
262 enum bfa_rport_event event);
263static void bfa_rport_sm_offline(struct bfa_rport_s *rp,
264 enum bfa_rport_event event);
265static void bfa_rport_sm_deleting(struct bfa_rport_s *rp,
266 enum bfa_rport_event event);
267static void bfa_rport_sm_offline_pending(struct bfa_rport_s *rp,
268 enum bfa_rport_event event);
269static void bfa_rport_sm_delete_pending(struct bfa_rport_s *rp,
270 enum bfa_rport_event event);
271static void bfa_rport_sm_iocdisable(struct bfa_rport_s *rp,
272 enum bfa_rport_event event);
273static void bfa_rport_sm_fwcreate_qfull(struct bfa_rport_s *rp,
274 enum bfa_rport_event event);
275static void bfa_rport_sm_fwdelete_qfull(struct bfa_rport_s *rp,
276 enum bfa_rport_event event);
277static void bfa_rport_sm_deleting_qfull(struct bfa_rport_s *rp,
278 enum bfa_rport_event event);
279
Jing Huang5fbe25c2010-10-18 17:17:23 -0700280/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700281 * PLOG related definitions
282 */
283static int
284plkd_validate_logrec(struct bfa_plog_rec_s *pl_rec)
285{
286 if ((pl_rec->log_type != BFA_PL_LOG_TYPE_INT) &&
287 (pl_rec->log_type != BFA_PL_LOG_TYPE_STRING))
288 return 1;
289
290 if ((pl_rec->log_type != BFA_PL_LOG_TYPE_INT) &&
291 (pl_rec->log_num_ints > BFA_PL_INT_LOG_SZ))
292 return 1;
293
294 return 0;
295}
296
Maggie Zhangf16a1752010-12-09 19:12:32 -0800297static u64
298bfa_get_log_time(void)
299{
300 u64 system_time = 0;
301 struct timeval tv;
302 do_gettimeofday(&tv);
303
304 /* We are interested in seconds only. */
305 system_time = tv.tv_sec;
306 return system_time;
307}
308
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700309static void
310bfa_plog_add(struct bfa_plog_s *plog, struct bfa_plog_rec_s *pl_rec)
311{
312 u16 tail;
313 struct bfa_plog_rec_s *pl_recp;
314
315 if (plog->plog_enabled == 0)
316 return;
317
318 if (plkd_validate_logrec(pl_rec)) {
Jing Huangd4b671c2010-12-26 21:46:35 -0800319 WARN_ON(1);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700320 return;
321 }
322
323 tail = plog->tail;
324
325 pl_recp = &(plog->plog_recs[tail]);
326
Jing Huang6a18b162010-10-18 17:08:54 -0700327 memcpy(pl_recp, pl_rec, sizeof(struct bfa_plog_rec_s));
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700328
Maggie Zhangf16a1752010-12-09 19:12:32 -0800329 pl_recp->tv = bfa_get_log_time();
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700330 BFA_PL_LOG_REC_INCR(plog->tail);
331
332 if (plog->head == plog->tail)
333 BFA_PL_LOG_REC_INCR(plog->head);
334}
335
336void
337bfa_plog_init(struct bfa_plog_s *plog)
338{
Jing Huang6a18b162010-10-18 17:08:54 -0700339 memset((char *)plog, 0, sizeof(struct bfa_plog_s));
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700340
Jing Huang6a18b162010-10-18 17:08:54 -0700341 memcpy(plog->plog_sig, BFA_PL_SIG_STR, BFA_PL_SIG_LEN);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700342 plog->head = plog->tail = 0;
343 plog->plog_enabled = 1;
344}
345
346void
347bfa_plog_str(struct bfa_plog_s *plog, enum bfa_plog_mid mid,
348 enum bfa_plog_eid event,
349 u16 misc, char *log_str)
350{
351 struct bfa_plog_rec_s lp;
352
353 if (plog->plog_enabled) {
Jing Huang6a18b162010-10-18 17:08:54 -0700354 memset(&lp, 0, sizeof(struct bfa_plog_rec_s));
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700355 lp.mid = mid;
356 lp.eid = event;
357 lp.log_type = BFA_PL_LOG_TYPE_STRING;
358 lp.misc = misc;
359 strncpy(lp.log_entry.string_log, log_str,
360 BFA_PL_STRING_LOG_SZ - 1);
361 lp.log_entry.string_log[BFA_PL_STRING_LOG_SZ - 1] = '\0';
362 bfa_plog_add(plog, &lp);
363 }
364}
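/*
 * Illustrative usage (a sketch mirroring the call sites in the LPS state
 * machine below): callers log a fixed-size string record against a
 * module id and an event id, e.g.
 *
 *	bfa_plog_str(bfa->plog, BFA_PL_MID_LPS, BFA_PL_EID_LOGIN, 0,
 *		     "FLOGI Request");
 *
 * The string is truncated to BFA_PL_STRING_LOG_SZ - 1 characters and
 * bfa_plog_add() drops (and WARN_ONs) records that fail validation.
 */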
365
366void
367bfa_plog_intarr(struct bfa_plog_s *plog, enum bfa_plog_mid mid,
368 enum bfa_plog_eid event,
369 u16 misc, u32 *intarr, u32 num_ints)
370{
371 struct bfa_plog_rec_s lp;
372 u32 i;
373
374 if (num_ints > BFA_PL_INT_LOG_SZ)
375 num_ints = BFA_PL_INT_LOG_SZ;
376
377 if (plog->plog_enabled) {
Jing Huang6a18b162010-10-18 17:08:54 -0700378 memset(&lp, 0, sizeof(struct bfa_plog_rec_s));
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700379 lp.mid = mid;
380 lp.eid = event;
381 lp.log_type = BFA_PL_LOG_TYPE_INT;
382 lp.misc = misc;
383
384 for (i = 0; i < num_ints; i++)
Jing Huang6a18b162010-10-18 17:08:54 -0700385 lp.log_entry.int_log[i] = intarr[i];
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700386
387 lp.log_num_ints = (u8) num_ints;
388
389 bfa_plog_add(plog, &lp);
390 }
391}
392
393void
394bfa_plog_fchdr(struct bfa_plog_s *plog, enum bfa_plog_mid mid,
395 enum bfa_plog_eid event,
396 u16 misc, struct fchs_s *fchdr)
397{
398 struct bfa_plog_rec_s lp;
399 u32 *tmp_int = (u32 *) fchdr;
400 u32 ints[BFA_PL_INT_LOG_SZ];
401
402 if (plog->plog_enabled) {
Jing Huang6a18b162010-10-18 17:08:54 -0700403 memset(&lp, 0, sizeof(struct bfa_plog_rec_s));
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700404
405 ints[0] = tmp_int[0];
406 ints[1] = tmp_int[1];
407 ints[2] = tmp_int[4];
408
409 bfa_plog_intarr(plog, mid, event, misc, ints, 3);
410 }
411}
412
413void
414bfa_plog_fchdr_and_pl(struct bfa_plog_s *plog, enum bfa_plog_mid mid,
415 enum bfa_plog_eid event, u16 misc, struct fchs_s *fchdr,
416 u32 pld_w0)
417{
418 struct bfa_plog_rec_s lp;
419 u32 *tmp_int = (u32 *) fchdr;
420 u32 ints[BFA_PL_INT_LOG_SZ];
421
422 if (plog->plog_enabled) {
Jing Huang6a18b162010-10-18 17:08:54 -0700423 memset(&lp, 0, sizeof(struct bfa_plog_rec_s));
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700424
425 ints[0] = tmp_int[0];
426 ints[1] = tmp_int[1];
427 ints[2] = tmp_int[4];
428 ints[3] = pld_w0;
429
430 bfa_plog_intarr(plog, mid, event, misc, ints, 4);
431 }
432}
433
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700434
Jing Huang5fbe25c2010-10-18 17:17:23 -0700435/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700436 * fcxp_pvt BFA FCXP private functions
437 */
438
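/*
 * Carve the bfa_fcxp_s array out of the module KVA area and seed the
 * free lists: the first half of the pool becomes request FCXPs
 * (fcxp_req_free_q, req_rsp == BFA_TRUE), the second half response
 * FCXPs (fcxp_rsp_free_q). With num_fcxps == 64, for example, the
 * split is 32/32.
 */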
439static void
Krishna Gudipati45070252011-06-24 20:24:29 -0700440claim_fcxps_mem(struct bfa_fcxp_mod_s *mod)
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700441{
442 u16 i;
443 struct bfa_fcxp_s *fcxp;
444
Krishna Gudipati45070252011-06-24 20:24:29 -0700445 fcxp = (struct bfa_fcxp_s *) bfa_mem_kva_curp(mod);
Jing Huang6a18b162010-10-18 17:08:54 -0700446 memset(fcxp, 0, sizeof(struct bfa_fcxp_s) * mod->num_fcxps);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700447
Krishna Gudipatic3f1b122012-08-22 19:51:08 -0700448 INIT_LIST_HEAD(&mod->fcxp_req_free_q);
449 INIT_LIST_HEAD(&mod->fcxp_rsp_free_q);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700450 INIT_LIST_HEAD(&mod->fcxp_active_q);
Krishna Gudipatic3f1b122012-08-22 19:51:08 -0700451 INIT_LIST_HEAD(&mod->fcxp_req_unused_q);
452 INIT_LIST_HEAD(&mod->fcxp_rsp_unused_q);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700453
454 mod->fcxp_list = fcxp;
455
456 for (i = 0; i < mod->num_fcxps; i++) {
457 fcxp->fcxp_mod = mod;
458 fcxp->fcxp_tag = i;
459
Krishna Gudipatic3f1b122012-08-22 19:51:08 -0700460 if (i < (mod->num_fcxps / 2)) {
461 list_add_tail(&fcxp->qe, &mod->fcxp_req_free_q);
462 fcxp->req_rsp = BFA_TRUE;
463 } else {
464 list_add_tail(&fcxp->qe, &mod->fcxp_rsp_free_q);
465 fcxp->req_rsp = BFA_FALSE;
466 }
467
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700468 bfa_reqq_winit(&fcxp->reqq_wqe, bfa_fcxp_qresume, fcxp);
469 fcxp->reqq_waiting = BFA_FALSE;
470
471 fcxp = fcxp + 1;
472 }
473
Krishna Gudipati45070252011-06-24 20:24:29 -0700474 bfa_mem_kva_curp(mod) = (void *)fcxp;
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700475}
476
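/*
 * Report FCXP memory requirements: every FCXP needs a request plus a
 * response payload buffer (2 * BFA_FCXP_MAX_IBUF_SZ in min_cfg mode,
 * otherwise BFA_FCXP_MAX_IBUF_SZ + BFA_FCXP_MAX_LBUF_SZ) spread over as
 * many DMA segments as needed, plus one struct bfa_fcxp_s of KVA each.
 */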
477static void
Krishna Gudipati45070252011-06-24 20:24:29 -0700478bfa_fcxp_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *minfo,
479 struct bfa_s *bfa)
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700480{
Krishna Gudipati45070252011-06-24 20:24:29 -0700481 struct bfa_fcxp_mod_s *fcxp_mod = BFA_FCXP_MOD(bfa);
482 struct bfa_mem_kva_s *fcxp_kva = BFA_MEM_FCXP_KVA(bfa);
483 struct bfa_mem_dma_s *seg_ptr;
484 u16 nsegs, idx, per_seg_fcxp;
485 u16 num_fcxps = cfg->fwcfg.num_fcxp_reqs;
486 u32 per_fcxp_sz;
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700487
Krishna Gudipati45070252011-06-24 20:24:29 -0700488 if (num_fcxps == 0)
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700489 return;
490
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700491 if (cfg->drvcfg.min_cfg)
Krishna Gudipati45070252011-06-24 20:24:29 -0700492 per_fcxp_sz = 2 * BFA_FCXP_MAX_IBUF_SZ;
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700493 else
Krishna Gudipati45070252011-06-24 20:24:29 -0700494 per_fcxp_sz = BFA_FCXP_MAX_IBUF_SZ + BFA_FCXP_MAX_LBUF_SZ;
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700495
Krishna Gudipati45070252011-06-24 20:24:29 -0700496 /* dma memory */
497 nsegs = BFI_MEM_DMA_NSEGS(num_fcxps, per_fcxp_sz);
498 per_seg_fcxp = BFI_MEM_NREQS_SEG(per_fcxp_sz);
499
500 bfa_mem_dma_seg_iter(fcxp_mod, seg_ptr, nsegs, idx) {
501 if (num_fcxps >= per_seg_fcxp) {
502 num_fcxps -= per_seg_fcxp;
503 bfa_mem_dma_setup(minfo, seg_ptr,
504 per_seg_fcxp * per_fcxp_sz);
505 } else
506 bfa_mem_dma_setup(minfo, seg_ptr,
507 num_fcxps * per_fcxp_sz);
508 }
509
510 /* kva memory */
511 bfa_mem_kva_setup(minfo, fcxp_kva,
512 cfg->fwcfg.num_fcxp_reqs * sizeof(struct bfa_fcxp_s));
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700513}
514
515static void
516bfa_fcxp_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
Krishna Gudipati45070252011-06-24 20:24:29 -0700517 struct bfa_pcidev_s *pcidev)
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700518{
519 struct bfa_fcxp_mod_s *mod = BFA_FCXP_MOD(bfa);
520
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700521 mod->bfa = bfa;
522 mod->num_fcxps = cfg->fwcfg.num_fcxp_reqs;
523
Jing Huang5fbe25c2010-10-18 17:17:23 -0700524 /*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700525 * Initialize FCXP request and response payload sizes.
526 */
527 mod->req_pld_sz = mod->rsp_pld_sz = BFA_FCXP_MAX_IBUF_SZ;
528 if (!cfg->drvcfg.min_cfg)
529 mod->rsp_pld_sz = BFA_FCXP_MAX_LBUF_SZ;
530
Krishna Gudipatic3f1b122012-08-22 19:51:08 -0700531 INIT_LIST_HEAD(&mod->req_wait_q);
532 INIT_LIST_HEAD(&mod->rsp_wait_q);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700533
Krishna Gudipati45070252011-06-24 20:24:29 -0700534 claim_fcxps_mem(mod);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700535}
536
537static void
538bfa_fcxp_detach(struct bfa_s *bfa)
539{
540}
541
542static void
543bfa_fcxp_start(struct bfa_s *bfa)
544{
545}
546
547static void
548bfa_fcxp_stop(struct bfa_s *bfa)
549{
550}
551
552static void
553bfa_fcxp_iocdisable(struct bfa_s *bfa)
554{
555 struct bfa_fcxp_mod_s *mod = BFA_FCXP_MOD(bfa);
556 struct bfa_fcxp_s *fcxp;
557 struct list_head *qe, *qen;
558
Krishna Gudipati3fd45982011-06-24 20:24:08 -0700559 /* Enqueue unused fcxp resources to free_q */
Krishna Gudipatic3f1b122012-08-22 19:51:08 -0700560 list_splice_tail_init(&mod->fcxp_req_unused_q, &mod->fcxp_req_free_q);
561 list_splice_tail_init(&mod->fcxp_rsp_unused_q, &mod->fcxp_rsp_free_q);
Krishna Gudipati3fd45982011-06-24 20:24:08 -0700562
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700563 list_for_each_safe(qe, qen, &mod->fcxp_active_q) {
564 fcxp = (struct bfa_fcxp_s *) qe;
565 if (fcxp->caller == NULL) {
566 fcxp->send_cbfn(fcxp->caller, fcxp, fcxp->send_cbarg,
567 BFA_STATUS_IOC_FAILURE, 0, 0, NULL);
568 bfa_fcxp_free(fcxp);
569 } else {
570 fcxp->rsp_status = BFA_STATUS_IOC_FAILURE;
571 bfa_cb_queue(bfa, &fcxp->hcb_qe,
572 __bfa_fcxp_send_cbfn, fcxp);
573 }
574 }
575}
576
577static struct bfa_fcxp_s *
Krishna Gudipatic3f1b122012-08-22 19:51:08 -0700578bfa_fcxp_get(struct bfa_fcxp_mod_s *fm, bfa_boolean_t req)
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700579{
580 struct bfa_fcxp_s *fcxp;
581
Krishna Gudipatic3f1b122012-08-22 19:51:08 -0700582 if (req)
583 bfa_q_deq(&fm->fcxp_req_free_q, &fcxp);
584 else
585 bfa_q_deq(&fm->fcxp_rsp_free_q, &fcxp);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700586
587 if (fcxp)
588 list_add_tail(&fcxp->qe, &fm->fcxp_active_q);
589
590 return fcxp;
591}
592
593static void
594bfa_fcxp_init_reqrsp(struct bfa_fcxp_s *fcxp,
595 struct bfa_s *bfa,
596 u8 *use_ibuf,
597 u32 *nr_sgles,
598 bfa_fcxp_get_sgaddr_t *r_sga_cbfn,
599 bfa_fcxp_get_sglen_t *r_sglen_cbfn,
600 struct list_head *r_sgpg_q,
601 int n_sgles,
602 bfa_fcxp_get_sgaddr_t sga_cbfn,
603 bfa_fcxp_get_sglen_t sglen_cbfn)
604{
605
Jing Huangd4b671c2010-12-26 21:46:35 -0800606 WARN_ON(bfa == NULL);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700607
608 bfa_trc(bfa, fcxp->fcxp_tag);
609
610 if (n_sgles == 0) {
611 *use_ibuf = 1;
612 } else {
Jing Huangd4b671c2010-12-26 21:46:35 -0800613 WARN_ON(*sga_cbfn == NULL);
614 WARN_ON(*sglen_cbfn == NULL);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700615
616 *use_ibuf = 0;
617 *r_sga_cbfn = sga_cbfn;
618 *r_sglen_cbfn = sglen_cbfn;
619
620 *nr_sgles = n_sgles;
621
622 /*
623 * alloc required sgpgs
624 */
625 if (n_sgles > BFI_SGE_INLINE)
Jing Huangd4b671c2010-12-26 21:46:35 -0800626 WARN_ON(1);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700627 }
628
629}
630
631static void
632bfa_fcxp_init(struct bfa_fcxp_s *fcxp,
633 void *caller, struct bfa_s *bfa, int nreq_sgles,
634 int nrsp_sgles, bfa_fcxp_get_sgaddr_t req_sga_cbfn,
635 bfa_fcxp_get_sglen_t req_sglen_cbfn,
636 bfa_fcxp_get_sgaddr_t rsp_sga_cbfn,
637 bfa_fcxp_get_sglen_t rsp_sglen_cbfn)
638{
639
Jing Huangd4b671c2010-12-26 21:46:35 -0800640 WARN_ON(bfa == NULL);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700641
642 bfa_trc(bfa, fcxp->fcxp_tag);
643
644 fcxp->caller = caller;
645
646 bfa_fcxp_init_reqrsp(fcxp, bfa,
647 &fcxp->use_ireqbuf, &fcxp->nreq_sgles, &fcxp->req_sga_cbfn,
648 &fcxp->req_sglen_cbfn, &fcxp->req_sgpg_q,
649 nreq_sgles, req_sga_cbfn, req_sglen_cbfn);
650
651 bfa_fcxp_init_reqrsp(fcxp, bfa,
652 &fcxp->use_irspbuf, &fcxp->nrsp_sgles, &fcxp->rsp_sga_cbfn,
653 &fcxp->rsp_sglen_cbfn, &fcxp->rsp_sgpg_q,
654 nrsp_sgles, rsp_sga_cbfn, rsp_sglen_cbfn);
655
656}
657
658static void
659bfa_fcxp_put(struct bfa_fcxp_s *fcxp)
660{
661 struct bfa_fcxp_mod_s *mod = fcxp->fcxp_mod;
662 struct bfa_fcxp_wqe_s *wqe;
663
Krishna Gudipatic3f1b122012-08-22 19:51:08 -0700664 if (fcxp->req_rsp)
665 bfa_q_deq(&mod->req_wait_q, &wqe);
666 else
667 bfa_q_deq(&mod->rsp_wait_q, &wqe);
668
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700669 if (wqe) {
670 bfa_trc(mod->bfa, fcxp->fcxp_tag);
671
672 bfa_fcxp_init(fcxp, wqe->caller, wqe->bfa, wqe->nreq_sgles,
673 wqe->nrsp_sgles, wqe->req_sga_cbfn,
674 wqe->req_sglen_cbfn, wqe->rsp_sga_cbfn,
675 wqe->rsp_sglen_cbfn);
676
677 wqe->alloc_cbfn(wqe->alloc_cbarg, fcxp);
678 return;
679 }
680
Jing Huangd4b671c2010-12-26 21:46:35 -0800681 WARN_ON(!bfa_q_is_on_q(&mod->fcxp_active_q, fcxp));
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700682 list_del(&fcxp->qe);
Krishna Gudipatic3f1b122012-08-22 19:51:08 -0700683
684 if (fcxp->req_rsp)
685 list_add_tail(&fcxp->qe, &mod->fcxp_req_free_q);
686 else
687 list_add_tail(&fcxp->qe, &mod->fcxp_rsp_free_q);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700688}
689
690static void
691bfa_fcxp_null_comp(void *bfad_fcxp, struct bfa_fcxp_s *fcxp, void *cbarg,
692 bfa_status_t req_status, u32 rsp_len,
693 u32 resid_len, struct fchs_s *rsp_fchs)
694{
695 /* discarded fcxp completion */
696}
697
698static void
699__bfa_fcxp_send_cbfn(void *cbarg, bfa_boolean_t complete)
700{
701 struct bfa_fcxp_s *fcxp = cbarg;
702
703 if (complete) {
704 fcxp->send_cbfn(fcxp->caller, fcxp, fcxp->send_cbarg,
705 fcxp->rsp_status, fcxp->rsp_len,
706 fcxp->residue_len, &fcxp->rsp_fchs);
707 } else {
708 bfa_fcxp_free(fcxp);
709 }
710}
711
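/*
 * Firmware completion of an FCXP send. Lengths arrive big-endian and
 * are byte-swapped here, and the FCXP is looked up by its tag. If the
 * original caller has gone away (fcxp->caller == NULL) the completion
 * callback runs inline and the FCXP is freed immediately; otherwise the
 * response is stashed in the FCXP and the callback is queued.
 */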
712static void
713hal_fcxp_send_comp(struct bfa_s *bfa, struct bfi_fcxp_send_rsp_s *fcxp_rsp)
714{
715 struct bfa_fcxp_mod_s *mod = BFA_FCXP_MOD(bfa);
716 struct bfa_fcxp_s *fcxp;
Jing Huangba816ea2010-10-18 17:10:50 -0700717 u16 fcxp_tag = be16_to_cpu(fcxp_rsp->fcxp_tag);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700718
719 bfa_trc(bfa, fcxp_tag);
720
Jing Huangba816ea2010-10-18 17:10:50 -0700721 fcxp_rsp->rsp_len = be32_to_cpu(fcxp_rsp->rsp_len);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700722
Jing Huang5fbe25c2010-10-18 17:17:23 -0700723 /*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700724 * @todo f/w should not set residue to non-0 when everything
725 * is received.
726 */
727 if (fcxp_rsp->req_status == BFA_STATUS_OK)
728 fcxp_rsp->residue_len = 0;
729 else
Jing Huangba816ea2010-10-18 17:10:50 -0700730 fcxp_rsp->residue_len = be32_to_cpu(fcxp_rsp->residue_len);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700731
732 fcxp = BFA_FCXP_FROM_TAG(mod, fcxp_tag);
733
Jing Huangd4b671c2010-12-26 21:46:35 -0800734 WARN_ON(fcxp->send_cbfn == NULL);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700735
736 hal_fcxp_rx_plog(mod->bfa, fcxp, fcxp_rsp);
737
738 if (fcxp->send_cbfn != NULL) {
739 bfa_trc(mod->bfa, (NULL == fcxp->caller));
740 if (fcxp->caller == NULL) {
741 fcxp->send_cbfn(fcxp->caller, fcxp, fcxp->send_cbarg,
742 fcxp_rsp->req_status, fcxp_rsp->rsp_len,
743 fcxp_rsp->residue_len, &fcxp_rsp->fchs);
744 /*
745 * fcxp automatically freed on return from the callback
746 */
747 bfa_fcxp_free(fcxp);
748 } else {
749 fcxp->rsp_status = fcxp_rsp->req_status;
750 fcxp->rsp_len = fcxp_rsp->rsp_len;
751 fcxp->residue_len = fcxp_rsp->residue_len;
752 fcxp->rsp_fchs = fcxp_rsp->fchs;
753
754 bfa_cb_queue(bfa, &fcxp->hcb_qe,
755 __bfa_fcxp_send_cbfn, fcxp);
756 }
757 } else {
758 bfa_trc(bfa, (NULL == fcxp->send_cbfn));
759 }
760}
761
762static void
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700763hal_fcxp_tx_plog(struct bfa_s *bfa, u32 reqlen, struct bfa_fcxp_s *fcxp,
764 struct fchs_s *fchs)
765{
766 /*
767 * TODO: TX ox_id
768 */
769 if (reqlen > 0) {
770 if (fcxp->use_ireqbuf) {
771 u32 pld_w0 =
772 *((u32 *) BFA_FCXP_REQ_PLD(fcxp));
773
774 bfa_plog_fchdr_and_pl(bfa->plog, BFA_PL_MID_HAL_FCXP,
775 BFA_PL_EID_TX,
776 reqlen + sizeof(struct fchs_s), fchs,
777 pld_w0);
778 } else {
779 bfa_plog_fchdr(bfa->plog, BFA_PL_MID_HAL_FCXP,
780 BFA_PL_EID_TX,
781 reqlen + sizeof(struct fchs_s),
782 fchs);
783 }
784 } else {
785 bfa_plog_fchdr(bfa->plog, BFA_PL_MID_HAL_FCXP, BFA_PL_EID_TX,
786 reqlen + sizeof(struct fchs_s), fchs);
787 }
788}
789
790static void
791hal_fcxp_rx_plog(struct bfa_s *bfa, struct bfa_fcxp_s *fcxp,
792 struct bfi_fcxp_send_rsp_s *fcxp_rsp)
793{
794 if (fcxp_rsp->rsp_len > 0) {
795 if (fcxp->use_irspbuf) {
796 u32 pld_w0 =
797 *((u32 *) BFA_FCXP_RSP_PLD(fcxp));
798
799 bfa_plog_fchdr_and_pl(bfa->plog, BFA_PL_MID_HAL_FCXP,
800 BFA_PL_EID_RX,
801 (u16) fcxp_rsp->rsp_len,
802 &fcxp_rsp->fchs, pld_w0);
803 } else {
804 bfa_plog_fchdr(bfa->plog, BFA_PL_MID_HAL_FCXP,
805 BFA_PL_EID_RX,
806 (u16) fcxp_rsp->rsp_len,
807 &fcxp_rsp->fchs);
808 }
809 } else {
810 bfa_plog_fchdr(bfa->plog, BFA_PL_MID_HAL_FCXP, BFA_PL_EID_RX,
811 (u16) fcxp_rsp->rsp_len, &fcxp_rsp->fchs);
812 }
813}
814
Jing Huang5fbe25c2010-10-18 17:17:23 -0700815/*
 816 * Handler to resume sending fcxp when space is available in the CPE queue.
817 */
818static void
819bfa_fcxp_qresume(void *cbarg)
820{
821 struct bfa_fcxp_s *fcxp = cbarg;
822 struct bfa_s *bfa = fcxp->fcxp_mod->bfa;
823 struct bfi_fcxp_send_req_s *send_req;
824
825 fcxp->reqq_waiting = BFA_FALSE;
826 send_req = bfa_reqq_next(bfa, BFA_REQQ_FCXP);
827 bfa_fcxp_queue(fcxp, send_req);
828}
829
Jing Huang5fbe25c2010-10-18 17:17:23 -0700830/*
 831 * Queue fcxp send request to firmware.
832 */
833static void
834bfa_fcxp_queue(struct bfa_fcxp_s *fcxp, struct bfi_fcxp_send_req_s *send_req)
835{
836 struct bfa_s *bfa = fcxp->fcxp_mod->bfa;
837 struct bfa_fcxp_req_info_s *reqi = &fcxp->req_info;
838 struct bfa_fcxp_rsp_info_s *rspi = &fcxp->rsp_info;
839 struct bfa_rport_s *rport = reqi->bfa_rport;
840
841 bfi_h2i_set(send_req->mh, BFI_MC_FCXP, BFI_FCXP_H2I_SEND_REQ,
Krishna Gudipati3fd45982011-06-24 20:24:08 -0700842 bfa_fn_lpu(bfa));
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700843
Jing Huangba816ea2010-10-18 17:10:50 -0700844 send_req->fcxp_tag = cpu_to_be16(fcxp->fcxp_tag);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700845 if (rport) {
846 send_req->rport_fw_hndl = rport->fw_handle;
Jing Huangba816ea2010-10-18 17:10:50 -0700847 send_req->max_frmsz = cpu_to_be16(rport->rport_info.max_frmsz);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700848 if (send_req->max_frmsz == 0)
Jing Huangba816ea2010-10-18 17:10:50 -0700849 send_req->max_frmsz = cpu_to_be16(FC_MAX_PDUSZ);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700850 } else {
851 send_req->rport_fw_hndl = 0;
Jing Huangba816ea2010-10-18 17:10:50 -0700852 send_req->max_frmsz = cpu_to_be16(FC_MAX_PDUSZ);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700853 }
854
Jing Huangba816ea2010-10-18 17:10:50 -0700855 send_req->vf_id = cpu_to_be16(reqi->vf_id);
Krishna Gudipati3fd45982011-06-24 20:24:08 -0700856 send_req->lp_fwtag = bfa_lps_get_fwtag(bfa, reqi->lp_tag);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700857 send_req->class = reqi->class;
858 send_req->rsp_timeout = rspi->rsp_timeout;
859 send_req->cts = reqi->cts;
860 send_req->fchs = reqi->fchs;
861
Jing Huangba816ea2010-10-18 17:10:50 -0700862 send_req->req_len = cpu_to_be32(reqi->req_tot_len);
863 send_req->rsp_maxlen = cpu_to_be32(rspi->rsp_maxlen);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700864
865 /*
866 * setup req sgles
867 */
868 if (fcxp->use_ireqbuf == 1) {
Krishna Gudipati85ce9282011-06-13 15:39:36 -0700869 bfa_alen_set(&send_req->req_alen, reqi->req_tot_len,
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700870 BFA_FCXP_REQ_PLD_PA(fcxp));
871 } else {
872 if (fcxp->nreq_sgles > 0) {
Jing Huangd4b671c2010-12-26 21:46:35 -0800873 WARN_ON(fcxp->nreq_sgles != 1);
Krishna Gudipati85ce9282011-06-13 15:39:36 -0700874 bfa_alen_set(&send_req->req_alen, reqi->req_tot_len,
875 fcxp->req_sga_cbfn(fcxp->caller, 0));
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700876 } else {
Jing Huangd4b671c2010-12-26 21:46:35 -0800877 WARN_ON(reqi->req_tot_len != 0);
Krishna Gudipati85ce9282011-06-13 15:39:36 -0700878 bfa_alen_set(&send_req->rsp_alen, 0, 0);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700879 }
880 }
881
882 /*
883 * setup rsp sgles
884 */
885 if (fcxp->use_irspbuf == 1) {
Jing Huangd4b671c2010-12-26 21:46:35 -0800886 WARN_ON(rspi->rsp_maxlen > BFA_FCXP_MAX_LBUF_SZ);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700887
Krishna Gudipati85ce9282011-06-13 15:39:36 -0700888 bfa_alen_set(&send_req->rsp_alen, rspi->rsp_maxlen,
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700889 BFA_FCXP_RSP_PLD_PA(fcxp));
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700890 } else {
891 if (fcxp->nrsp_sgles > 0) {
Jing Huangd4b671c2010-12-26 21:46:35 -0800892 WARN_ON(fcxp->nrsp_sgles != 1);
Krishna Gudipati85ce9282011-06-13 15:39:36 -0700893 bfa_alen_set(&send_req->rsp_alen, rspi->rsp_maxlen,
894 fcxp->rsp_sga_cbfn(fcxp->caller, 0));
895
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700896 } else {
Jing Huangd4b671c2010-12-26 21:46:35 -0800897 WARN_ON(rspi->rsp_maxlen != 0);
Krishna Gudipati85ce9282011-06-13 15:39:36 -0700898 bfa_alen_set(&send_req->rsp_alen, 0, 0);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700899 }
900 }
901
902 hal_fcxp_tx_plog(bfa, reqi->req_tot_len, fcxp, &reqi->fchs);
903
Krishna Gudipati3fd45982011-06-24 20:24:08 -0700904 bfa_reqq_produce(bfa, BFA_REQQ_FCXP, send_req->mh);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700905
906 bfa_trc(bfa, bfa_reqq_pi(bfa, BFA_REQQ_FCXP));
907 bfa_trc(bfa, bfa_reqq_ci(bfa, BFA_REQQ_FCXP));
908}
909
Jing Huang5fbe25c2010-10-18 17:17:23 -0700910/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700911 * Allocate an FCXP instance to send a response or to send a request
912 * that has a response. Request/response buffers are allocated by caller.
913 *
914 * @param[in] bfa BFA bfa instance
915 * @param[in] nreq_sgles Number of SG elements required for request
916 * buffer. 0, if fcxp internal buffers are used.
917 * Use bfa_fcxp_get_reqbuf() to get the
918 * internal req buffer.
919 * @param[in] req_sgles SG elements describing request buffer. Will be
920 * copied in by BFA and hence can be freed on
921 * return from this function.
922 * @param[in] get_req_sga function ptr to be called to get a request SG
923 * Address (given the sge index).
924 * @param[in] get_req_sglen function ptr to be called to get a request SG
925 * len (given the sge index).
926 * @param[in] get_rsp_sga function ptr to be called to get a response SG
927 * Address (given the sge index).
928 * @param[in] get_rsp_sglen function ptr to be called to get a response SG
929 * len (given the sge index).
 930 * @param[in]	req		Whether the allocated FCXP is used to send a
 931 *				request (BFA_TRUE) or a response (BFA_FALSE)
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700932 *
933 * @return FCXP instance. NULL on failure.
934 */
935struct bfa_fcxp_s *
Krishna Gudipatic3f1b122012-08-22 19:51:08 -0700936bfa_fcxp_req_rsp_alloc(void *caller, struct bfa_s *bfa, int nreq_sgles,
937 int nrsp_sgles, bfa_fcxp_get_sgaddr_t req_sga_cbfn,
938 bfa_fcxp_get_sglen_t req_sglen_cbfn,
939 bfa_fcxp_get_sgaddr_t rsp_sga_cbfn,
940 bfa_fcxp_get_sglen_t rsp_sglen_cbfn, bfa_boolean_t req)
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700941{
942 struct bfa_fcxp_s *fcxp = NULL;
943
Jing Huangd4b671c2010-12-26 21:46:35 -0800944 WARN_ON(bfa == NULL);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700945
Krishna Gudipatic3f1b122012-08-22 19:51:08 -0700946 fcxp = bfa_fcxp_get(BFA_FCXP_MOD(bfa), req);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700947 if (fcxp == NULL)
948 return NULL;
949
950 bfa_trc(bfa, fcxp->fcxp_tag);
951
952 bfa_fcxp_init(fcxp, caller, bfa, nreq_sgles, nrsp_sgles, req_sga_cbfn,
953 req_sglen_cbfn, rsp_sga_cbfn, rsp_sglen_cbfn);
954
955 return fcxp;
956}
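/*
 * Illustrative only (a sketch, not a call site from this file): a
 * caller that wants a request-pool FCXP and the internal buffers would
 * do something like
 *
 *	fcxp = bfa_fcxp_req_rsp_alloc(NULL, bfa, 0, 0,
 *				      NULL, NULL, NULL, NULL, BFA_TRUE);
 *	if (fcxp == NULL)
 *		(fall back to bfa_fcxp_req_rsp_alloc_wait())
 *
 * Zero SG counts select the internal request/response buffers
 * (use_ireqbuf/use_irspbuf), retrieved later with
 * bfa_fcxp_get_reqbuf()/bfa_fcxp_get_rspbuf().
 */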
957
Jing Huang5fbe25c2010-10-18 17:17:23 -0700958/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700959 * Get the internal request buffer pointer
960 *
961 * @param[in] fcxp BFA fcxp pointer
962 *
963 * @return pointer to the internal request buffer
964 */
965void *
966bfa_fcxp_get_reqbuf(struct bfa_fcxp_s *fcxp)
967{
968 struct bfa_fcxp_mod_s *mod = fcxp->fcxp_mod;
969 void *reqbuf;
970
Jing Huangd4b671c2010-12-26 21:46:35 -0800971 WARN_ON(fcxp->use_ireqbuf != 1);
Krishna Gudipati45070252011-06-24 20:24:29 -0700972 reqbuf = bfa_mem_get_dmabuf_kva(mod, fcxp->fcxp_tag,
973 mod->req_pld_sz + mod->rsp_pld_sz);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700974 return reqbuf;
975}
976
977u32
978bfa_fcxp_get_reqbufsz(struct bfa_fcxp_s *fcxp)
979{
980 struct bfa_fcxp_mod_s *mod = fcxp->fcxp_mod;
981
982 return mod->req_pld_sz;
983}
984
Jing Huang5fbe25c2010-10-18 17:17:23 -0700985/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700986 * Get the internal response buffer pointer
987 *
988 * @param[in] fcxp BFA fcxp pointer
989 *
 990 * @return		pointer to the internal response buffer
991 */
992void *
993bfa_fcxp_get_rspbuf(struct bfa_fcxp_s *fcxp)
994{
995 struct bfa_fcxp_mod_s *mod = fcxp->fcxp_mod;
Krishna Gudipati45070252011-06-24 20:24:29 -0700996 void *fcxp_buf;
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700997
Jing Huangd4b671c2010-12-26 21:46:35 -0800998 WARN_ON(fcxp->use_irspbuf != 1);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700999
Krishna Gudipati45070252011-06-24 20:24:29 -07001000 fcxp_buf = bfa_mem_get_dmabuf_kva(mod, fcxp->fcxp_tag,
1001 mod->req_pld_sz + mod->rsp_pld_sz);
1002
1003 /* fcxp_buf = req_buf + rsp_buf :- add req_buf_sz to get to rsp_buf */
1004 return ((u8 *) fcxp_buf) + mod->req_pld_sz;
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001005}
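/*
 * Layout of the per-FCXP internal DMA buffer (sketch):
 *
 *	+----------------------+-----------------------+
 *	|  request payload     |  response payload     |
 *	|  (req_pld_sz bytes)  |  (rsp_pld_sz bytes)   |
 *	+----------------------+-----------------------+
 *
 * bfa_fcxp_get_reqbuf() returns the start of this buffer and
 * bfa_fcxp_get_rspbuf() returns the address req_pld_sz bytes into it.
 */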
1006
Jing Huang5fbe25c2010-10-18 17:17:23 -07001007/*
Maggie Zhangda99dcc2010-12-09 19:13:20 -08001008 * Free the BFA FCXP
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001009 *
1010 * @param[in] fcxp BFA fcxp pointer
1011 *
1012 * @return void
1013 */
1014void
1015bfa_fcxp_free(struct bfa_fcxp_s *fcxp)
1016{
1017 struct bfa_fcxp_mod_s *mod = fcxp->fcxp_mod;
1018
Jing Huangd4b671c2010-12-26 21:46:35 -08001019 WARN_ON(fcxp == NULL);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001020 bfa_trc(mod->bfa, fcxp->fcxp_tag);
1021 bfa_fcxp_put(fcxp);
1022}
1023
Jing Huang5fbe25c2010-10-18 17:17:23 -07001024/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001025 * Send a FCXP request
1026 *
1027 * @param[in] fcxp BFA fcxp pointer
1028 * @param[in] rport BFA rport pointer. Could be left NULL for WKA rports
1029 * @param[in] vf_id virtual Fabric ID
1030 * @param[in] lp_tag lport tag
Lucas De Marchi25985ed2011-03-30 22:57:33 -03001031 * @param[in] cts use Continuous sequence
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001032 * @param[in] cos fc Class of Service
1033 * @param[in] reqlen request length, does not include FCHS length
1034 * @param[in] fchs fc Header Pointer. The header content will be copied
1035 * in by BFA.
1036 *
1037 * @param[in] cbfn call back function to be called on receiving
1038 * the response
1039 * @param[in] cbarg arg for cbfn
1040 * @param[in] rsp_timeout
1041 * response timeout
1042 *
1043 * @return bfa_status_t
1044 */
1045void
1046bfa_fcxp_send(struct bfa_fcxp_s *fcxp, struct bfa_rport_s *rport,
1047 u16 vf_id, u8 lp_tag, bfa_boolean_t cts, enum fc_cos cos,
1048 u32 reqlen, struct fchs_s *fchs, bfa_cb_fcxp_send_t cbfn,
1049 void *cbarg, u32 rsp_maxlen, u8 rsp_timeout)
1050{
1051 struct bfa_s *bfa = fcxp->fcxp_mod->bfa;
1052 struct bfa_fcxp_req_info_s *reqi = &fcxp->req_info;
1053 struct bfa_fcxp_rsp_info_s *rspi = &fcxp->rsp_info;
1054 struct bfi_fcxp_send_req_s *send_req;
1055
1056 bfa_trc(bfa, fcxp->fcxp_tag);
1057
Jing Huang5fbe25c2010-10-18 17:17:23 -07001058 /*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001059 * setup request/response info
1060 */
1061 reqi->bfa_rport = rport;
1062 reqi->vf_id = vf_id;
1063 reqi->lp_tag = lp_tag;
1064 reqi->class = cos;
1065 rspi->rsp_timeout = rsp_timeout;
1066 reqi->cts = cts;
1067 reqi->fchs = *fchs;
1068 reqi->req_tot_len = reqlen;
1069 rspi->rsp_maxlen = rsp_maxlen;
1070 fcxp->send_cbfn = cbfn ? cbfn : bfa_fcxp_null_comp;
1071 fcxp->send_cbarg = cbarg;
1072
Jing Huang5fbe25c2010-10-18 17:17:23 -07001073 /*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001074 * If no room in CPE queue, wait for space in request queue
1075 */
1076 send_req = bfa_reqq_next(bfa, BFA_REQQ_FCXP);
1077 if (!send_req) {
1078 bfa_trc(bfa, fcxp->fcxp_tag);
1079 fcxp->reqq_waiting = BFA_TRUE;
1080 bfa_reqq_wait(bfa, BFA_REQQ_FCXP, &fcxp->reqq_wqe);
1081 return;
1082 }
1083
1084 bfa_fcxp_queue(fcxp, send_req);
1085}
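/*
 * Illustrative call sequence (a sketch; my_rsp_cbfn, cbarg, rsp_maxlen
 * and rsp_timeout are placeholders, not identifiers from this driver):
 * after allocating an FCXP and building the FC header, a caller issues
 *
 *	bfa_fcxp_send(fcxp, rport, vf_id, lp_tag, BFA_FALSE, FC_CLASS_3,
 *		      reqlen, &fchs, my_rsp_cbfn, cbarg,
 *		      rsp_maxlen, rsp_timeout);
 *
 * If the CPE request queue is full the send is parked on the request
 * queue wait list and re-issued from bfa_fcxp_qresume() once space
 * frees up.
 */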
1086
Jing Huang5fbe25c2010-10-18 17:17:23 -07001087/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001088 * Abort a BFA FCXP
1089 *
1090 * @param[in] fcxp BFA fcxp pointer
1091 *
 1092 * @return		bfa_status_t
1093 */
1094bfa_status_t
1095bfa_fcxp_abort(struct bfa_fcxp_s *fcxp)
1096{
1097 bfa_trc(fcxp->fcxp_mod->bfa, fcxp->fcxp_tag);
Jing Huangd4b671c2010-12-26 21:46:35 -08001098 WARN_ON(1);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001099 return BFA_STATUS_OK;
1100}
1101
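/*
 * Wait-queue variant of FCXP allocation, used when the relevant free
 * list is empty: the caller's parameters are parked in a
 * bfa_fcxp_wqe_s and alloc_cbfn is invoked from bfa_fcxp_put() as soon
 * as an FCXP of the requested kind (request or response pool) is
 * freed. A pending wait can be cancelled with bfa_fcxp_walloc_cancel().
 */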
1102void
Krishna Gudipatic3f1b122012-08-22 19:51:08 -07001103bfa_fcxp_req_rsp_alloc_wait(struct bfa_s *bfa, struct bfa_fcxp_wqe_s *wqe,
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001104 bfa_fcxp_alloc_cbfn_t alloc_cbfn, void *alloc_cbarg,
1105 void *caller, int nreq_sgles,
1106 int nrsp_sgles, bfa_fcxp_get_sgaddr_t req_sga_cbfn,
1107 bfa_fcxp_get_sglen_t req_sglen_cbfn,
1108 bfa_fcxp_get_sgaddr_t rsp_sga_cbfn,
Krishna Gudipatic3f1b122012-08-22 19:51:08 -07001109 bfa_fcxp_get_sglen_t rsp_sglen_cbfn, bfa_boolean_t req)
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001110{
1111 struct bfa_fcxp_mod_s *mod = BFA_FCXP_MOD(bfa);
1112
Krishna Gudipatic3f1b122012-08-22 19:51:08 -07001113 if (req)
1114 WARN_ON(!list_empty(&mod->fcxp_req_free_q));
1115 else
1116 WARN_ON(!list_empty(&mod->fcxp_rsp_free_q));
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001117
1118 wqe->alloc_cbfn = alloc_cbfn;
1119 wqe->alloc_cbarg = alloc_cbarg;
1120 wqe->caller = caller;
1121 wqe->bfa = bfa;
1122 wqe->nreq_sgles = nreq_sgles;
1123 wqe->nrsp_sgles = nrsp_sgles;
1124 wqe->req_sga_cbfn = req_sga_cbfn;
1125 wqe->req_sglen_cbfn = req_sglen_cbfn;
1126 wqe->rsp_sga_cbfn = rsp_sga_cbfn;
1127 wqe->rsp_sglen_cbfn = rsp_sglen_cbfn;
1128
Krishna Gudipatic3f1b122012-08-22 19:51:08 -07001129 if (req)
1130 list_add_tail(&wqe->qe, &mod->req_wait_q);
1131 else
1132 list_add_tail(&wqe->qe, &mod->rsp_wait_q);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001133}
1134
1135void
1136bfa_fcxp_walloc_cancel(struct bfa_s *bfa, struct bfa_fcxp_wqe_s *wqe)
1137{
1138 struct bfa_fcxp_mod_s *mod = BFA_FCXP_MOD(bfa);
1139
Krishna Gudipatic3f1b122012-08-22 19:51:08 -07001140 WARN_ON(!bfa_q_is_on_q(&mod->req_wait_q, wqe) ||
1141 !bfa_q_is_on_q(&mod->rsp_wait_q, wqe));
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001142 list_del(&wqe->qe);
1143}
1144
1145void
1146bfa_fcxp_discard(struct bfa_fcxp_s *fcxp)
1147{
Jing Huang5fbe25c2010-10-18 17:17:23 -07001148 /*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001149 * If waiting for room in request queue, cancel reqq wait
1150 * and free fcxp.
1151 */
1152 if (fcxp->reqq_waiting) {
1153 fcxp->reqq_waiting = BFA_FALSE;
1154 bfa_reqq_wcancel(&fcxp->reqq_wqe);
1155 bfa_fcxp_free(fcxp);
1156 return;
1157 }
1158
1159 fcxp->send_cbfn = bfa_fcxp_null_comp;
1160}
1161
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001162void
1163bfa_fcxp_isr(struct bfa_s *bfa, struct bfi_msg_s *msg)
1164{
1165 switch (msg->mhdr.msg_id) {
1166 case BFI_FCXP_I2H_SEND_RSP:
1167 hal_fcxp_send_comp(bfa, (struct bfi_fcxp_send_rsp_s *) msg);
1168 break;
1169
1170 default:
1171 bfa_trc(bfa, msg->mhdr.msg_id);
Jing Huangd4b671c2010-12-26 21:46:35 -08001172 WARN_ON(1);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001173 }
1174}
1175
1176u32
1177bfa_fcxp_get_maxrsp(struct bfa_s *bfa)
1178{
1179 struct bfa_fcxp_mod_s *mod = BFA_FCXP_MOD(bfa);
1180
1181 return mod->rsp_pld_sz;
1182}
1183
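/*
 * Resource reconfiguration: when the firmware supports fewer FCXPs
 * (num_fcxp_fw) than were configured, park the excess on the unused
 * queues -- the first half of the excess is taken from the request
 * free list, the remainder from the response free list.
 * bfa_fcxp_iocdisable() splices them back when the IOC goes down.
 */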
Krishna Gudipati3fd45982011-06-24 20:24:08 -07001184void
1185bfa_fcxp_res_recfg(struct bfa_s *bfa, u16 num_fcxp_fw)
1186{
1187 struct bfa_fcxp_mod_s *mod = BFA_FCXP_MOD(bfa);
1188 struct list_head *qe;
1189 int i;
1190
1191 for (i = 0; i < (mod->num_fcxps - num_fcxp_fw); i++) {
Krishna Gudipatic3f1b122012-08-22 19:51:08 -07001192 if (i < ((mod->num_fcxps - num_fcxp_fw) / 2)) {
1193 bfa_q_deq_tail(&mod->fcxp_req_free_q, &qe);
1194 list_add_tail(qe, &mod->fcxp_req_unused_q);
1195 } else {
1196 bfa_q_deq_tail(&mod->fcxp_rsp_free_q, &qe);
1197 list_add_tail(qe, &mod->fcxp_rsp_unused_q);
1198 }
Krishna Gudipati3fd45982011-06-24 20:24:08 -07001199 }
1200}
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001201
Jing Huang5fbe25c2010-10-18 17:17:23 -07001202/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001203 * BFA LPS state machine functions
1204 */
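/*
 * Summary of the normal transitions (derived from the handlers below):
 *
 *	init   --LOGIN-->       login (or loginwait if the request queue is full)
 *	login  --FWRSP ok-->    online,  --FWRSP fail--> init
 *	online --LOGOUT-->      logout (or logowait if the request queue is full)
 *	online --RX_CVL-->      init (clear virtual link; the vport is notified)
 *	online --SET_N2N_PID--> online (or online_n2n_pid_wait)
 *	logout --FWRSP/OFFLINE--> init
 *
 * OFFLINE and DELETE from most other states fall back to init.
 */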
1205
Jing Huang5fbe25c2010-10-18 17:17:23 -07001206/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001207 * Init state -- no login
1208 */
1209static void
1210bfa_lps_sm_init(struct bfa_lps_s *lps, enum bfa_lps_event event)
1211{
Krishna Gudipati3fd45982011-06-24 20:24:08 -07001212 bfa_trc(lps->bfa, lps->bfa_tag);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001213 bfa_trc(lps->bfa, event);
1214
1215 switch (event) {
1216 case BFA_LPS_SM_LOGIN:
1217 if (bfa_reqq_full(lps->bfa, lps->reqq)) {
1218 bfa_sm_set_state(lps, bfa_lps_sm_loginwait);
1219 bfa_reqq_wait(lps->bfa, lps->reqq, &lps->wqe);
1220 } else {
1221 bfa_sm_set_state(lps, bfa_lps_sm_login);
1222 bfa_lps_send_login(lps);
1223 }
1224
1225 if (lps->fdisc)
1226 bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
1227 BFA_PL_EID_LOGIN, 0, "FDISC Request");
1228 else
1229 bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
1230 BFA_PL_EID_LOGIN, 0, "FLOGI Request");
1231 break;
1232
1233 case BFA_LPS_SM_LOGOUT:
1234 bfa_lps_logout_comp(lps);
1235 break;
1236
1237 case BFA_LPS_SM_DELETE:
1238 bfa_lps_free(lps);
1239 break;
1240
1241 case BFA_LPS_SM_RX_CVL:
1242 case BFA_LPS_SM_OFFLINE:
1243 break;
1244
1245 case BFA_LPS_SM_FWRSP:
1246 /*
1247 * Could happen when fabric detects loopback and discards
 1248		 * the lps request. Fw will eventually send out the timeout;
 1249		 * just ignore it.
1250 */
1251 break;
Krishna Gudipatibc0e2c22012-09-21 17:23:59 -07001252 case BFA_LPS_SM_SET_N2N_PID:
1253 /*
1254 * When topology is set to loop, bfa_lps_set_n2n_pid() sends
1255 * this event. Ignore this event.
1256 */
1257 break;
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001258
1259 default:
1260 bfa_sm_fault(lps->bfa, event);
1261 }
1262}
1263
Jing Huang5fbe25c2010-10-18 17:17:23 -07001264/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001265 * login is in progress -- awaiting response from firmware
1266 */
1267static void
1268bfa_lps_sm_login(struct bfa_lps_s *lps, enum bfa_lps_event event)
1269{
Krishna Gudipati3fd45982011-06-24 20:24:08 -07001270 bfa_trc(lps->bfa, lps->bfa_tag);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001271 bfa_trc(lps->bfa, event);
1272
1273 switch (event) {
1274 case BFA_LPS_SM_FWRSP:
Krishna Gudipatibc0e2c22012-09-21 17:23:59 -07001275 case BFA_LPS_SM_OFFLINE:
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001276 if (lps->status == BFA_STATUS_OK) {
1277 bfa_sm_set_state(lps, bfa_lps_sm_online);
1278 if (lps->fdisc)
1279 bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
1280 BFA_PL_EID_LOGIN, 0, "FDISC Accept");
1281 else
1282 bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
1283 BFA_PL_EID_LOGIN, 0, "FLOGI Accept");
Krishna Gudipatib7044952010-12-13 16:17:42 -08001284 /* If N2N, send the assigned PID to FW */
1285 bfa_trc(lps->bfa, lps->fport);
1286 bfa_trc(lps->bfa, lps->lp_pid);
1287
1288 if (!lps->fport && lps->lp_pid)
1289 bfa_sm_send_event(lps, BFA_LPS_SM_SET_N2N_PID);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001290 } else {
1291 bfa_sm_set_state(lps, bfa_lps_sm_init);
1292 if (lps->fdisc)
1293 bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
1294 BFA_PL_EID_LOGIN, 0,
1295 "FDISC Fail (RJT or timeout)");
1296 else
1297 bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
1298 BFA_PL_EID_LOGIN, 0,
1299 "FLOGI Fail (RJT or timeout)");
1300 }
1301 bfa_lps_login_comp(lps);
1302 break;
1303
Krishna Gudipatibe540a92011-06-13 15:53:04 -07001304 case BFA_LPS_SM_DELETE:
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001305 bfa_sm_set_state(lps, bfa_lps_sm_init);
1306 break;
1307
Krishna Gudipatib7044952010-12-13 16:17:42 -08001308 case BFA_LPS_SM_SET_N2N_PID:
1309 bfa_trc(lps->bfa, lps->fport);
1310 bfa_trc(lps->bfa, lps->lp_pid);
1311 break;
1312
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001313 default:
1314 bfa_sm_fault(lps->bfa, event);
1315 }
1316}
1317
Jing Huang5fbe25c2010-10-18 17:17:23 -07001318/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001319 * login pending - awaiting space in request queue
1320 */
1321static void
1322bfa_lps_sm_loginwait(struct bfa_lps_s *lps, enum bfa_lps_event event)
1323{
Krishna Gudipati3fd45982011-06-24 20:24:08 -07001324 bfa_trc(lps->bfa, lps->bfa_tag);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001325 bfa_trc(lps->bfa, event);
1326
1327 switch (event) {
1328 case BFA_LPS_SM_RESUME:
1329 bfa_sm_set_state(lps, bfa_lps_sm_login);
Krishna Gudipatiff179e02012-03-13 17:40:31 -07001330 bfa_lps_send_login(lps);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001331 break;
1332
1333 case BFA_LPS_SM_OFFLINE:
Krishna Gudipatibe540a92011-06-13 15:53:04 -07001334 case BFA_LPS_SM_DELETE:
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001335 bfa_sm_set_state(lps, bfa_lps_sm_init);
1336 bfa_reqq_wcancel(&lps->wqe);
1337 break;
1338
1339 case BFA_LPS_SM_RX_CVL:
1340 /*
1341 * Login was not even sent out; so when getting out
1342 * of this state, it will appear like a login retry
1343 * after Clear virtual link
1344 */
1345 break;
1346
1347 default:
1348 bfa_sm_fault(lps->bfa, event);
1349 }
1350}
1351
Jing Huang5fbe25c2010-10-18 17:17:23 -07001352/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001353 * login complete
1354 */
1355static void
1356bfa_lps_sm_online(struct bfa_lps_s *lps, enum bfa_lps_event event)
1357{
Krishna Gudipati3fd45982011-06-24 20:24:08 -07001358 bfa_trc(lps->bfa, lps->bfa_tag);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001359 bfa_trc(lps->bfa, event);
1360
1361 switch (event) {
1362 case BFA_LPS_SM_LOGOUT:
1363 if (bfa_reqq_full(lps->bfa, lps->reqq)) {
1364 bfa_sm_set_state(lps, bfa_lps_sm_logowait);
1365 bfa_reqq_wait(lps->bfa, lps->reqq, &lps->wqe);
1366 } else {
1367 bfa_sm_set_state(lps, bfa_lps_sm_logout);
1368 bfa_lps_send_logout(lps);
1369 }
1370 bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
1371 BFA_PL_EID_LOGO, 0, "Logout");
1372 break;
1373
1374 case BFA_LPS_SM_RX_CVL:
1375 bfa_sm_set_state(lps, bfa_lps_sm_init);
1376
1377 /* Let the vport module know about this event */
1378 bfa_lps_cvl_event(lps);
1379 bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
1380 BFA_PL_EID_FIP_FCF_CVL, 0, "FCF Clear Virt. Link Rx");
1381 break;
1382
Krishna Gudipatib7044952010-12-13 16:17:42 -08001383 case BFA_LPS_SM_SET_N2N_PID:
1384 if (bfa_reqq_full(lps->bfa, lps->reqq)) {
1385 bfa_sm_set_state(lps, bfa_lps_sm_online_n2n_pid_wait);
1386 bfa_reqq_wait(lps->bfa, lps->reqq, &lps->wqe);
1387 } else
1388 bfa_lps_send_set_n2n_pid(lps);
1389 break;
1390
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001391 case BFA_LPS_SM_OFFLINE:
1392 case BFA_LPS_SM_DELETE:
1393 bfa_sm_set_state(lps, bfa_lps_sm_init);
1394 break;
1395
1396 default:
1397 bfa_sm_fault(lps->bfa, event);
1398 }
1399}
1400
Jing Huang8f4bfad2010-12-26 21:50:10 -08001401/*
 1402 * login complete -- awaiting request queue space to set the N2N PID
1403 */
1404static void
1405bfa_lps_sm_online_n2n_pid_wait(struct bfa_lps_s *lps, enum bfa_lps_event event)
1406{
Krishna Gudipati3fd45982011-06-24 20:24:08 -07001407 bfa_trc(lps->bfa, lps->bfa_tag);
Krishna Gudipatib7044952010-12-13 16:17:42 -08001408 bfa_trc(lps->bfa, event);
1409
1410 switch (event) {
1411 case BFA_LPS_SM_RESUME:
1412 bfa_sm_set_state(lps, bfa_lps_sm_online);
1413 bfa_lps_send_set_n2n_pid(lps);
1414 break;
1415
1416 case BFA_LPS_SM_LOGOUT:
1417 bfa_sm_set_state(lps, bfa_lps_sm_logowait);
1418 bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
1419 BFA_PL_EID_LOGO, 0, "Logout");
1420 break;
1421
1422 case BFA_LPS_SM_RX_CVL:
1423 bfa_sm_set_state(lps, bfa_lps_sm_init);
1424 bfa_reqq_wcancel(&lps->wqe);
1425
1426 /* Let the vport module know about this event */
1427 bfa_lps_cvl_event(lps);
1428 bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
1429 BFA_PL_EID_FIP_FCF_CVL, 0, "FCF Clear Virt. Link Rx");
1430 break;
1431
1432 case BFA_LPS_SM_OFFLINE:
1433 case BFA_LPS_SM_DELETE:
1434 bfa_sm_set_state(lps, bfa_lps_sm_init);
1435 bfa_reqq_wcancel(&lps->wqe);
1436 break;
1437
1438 default:
1439 bfa_sm_fault(lps->bfa, event);
1440 }
1441}
1442
Jing Huang5fbe25c2010-10-18 17:17:23 -07001443/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001444 * logout in progress - awaiting firmware response
1445 */
1446static void
1447bfa_lps_sm_logout(struct bfa_lps_s *lps, enum bfa_lps_event event)
1448{
Krishna Gudipati3fd45982011-06-24 20:24:08 -07001449 bfa_trc(lps->bfa, lps->bfa_tag);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001450 bfa_trc(lps->bfa, event);
1451
1452 switch (event) {
1453 case BFA_LPS_SM_FWRSP:
Krishna Gudipati881c1b32012-08-22 19:52:02 -07001454 case BFA_LPS_SM_OFFLINE:
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001455 bfa_sm_set_state(lps, bfa_lps_sm_init);
1456 bfa_lps_logout_comp(lps);
1457 break;
1458
Krishna Gudipatibe540a92011-06-13 15:53:04 -07001459 case BFA_LPS_SM_DELETE:
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001460 bfa_sm_set_state(lps, bfa_lps_sm_init);
1461 break;
1462
1463 default:
1464 bfa_sm_fault(lps->bfa, event);
1465 }
1466}
1467
Jing Huang5fbe25c2010-10-18 17:17:23 -07001468/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001469 * logout pending -- awaiting space in request queue
1470 */
1471static void
1472bfa_lps_sm_logowait(struct bfa_lps_s *lps, enum bfa_lps_event event)
1473{
Krishna Gudipati3fd45982011-06-24 20:24:08 -07001474 bfa_trc(lps->bfa, lps->bfa_tag);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001475 bfa_trc(lps->bfa, event);
1476
1477 switch (event) {
1478 case BFA_LPS_SM_RESUME:
1479 bfa_sm_set_state(lps, bfa_lps_sm_logout);
1480 bfa_lps_send_logout(lps);
1481 break;
1482
1483 case BFA_LPS_SM_OFFLINE:
Krishna Gudipatibe540a92011-06-13 15:53:04 -07001484 case BFA_LPS_SM_DELETE:
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001485 bfa_sm_set_state(lps, bfa_lps_sm_init);
1486 bfa_reqq_wcancel(&lps->wqe);
1487 break;
1488
1489 default:
1490 bfa_sm_fault(lps->bfa, event);
1491 }
1492}
1493
1494
1495
Jing Huang5fbe25c2010-10-18 17:17:23 -07001496/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001497 * lps_pvt BFA LPS private functions
1498 */
1499
Jing Huang5fbe25c2010-10-18 17:17:23 -07001500/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001501 * return memory requirement
1502 */
1503static void
Krishna Gudipati45070252011-06-24 20:24:29 -07001504bfa_lps_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *minfo,
1505 struct bfa_s *bfa)
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001506{
Krishna Gudipati45070252011-06-24 20:24:29 -07001507 struct bfa_mem_kva_s *lps_kva = BFA_MEM_LPS_KVA(bfa);
1508
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001509 if (cfg->drvcfg.min_cfg)
Krishna Gudipati45070252011-06-24 20:24:29 -07001510 bfa_mem_kva_setup(minfo, lps_kva,
1511 sizeof(struct bfa_lps_s) * BFA_LPS_MIN_LPORTS);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001512 else
Krishna Gudipati45070252011-06-24 20:24:29 -07001513 bfa_mem_kva_setup(minfo, lps_kva,
1514 sizeof(struct bfa_lps_s) * BFA_LPS_MAX_LPORTS);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001515}
1516
Jing Huang5fbe25c2010-10-18 17:17:23 -07001517/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001518 * bfa module attach at initialization time
1519 */
1520static void
1521bfa_lps_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
Krishna Gudipati45070252011-06-24 20:24:29 -07001522 struct bfa_pcidev_s *pcidev)
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001523{
1524 struct bfa_lps_mod_s *mod = BFA_LPS_MOD(bfa);
1525 struct bfa_lps_s *lps;
1526 int i;
1527
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001528 mod->num_lps = BFA_LPS_MAX_LPORTS;
1529 if (cfg->drvcfg.min_cfg)
1530 mod->num_lps = BFA_LPS_MIN_LPORTS;
1531 else
1532 mod->num_lps = BFA_LPS_MAX_LPORTS;
Krishna Gudipati45070252011-06-24 20:24:29 -07001533 mod->lps_arr = lps = (struct bfa_lps_s *) bfa_mem_kva_curp(mod);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001534
Krishna Gudipati45070252011-06-24 20:24:29 -07001535 bfa_mem_kva_curp(mod) += mod->num_lps * sizeof(struct bfa_lps_s);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001536
1537 INIT_LIST_HEAD(&mod->lps_free_q);
1538 INIT_LIST_HEAD(&mod->lps_active_q);
Krishna Gudipati3fd45982011-06-24 20:24:08 -07001539 INIT_LIST_HEAD(&mod->lps_login_q);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001540
1541 for (i = 0; i < mod->num_lps; i++, lps++) {
1542 lps->bfa = bfa;
Krishna Gudipati3fd45982011-06-24 20:24:08 -07001543 lps->bfa_tag = (u8) i;
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001544 lps->reqq = BFA_REQQ_LPS;
1545 bfa_reqq_winit(&lps->wqe, bfa_lps_reqq_resume, lps);
1546 list_add_tail(&lps->qe, &mod->lps_free_q);
1547 }
1548}
1549
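/*
 * The detach/start/stop module hooks are no-ops for LPS.
 */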
1550static void
1551bfa_lps_detach(struct bfa_s *bfa)
1552{
1553}
1554
1555static void
1556bfa_lps_start(struct bfa_s *bfa)
1557{
1558}
1559
1560static void
1561bfa_lps_stop(struct bfa_s *bfa)
1562{
1563}
1564
Jing Huang5fbe25c2010-10-18 17:17:23 -07001565/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001566 * IOC in disabled state -- consider all lps offline
1567 */
1568static void
1569bfa_lps_iocdisable(struct bfa_s *bfa)
1570{
1571 struct bfa_lps_mod_s *mod = BFA_LPS_MOD(bfa);
1572 struct bfa_lps_s *lps;
1573 struct list_head *qe, *qen;
1574
1575 list_for_each_safe(qe, qen, &mod->lps_active_q) {
1576 lps = (struct bfa_lps_s *) qe;
1577 bfa_sm_send_event(lps, BFA_LPS_SM_OFFLINE);
1578 }
Krishna Gudipati3fd45982011-06-24 20:24:08 -07001579 list_for_each_safe(qe, qen, &mod->lps_login_q) {
1580 lps = (struct bfa_lps_s *) qe;
1581 bfa_sm_send_event(lps, BFA_LPS_SM_OFFLINE);
1582 }
1583 list_splice_tail_init(&mod->lps_login_q, &mod->lps_active_q);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001584}
1585
Jing Huang5fbe25c2010-10-18 17:17:23 -07001586/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001587 * Firmware login response
1588 */
1589static void
1590bfa_lps_login_rsp(struct bfa_s *bfa, struct bfi_lps_login_rsp_s *rsp)
1591{
1592 struct bfa_lps_mod_s *mod = BFA_LPS_MOD(bfa);
1593 struct bfa_lps_s *lps;
1594
Krishna Gudipati3fd45982011-06-24 20:24:08 -07001595 WARN_ON(rsp->bfa_tag >= mod->num_lps);
1596 lps = BFA_LPS_FROM_TAG(mod, rsp->bfa_tag);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001597
1598 lps->status = rsp->status;
1599 switch (rsp->status) {
1600 case BFA_STATUS_OK:
Krishna Gudipati3fd45982011-06-24 20:24:08 -07001601 lps->fw_tag = rsp->fw_tag;
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001602 lps->fport = rsp->f_port;
Krishna Gudipatib7044952010-12-13 16:17:42 -08001603 if (lps->fport)
1604 lps->lp_pid = rsp->lp_pid;
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001605 lps->npiv_en = rsp->npiv_en;
Jing Huangba816ea2010-10-18 17:10:50 -07001606 lps->pr_bbcred = be16_to_cpu(rsp->bb_credit);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001607 lps->pr_pwwn = rsp->port_name;
1608 lps->pr_nwwn = rsp->node_name;
1609 lps->auth_req = rsp->auth_req;
1610 lps->lp_mac = rsp->lp_mac;
1611 lps->brcd_switch = rsp->brcd_switch;
1612 lps->fcf_mac = rsp->fcf_mac;
Krishna Gudipatibe540a92011-06-13 15:53:04 -07001613 lps->pr_bbscn = rsp->bb_scn;
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001614
1615 break;
1616
1617 case BFA_STATUS_FABRIC_RJT:
1618 lps->lsrjt_rsn = rsp->lsrjt_rsn;
1619 lps->lsrjt_expl = rsp->lsrjt_expl;
1620
1621 break;
1622
1623 case BFA_STATUS_EPROTOCOL:
1624 lps->ext_status = rsp->ext_status;
1625
1626 break;
1627
Krishna Gudipati3fd45982011-06-24 20:24:08 -07001628 case BFA_STATUS_VPORT_MAX:
Krishna Gudipatiff179e02012-03-13 17:40:31 -07001629 if (rsp->ext_status)
Krishna Gudipati3fd45982011-06-24 20:24:08 -07001630 bfa_lps_no_res(lps, rsp->ext_status);
1631 break;
1632
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001633 default:
1634 /* Nothing to do with other status */
1635 break;
1636 }
1637
Krishna Gudipati3fd45982011-06-24 20:24:08 -07001638 list_del(&lps->qe);
1639 list_add_tail(&lps->qe, &mod->lps_active_q);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001640 bfa_sm_send_event(lps, BFA_LPS_SM_FWRSP);
1641}
1642
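/*
 * Propagate the status of the first login response to the next 'count'
 * queued login requests (used when firmware reports a resource limit
 * such as VPORT_MAX).
 */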
Krishna Gudipati3fd45982011-06-24 20:24:08 -07001643static void
1644bfa_lps_no_res(struct bfa_lps_s *first_lps, u8 count)
1645{
1646 struct bfa_s *bfa = first_lps->bfa;
1647 struct bfa_lps_mod_s *mod = BFA_LPS_MOD(bfa);
1648 struct list_head *qe, *qe_next;
1649 struct bfa_lps_s *lps;
1650
1651 bfa_trc(bfa, count);
1652
1653 qe = bfa_q_next(first_lps);
1654
1655 while (count && qe) {
1656 qe_next = bfa_q_next(qe);
1657 lps = (struct bfa_lps_s *)qe;
1658 bfa_trc(bfa, lps->bfa_tag);
1659 lps->status = first_lps->status;
1660 list_del(&lps->qe);
1661 list_add_tail(&lps->qe, &mod->lps_active_q);
1662 bfa_sm_send_event(lps, BFA_LPS_SM_FWRSP);
1663 qe = qe_next;
1664 count--;
1665 }
1666}
1667
Jing Huang5fbe25c2010-10-18 17:17:23 -07001668/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001669 * Firmware logout response
1670 */
1671static void
1672bfa_lps_logout_rsp(struct bfa_s *bfa, struct bfi_lps_logout_rsp_s *rsp)
1673{
1674 struct bfa_lps_mod_s *mod = BFA_LPS_MOD(bfa);
1675 struct bfa_lps_s *lps;
1676
Krishna Gudipati3fd45982011-06-24 20:24:08 -07001677 WARN_ON(rsp->bfa_tag >= mod->num_lps);
1678 lps = BFA_LPS_FROM_TAG(mod, rsp->bfa_tag);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001679
1680 bfa_sm_send_event(lps, BFA_LPS_SM_FWRSP);
1681}
1682
Jing Huang5fbe25c2010-10-18 17:17:23 -07001683/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001684 * Firmware received a Clear virtual link request (for FCoE)
1685 */
1686static void
1687bfa_lps_rx_cvl_event(struct bfa_s *bfa, struct bfi_lps_cvl_event_s *cvl)
1688{
1689 struct bfa_lps_mod_s *mod = BFA_LPS_MOD(bfa);
1690 struct bfa_lps_s *lps;
1691
Krishna Gudipati3fd45982011-06-24 20:24:08 -07001692 lps = BFA_LPS_FROM_TAG(mod, cvl->bfa_tag);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001693
1694 bfa_sm_send_event(lps, BFA_LPS_SM_RX_CVL);
1695}
1696
Jing Huang5fbe25c2010-10-18 17:17:23 -07001697/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001698 * Space is available in request queue, resume queueing request to firmware.
1699 */
1700static void
1701bfa_lps_reqq_resume(void *lps_arg)
1702{
1703 struct bfa_lps_s *lps = lps_arg;
1704
1705 bfa_sm_send_event(lps, BFA_LPS_SM_RESUME);
1706}
1707
Jing Huang5fbe25c2010-10-18 17:17:23 -07001708/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001709 * lps is freed -- triggered by vport delete
1710 */
1711static void
1712bfa_lps_free(struct bfa_lps_s *lps)
1713{
1714 struct bfa_lps_mod_s *mod = BFA_LPS_MOD(lps->bfa);
1715
1716 lps->lp_pid = 0;
1717 list_del(&lps->qe);
1718 list_add_tail(&lps->qe, &mod->lps_free_q);
1719}
1720
Jing Huang5fbe25c2010-10-18 17:17:23 -07001721/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001722 * send login request to firmware
1723 */
1724static void
1725bfa_lps_send_login(struct bfa_lps_s *lps)
1726{
Krishna Gudipati3fd45982011-06-24 20:24:08 -07001727 struct bfa_lps_mod_s *mod = BFA_LPS_MOD(lps->bfa);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001728 struct bfi_lps_login_req_s *m;
1729
1730 m = bfa_reqq_next(lps->bfa, lps->reqq);
Jing Huangd4b671c2010-12-26 21:46:35 -08001731 WARN_ON(!m);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001732
1733 bfi_h2i_set(m->mh, BFI_MC_LPS, BFI_LPS_H2I_LOGIN_REQ,
Krishna Gudipati3fd45982011-06-24 20:24:08 -07001734 bfa_fn_lpu(lps->bfa));
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001735
Krishna Gudipati3fd45982011-06-24 20:24:08 -07001736 m->bfa_tag = lps->bfa_tag;
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001737 m->alpa = lps->alpa;
Jing Huangba816ea2010-10-18 17:10:50 -07001738 m->pdu_size = cpu_to_be16(lps->pdusz);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001739 m->pwwn = lps->pwwn;
1740 m->nwwn = lps->nwwn;
1741 m->fdisc = lps->fdisc;
1742 m->auth_en = lps->auth_en;
Krishna Gudipatibe540a92011-06-13 15:53:04 -07001743 m->bb_scn = lps->bb_scn;
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001744
Krishna Gudipati3fd45982011-06-24 20:24:08 -07001745 bfa_reqq_produce(lps->bfa, lps->reqq, m->mh);
1746 list_del(&lps->qe);
1747 list_add_tail(&lps->qe, &mod->lps_login_q);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001748}
1749
Jing Huang5fbe25c2010-10-18 17:17:23 -07001750/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001751 * send logout request to firmware
1752 */
1753static void
1754bfa_lps_send_logout(struct bfa_lps_s *lps)
1755{
1756 struct bfi_lps_logout_req_s *m;
1757
1758 m = bfa_reqq_next(lps->bfa, lps->reqq);
Jing Huangd4b671c2010-12-26 21:46:35 -08001759 WARN_ON(!m);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001760
1761 bfi_h2i_set(m->mh, BFI_MC_LPS, BFI_LPS_H2I_LOGOUT_REQ,
Krishna Gudipati3fd45982011-06-24 20:24:08 -07001762 bfa_fn_lpu(lps->bfa));
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001763
Krishna Gudipati3fd45982011-06-24 20:24:08 -07001764 m->fw_tag = lps->fw_tag;
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001765 m->port_name = lps->pwwn;
Krishna Gudipati3fd45982011-06-24 20:24:08 -07001766 bfa_reqq_produce(lps->bfa, lps->reqq, m->mh);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001767}
1768
Jing Huang8f4bfad2010-12-26 21:50:10 -08001769/*
Krishna Gudipatib7044952010-12-13 16:17:42 -08001770 * send n2n pid set request to firmware
1771 */
1772static void
1773bfa_lps_send_set_n2n_pid(struct bfa_lps_s *lps)
1774{
1775 struct bfi_lps_n2n_pid_req_s *m;
1776
1777 m = bfa_reqq_next(lps->bfa, lps->reqq);
Jing Huangd4b671c2010-12-26 21:46:35 -08001778 WARN_ON(!m);
Krishna Gudipatib7044952010-12-13 16:17:42 -08001779
1780 bfi_h2i_set(m->mh, BFI_MC_LPS, BFI_LPS_H2I_N2N_PID_REQ,
Krishna Gudipati3fd45982011-06-24 20:24:08 -07001781 bfa_fn_lpu(lps->bfa));
Krishna Gudipatib7044952010-12-13 16:17:42 -08001782
Krishna Gudipati3fd45982011-06-24 20:24:08 -07001783 m->fw_tag = lps->fw_tag;
Krishna Gudipatib7044952010-12-13 16:17:42 -08001784 m->lp_pid = lps->lp_pid;
Krishna Gudipati3fd45982011-06-24 20:24:08 -07001785 bfa_reqq_produce(lps->bfa, lps->reqq, m->mh);
Krishna Gudipatib7044952010-12-13 16:17:42 -08001786}
1787
Jing Huang5fbe25c2010-10-18 17:17:23 -07001788/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001789 * Indirect login completion handler for non-fcs
1790 */
1791static void
1792bfa_lps_login_comp_cb(void *arg, bfa_boolean_t complete)
1793{
1794 struct bfa_lps_s *lps = arg;
1795
1796 if (!complete)
1797 return;
1798
1799 if (lps->fdisc)
1800 bfa_cb_lps_fdisc_comp(lps->bfa->bfad, lps->uarg, lps->status);
1801 else
1802 bfa_cb_lps_flogi_comp(lps->bfa->bfad, lps->uarg, lps->status);
1803}
1804
Jing Huang5fbe25c2010-10-18 17:17:23 -07001805/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001806 * Login completion handler -- direct call for fcs, queue for others
1807 */
1808static void
1809bfa_lps_login_comp(struct bfa_lps_s *lps)
1810{
1811 if (!lps->bfa->fcs) {
1812 bfa_cb_queue(lps->bfa, &lps->hcb_qe, bfa_lps_login_comp_cb,
1813 lps);
1814 return;
1815 }
1816
1817 if (lps->fdisc)
1818 bfa_cb_lps_fdisc_comp(lps->bfa->bfad, lps->uarg, lps->status);
1819 else
1820 bfa_cb_lps_flogi_comp(lps->bfa->bfad, lps->uarg, lps->status);
1821}
1822
Jing Huang5fbe25c2010-10-18 17:17:23 -07001823/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001824 * Indirect logout completion handler for non-fcs
1825 */
1826static void
1827bfa_lps_logout_comp_cb(void *arg, bfa_boolean_t complete)
1828{
1829 struct bfa_lps_s *lps = arg;
1830
1831 if (!complete)
1832 return;
1833
1834 if (lps->fdisc)
1835 bfa_cb_lps_fdisclogo_comp(lps->bfa->bfad, lps->uarg);
Krishna Gudipati881c1b32012-08-22 19:52:02 -07001836 else
1837 bfa_cb_lps_flogo_comp(lps->bfa->bfad, lps->uarg);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001838}
1839
Jing Huang5fbe25c2010-10-18 17:17:23 -07001840/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001841 * Logout completion handler -- direct call for fcs, queue for others
1842 */
1843static void
1844bfa_lps_logout_comp(struct bfa_lps_s *lps)
1845{
1846 if (!lps->bfa->fcs) {
1847 bfa_cb_queue(lps->bfa, &lps->hcb_qe, bfa_lps_logout_comp_cb,
1848 lps);
1849 return;
1850 }
1851 if (lps->fdisc)
1852 bfa_cb_lps_fdisclogo_comp(lps->bfa->bfad, lps->uarg);
1853}
1854
Jing Huang5fbe25c2010-10-18 17:17:23 -07001855/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001856 * Clear virtual link completion handler for non-fcs
1857 */
1858static void
1859bfa_lps_cvl_event_cb(void *arg, bfa_boolean_t complete)
1860{
1861 struct bfa_lps_s *lps = arg;
1862
1863 if (!complete)
1864 return;
1865
1866 /* Clear virtual link to base port will result in link down */
1867 if (lps->fdisc)
1868 bfa_cb_lps_cvl_event(lps->bfa->bfad, lps->uarg);
1869}
1870
Jing Huang5fbe25c2010-10-18 17:17:23 -07001871/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001872 * Received Clear virtual link event -- direct call for fcs,
1873 * queue for others
1874 */
1875static void
1876bfa_lps_cvl_event(struct bfa_lps_s *lps)
1877{
1878 if (!lps->bfa->fcs) {
1879 bfa_cb_queue(lps->bfa, &lps->hcb_qe, bfa_lps_cvl_event_cb,
1880 lps);
1881 return;
1882 }
1883
1884 /* Clear virtual link to base port will result in link down */
1885 if (lps->fdisc)
1886 bfa_cb_lps_cvl_event(lps->bfa->bfad, lps->uarg);
1887}
1888
1889
1890
Jing Huang5fbe25c2010-10-18 17:17:23 -07001891/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001892 * lps_public BFA LPS public functions
1893 */
1894
1895u32
1896bfa_lps_get_max_vport(struct bfa_s *bfa)
1897{
1898 if (bfa_ioc_devid(&bfa->ioc) == BFA_PCI_DEVICE_ID_CT)
1899 return BFA_LPS_MAX_VPORTS_SUPP_CT;
1900 else
1901 return BFA_LPS_MAX_VPORTS_SUPP_CB;
1902}
1903
Jing Huang5fbe25c2010-10-18 17:17:23 -07001904/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001905 * Allocate a lport service tag.
1906 */
1907struct bfa_lps_s *
1908bfa_lps_alloc(struct bfa_s *bfa)
1909{
1910 struct bfa_lps_mod_s *mod = BFA_LPS_MOD(bfa);
1911 struct bfa_lps_s *lps = NULL;
1912
1913 bfa_q_deq(&mod->lps_free_q, &lps);
1914
1915 if (lps == NULL)
1916 return NULL;
1917
1918 list_add_tail(&lps->qe, &mod->lps_active_q);
1919
1920 bfa_sm_set_state(lps, bfa_lps_sm_init);
1921 return lps;
1922}
1923
Jing Huang5fbe25c2010-10-18 17:17:23 -07001924/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001925 * Free lport service tag. This can be called anytime after an alloc.
1926 * No need to wait for any pending login/logout completions.
1927 */
1928void
1929bfa_lps_delete(struct bfa_lps_s *lps)
1930{
1931 bfa_sm_send_event(lps, BFA_LPS_SM_DELETE);
1932}
1933
Jing Huang5fbe25c2010-10-18 17:17:23 -07001934/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001935 * Initiate a lport login.
1936 */
1937void
1938bfa_lps_flogi(struct bfa_lps_s *lps, void *uarg, u8 alpa, u16 pdusz,
Krishna Gudipatibe540a92011-06-13 15:53:04 -07001939 wwn_t pwwn, wwn_t nwwn, bfa_boolean_t auth_en, uint8_t bb_scn)
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001940{
1941 lps->uarg = uarg;
1942 lps->alpa = alpa;
1943 lps->pdusz = pdusz;
1944 lps->pwwn = pwwn;
1945 lps->nwwn = nwwn;
1946 lps->fdisc = BFA_FALSE;
1947 lps->auth_en = auth_en;
Krishna Gudipatibe540a92011-06-13 15:53:04 -07001948 lps->bb_scn = bb_scn;
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001949 bfa_sm_send_event(lps, BFA_LPS_SM_LOGIN);
1950}
1951
Jing Huang5fbe25c2010-10-18 17:17:23 -07001952/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001953 * Initiate a lport fdisc login.
1954 */
1955void
1956bfa_lps_fdisc(struct bfa_lps_s *lps, void *uarg, u16 pdusz, wwn_t pwwn,
1957 wwn_t nwwn)
1958{
1959 lps->uarg = uarg;
1960 lps->alpa = 0;
1961 lps->pdusz = pdusz;
1962 lps->pwwn = pwwn;
1963 lps->nwwn = nwwn;
1964 lps->fdisc = BFA_TRUE;
1965 lps->auth_en = BFA_FALSE;
1966 bfa_sm_send_event(lps, BFA_LPS_SM_LOGIN);
1967}
1968
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001969
Jing Huang5fbe25c2010-10-18 17:17:23 -07001970/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001971 * Initiate a lport FDISC logout.
1972 */
1973void
1974bfa_lps_fdisclogo(struct bfa_lps_s *lps)
1975{
1976 bfa_sm_send_event(lps, BFA_LPS_SM_LOGOUT);
1977}
1978
Krishna Gudipati3fd45982011-06-24 20:24:08 -07001979u8
1980bfa_lps_get_fwtag(struct bfa_s *bfa, u8 lp_tag)
1981{
1982 struct bfa_lps_mod_s *mod = BFA_LPS_MOD(bfa);
1983
1984 return BFA_LPS_FROM_TAG(mod, lp_tag)->fw_tag;
1985}
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001986
Jing Huang5fbe25c2010-10-18 17:17:23 -07001987/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001988 * Return lport service tag given the pid
1989 */
1990u8
1991bfa_lps_get_tag_from_pid(struct bfa_s *bfa, u32 pid)
1992{
1993 struct bfa_lps_mod_s *mod = BFA_LPS_MOD(bfa);
1994 struct bfa_lps_s *lps;
1995 int i;
1996
1997 for (i = 0, lps = mod->lps_arr; i < mod->num_lps; i++, lps++) {
1998 if (lps->lp_pid == pid)
Krishna Gudipati3fd45982011-06-24 20:24:08 -07001999 return lps->bfa_tag;
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002000 }
2001
2002 /* Return base port tag anyway */
2003 return 0;
2004}
2005
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002006
Jing Huang5fbe25c2010-10-18 17:17:23 -07002007/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002008 * return port id assigned to the base lport
2009 */
2010u32
2011bfa_lps_get_base_pid(struct bfa_s *bfa)
2012{
2013 struct bfa_lps_mod_s *mod = BFA_LPS_MOD(bfa);
2014
2015 return BFA_LPS_FROM_TAG(mod, 0)->lp_pid;
2016}
2017
Jing Huang8f4bfad2010-12-26 21:50:10 -08002018/*
Krishna Gudipatib7044952010-12-13 16:17:42 -08002019 * Set PID in case of n2n (which is assigned during PLOGI)
2020 */
2021void
2022bfa_lps_set_n2n_pid(struct bfa_lps_s *lps, uint32_t n2n_pid)
2023{
Krishna Gudipati3fd45982011-06-24 20:24:08 -07002024 bfa_trc(lps->bfa, lps->bfa_tag);
Krishna Gudipatib7044952010-12-13 16:17:42 -08002025 bfa_trc(lps->bfa, n2n_pid);
2026
2027 lps->lp_pid = n2n_pid;
2028 bfa_sm_send_event(lps, BFA_LPS_SM_SET_N2N_PID);
2029}
2030
Jing Huang5fbe25c2010-10-18 17:17:23 -07002031/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002032 * LPS firmware message class handler.
2033 */
2034void
2035bfa_lps_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
2036{
2037 union bfi_lps_i2h_msg_u msg;
2038
2039 bfa_trc(bfa, m->mhdr.msg_id);
2040 msg.msg = m;
2041
2042 switch (m->mhdr.msg_id) {
Krishna Gudipati43ffdf42011-06-13 15:46:21 -07002043 case BFI_LPS_I2H_LOGIN_RSP:
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002044 bfa_lps_login_rsp(bfa, msg.login_rsp);
2045 break;
2046
Krishna Gudipati43ffdf42011-06-13 15:46:21 -07002047 case BFI_LPS_I2H_LOGOUT_RSP:
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002048 bfa_lps_logout_rsp(bfa, msg.logout_rsp);
2049 break;
2050
Krishna Gudipati43ffdf42011-06-13 15:46:21 -07002051 case BFI_LPS_I2H_CVL_EVENT:
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002052 bfa_lps_rx_cvl_event(bfa, msg.cvl_event);
2053 break;
2054
2055 default:
2056 bfa_trc(bfa, m->mhdr.msg_id);
Jing Huangd4b671c2010-12-26 21:46:35 -08002057 WARN_ON(1);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002058 }
2059}
2060
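/*
 * Post a port asynchronous event notification (AEN) to the driver layer.
 */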
Krishna Gudipati7826f302011-07-20 16:59:13 -07002061static void
2062bfa_fcport_aen_post(struct bfa_fcport_s *fcport, enum bfa_port_aen_event event)
2063{
2064 struct bfad_s *bfad = (struct bfad_s *)fcport->bfa->bfad;
2065 struct bfa_aen_entry_s *aen_entry;
2066
2067 bfad_get_aen_entry(bfad, aen_entry);
2068 if (!aen_entry)
2069 return;
2070
2071 aen_entry->aen_data.port.ioc_type = bfa_get_type(fcport->bfa);
2072 aen_entry->aen_data.port.pwwn = fcport->pwwn;
2073
2074 /* Send the AEN notification */
2075 bfad_im_post_vendor_event(aen_entry, bfad, ++fcport->bfa->bfa_aen_seq,
2076 BFA_AEN_CAT_PORT, event);
2077}
2078
Jing Huang5fbe25c2010-10-18 17:17:23 -07002079/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002080 * FC PORT state machine functions
2081 */
2082static void
2083bfa_fcport_sm_uninit(struct bfa_fcport_s *fcport,
2084 enum bfa_fcport_sm_event event)
2085{
2086 bfa_trc(fcport->bfa, event);
2087
2088 switch (event) {
2089 case BFA_FCPORT_SM_START:
Jing Huang5fbe25c2010-10-18 17:17:23 -07002090 /*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002091 * Start event after IOC is configured and BFA is started.
2092 */
Krishna Gudipatif3a060c2010-12-13 16:16:50 -08002093 fcport->use_flash_cfg = BFA_TRUE;
2094
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002095 if (bfa_fcport_send_enable(fcport)) {
2096 bfa_trc(fcport->bfa, BFA_TRUE);
2097 bfa_sm_set_state(fcport, bfa_fcport_sm_enabling);
2098 } else {
2099 bfa_trc(fcport->bfa, BFA_FALSE);
2100 bfa_sm_set_state(fcport,
2101 bfa_fcport_sm_enabling_qwait);
2102 }
2103 break;
2104
2105 case BFA_FCPORT_SM_ENABLE:
Jing Huang5fbe25c2010-10-18 17:17:23 -07002106 /*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002107 * Port is persistently configured to be in enabled state. Do
2108 * not change state. Port enabling is done when START event is
2109 * received.
2110 */
2111 break;
2112
2113 case BFA_FCPORT_SM_DISABLE:
Jing Huang5fbe25c2010-10-18 17:17:23 -07002114 /*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002115 * If a port is persistently configured to be disabled, the
2116	 * first event will be a port disable request.
2117 */
2118 bfa_sm_set_state(fcport, bfa_fcport_sm_disabled);
2119 break;
2120
2121 case BFA_FCPORT_SM_HWFAIL:
2122 bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown);
2123 break;
2124
2125 default:
2126 bfa_sm_fault(fcport->bfa, event);
2127 }
2128}
2129
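/*
 * Port enable is pending -- awaiting space in the request queue.
 */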
2130static void
2131bfa_fcport_sm_enabling_qwait(struct bfa_fcport_s *fcport,
2132 enum bfa_fcport_sm_event event)
2133{
2134 char pwwn_buf[BFA_STRING_32];
2135 struct bfad_s *bfad = (struct bfad_s *)fcport->bfa->bfad;
2136 bfa_trc(fcport->bfa, event);
2137
2138 switch (event) {
2139 case BFA_FCPORT_SM_QRESUME:
2140 bfa_sm_set_state(fcport, bfa_fcport_sm_enabling);
2141 bfa_fcport_send_enable(fcport);
2142 break;
2143
2144 case BFA_FCPORT_SM_STOP:
2145 bfa_reqq_wcancel(&fcport->reqq_wait);
2146 bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
2147 break;
2148
2149 case BFA_FCPORT_SM_ENABLE:
Jing Huang5fbe25c2010-10-18 17:17:23 -07002150 /*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002151	 * Enable is already in progress.
2152 */
2153 break;
2154
2155 case BFA_FCPORT_SM_DISABLE:
Jing Huang5fbe25c2010-10-18 17:17:23 -07002156 /*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002157 * Just send disable request to firmware when room becomes
2158 * available in request queue.
2159 */
2160 bfa_sm_set_state(fcport, bfa_fcport_sm_disabled);
2161 bfa_reqq_wcancel(&fcport->reqq_wait);
2162 bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
2163 BFA_PL_EID_PORT_DISABLE, 0, "Port Disable");
2164 wwn2str(pwwn_buf, fcport->pwwn);
Jing Huang88166242010-12-09 17:11:53 -08002165 BFA_LOG(KERN_INFO, bfad, bfa_log_level,
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002166 "Base port disabled: WWN = %s\n", pwwn_buf);
Krishna Gudipati7826f302011-07-20 16:59:13 -07002167 bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISABLE);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002168 break;
2169
2170 case BFA_FCPORT_SM_LINKUP:
2171 case BFA_FCPORT_SM_LINKDOWN:
Jing Huang5fbe25c2010-10-18 17:17:23 -07002172 /*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002173 * Possible to get link events when doing back-to-back
2174 * enable/disables.
2175 */
2176 break;
2177
2178 case BFA_FCPORT_SM_HWFAIL:
2179 bfa_reqq_wcancel(&fcport->reqq_wait);
2180 bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown);
2181 break;
2182
2183 default:
2184 bfa_sm_fault(fcport->bfa, event);
2185 }
2186}
2187
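/*
 * Port enable request has been queued to firmware -- awaiting response.
 */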
2188static void
2189bfa_fcport_sm_enabling(struct bfa_fcport_s *fcport,
2190 enum bfa_fcport_sm_event event)
2191{
2192 char pwwn_buf[BFA_STRING_32];
2193 struct bfad_s *bfad = (struct bfad_s *)fcport->bfa->bfad;
2194 bfa_trc(fcport->bfa, event);
2195
2196 switch (event) {
2197 case BFA_FCPORT_SM_FWRSP:
2198 case BFA_FCPORT_SM_LINKDOWN:
2199 bfa_sm_set_state(fcport, bfa_fcport_sm_linkdown);
2200 break;
2201
2202 case BFA_FCPORT_SM_LINKUP:
2203 bfa_fcport_update_linkinfo(fcport);
2204 bfa_sm_set_state(fcport, bfa_fcport_sm_linkup);
2205
Jing Huangd4b671c2010-12-26 21:46:35 -08002206 WARN_ON(!fcport->event_cbfn);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002207 bfa_fcport_scn(fcport, BFA_PORT_LINKUP, BFA_FALSE);
2208 break;
2209
2210 case BFA_FCPORT_SM_ENABLE:
Jing Huang5fbe25c2010-10-18 17:17:23 -07002211 /*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002212 * Already being enabled.
2213 */
2214 break;
2215
2216 case BFA_FCPORT_SM_DISABLE:
2217 if (bfa_fcport_send_disable(fcport))
2218 bfa_sm_set_state(fcport, bfa_fcport_sm_disabling);
2219 else
2220 bfa_sm_set_state(fcport,
2221 bfa_fcport_sm_disabling_qwait);
2222
2223 bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
2224 BFA_PL_EID_PORT_DISABLE, 0, "Port Disable");
2225 wwn2str(pwwn_buf, fcport->pwwn);
Jing Huang88166242010-12-09 17:11:53 -08002226 BFA_LOG(KERN_INFO, bfad, bfa_log_level,
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002227 "Base port disabled: WWN = %s\n", pwwn_buf);
Krishna Gudipati7826f302011-07-20 16:59:13 -07002228 bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISABLE);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002229 break;
2230
2231 case BFA_FCPORT_SM_STOP:
2232 bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
2233 break;
2234
2235 case BFA_FCPORT_SM_HWFAIL:
2236 bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown);
2237 break;
2238
2239 default:
2240 bfa_sm_fault(fcport->bfa, event);
2241 }
2242}
2243
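/*
 * Port is enabled and the link is down -- awaiting a linkup event
 * from firmware.
 */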
2244static void
2245bfa_fcport_sm_linkdown(struct bfa_fcport_s *fcport,
2246 enum bfa_fcport_sm_event event)
2247{
2248 struct bfi_fcport_event_s *pevent = fcport->event_arg.i2hmsg.event;
2249 char pwwn_buf[BFA_STRING_32];
2250 struct bfad_s *bfad = (struct bfad_s *)fcport->bfa->bfad;
2251
2252 bfa_trc(fcport->bfa, event);
2253
2254 switch (event) {
2255 case BFA_FCPORT_SM_LINKUP:
2256 bfa_fcport_update_linkinfo(fcport);
2257 bfa_sm_set_state(fcport, bfa_fcport_sm_linkup);
Jing Huangd4b671c2010-12-26 21:46:35 -08002258 WARN_ON(!fcport->event_cbfn);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002259 bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
2260 BFA_PL_EID_PORT_ST_CHANGE, 0, "Port Linkup");
2261 if (!bfa_ioc_get_fcmode(&fcport->bfa->ioc)) {
2262
2263 bfa_trc(fcport->bfa,
Krishna Gudipatibc0e2c22012-09-21 17:23:59 -07002264 pevent->link_state.attr.vc_fcf.fcf.fipenabled);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002265 bfa_trc(fcport->bfa,
Krishna Gudipatibc0e2c22012-09-21 17:23:59 -07002266 pevent->link_state.attr.vc_fcf.fcf.fipfailed);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002267
Krishna Gudipatibc0e2c22012-09-21 17:23:59 -07002268 if (pevent->link_state.attr.vc_fcf.fcf.fipfailed)
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002269 bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
2270 BFA_PL_EID_FIP_FCF_DISC, 0,
2271 "FIP FCF Discovery Failed");
2272 else
2273 bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
2274 BFA_PL_EID_FIP_FCF_DISC, 0,
2275 "FIP FCF Discovered");
2276 }
2277
2278 bfa_fcport_scn(fcport, BFA_PORT_LINKUP, BFA_FALSE);
2279 wwn2str(pwwn_buf, fcport->pwwn);
Jing Huang88166242010-12-09 17:11:53 -08002280 BFA_LOG(KERN_INFO, bfad, bfa_log_level,
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002281 "Base port online: WWN = %s\n", pwwn_buf);
Krishna Gudipati7826f302011-07-20 16:59:13 -07002282 bfa_fcport_aen_post(fcport, BFA_PORT_AEN_ONLINE);
Krishna Gudipati3ec4f2c2011-07-20 17:03:09 -07002283
2284 /* If QoS is enabled and it is not online, send AEN */
2285 if (fcport->cfg.qos_enabled &&
2286 fcport->qos_attr.state != BFA_QOS_ONLINE)
2287 bfa_fcport_aen_post(fcport, BFA_PORT_AEN_QOS_NEG);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002288 break;
2289
2290 case BFA_FCPORT_SM_LINKDOWN:
Jing Huang5fbe25c2010-10-18 17:17:23 -07002291 /*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002292 * Possible to get link down event.
2293 */
2294 break;
2295
2296 case BFA_FCPORT_SM_ENABLE:
Jing Huang5fbe25c2010-10-18 17:17:23 -07002297 /*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002298 * Already enabled.
2299 */
2300 break;
2301
2302 case BFA_FCPORT_SM_DISABLE:
2303 if (bfa_fcport_send_disable(fcport))
2304 bfa_sm_set_state(fcport, bfa_fcport_sm_disabling);
2305 else
2306 bfa_sm_set_state(fcport,
2307 bfa_fcport_sm_disabling_qwait);
2308
2309 bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
2310 BFA_PL_EID_PORT_DISABLE, 0, "Port Disable");
2311 wwn2str(pwwn_buf, fcport->pwwn);
Jing Huang88166242010-12-09 17:11:53 -08002312 BFA_LOG(KERN_INFO, bfad, bfa_log_level,
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002313 "Base port disabled: WWN = %s\n", pwwn_buf);
Krishna Gudipati7826f302011-07-20 16:59:13 -07002314 bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISABLE);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002315 break;
2316
2317 case BFA_FCPORT_SM_STOP:
2318 bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
2319 break;
2320
2321 case BFA_FCPORT_SM_HWFAIL:
2322 bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown);
2323 break;
2324
2325 default:
2326 bfa_sm_fault(fcport->bfa, event);
2327 }
2328}
2329
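/*
 * Port is enabled and the link is up.
 */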
2330static void
2331bfa_fcport_sm_linkup(struct bfa_fcport_s *fcport,
2332 enum bfa_fcport_sm_event event)
2333{
2334 char pwwn_buf[BFA_STRING_32];
2335 struct bfad_s *bfad = (struct bfad_s *)fcport->bfa->bfad;
2336
2337 bfa_trc(fcport->bfa, event);
2338
2339 switch (event) {
2340 case BFA_FCPORT_SM_ENABLE:
Jing Huang5fbe25c2010-10-18 17:17:23 -07002341 /*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002342 * Already enabled.
2343 */
2344 break;
2345
2346 case BFA_FCPORT_SM_DISABLE:
2347 if (bfa_fcport_send_disable(fcport))
2348 bfa_sm_set_state(fcport, bfa_fcport_sm_disabling);
2349 else
2350 bfa_sm_set_state(fcport,
2351 bfa_fcport_sm_disabling_qwait);
2352
2353 bfa_fcport_reset_linkinfo(fcport);
2354 bfa_fcport_scn(fcport, BFA_PORT_LINKDOWN, BFA_FALSE);
2355 bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
2356 BFA_PL_EID_PORT_DISABLE, 0, "Port Disable");
2357 wwn2str(pwwn_buf, fcport->pwwn);
Jing Huang88166242010-12-09 17:11:53 -08002358 BFA_LOG(KERN_INFO, bfad, bfa_log_level,
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002359 "Base port offline: WWN = %s\n", pwwn_buf);
Krishna Gudipati7826f302011-07-20 16:59:13 -07002360 bfa_fcport_aen_post(fcport, BFA_PORT_AEN_OFFLINE);
Jing Huang88166242010-12-09 17:11:53 -08002361 BFA_LOG(KERN_INFO, bfad, bfa_log_level,
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002362 "Base port disabled: WWN = %s\n", pwwn_buf);
Krishna Gudipati7826f302011-07-20 16:59:13 -07002363 bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISABLE);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002364 break;
2365
2366 case BFA_FCPORT_SM_LINKDOWN:
2367 bfa_sm_set_state(fcport, bfa_fcport_sm_linkdown);
2368 bfa_fcport_reset_linkinfo(fcport);
2369 bfa_fcport_scn(fcport, BFA_PORT_LINKDOWN, BFA_FALSE);
2370 bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
2371 BFA_PL_EID_PORT_ST_CHANGE, 0, "Port Linkdown");
2372 wwn2str(pwwn_buf, fcport->pwwn);
Krishna Gudipati7826f302011-07-20 16:59:13 -07002373 if (BFA_PORT_IS_DISABLED(fcport->bfa)) {
Jing Huang88166242010-12-09 17:11:53 -08002374 BFA_LOG(KERN_INFO, bfad, bfa_log_level,
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002375 "Base port offline: WWN = %s\n", pwwn_buf);
Krishna Gudipati7826f302011-07-20 16:59:13 -07002376 bfa_fcport_aen_post(fcport, BFA_PORT_AEN_OFFLINE);
2377 } else {
Jing Huang88166242010-12-09 17:11:53 -08002378 BFA_LOG(KERN_ERR, bfad, bfa_log_level,
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002379 "Base port (WWN = %s) "
2380 "lost fabric connectivity\n", pwwn_buf);
Krishna Gudipati7826f302011-07-20 16:59:13 -07002381 bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISCONNECT);
2382 }
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002383 break;
2384
2385 case BFA_FCPORT_SM_STOP:
2386 bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
2387 bfa_fcport_reset_linkinfo(fcport);
2388 wwn2str(pwwn_buf, fcport->pwwn);
Krishna Gudipati7826f302011-07-20 16:59:13 -07002389 if (BFA_PORT_IS_DISABLED(fcport->bfa)) {
Jing Huang88166242010-12-09 17:11:53 -08002390 BFA_LOG(KERN_INFO, bfad, bfa_log_level,
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002391 "Base port offline: WWN = %s\n", pwwn_buf);
Krishna Gudipati7826f302011-07-20 16:59:13 -07002392 bfa_fcport_aen_post(fcport, BFA_PORT_AEN_OFFLINE);
2393 } else {
Jing Huang88166242010-12-09 17:11:53 -08002394 BFA_LOG(KERN_ERR, bfad, bfa_log_level,
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002395 "Base port (WWN = %s) "
2396 "lost fabric connectivity\n", pwwn_buf);
Krishna Gudipati7826f302011-07-20 16:59:13 -07002397 bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISCONNECT);
2398 }
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002399 break;
2400
2401 case BFA_FCPORT_SM_HWFAIL:
2402 bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown);
2403 bfa_fcport_reset_linkinfo(fcport);
2404 bfa_fcport_scn(fcport, BFA_PORT_LINKDOWN, BFA_FALSE);
2405 wwn2str(pwwn_buf, fcport->pwwn);
Krishna Gudipati7826f302011-07-20 16:59:13 -07002406 if (BFA_PORT_IS_DISABLED(fcport->bfa)) {
Jing Huang88166242010-12-09 17:11:53 -08002407 BFA_LOG(KERN_INFO, bfad, bfa_log_level,
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002408 "Base port offline: WWN = %s\n", pwwn_buf);
Krishna Gudipati7826f302011-07-20 16:59:13 -07002409 bfa_fcport_aen_post(fcport, BFA_PORT_AEN_OFFLINE);
2410 } else {
Jing Huang88166242010-12-09 17:11:53 -08002411 BFA_LOG(KERN_ERR, bfad, bfa_log_level,
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002412 "Base port (WWN = %s) "
2413 "lost fabric connectivity\n", pwwn_buf);
Krishna Gudipati7826f302011-07-20 16:59:13 -07002414 bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISCONNECT);
2415 }
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002416 break;
2417
2418 default:
2419 bfa_sm_fault(fcport->bfa, event);
2420 }
2421}
2422
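/*
 * Port disable is pending -- awaiting space in the request queue.
 */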
2423static void
2424bfa_fcport_sm_disabling_qwait(struct bfa_fcport_s *fcport,
2425 enum bfa_fcport_sm_event event)
2426{
2427 bfa_trc(fcport->bfa, event);
2428
2429 switch (event) {
2430 case BFA_FCPORT_SM_QRESUME:
2431 bfa_sm_set_state(fcport, bfa_fcport_sm_disabling);
2432 bfa_fcport_send_disable(fcport);
2433 break;
2434
2435 case BFA_FCPORT_SM_STOP:
2436 bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
2437 bfa_reqq_wcancel(&fcport->reqq_wait);
2438 break;
2439
2440 case BFA_FCPORT_SM_ENABLE:
2441 bfa_sm_set_state(fcport, bfa_fcport_sm_toggling_qwait);
2442 break;
2443
2444 case BFA_FCPORT_SM_DISABLE:
Jing Huang5fbe25c2010-10-18 17:17:23 -07002445 /*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002446 * Already being disabled.
2447 */
2448 break;
2449
2450 case BFA_FCPORT_SM_LINKUP:
2451 case BFA_FCPORT_SM_LINKDOWN:
Jing Huang5fbe25c2010-10-18 17:17:23 -07002452 /*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002453 * Possible to get link events when doing back-to-back
2454 * enable/disables.
2455 */
2456 break;
2457
2458 case BFA_FCPORT_SM_HWFAIL:
2459 bfa_sm_set_state(fcport, bfa_fcport_sm_iocfail);
2460 bfa_reqq_wcancel(&fcport->reqq_wait);
2461 break;
2462
2463 default:
2464 bfa_sm_fault(fcport->bfa, event);
2465 }
2466}
2467
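/*
 * Disable is pending and an enable was requested -- once queue space is
 * available, the disable is sent followed by the enable.
 */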
2468static void
2469bfa_fcport_sm_toggling_qwait(struct bfa_fcport_s *fcport,
2470 enum bfa_fcport_sm_event event)
2471{
2472 bfa_trc(fcport->bfa, event);
2473
2474 switch (event) {
2475 case BFA_FCPORT_SM_QRESUME:
2476 bfa_sm_set_state(fcport, bfa_fcport_sm_disabling);
2477 bfa_fcport_send_disable(fcport);
2478 if (bfa_fcport_send_enable(fcport))
2479 bfa_sm_set_state(fcport, bfa_fcport_sm_enabling);
2480 else
2481 bfa_sm_set_state(fcport,
2482 bfa_fcport_sm_enabling_qwait);
2483 break;
2484
2485 case BFA_FCPORT_SM_STOP:
2486 bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
2487 bfa_reqq_wcancel(&fcport->reqq_wait);
2488 break;
2489
2490 case BFA_FCPORT_SM_ENABLE:
2491 break;
2492
2493 case BFA_FCPORT_SM_DISABLE:
2494 bfa_sm_set_state(fcport, bfa_fcport_sm_disabling_qwait);
2495 break;
2496
2497 case BFA_FCPORT_SM_LINKUP:
2498 case BFA_FCPORT_SM_LINKDOWN:
Jing Huang5fbe25c2010-10-18 17:17:23 -07002499 /*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002500 * Possible to get link events when doing back-to-back
2501 * enable/disables.
2502 */
2503 break;
2504
2505 case BFA_FCPORT_SM_HWFAIL:
2506 bfa_sm_set_state(fcport, bfa_fcport_sm_iocfail);
2507 bfa_reqq_wcancel(&fcport->reqq_wait);
2508 break;
2509
2510 default:
2511 bfa_sm_fault(fcport->bfa, event);
2512 }
2513}
2514
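/*
 * Port disable request has been queued to firmware -- awaiting response.
 */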
2515static void
2516bfa_fcport_sm_disabling(struct bfa_fcport_s *fcport,
2517 enum bfa_fcport_sm_event event)
2518{
2519 char pwwn_buf[BFA_STRING_32];
2520 struct bfad_s *bfad = (struct bfad_s *)fcport->bfa->bfad;
2521 bfa_trc(fcport->bfa, event);
2522
2523 switch (event) {
2524 case BFA_FCPORT_SM_FWRSP:
2525 bfa_sm_set_state(fcport, bfa_fcport_sm_disabled);
2526 break;
2527
2528 case BFA_FCPORT_SM_DISABLE:
Jing Huang5fbe25c2010-10-18 17:17:23 -07002529 /*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002530 * Already being disabled.
2531 */
2532 break;
2533
2534 case BFA_FCPORT_SM_ENABLE:
2535 if (bfa_fcport_send_enable(fcport))
2536 bfa_sm_set_state(fcport, bfa_fcport_sm_enabling);
2537 else
2538 bfa_sm_set_state(fcport,
2539 bfa_fcport_sm_enabling_qwait);
2540
2541 bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
2542 BFA_PL_EID_PORT_ENABLE, 0, "Port Enable");
2543 wwn2str(pwwn_buf, fcport->pwwn);
Jing Huang88166242010-12-09 17:11:53 -08002544 BFA_LOG(KERN_INFO, bfad, bfa_log_level,
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002545 "Base port enabled: WWN = %s\n", pwwn_buf);
Krishna Gudipati7826f302011-07-20 16:59:13 -07002546 bfa_fcport_aen_post(fcport, BFA_PORT_AEN_ENABLE);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002547 break;
2548
2549 case BFA_FCPORT_SM_STOP:
2550 bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
2551 break;
2552
2553 case BFA_FCPORT_SM_LINKUP:
2554 case BFA_FCPORT_SM_LINKDOWN:
Jing Huang5fbe25c2010-10-18 17:17:23 -07002555 /*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002556 * Possible to get link events when doing back-to-back
2557 * enable/disables.
2558 */
2559 break;
2560
2561 case BFA_FCPORT_SM_HWFAIL:
2562 bfa_sm_set_state(fcport, bfa_fcport_sm_iocfail);
2563 break;
2564
2565 default:
2566 bfa_sm_fault(fcport->bfa, event);
2567 }
2568}
2569
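/*
 * Port is disabled.
 */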
2570static void
2571bfa_fcport_sm_disabled(struct bfa_fcport_s *fcport,
2572 enum bfa_fcport_sm_event event)
2573{
2574 char pwwn_buf[BFA_STRING_32];
2575 struct bfad_s *bfad = (struct bfad_s *)fcport->bfa->bfad;
2576 bfa_trc(fcport->bfa, event);
2577
2578 switch (event) {
2579 case BFA_FCPORT_SM_START:
Jing Huang5fbe25c2010-10-18 17:17:23 -07002580 /*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002581 * Ignore start event for a port that is disabled.
2582 */
2583 break;
2584
2585 case BFA_FCPORT_SM_STOP:
2586 bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
2587 break;
2588
2589 case BFA_FCPORT_SM_ENABLE:
2590 if (bfa_fcport_send_enable(fcport))
2591 bfa_sm_set_state(fcport, bfa_fcport_sm_enabling);
2592 else
2593 bfa_sm_set_state(fcport,
2594 bfa_fcport_sm_enabling_qwait);
2595
2596 bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
2597 BFA_PL_EID_PORT_ENABLE, 0, "Port Enable");
2598 wwn2str(pwwn_buf, fcport->pwwn);
Jing Huang88166242010-12-09 17:11:53 -08002599 BFA_LOG(KERN_INFO, bfad, bfa_log_level,
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002600 "Base port enabled: WWN = %s\n", pwwn_buf);
Krishna Gudipati7826f302011-07-20 16:59:13 -07002601 bfa_fcport_aen_post(fcport, BFA_PORT_AEN_ENABLE);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002602 break;
2603
2604 case BFA_FCPORT_SM_DISABLE:
Jing Huang5fbe25c2010-10-18 17:17:23 -07002605 /*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002606 * Already disabled.
2607 */
2608 break;
2609
2610 case BFA_FCPORT_SM_HWFAIL:
2611 bfa_sm_set_state(fcport, bfa_fcport_sm_iocfail);
2612 break;
2613
Krishna Gudipatie3535462012-09-21 17:26:07 -07002614 case BFA_FCPORT_SM_DPORTENABLE:
2615 bfa_sm_set_state(fcport, bfa_fcport_sm_dport);
2616 break;
2617
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002618 default:
2619 bfa_sm_fault(fcport->bfa, event);
2620 }
2621}
2622
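/*
 * Port is stopped -- only a start event is processed.
 */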
2623static void
2624bfa_fcport_sm_stopped(struct bfa_fcport_s *fcport,
2625 enum bfa_fcport_sm_event event)
2626{
2627 bfa_trc(fcport->bfa, event);
2628
2629 switch (event) {
2630 case BFA_FCPORT_SM_START:
2631 if (bfa_fcport_send_enable(fcport))
2632 bfa_sm_set_state(fcport, bfa_fcport_sm_enabling);
2633 else
2634 bfa_sm_set_state(fcport,
2635 bfa_fcport_sm_enabling_qwait);
2636 break;
2637
2638 default:
Jing Huang5fbe25c2010-10-18 17:17:23 -07002639 /*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002640 * Ignore all other events.
2641 */
2642 ;
2643 }
2644}
2645
Jing Huang5fbe25c2010-10-18 17:17:23 -07002646/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002647 * Port is enabled. IOC is down/failed.
2648 */
2649static void
2650bfa_fcport_sm_iocdown(struct bfa_fcport_s *fcport,
2651 enum bfa_fcport_sm_event event)
2652{
2653 bfa_trc(fcport->bfa, event);
2654
2655 switch (event) {
2656 case BFA_FCPORT_SM_START:
2657 if (bfa_fcport_send_enable(fcport))
2658 bfa_sm_set_state(fcport, bfa_fcport_sm_enabling);
2659 else
2660 bfa_sm_set_state(fcport,
2661 bfa_fcport_sm_enabling_qwait);
2662 break;
2663
2664 default:
Jing Huang5fbe25c2010-10-18 17:17:23 -07002665 /*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002666 * Ignore all events.
2667 */
2668 ;
2669 }
2670}
2671
Jing Huang5fbe25c2010-10-18 17:17:23 -07002672/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002673 * Port is disabled. IOC is down/failed.
2674 */
2675static void
2676bfa_fcport_sm_iocfail(struct bfa_fcport_s *fcport,
2677 enum bfa_fcport_sm_event event)
2678{
2679 bfa_trc(fcport->bfa, event);
2680
2681 switch (event) {
2682 case BFA_FCPORT_SM_START:
2683 bfa_sm_set_state(fcport, bfa_fcport_sm_disabled);
2684 break;
2685
2686 case BFA_FCPORT_SM_ENABLE:
2687 bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown);
2688 break;
2689
2690 default:
Jing Huang5fbe25c2010-10-18 17:17:23 -07002691 /*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002692 * Ignore all events.
2693 */
2694 ;
2695 }
2696}
2697
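/*
 * Port is in dport (diagnostic) mode.
 */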
Krishna Gudipatie3535462012-09-21 17:26:07 -07002698static void
2699bfa_fcport_sm_dport(struct bfa_fcport_s *fcport, enum bfa_fcport_sm_event event)
2700{
2701 bfa_trc(fcport->bfa, event);
2702
2703 switch (event) {
2704 case BFA_FCPORT_SM_DPORTENABLE:
2705 case BFA_FCPORT_SM_DISABLE:
2706 case BFA_FCPORT_SM_ENABLE:
2707 case BFA_FCPORT_SM_START:
2708 /*
2709		 * Ignore these events for a port that is in dport mode
2710 */
2711 break;
2712
2713 case BFA_FCPORT_SM_STOP:
2714 bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
2715 break;
2716
2717 case BFA_FCPORT_SM_HWFAIL:
2718 bfa_sm_set_state(fcport, bfa_fcport_sm_iocfail);
2719 break;
2720
2721 case BFA_FCPORT_SM_DPORTDISABLE:
2722 bfa_sm_set_state(fcport, bfa_fcport_sm_disabled);
2723 break;
2724
2725 default:
2726 bfa_sm_fault(fcport->bfa, event);
2727 }
2728}
2729
Jing Huang5fbe25c2010-10-18 17:17:23 -07002730/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002731 * Link state is down
2732 */
2733static void
2734bfa_fcport_ln_sm_dn(struct bfa_fcport_ln_s *ln,
2735 enum bfa_fcport_ln_sm_event event)
2736{
2737 bfa_trc(ln->fcport->bfa, event);
2738
2739 switch (event) {
2740 case BFA_FCPORT_LN_SM_LINKUP:
2741 bfa_sm_set_state(ln, bfa_fcport_ln_sm_up_nf);
2742 bfa_fcport_queue_cb(ln, BFA_PORT_LINKUP);
2743 break;
2744
2745 default:
2746 bfa_sm_fault(ln->fcport->bfa, event);
2747 }
2748}
2749
Jing Huang5fbe25c2010-10-18 17:17:23 -07002750/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002751 * Link state is waiting for down notification
2752 */
2753static void
2754bfa_fcport_ln_sm_dn_nf(struct bfa_fcport_ln_s *ln,
2755 enum bfa_fcport_ln_sm_event event)
2756{
2757 bfa_trc(ln->fcport->bfa, event);
2758
2759 switch (event) {
2760 case BFA_FCPORT_LN_SM_LINKUP:
2761 bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn_up_nf);
2762 break;
2763
2764 case BFA_FCPORT_LN_SM_NOTIFICATION:
2765 bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn);
2766 break;
2767
2768 default:
2769 bfa_sm_fault(ln->fcport->bfa, event);
2770 }
2771}
2772
Jing Huang5fbe25c2010-10-18 17:17:23 -07002773/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002774 * Link state is waiting for down notification and there is a pending up
2775 */
2776static void
2777bfa_fcport_ln_sm_dn_up_nf(struct bfa_fcport_ln_s *ln,
2778 enum bfa_fcport_ln_sm_event event)
2779{
2780 bfa_trc(ln->fcport->bfa, event);
2781
2782 switch (event) {
2783 case BFA_FCPORT_LN_SM_LINKDOWN:
2784 bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn_nf);
2785 break;
2786
2787 case BFA_FCPORT_LN_SM_NOTIFICATION:
2788 bfa_sm_set_state(ln, bfa_fcport_ln_sm_up_nf);
2789 bfa_fcport_queue_cb(ln, BFA_PORT_LINKUP);
2790 break;
2791
2792 default:
2793 bfa_sm_fault(ln->fcport->bfa, event);
2794 }
2795}
2796
Jing Huang5fbe25c2010-10-18 17:17:23 -07002797/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002798 * Link state is up
2799 */
2800static void
2801bfa_fcport_ln_sm_up(struct bfa_fcport_ln_s *ln,
2802 enum bfa_fcport_ln_sm_event event)
2803{
2804 bfa_trc(ln->fcport->bfa, event);
2805
2806 switch (event) {
2807 case BFA_FCPORT_LN_SM_LINKDOWN:
2808 bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn_nf);
2809 bfa_fcport_queue_cb(ln, BFA_PORT_LINKDOWN);
2810 break;
2811
2812 default:
2813 bfa_sm_fault(ln->fcport->bfa, event);
2814 }
2815}
2816
Jing Huang5fbe25c2010-10-18 17:17:23 -07002817/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002818 * Link state is waiting for up notification
2819 */
2820static void
2821bfa_fcport_ln_sm_up_nf(struct bfa_fcport_ln_s *ln,
2822 enum bfa_fcport_ln_sm_event event)
2823{
2824 bfa_trc(ln->fcport->bfa, event);
2825
2826 switch (event) {
2827 case BFA_FCPORT_LN_SM_LINKDOWN:
2828 bfa_sm_set_state(ln, bfa_fcport_ln_sm_up_dn_nf);
2829 break;
2830
2831 case BFA_FCPORT_LN_SM_NOTIFICATION:
2832 bfa_sm_set_state(ln, bfa_fcport_ln_sm_up);
2833 break;
2834
2835 default:
2836 bfa_sm_fault(ln->fcport->bfa, event);
2837 }
2838}
2839
Jing Huang5fbe25c2010-10-18 17:17:23 -07002840/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002841 * Link state is waiting for up notification and there is a pending down
2842 */
2843static void
2844bfa_fcport_ln_sm_up_dn_nf(struct bfa_fcport_ln_s *ln,
2845 enum bfa_fcport_ln_sm_event event)
2846{
2847 bfa_trc(ln->fcport->bfa, event);
2848
2849 switch (event) {
2850 case BFA_FCPORT_LN_SM_LINKUP:
2851 bfa_sm_set_state(ln, bfa_fcport_ln_sm_up_dn_up_nf);
2852 break;
2853
2854 case BFA_FCPORT_LN_SM_NOTIFICATION:
2855 bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn_nf);
2856 bfa_fcport_queue_cb(ln, BFA_PORT_LINKDOWN);
2857 break;
2858
2859 default:
2860 bfa_sm_fault(ln->fcport->bfa, event);
2861 }
2862}
2863
Jing Huang5fbe25c2010-10-18 17:17:23 -07002864/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002865 * Link state is waiting for up notification and there are pending down and up
2866 */
2867static void
2868bfa_fcport_ln_sm_up_dn_up_nf(struct bfa_fcport_ln_s *ln,
2869 enum bfa_fcport_ln_sm_event event)
2870{
2871 bfa_trc(ln->fcport->bfa, event);
2872
2873 switch (event) {
2874 case BFA_FCPORT_LN_SM_LINKDOWN:
2875 bfa_sm_set_state(ln, bfa_fcport_ln_sm_up_dn_nf);
2876 break;
2877
2878 case BFA_FCPORT_LN_SM_NOTIFICATION:
2879 bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn_up_nf);
2880 bfa_fcport_queue_cb(ln, BFA_PORT_LINKDOWN);
2881 break;
2882
2883 default:
2884 bfa_sm_fault(ln->fcport->bfa, event);
2885 }
2886}
2887
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002888static void
2889__bfa_cb_fcport_event(void *cbarg, bfa_boolean_t complete)
2890{
2891 struct bfa_fcport_ln_s *ln = cbarg;
2892
2893 if (complete)
2894 ln->fcport->event_cbfn(ln->fcport->event_cbarg, ln->ln_event);
2895 else
2896 bfa_sm_send_event(ln, BFA_FCPORT_LN_SM_NOTIFICATION);
2897}
2898
Jing Huang5fbe25c2010-10-18 17:17:23 -07002899/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002900 * Send SCN notification to upper layers.
2901 * trunk - false if caller is fcport to ignore fcport event in trunked mode
2902 */
2903static void
2904bfa_fcport_scn(struct bfa_fcport_s *fcport, enum bfa_port_linkstate event,
2905 bfa_boolean_t trunk)
2906{
2907 if (fcport->cfg.trunked && !trunk)
2908 return;
2909
2910 switch (event) {
2911 case BFA_PORT_LINKUP:
2912 bfa_sm_send_event(&fcport->ln, BFA_FCPORT_LN_SM_LINKUP);
2913 break;
2914 case BFA_PORT_LINKDOWN:
2915 bfa_sm_send_event(&fcport->ln, BFA_FCPORT_LN_SM_LINKDOWN);
2916 break;
2917 default:
Jing Huangd4b671c2010-12-26 21:46:35 -08002918 WARN_ON(1);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002919 }
2920}
2921
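/*
 * Deliver a link state change notification -- direct call for fcs,
 * queued callback for others.
 */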
2922static void
2923bfa_fcport_queue_cb(struct bfa_fcport_ln_s *ln, enum bfa_port_linkstate event)
2924{
2925 struct bfa_fcport_s *fcport = ln->fcport;
2926
2927 if (fcport->bfa->fcs) {
2928 fcport->event_cbfn(fcport->event_cbarg, event);
2929 bfa_sm_send_event(ln, BFA_FCPORT_LN_SM_NOTIFICATION);
2930 } else {
2931 ln->ln_event = event;
2932 bfa_cb_queue(fcport->bfa, &ln->ln_qe,
2933 __bfa_cb_fcport_event, ln);
2934 }
2935}
2936
2937#define FCPORT_STATS_DMA_SZ (BFA_ROUNDUP(sizeof(union bfa_fcport_stats_u), \
2938 BFA_CACHELINE_SZ))
2939
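/*
 * Return DMA memory requirement for port statistics.
 */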
2940static void
Krishna Gudipati45070252011-06-24 20:24:29 -07002941bfa_fcport_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *minfo,
2942 struct bfa_s *bfa)
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002943{
Krishna Gudipati45070252011-06-24 20:24:29 -07002944 struct bfa_mem_dma_s *fcport_dma = BFA_MEM_FCPORT_DMA(bfa);
2945
2946 bfa_mem_dma_setup(minfo, fcport_dma, FCPORT_STATS_DMA_SZ);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002947}
2948
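/*
 * Space is available in the request queue -- resume the port state machine.
 */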
2949static void
2950bfa_fcport_qresume(void *cbarg)
2951{
2952 struct bfa_fcport_s *fcport = cbarg;
2953
2954 bfa_sm_send_event(fcport, BFA_FCPORT_SM_QRESUME);
2955}
2956
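/*
 * Claim the DMA memory set aside for port statistics.
 */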
2957static void
Krishna Gudipati45070252011-06-24 20:24:29 -07002958bfa_fcport_mem_claim(struct bfa_fcport_s *fcport)
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002959{
Krishna Gudipati45070252011-06-24 20:24:29 -07002960 struct bfa_mem_dma_s *fcport_dma = &fcport->fcport_dma;
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002961
Krishna Gudipati45070252011-06-24 20:24:29 -07002962 fcport->stats_kva = bfa_mem_dma_virt(fcport_dma);
2963 fcport->stats_pa = bfa_mem_dma_phys(fcport_dma);
2964 fcport->stats = (union bfa_fcport_stats_u *)
2965 bfa_mem_dma_virt(fcport_dma);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002966}
2967
Jing Huang5fbe25c2010-10-18 17:17:23 -07002968/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002969 * Memory initialization.
2970 */
2971static void
2972bfa_fcport_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
Krishna Gudipati45070252011-06-24 20:24:29 -07002973 struct bfa_pcidev_s *pcidev)
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002974{
2975 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
2976 struct bfa_port_cfg_s *port_cfg = &fcport->cfg;
2977 struct bfa_fcport_ln_s *ln = &fcport->ln;
Maggie Zhangf16a1752010-12-09 19:12:32 -08002978 struct timeval tv;
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002979
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002980 fcport->bfa = bfa;
2981 ln->fcport = fcport;
2982
Krishna Gudipati45070252011-06-24 20:24:29 -07002983 bfa_fcport_mem_claim(fcport);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002984
2985 bfa_sm_set_state(fcport, bfa_fcport_sm_uninit);
2986 bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn);
2987
Jing Huang5fbe25c2010-10-18 17:17:23 -07002988 /*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002989 * initialize time stamp for stats reset
2990 */
Maggie Zhangf16a1752010-12-09 19:12:32 -08002991 do_gettimeofday(&tv);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002992 fcport->stats_reset_time = tv.tv_sec;
2993
Jing Huang5fbe25c2010-10-18 17:17:23 -07002994 /*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002995 * initialize and set default configuration
2996 */
2997 port_cfg->topology = BFA_PORT_TOPOLOGY_P2P;
2998 port_cfg->speed = BFA_PORT_SPEED_AUTO;
2999 port_cfg->trunked = BFA_FALSE;
3000 port_cfg->maxfrsize = 0;
3001
3002 port_cfg->trl_def_speed = BFA_PORT_SPEED_1GBPS;
3003
Krishna Gudipati37ea0552011-07-20 17:02:11 -07003004 INIT_LIST_HEAD(&fcport->stats_pending_q);
3005 INIT_LIST_HEAD(&fcport->statsclr_pending_q);
3006
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07003007 bfa_reqq_winit(&fcport->reqq_wait, bfa_fcport_qresume, fcport);
3008}
3009
3010static void
3011bfa_fcport_detach(struct bfa_s *bfa)
3012{
3013}
3014
Jing Huang5fbe25c2010-10-18 17:17:23 -07003015/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07003016 * Called when IOC is ready.
3017 */
3018static void
3019bfa_fcport_start(struct bfa_s *bfa)
3020{
3021 bfa_sm_send_event(BFA_FCPORT_MOD(bfa), BFA_FCPORT_SM_START);
3022}
3023
Jing Huang5fbe25c2010-10-18 17:17:23 -07003024/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07003025 * Called before IOC is stopped.
3026 */
3027static void
3028bfa_fcport_stop(struct bfa_s *bfa)
3029{
3030 bfa_sm_send_event(BFA_FCPORT_MOD(bfa), BFA_FCPORT_SM_STOP);
3031 bfa_trunk_iocdisable(bfa);
3032}
3033
Jing Huang5fbe25c2010-10-18 17:17:23 -07003034/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07003035 * Called when IOC failure is detected.
3036 */
3037static void
3038bfa_fcport_iocdisable(struct bfa_s *bfa)
3039{
3040 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3041
3042 bfa_sm_send_event(fcport, BFA_FCPORT_SM_HWFAIL);
3043 bfa_trunk_iocdisable(bfa);
3044}
3045
Krishna Gudipatibc0e2c22012-09-21 17:23:59 -07003046/*
3047 * Update loop info in fcport for SCN online
3048 */
3049static void
3050bfa_fcport_update_loop_info(struct bfa_fcport_s *fcport,
3051 struct bfa_fcport_loop_info_s *loop_info)
3052{
3053 fcport->myalpa = loop_info->myalpa;
3054 fcport->alpabm_valid =
3055 loop_info->alpabm_val;
3056 memcpy(fcport->alpabm.alpa_bm,
3057 loop_info->alpabm.alpa_bm,
3058 sizeof(struct fc_alpabm_s));
3059}
3060
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07003061static void
3062bfa_fcport_update_linkinfo(struct bfa_fcport_s *fcport)
3063{
3064 struct bfi_fcport_event_s *pevent = fcport->event_arg.i2hmsg.event;
3065 struct bfa_fcport_trunk_s *trunk = &fcport->trunk;
3066
3067 fcport->speed = pevent->link_state.speed;
3068 fcport->topology = pevent->link_state.topology;
3069
Krishna Gudipatibc0e2c22012-09-21 17:23:59 -07003070 if (fcport->topology == BFA_PORT_TOPOLOGY_LOOP) {
3071 bfa_fcport_update_loop_info(fcport,
3072 &pevent->link_state.attr.loop_info);
3073 return;
3074 }
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07003075
3076 /* QoS Details */
Jing Huang6a18b162010-10-18 17:08:54 -07003077 fcport->qos_attr = pevent->link_state.qos_attr;
Krishna Gudipatibc0e2c22012-09-21 17:23:59 -07003078 fcport->qos_vc_attr = pevent->link_state.attr.vc_fcf.qos_vc_attr;
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07003079
Jing Huang5fbe25c2010-10-18 17:17:23 -07003080 /*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07003081 * update trunk state if applicable
3082 */
3083 if (!fcport->cfg.trunked)
3084 trunk->attr.state = BFA_TRUNK_DISABLED;
3085
3086 /* update FCoE specific */
Krishna Gudipatibc0e2c22012-09-21 17:23:59 -07003087 fcport->fcoe_vlan =
3088 be16_to_cpu(pevent->link_state.attr.vc_fcf.fcf.vlan);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07003089
3090 bfa_trc(fcport->bfa, fcport->speed);
3091 bfa_trc(fcport->bfa, fcport->topology);
3092}
3093
3094static void
3095bfa_fcport_reset_linkinfo(struct bfa_fcport_s *fcport)
3096{
3097 fcport->speed = BFA_PORT_SPEED_UNKNOWN;
3098 fcport->topology = BFA_PORT_TOPOLOGY_NONE;
Krishna Gudipatibe540a92011-06-13 15:53:04 -07003099 fcport->bbsc_op_state = BFA_FALSE;
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07003100}
3101
Jing Huang5fbe25c2010-10-18 17:17:23 -07003102/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07003103 * Send port enable message to firmware.
3104 */
3105static bfa_boolean_t
3106bfa_fcport_send_enable(struct bfa_fcport_s *fcport)
3107{
3108 struct bfi_fcport_enable_req_s *m;
3109
Jing Huang5fbe25c2010-10-18 17:17:23 -07003110 /*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07003111 * Increment message tag before queue check, so that responses to old
3112 * requests are discarded.
3113 */
3114 fcport->msgtag++;
3115
Jing Huang5fbe25c2010-10-18 17:17:23 -07003116 /*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07003117 * check for room in queue to send request now
3118 */
3119 m = bfa_reqq_next(fcport->bfa, BFA_REQQ_PORT);
3120 if (!m) {
3121 bfa_reqq_wait(fcport->bfa, BFA_REQQ_PORT,
3122 &fcport->reqq_wait);
3123 return BFA_FALSE;
3124 }
3125
3126 bfi_h2i_set(m->mh, BFI_MC_FCPORT, BFI_FCPORT_H2I_ENABLE_REQ,
Krishna Gudipati3fd45982011-06-24 20:24:08 -07003127 bfa_fn_lpu(fcport->bfa));
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07003128 m->nwwn = fcport->nwwn;
3129 m->pwwn = fcport->pwwn;
3130 m->port_cfg = fcport->cfg;
3131 m->msgtag = fcport->msgtag;
Jing Huangba816ea2010-10-18 17:10:50 -07003132 m->port_cfg.maxfrsize = cpu_to_be16(fcport->cfg.maxfrsize);
Krishna Gudipatif3a060c2010-12-13 16:16:50 -08003133 m->use_flash_cfg = fcport->use_flash_cfg;
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07003134 bfa_dma_be_addr_set(m->stats_dma_addr, fcport->stats_pa);
3135 bfa_trc(fcport->bfa, m->stats_dma_addr.a32.addr_lo);
3136 bfa_trc(fcport->bfa, m->stats_dma_addr.a32.addr_hi);
3137
Jing Huang5fbe25c2010-10-18 17:17:23 -07003138 /*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07003139 * queue I/O message to firmware
3140 */
Krishna Gudipati3fd45982011-06-24 20:24:08 -07003141 bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT, m->mh);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07003142 return BFA_TRUE;
3143}
3144
Jing Huang5fbe25c2010-10-18 17:17:23 -07003145/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07003146 * Send port disable message to firmware.
3147 */
3148static bfa_boolean_t
3149bfa_fcport_send_disable(struct bfa_fcport_s *fcport)
3150{
3151 struct bfi_fcport_req_s *m;
3152
Jing Huang5fbe25c2010-10-18 17:17:23 -07003153 /*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07003154 * Increment message tag before queue check, so that responses to old
3155 * requests are discarded.
3156 */
3157 fcport->msgtag++;
3158
Jing Huang5fbe25c2010-10-18 17:17:23 -07003159 /*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07003160 * check for room in queue to send request now
3161 */
3162 m = bfa_reqq_next(fcport->bfa, BFA_REQQ_PORT);
3163 if (!m) {
3164 bfa_reqq_wait(fcport->bfa, BFA_REQQ_PORT,
3165 &fcport->reqq_wait);
3166 return BFA_FALSE;
3167 }
3168
3169 bfi_h2i_set(m->mh, BFI_MC_FCPORT, BFI_FCPORT_H2I_DISABLE_REQ,
Krishna Gudipati3fd45982011-06-24 20:24:08 -07003170 bfa_fn_lpu(fcport->bfa));
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07003171 m->msgtag = fcport->msgtag;
3172
Jing Huang5fbe25c2010-10-18 17:17:23 -07003173 /*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07003174 * queue I/O message to firmware
3175 */
Krishna Gudipati3fd45982011-06-24 20:24:08 -07003176 bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT, m->mh);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07003177
3178 return BFA_TRUE;
3179}
3180
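/*
 * Set port WWNs from the IOC attributes.
 */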
3181static void
3182bfa_fcport_set_wwns(struct bfa_fcport_s *fcport)
3183{
Maggie Zhangf7f738122010-12-09 19:08:43 -08003184 fcport->pwwn = fcport->bfa->ioc.attr->pwwn;
3185 fcport->nwwn = fcport->bfa->ioc.attr->nwwn;
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07003186
3187 bfa_trc(fcport->bfa, fcport->pwwn);
3188 bfa_trc(fcport->bfa, fcport->nwwn);
3189}
3190
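/*
 * Byte-swap FC QoS statistics received from firmware into host order.
 */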
3191static void
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07003192bfa_fcport_qos_stats_swap(struct bfa_qos_stats_s *d,
3193 struct bfa_qos_stats_s *s)
3194{
3195 u32 *dip = (u32 *) d;
Maggie50444a32010-11-29 18:26:32 -08003196 __be32 *sip = (__be32 *) s;
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07003197 int i;
3198
3199 /* Now swap the 32 bit fields */
3200 for (i = 0; i < (sizeof(struct bfa_qos_stats_s)/sizeof(u32)); ++i)
Jing Huangba816ea2010-10-18 17:10:50 -07003201 dip[i] = be32_to_cpu(sip[i]);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07003202}
3203
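/*
 * Byte-swap FCoE statistics from firmware into host order, swapping
 * 32-bit word pairs to preserve the 64-bit counter layout.
 */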
3204static void
3205bfa_fcport_fcoe_stats_swap(struct bfa_fcoe_stats_s *d,
3206 struct bfa_fcoe_stats_s *s)
3207{
3208 u32 *dip = (u32 *) d;
Maggie50444a32010-11-29 18:26:32 -08003209 __be32 *sip = (__be32 *) s;
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07003210 int i;
3211
3212 for (i = 0; i < ((sizeof(struct bfa_fcoe_stats_s))/sizeof(u32));
3213 i = i + 2) {
Maggie Zhangf16a1752010-12-09 19:12:32 -08003214#ifdef __BIG_ENDIAN
Jing Huangba816ea2010-10-18 17:10:50 -07003215 dip[i] = be32_to_cpu(sip[i]);
3216 dip[i + 1] = be32_to_cpu(sip[i + 1]);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07003217#else
Jing Huangba816ea2010-10-18 17:10:50 -07003218 dip[i] = be32_to_cpu(sip[i + 1]);
3219 dip[i + 1] = be32_to_cpu(sip[i]);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07003220#endif
3221 }
3222}
3223
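/*
 * Stats-get completion: byte-swap the firmware statistics and complete
 * all callbacks queued on stats_pending_q.
 */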
3224static void
3225__bfa_cb_fcport_stats_get(void *cbarg, bfa_boolean_t complete)
3226{
Krishna Gudipati37ea0552011-07-20 17:02:11 -07003227 struct bfa_fcport_s *fcport = (struct bfa_fcport_s *)cbarg;
3228 struct bfa_cb_pending_q_s *cb;
3229 struct list_head *qe, *qen;
3230 union bfa_fcport_stats_u *ret;
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07003231
3232 if (complete) {
Krishna Gudipati37ea0552011-07-20 17:02:11 -07003233 struct timeval tv;
3234 if (fcport->stats_status == BFA_STATUS_OK)
3235 do_gettimeofday(&tv);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07003236
Krishna Gudipati37ea0552011-07-20 17:02:11 -07003237 list_for_each_safe(qe, qen, &fcport->stats_pending_q) {
3238 bfa_q_deq(&fcport->stats_pending_q, &qe);
3239 cb = (struct bfa_cb_pending_q_s *)qe;
3240 if (fcport->stats_status == BFA_STATUS_OK) {
3241 ret = (union bfa_fcport_stats_u *)cb->data;
3242 /* Swap FC QoS or FCoE stats */
3243 if (bfa_ioc_get_fcmode(&fcport->bfa->ioc))
3244 bfa_fcport_qos_stats_swap(&ret->fcqos,
3245 &fcport->stats->fcqos);
3246 else {
3247 bfa_fcport_fcoe_stats_swap(&ret->fcoe,
3248 &fcport->stats->fcoe);
3249 ret->fcoe.secs_reset =
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07003250 tv.tv_sec - fcport->stats_reset_time;
Krishna Gudipati37ea0552011-07-20 17:02:11 -07003251 }
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07003252 }
Krishna Gudipati37ea0552011-07-20 17:02:11 -07003253 bfa_cb_queue_status(fcport->bfa, &cb->hcb_qe,
3254 fcport->stats_status);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07003255 }
Krishna Gudipati37ea0552011-07-20 17:02:11 -07003256 fcport->stats_status = BFA_STATUS_OK;
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07003257 } else {
Krishna Gudipati37ea0552011-07-20 17:02:11 -07003258 INIT_LIST_HEAD(&fcport->stats_pending_q);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07003259 fcport->stats_status = BFA_STATUS_OK;
3260 }
3261}
3262
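/*
 * Stats-get request timed out: cancel any request-queue wait and
 * complete pending callbacks with a timer error.
 */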
3263static void
3264bfa_fcport_stats_get_timeout(void *cbarg)
3265{
3266 struct bfa_fcport_s *fcport = (struct bfa_fcport_s *) cbarg;
3267
3268 bfa_trc(fcport->bfa, fcport->stats_qfull);
3269
3270 if (fcport->stats_qfull) {
3271 bfa_reqq_wcancel(&fcport->stats_reqq_wait);
3272 fcport->stats_qfull = BFA_FALSE;
3273 }
3274
3275 fcport->stats_status = BFA_STATUS_ETIMER;
Krishna Gudipati37ea0552011-07-20 17:02:11 -07003276 __bfa_cb_fcport_stats_get(fcport, BFA_TRUE);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07003277}
3278
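/*
 * Send a stats-get request to firmware, waiting for request-queue
 * space if none is available.
 */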
3279static void
3280bfa_fcport_send_stats_get(void *cbarg)
3281{
3282 struct bfa_fcport_s *fcport = (struct bfa_fcport_s *) cbarg;
3283 struct bfi_fcport_req_s *msg;
3284
3285 msg = bfa_reqq_next(fcport->bfa, BFA_REQQ_PORT);
3286
3287 if (!msg) {
3288 fcport->stats_qfull = BFA_TRUE;
3289 bfa_reqq_winit(&fcport->stats_reqq_wait,
3290 bfa_fcport_send_stats_get, fcport);
3291 bfa_reqq_wait(fcport->bfa, BFA_REQQ_PORT,
3292 &fcport->stats_reqq_wait);
3293 return;
3294 }
3295 fcport->stats_qfull = BFA_FALSE;
3296
Jing Huang6a18b162010-10-18 17:08:54 -07003297 memset(msg, 0, sizeof(struct bfi_fcport_req_s));
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07003298 bfi_h2i_set(msg->mh, BFI_MC_FCPORT, BFI_FCPORT_H2I_STATS_GET_REQ,
Krishna Gudipati3fd45982011-06-24 20:24:08 -07003299 bfa_fn_lpu(fcport->bfa));
3300 bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT, msg->mh);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07003301}
3302
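/*
 * Stats-clear completion: re-arm the stats reset time stamp and
 * complete all callbacks queued on statsclr_pending_q.
 */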
3303static void
3304__bfa_cb_fcport_stats_clr(void *cbarg, bfa_boolean_t complete)
3305{
Krishna Gudipati37ea0552011-07-20 17:02:11 -07003306 struct bfa_fcport_s *fcport = (struct bfa_fcport_s *) cbarg;
3307 struct bfa_cb_pending_q_s *cb;
3308 struct list_head *qe, *qen;
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07003309
3310 if (complete) {
Maggie Zhangf16a1752010-12-09 19:12:32 -08003311 struct timeval tv;
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07003312
Jing Huang5fbe25c2010-10-18 17:17:23 -07003313 /*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07003314 * re-initialize time stamp for stats reset
3315 */
Maggie Zhangf16a1752010-12-09 19:12:32 -08003316 do_gettimeofday(&tv);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07003317 fcport->stats_reset_time = tv.tv_sec;
Krishna Gudipati37ea0552011-07-20 17:02:11 -07003318 list_for_each_safe(qe, qen, &fcport->statsclr_pending_q) {
3319 bfa_q_deq(&fcport->statsclr_pending_q, &qe);
3320 cb = (struct bfa_cb_pending_q_s *)qe;
3321 bfa_cb_queue_status(fcport->bfa, &cb->hcb_qe,
3322 fcport->stats_status);
3323 }
3324 fcport->stats_status = BFA_STATUS_OK;
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07003325 } else {
Krishna Gudipati37ea0552011-07-20 17:02:11 -07003326 INIT_LIST_HEAD(&fcport->statsclr_pending_q);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07003327 fcport->stats_status = BFA_STATUS_OK;
3328 }
3329}
3330
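/*
 * Stats-clear request timed out: cancel any request-queue wait and
 * complete pending callbacks with a timer error.
 */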
3331static void
3332bfa_fcport_stats_clr_timeout(void *cbarg)
3333{
3334 struct bfa_fcport_s *fcport = (struct bfa_fcport_s *) cbarg;
3335
3336 bfa_trc(fcport->bfa, fcport->stats_qfull);
3337
3338 if (fcport->stats_qfull) {
3339 bfa_reqq_wcancel(&fcport->stats_reqq_wait);
3340 fcport->stats_qfull = BFA_FALSE;
3341 }
3342
3343 fcport->stats_status = BFA_STATUS_ETIMER;
Krishna Gudipati37ea0552011-07-20 17:02:11 -07003344 __bfa_cb_fcport_stats_clr(fcport, BFA_TRUE);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07003345}
3346
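/*
 * Send a stats-clear request to firmware, waiting for request-queue
 * space if none is available.
 */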
3347static void
3348bfa_fcport_send_stats_clear(void *cbarg)
3349{
3350 struct bfa_fcport_s *fcport = (struct bfa_fcport_s *) cbarg;
3351 struct bfi_fcport_req_s *msg;
3352
3353 msg = bfa_reqq_next(fcport->bfa, BFA_REQQ_PORT);
3354
3355 if (!msg) {
3356 fcport->stats_qfull = BFA_TRUE;
3357 bfa_reqq_winit(&fcport->stats_reqq_wait,
3358 bfa_fcport_send_stats_clear, fcport);
3359 bfa_reqq_wait(fcport->bfa, BFA_REQQ_PORT,
3360 &fcport->stats_reqq_wait);
3361 return;
3362 }
3363 fcport->stats_qfull = BFA_FALSE;
3364
Jing Huang6a18b162010-10-18 17:08:54 -07003365 memset(msg, 0, sizeof(struct bfi_fcport_req_s));
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07003366 bfi_h2i_set(msg->mh, BFI_MC_FCPORT, BFI_FCPORT_H2I_STATS_CLEAR_REQ,
Krishna Gudipati3fd45982011-06-24 20:24:08 -07003367 bfa_fn_lpu(fcport->bfa));
3368 bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT, msg->mh);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07003369}
3370
Jing Huang5fbe25c2010-10-18 17:17:23 -07003371/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07003372 * Handle trunk SCN event from firmware.
3373 */
3374static void
3375bfa_trunk_scn(struct bfa_fcport_s *fcport, struct bfi_fcport_trunk_scn_s *scn)
3376{
3377 struct bfa_fcport_trunk_s *trunk = &fcport->trunk;
3378 struct bfi_fcport_trunk_link_s *tlink;
3379 struct bfa_trunk_link_attr_s *lattr;
3380 enum bfa_trunk_state state_prev;
3381 int i;
3382 int link_bm = 0;
3383
3384 bfa_trc(fcport->bfa, fcport->cfg.trunked);
Jing Huangd4b671c2010-12-26 21:46:35 -08003385 WARN_ON(scn->trunk_state != BFA_TRUNK_ONLINE &&
3386 scn->trunk_state != BFA_TRUNK_OFFLINE);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07003387
3388 bfa_trc(fcport->bfa, trunk->attr.state);
3389 bfa_trc(fcport->bfa, scn->trunk_state);
3390 bfa_trc(fcport->bfa, scn->trunk_speed);
3391
Jing Huang5fbe25c2010-10-18 17:17:23 -07003392 /*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07003393 * Save off new state for trunk attribute query
3394 */
3395 state_prev = trunk->attr.state;
3396 if (fcport->cfg.trunked && (trunk->attr.state != BFA_TRUNK_DISABLED))
3397 trunk->attr.state = scn->trunk_state;
3398 trunk->attr.speed = scn->trunk_speed;
3399 for (i = 0; i < BFA_TRUNK_MAX_PORTS; i++) {
3400 lattr = &trunk->attr.link_attr[i];
3401 tlink = &scn->tlink[i];
3402
3403 lattr->link_state = tlink->state;
3404 lattr->trunk_wwn = tlink->trunk_wwn;
3405 lattr->fctl = tlink->fctl;
3406 lattr->speed = tlink->speed;
Jing Huangba816ea2010-10-18 17:10:50 -07003407 lattr->deskew = be32_to_cpu(tlink->deskew);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07003408
3409 if (tlink->state == BFA_TRUNK_LINK_STATE_UP) {
3410 fcport->speed = tlink->speed;
3411 fcport->topology = BFA_PORT_TOPOLOGY_P2P;
3412 link_bm |= 1 << i;
3413 }
3414
3415 bfa_trc(fcport->bfa, lattr->link_state);
3416 bfa_trc(fcport->bfa, lattr->trunk_wwn);
3417 bfa_trc(fcport->bfa, lattr->fctl);
3418 bfa_trc(fcport->bfa, lattr->speed);
3419 bfa_trc(fcport->bfa, lattr->deskew);
3420 }
3421
3422 switch (link_bm) {
3423 case 3:
3424 bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
3425 BFA_PL_EID_TRUNK_SCN, 0, "Trunk up(0,1)");
3426 break;
3427 case 2:
3428 bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
3429 BFA_PL_EID_TRUNK_SCN, 0, "Trunk up(-,1)");
3430 break;
3431 case 1:
3432 bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
3433 BFA_PL_EID_TRUNK_SCN, 0, "Trunk up(0,-)");
3434 break;
3435 default:
3436 bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
3437 BFA_PL_EID_TRUNK_SCN, 0, "Trunk down");
3438 }
3439
Jing Huang5fbe25c2010-10-18 17:17:23 -07003440 /*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07003441 * Notify upper layers if trunk state changed.
3442 */
3443 if ((state_prev != trunk->attr.state) ||
3444 (scn->trunk_state == BFA_TRUNK_OFFLINE)) {
3445 bfa_fcport_scn(fcport, (scn->trunk_state == BFA_TRUNK_ONLINE) ?
3446 BFA_PORT_LINKUP : BFA_PORT_LINKDOWN, BFA_TRUE);
3447 }
3448}
3449
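/*
 * IOC is disabled: take the trunk offline and reset per-link attributes.
 */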
3450static void
3451bfa_trunk_iocdisable(struct bfa_s *bfa)
3452{
3453 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3454 int i = 0;
3455
Jing Huang5fbe25c2010-10-18 17:17:23 -07003456 /*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07003457 * In trunked mode, notify upper layers that link is down
3458 */
3459 if (fcport->cfg.trunked) {
3460 if (fcport->trunk.attr.state == BFA_TRUNK_ONLINE)
3461 bfa_fcport_scn(fcport, BFA_PORT_LINKDOWN, BFA_TRUE);
3462
3463 fcport->trunk.attr.state = BFA_TRUNK_OFFLINE;
3464 fcport->trunk.attr.speed = BFA_PORT_SPEED_UNKNOWN;
3465 for (i = 0; i < BFA_TRUNK_MAX_PORTS; i++) {
3466 fcport->trunk.attr.link_attr[i].trunk_wwn = 0;
3467 fcport->trunk.attr.link_attr[i].fctl =
3468 BFA_TRUNK_LINK_FCTL_NORMAL;
3469 fcport->trunk.attr.link_attr[i].link_state =
3470 BFA_TRUNK_LINK_STATE_DN_LINKDN;
3471 fcport->trunk.attr.link_attr[i].speed =
3472 BFA_PORT_SPEED_UNKNOWN;
3473 fcport->trunk.attr.link_attr[i].deskew = 0;
3474 }
3475 }
3476}
3477
Jing Huang5fbe25c2010-10-18 17:17:23 -07003478/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07003479 * Called to initialize port attributes
3480 */
3481void
3482bfa_fcport_init(struct bfa_s *bfa)
3483{
3484 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3485
Jing Huang5fbe25c2010-10-18 17:17:23 -07003486 /*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07003487 * Initialize port attributes from IOC hardware data.
3488 */
3489 bfa_fcport_set_wwns(fcport);
3490 if (fcport->cfg.maxfrsize == 0)
3491 fcport->cfg.maxfrsize = bfa_ioc_maxfrsize(&bfa->ioc);
3492 fcport->cfg.rx_bbcredit = bfa_ioc_rx_bbcredit(&bfa->ioc);
3493 fcport->speed_sup = bfa_ioc_speed_sup(&bfa->ioc);
3494
Krishna Gudipati43ffdf42011-06-13 15:46:21 -07003495 if (bfa_fcport_is_pbcdisabled(bfa))
3496 bfa->modules.port.pbc_disabled = BFA_TRUE;
3497
Jing Huangd4b671c2010-12-26 21:46:35 -08003498 WARN_ON(!fcport->cfg.maxfrsize);
3499 WARN_ON(!fcport->cfg.rx_bbcredit);
3500 WARN_ON(!fcport->speed_sup);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07003501}
3502
Jing Huang5fbe25c2010-10-18 17:17:23 -07003503/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07003504 * Firmware message handler.
3505 */
3506void
3507bfa_fcport_isr(struct bfa_s *bfa, struct bfi_msg_s *msg)
3508{
3509 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3510 union bfi_fcport_i2h_msg_u i2hmsg;
3511
3512 i2hmsg.msg = msg;
3513 fcport->event_arg.i2hmsg = i2hmsg;
3514
3515 bfa_trc(bfa, msg->mhdr.msg_id);
3516 bfa_trc(bfa, bfa_sm_to_state(hal_port_sm_table, fcport->sm));
3517
3518 switch (msg->mhdr.msg_id) {
3519 case BFI_FCPORT_I2H_ENABLE_RSP:
Krishna Gudipatif3a060c2010-12-13 16:16:50 -08003520 if (fcport->msgtag == i2hmsg.penable_rsp->msgtag) {
3521
3522 if (fcport->use_flash_cfg) {
3523 fcport->cfg = i2hmsg.penable_rsp->port_cfg;
3524 fcport->cfg.maxfrsize =
3525 cpu_to_be16(fcport->cfg.maxfrsize);
3526 fcport->cfg.path_tov =
3527 cpu_to_be16(fcport->cfg.path_tov);
3528 fcport->cfg.q_depth =
3529 cpu_to_be16(fcport->cfg.q_depth);
3530
3531 if (fcport->cfg.trunked)
3532 fcport->trunk.attr.state =
3533 BFA_TRUNK_OFFLINE;
3534 else
3535 fcport->trunk.attr.state =
3536 BFA_TRUNK_DISABLED;
3537 fcport->use_flash_cfg = BFA_FALSE;
3538 }
3539
Krishna Gudipati3ec4f2c2011-07-20 17:03:09 -07003540 if (fcport->cfg.qos_enabled)
3541 fcport->qos_attr.state = BFA_QOS_OFFLINE;
3542 else
3543 fcport->qos_attr.state = BFA_QOS_DISABLED;
3544
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07003545 bfa_sm_send_event(fcport, BFA_FCPORT_SM_FWRSP);
Krishna Gudipatif3a060c2010-12-13 16:16:50 -08003546 }
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07003547 break;
3548
3549 case BFI_FCPORT_I2H_DISABLE_RSP:
3550 if (fcport->msgtag == i2hmsg.penable_rsp->msgtag)
3551 bfa_sm_send_event(fcport, BFA_FCPORT_SM_FWRSP);
3552 break;
3553
3554 case BFI_FCPORT_I2H_EVENT:
3555 if (i2hmsg.event->link_state.linkstate == BFA_PORT_LINKUP)
3556 bfa_sm_send_event(fcport, BFA_FCPORT_SM_LINKUP);
3557 else
3558 bfa_sm_send_event(fcport, BFA_FCPORT_SM_LINKDOWN);
3559 break;
3560
3561 case BFI_FCPORT_I2H_TRUNK_SCN:
3562 bfa_trunk_scn(fcport, i2hmsg.trunk_scn);
3563 break;
3564
3565 case BFI_FCPORT_I2H_STATS_GET_RSP:
3566 /*
3567 * check for timer pop before processing the rsp
3568 */
Krishna Gudipati37ea0552011-07-20 17:02:11 -07003569 if (list_empty(&fcport->stats_pending_q) ||
3570 (fcport->stats_status == BFA_STATUS_ETIMER))
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07003571 break;
3572
3573 bfa_timer_stop(&fcport->timer);
3574 fcport->stats_status = i2hmsg.pstatsget_rsp->status;
Krishna Gudipati37ea0552011-07-20 17:02:11 -07003575 __bfa_cb_fcport_stats_get(fcport, BFA_TRUE);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07003576 break;
3577
3578 case BFI_FCPORT_I2H_STATS_CLEAR_RSP:
3579 /*
3580 * check for timer pop before processing the rsp
3581 */
Krishna Gudipati37ea0552011-07-20 17:02:11 -07003582 if (list_empty(&fcport->statsclr_pending_q) ||
3583 (fcport->stats_status == BFA_STATUS_ETIMER))
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07003584 break;
3585
3586 bfa_timer_stop(&fcport->timer);
3587 fcport->stats_status = BFA_STATUS_OK;
Krishna Gudipati37ea0552011-07-20 17:02:11 -07003588 __bfa_cb_fcport_stats_clr(fcport, BFA_TRUE);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07003589 break;
3590
3591 case BFI_FCPORT_I2H_ENABLE_AEN:
3592 bfa_sm_send_event(fcport, BFA_FCPORT_SM_ENABLE);
3593 break;
3594
3595 case BFI_FCPORT_I2H_DISABLE_AEN:
3596 bfa_sm_send_event(fcport, BFA_FCPORT_SM_DISABLE);
3597 break;
3598
3599 default:
Jing Huangd4b671c2010-12-26 21:46:35 -08003600 WARN_ON(1);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07003601 break;
3602 }
3603}
3604
Jing Huang5fbe25c2010-10-18 17:17:23 -07003605/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07003606 * Registered callback for port events.
3607 */
3608void
3609bfa_fcport_event_register(struct bfa_s *bfa,
3610 void (*cbfn) (void *cbarg,
3611 enum bfa_port_linkstate event),
3612 void *cbarg)
3613{
3614 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3615
3616 fcport->event_cbfn = cbfn;
3617 fcport->event_cbarg = cbarg;
3618}
3619
3620bfa_status_t
3621bfa_fcport_enable(struct bfa_s *bfa)
3622{
3623 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3624
Krishna Gudipati43ffdf42011-06-13 15:46:21 -07003625 if (bfa_fcport_is_pbcdisabled(bfa))
3626 return BFA_STATUS_PBC;
3627
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07003628 if (bfa_ioc_is_disabled(&bfa->ioc))
3629 return BFA_STATUS_IOC_DISABLED;
3630
3631 if (fcport->diag_busy)
3632 return BFA_STATUS_DIAG_BUSY;
3633
3634 bfa_sm_send_event(BFA_FCPORT_MOD(bfa), BFA_FCPORT_SM_ENABLE);
3635 return BFA_STATUS_OK;
3636}
3637
3638bfa_status_t
3639bfa_fcport_disable(struct bfa_s *bfa)
3640{
Krishna Gudipati43ffdf42011-06-13 15:46:21 -07003641 if (bfa_fcport_is_pbcdisabled(bfa))
3642 return BFA_STATUS_PBC;
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07003643
3644 if (bfa_ioc_is_disabled(&bfa->ioc))
3645 return BFA_STATUS_IOC_DISABLED;
3646
3647 bfa_sm_send_event(BFA_FCPORT_MOD(bfa), BFA_FCPORT_SM_DISABLE);
3648 return BFA_STATUS_OK;
3649}
3650
Krishna Gudipati43ffdf42011-06-13 15:46:21 -07003651/* If PBC is disabled on port, return error */
3652bfa_status_t
3653bfa_fcport_is_pbcdisabled(struct bfa_s *bfa)
3654{
3655 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3656 struct bfa_iocfc_s *iocfc = &bfa->iocfc;
3657 struct bfi_iocfc_cfgrsp_s *cfgrsp = iocfc->cfgrsp;
3658
3659 if (cfgrsp->pbc_cfg.port_enabled == BFI_PBC_PORT_DISABLED) {
3660 bfa_trc(bfa, fcport->pwwn);
3661 return BFA_STATUS_PBC;
3662 }
3663 return BFA_STATUS_OK;
3664}
3665
Jing Huang5fbe25c2010-10-18 17:17:23 -07003666/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07003667 * Configure port speed.
3668 */
3669bfa_status_t
3670bfa_fcport_cfg_speed(struct bfa_s *bfa, enum bfa_port_speed speed)
3671{
3672 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3673
3674 bfa_trc(bfa, speed);
3675
3676 if (fcport->cfg.trunked == BFA_TRUE)
3677 return BFA_STATUS_TRUNK_ENABLED;
Krishna Gudipatibc0e2c22012-09-21 17:23:59 -07003678 if ((fcport->cfg.topology == BFA_PORT_TOPOLOGY_LOOP) &&
3679 (speed == BFA_PORT_SPEED_16GBPS))
3680 return BFA_STATUS_UNSUPP_SPEED;
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07003681 if ((speed != BFA_PORT_SPEED_AUTO) && (speed > fcport->speed_sup)) {
3682 bfa_trc(bfa, fcport->speed_sup);
3683 return BFA_STATUS_UNSUPP_SPEED;
3684 }
3685
Krishna Gudipatibd5a0262012-03-13 17:41:02 -07003686	/* Validate the requested port speed */
3687 if (bfa_ioc_get_type(&fcport->bfa->ioc) == BFA_IOC_TYPE_FC) {
3688 /* For CT2, 1G is not supported */
3689 if ((speed == BFA_PORT_SPEED_1GBPS) &&
3690 (bfa_asic_id_ct2(bfa->ioc.pcidev.device_id)))
3691 return BFA_STATUS_UNSUPP_SPEED;
Krishna Gudipatia7141342011-06-24 20:23:19 -07003692
Krishna Gudipatibd5a0262012-03-13 17:41:02 -07003693 /* Already checked for Auto Speed and Max Speed supp */
3694 if (!(speed == BFA_PORT_SPEED_1GBPS ||
3695 speed == BFA_PORT_SPEED_2GBPS ||
3696 speed == BFA_PORT_SPEED_4GBPS ||
3697 speed == BFA_PORT_SPEED_8GBPS ||
3698 speed == BFA_PORT_SPEED_16GBPS ||
3699 speed == BFA_PORT_SPEED_AUTO))
3700 return BFA_STATUS_UNSUPP_SPEED;
3701 } else {
3702 if (speed != BFA_PORT_SPEED_10GBPS)
3703 return BFA_STATUS_UNSUPP_SPEED;
Krishna Gudipatia7141342011-06-24 20:23:19 -07003704 }
3705
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07003706 fcport->cfg.speed = speed;
3707
3708 return BFA_STATUS_OK;
3709}
3710
Jing Huang5fbe25c2010-10-18 17:17:23 -07003711/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07003712 * Get current speed.
3713 */
3714enum bfa_port_speed
3715bfa_fcport_get_speed(struct bfa_s *bfa)
3716{
3717 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3718
3719 return fcport->speed;
3720}
3721
Jing Huang5fbe25c2010-10-18 17:17:23 -07003722/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07003723 * Configure port topology.
3724 */
3725bfa_status_t
3726bfa_fcport_cfg_topology(struct bfa_s *bfa, enum bfa_port_topology topology)
3727{
3728 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3729
3730 bfa_trc(bfa, topology);
3731 bfa_trc(bfa, fcport->cfg.topology);
3732
3733 switch (topology) {
3734 case BFA_PORT_TOPOLOGY_P2P:
Krishna Gudipatibc0e2c22012-09-21 17:23:59 -07003735 break;
3736
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07003737 case BFA_PORT_TOPOLOGY_LOOP:
Krishna Gudipatibc0e2c22012-09-21 17:23:59 -07003738 if ((bfa_fcport_is_qos_enabled(bfa) != BFA_FALSE) ||
3739 (fcport->qos_attr.state != BFA_QOS_DISABLED))
3740 return BFA_STATUS_ERROR_QOS_ENABLED;
3741 if (fcport->cfg.ratelimit != BFA_FALSE)
3742 return BFA_STATUS_ERROR_TRL_ENABLED;
3743 if ((bfa_fcport_is_trunk_enabled(bfa) != BFA_FALSE) ||
3744 (fcport->trunk.attr.state != BFA_TRUNK_DISABLED))
3745 return BFA_STATUS_ERROR_TRUNK_ENABLED;
3746 if ((bfa_fcport_get_speed(bfa) == BFA_PORT_SPEED_16GBPS) ||
3747 (fcport->cfg.speed == BFA_PORT_SPEED_16GBPS))
3748 return BFA_STATUS_UNSUPP_SPEED;
3749 if (bfa_mfg_is_mezz(bfa->ioc.attr->card_type))
3750 return BFA_STATUS_LOOP_UNSUPP_MEZZ;
Krishna Gudipatie3535462012-09-21 17:26:07 -07003751 if (bfa_fcport_is_dport(bfa) != BFA_FALSE)
3752 return BFA_STATUS_DPORT_ERR;
Krishna Gudipatibc0e2c22012-09-21 17:23:59 -07003753 break;
3754
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07003755 case BFA_PORT_TOPOLOGY_AUTO:
3756 break;
3757
3758 default:
3759 return BFA_STATUS_EINVAL;
3760 }
3761
3762 fcport->cfg.topology = topology;
3763 return BFA_STATUS_OK;
3764}
3765
Jing Huang5fbe25c2010-10-18 17:17:23 -07003766/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07003767 * Get current topology.
3768 */
3769enum bfa_port_topology
3770bfa_fcport_get_topology(struct bfa_s *bfa)
3771{
3772 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3773
3774 return fcport->topology;
3775}
3776
Krishna Gudipatibc0e2c22012-09-21 17:23:59 -07003777/*
3778 * Get config topology.
3779 */
3780enum bfa_port_topology
3781bfa_fcport_get_cfg_topology(struct bfa_s *bfa)
3782{
3783 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3784
3785 return fcport->cfg.topology;
3786}
3787
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07003788bfa_status_t
3789bfa_fcport_cfg_hardalpa(struct bfa_s *bfa, u8 alpa)
3790{
3791 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3792
3793 bfa_trc(bfa, alpa);
3794 bfa_trc(bfa, fcport->cfg.cfg_hardalpa);
3795 bfa_trc(bfa, fcport->cfg.hardalpa);
3796
3797 fcport->cfg.cfg_hardalpa = BFA_TRUE;
3798 fcport->cfg.hardalpa = alpa;
3799
3800 return BFA_STATUS_OK;
3801}
3802
3803bfa_status_t
3804bfa_fcport_clr_hardalpa(struct bfa_s *bfa)
3805{
3806 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3807
3808 bfa_trc(bfa, fcport->cfg.cfg_hardalpa);
3809 bfa_trc(bfa, fcport->cfg.hardalpa);
3810
3811 fcport->cfg.cfg_hardalpa = BFA_FALSE;
3812 return BFA_STATUS_OK;
3813}
3814
3815bfa_boolean_t
3816bfa_fcport_get_hardalpa(struct bfa_s *bfa, u8 *alpa)
3817{
3818 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3819
3820 *alpa = fcport->cfg.hardalpa;
3821 return fcport->cfg.cfg_hardalpa;
3822}
3823
3824u8
3825bfa_fcport_get_myalpa(struct bfa_s *bfa)
3826{
3827 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3828
3829 return fcport->myalpa;
3830}
3831
3832bfa_status_t
3833bfa_fcport_cfg_maxfrsize(struct bfa_s *bfa, u16 maxfrsize)
3834{
3835 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3836
3837 bfa_trc(bfa, maxfrsize);
3838 bfa_trc(bfa, fcport->cfg.maxfrsize);
3839
3840 /* with in range */
3841	/* within range */
3842 return BFA_STATUS_INVLD_DFSZ;
3843
3844 /* power of 2, if not the max frame size of 2112 */
3845 if ((maxfrsize != FC_MAX_PDUSZ) && (maxfrsize & (maxfrsize - 1)))
3846 return BFA_STATUS_INVLD_DFSZ;
3847
3848 fcport->cfg.maxfrsize = maxfrsize;
3849 return BFA_STATUS_OK;
3850}
3851
3852u16
3853bfa_fcport_get_maxfrsize(struct bfa_s *bfa)
3854{
3855 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3856
3857 return fcport->cfg.maxfrsize;
3858}
3859
3860u8
3861bfa_fcport_get_rx_bbcredit(struct bfa_s *bfa)
3862{
Krishna Gudipatibc0e2c22012-09-21 17:23:59 -07003863 if (bfa_fcport_get_topology(bfa) != BFA_PORT_TOPOLOGY_LOOP)
3864 return (BFA_FCPORT_MOD(bfa))->cfg.rx_bbcredit;
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07003865
Krishna Gudipatibc0e2c22012-09-21 17:23:59 -07003866 else
3867 return 0;
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07003868}
3869
3870void
Krishna Gudipatibe540a92011-06-13 15:53:04 -07003871bfa_fcport_set_tx_bbcredit(struct bfa_s *bfa, u16 tx_bbcredit, u8 bb_scn)
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07003872{
3873 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3874
3875 fcport->cfg.tx_bbcredit = (u8)tx_bbcredit;
Krishna Gudipatibe540a92011-06-13 15:53:04 -07003876 fcport->cfg.bb_scn = bb_scn;
3877 if (bb_scn)
3878 fcport->bbsc_op_state = BFA_TRUE;
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07003879}
3880
Jing Huang5fbe25c2010-10-18 17:17:23 -07003881/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07003882 * Get port attributes.
3883 */
3884
3885wwn_t
3886bfa_fcport_get_wwn(struct bfa_s *bfa, bfa_boolean_t node)
3887{
3888 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3889 if (node)
3890 return fcport->nwwn;
3891 else
3892 return fcport->pwwn;
3893}
3894
3895void
3896bfa_fcport_get_attr(struct bfa_s *bfa, struct bfa_port_attr_s *attr)
3897{
3898 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3899
Jing Huang6a18b162010-10-18 17:08:54 -07003900 memset(attr, 0, sizeof(struct bfa_port_attr_s));
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07003901
3902 attr->nwwn = fcport->nwwn;
3903 attr->pwwn = fcport->pwwn;
3904
Maggie Zhangf7f738122010-12-09 19:08:43 -08003905 attr->factorypwwn = bfa->ioc.attr->mfg_pwwn;
3906 attr->factorynwwn = bfa->ioc.attr->mfg_nwwn;
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07003907
Jing Huang6a18b162010-10-18 17:08:54 -07003908 memcpy(&attr->pport_cfg, &fcport->cfg,
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07003909 sizeof(struct bfa_port_cfg_s));
3910 /* speed attributes */
3911 attr->pport_cfg.speed = fcport->cfg.speed;
3912 attr->speed_supported = fcport->speed_sup;
3913 attr->speed = fcport->speed;
3914 attr->cos_supported = FC_CLASS_3;
3915
3916 /* topology attributes */
3917 attr->pport_cfg.topology = fcport->cfg.topology;
3918 attr->topology = fcport->topology;
3919 attr->pport_cfg.trunked = fcport->cfg.trunked;
3920
3921 /* beacon attributes */
3922 attr->beacon = fcport->beacon;
3923 attr->link_e2e_beacon = fcport->link_e2e_beacon;
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07003924
3925 attr->pport_cfg.path_tov = bfa_fcpim_path_tov_get(bfa);
3926 attr->pport_cfg.q_depth = bfa_fcpim_qdepth_get(bfa);
3927 attr->port_state = bfa_sm_to_state(hal_port_sm_table, fcport->sm);
Krishna Gudipatibe540a92011-06-13 15:53:04 -07003928 attr->bbsc_op_status = fcport->bbsc_op_state;
Krishna Gudipati43ffdf42011-06-13 15:46:21 -07003929
3930 /* PBC Disabled State */
3931 if (bfa_fcport_is_pbcdisabled(bfa))
3932 attr->port_state = BFA_PORT_ST_PREBOOT_DISABLED;
3933 else {
3934 if (bfa_ioc_is_disabled(&fcport->bfa->ioc))
3935 attr->port_state = BFA_PORT_ST_IOCDIS;
3936 else if (bfa_ioc_fw_mismatch(&fcport->bfa->ioc))
3937 attr->port_state = BFA_PORT_ST_FWMISMATCH;
3938 }
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07003939
3940 /* FCoE vlan */
3941 attr->fcoe_vlan = fcport->fcoe_vlan;
3942}
3943
3944#define BFA_FCPORT_STATS_TOV 1000
3945
Jing Huang5fbe25c2010-10-18 17:17:23 -07003946/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07003947 * Fetch port statistics (FCQoS or FCoE).
3948 */
3949bfa_status_t
Krishna Gudipati37ea0552011-07-20 17:02:11 -07003950bfa_fcport_get_stats(struct bfa_s *bfa, struct bfa_cb_pending_q_s *cb)
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07003951{
3952 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3953
Krishna Gudipati37ea0552011-07-20 17:02:11 -07003954 if (bfa_ioc_is_disabled(&bfa->ioc))
3955 return BFA_STATUS_IOC_DISABLED;
3956
3957 if (!list_empty(&fcport->statsclr_pending_q))
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07003958 return BFA_STATUS_DEVBUSY;
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07003959
Krishna Gudipati37ea0552011-07-20 17:02:11 -07003960 if (list_empty(&fcport->stats_pending_q)) {
3961 list_add_tail(&cb->hcb_qe.qe, &fcport->stats_pending_q);
3962 bfa_fcport_send_stats_get(fcport);
3963 bfa_timer_start(bfa, &fcport->timer,
3964 bfa_fcport_stats_get_timeout,
3965 fcport, BFA_FCPORT_STATS_TOV);
3966 } else
3967 list_add_tail(&cb->hcb_qe.qe, &fcport->stats_pending_q);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07003968
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07003969 return BFA_STATUS_OK;
3970}
3971
Jing Huang5fbe25c2010-10-18 17:17:23 -07003972/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07003973 * Reset port statistics (FCQoS or FCoE).
3974 */
3975bfa_status_t
Krishna Gudipati37ea0552011-07-20 17:02:11 -07003976bfa_fcport_clear_stats(struct bfa_s *bfa, struct bfa_cb_pending_q_s *cb)
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07003977{
3978 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3979
Krishna Gudipati37ea0552011-07-20 17:02:11 -07003980 if (!list_empty(&fcport->stats_pending_q))
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07003981 return BFA_STATUS_DEVBUSY;
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07003982
Krishna Gudipati37ea0552011-07-20 17:02:11 -07003983 if (list_empty(&fcport->statsclr_pending_q)) {
3984 list_add_tail(&cb->hcb_qe.qe, &fcport->statsclr_pending_q);
3985 bfa_fcport_send_stats_clear(fcport);
3986 bfa_timer_start(bfa, &fcport->timer,
3987 bfa_fcport_stats_clr_timeout,
3988 fcport, BFA_FCPORT_STATS_TOV);
3989 } else
3990 list_add_tail(&cb->hcb_qe.qe, &fcport->statsclr_pending_q);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07003991
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07003992 return BFA_STATUS_OK;
3993}
3994
Jing Huang5fbe25c2010-10-18 17:17:23 -07003995/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07003996 * Fetch port attributes.
3997 */
3998bfa_boolean_t
3999bfa_fcport_is_disabled(struct bfa_s *bfa)
4000{
4001 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
4002
4003 return bfa_sm_to_state(hal_port_sm_table, fcport->sm) ==
4004 BFA_PORT_ST_DISABLED;
4005
4006}
4007
4008bfa_boolean_t
Krishna Gudipatie3535462012-09-21 17:26:07 -07004009bfa_fcport_is_dport(struct bfa_s *bfa)
4010{
4011 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
4012
4013 return (bfa_sm_to_state(hal_port_sm_table, fcport->sm) ==
4014 BFA_PORT_ST_DPORT);
4015}
4016
4017bfa_boolean_t
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07004018bfa_fcport_is_ratelim(struct bfa_s *bfa)
4019{
4020 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
4021
4022 return fcport->cfg.ratelimit ? BFA_TRUE : BFA_FALSE;
4023
4024}
4025
Jing Huang5fbe25c2010-10-18 17:17:23 -07004026/*
Krishna Gudipatia7141342011-06-24 20:23:19 -07004027 * Enable/Disable FAA feature in port config
4028 */
4029void
4030bfa_fcport_cfg_faa(struct bfa_s *bfa, u8 state)
4031{
4032 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
4033
4034 bfa_trc(bfa, state);
4035 fcport->cfg.faa_state = state;
4036}
4037
4038/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07004039 * Get default minimum ratelim speed
4040 */
4041enum bfa_port_speed
4042bfa_fcport_get_ratelim_speed(struct bfa_s *bfa)
4043{
4044 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
4045
4046 bfa_trc(bfa, fcport->cfg.trl_def_speed);
4047 return fcport->cfg.trl_def_speed;
4048
4049}
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07004050
Krishna Gudipati3d7fc662011-06-24 20:28:17 -07004051void
4052bfa_fcport_beacon(void *dev, bfa_boolean_t beacon,
4053 bfa_boolean_t link_e2e_beacon)
4054{
4055 struct bfa_s *bfa = dev;
4056 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
4057
4058 bfa_trc(bfa, beacon);
4059 bfa_trc(bfa, link_e2e_beacon);
4060 bfa_trc(bfa, fcport->beacon);
4061 bfa_trc(bfa, fcport->link_e2e_beacon);
4062
4063 fcport->beacon = beacon;
4064 fcport->link_e2e_beacon = link_e2e_beacon;
4065}
4066
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07004067bfa_boolean_t
4068bfa_fcport_is_linkup(struct bfa_s *bfa)
4069{
4070 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
4071
4072 return (!fcport->cfg.trunked &&
4073 bfa_sm_cmp_state(fcport, bfa_fcport_sm_linkup)) ||
4074 (fcport->cfg.trunked &&
4075 fcport->trunk.attr.state == BFA_TRUNK_ONLINE);
4076}
4077
4078bfa_boolean_t
4079bfa_fcport_is_qos_enabled(struct bfa_s *bfa)
4080{
4081 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
4082
4083 return fcport->cfg.qos_enabled;
4084}
4085
Krishna Gudipatibe540a92011-06-13 15:53:04 -07004086bfa_boolean_t
4087bfa_fcport_is_trunk_enabled(struct bfa_s *bfa)
4088{
4089 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
4090
4091 return fcport->cfg.trunked;
4092}
4093
Krishna Gudipatie3535462012-09-21 17:26:07 -07004094void
4095bfa_fcport_dportenable(struct bfa_s *bfa)
4096{
4097 /*
4098	 * Assume the caller has checked that the port is in disabled state
4099 */
4100 bfa_sm_send_event(BFA_FCPORT_MOD(bfa), BFA_FCPORT_SM_DPORTENABLE);
4101 bfa_port_set_dportenabled(&bfa->modules.port, BFA_TRUE);
4102}
4103
4104void
4105bfa_fcport_dportdisable(struct bfa_s *bfa)
4106{
4107 /*
4108	 * Assume the caller has checked that the port is in disabled state
4109 */
4110 bfa_sm_send_event(BFA_FCPORT_MOD(bfa), BFA_FCPORT_SM_DPORTDISABLE);
4111 bfa_port_set_dportenabled(&bfa->modules.port, BFA_FALSE);
4112}
4113
Jing Huang5fbe25c2010-10-18 17:17:23 -07004114/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07004115 * Rport State machine functions
4116 */
Jing Huang5fbe25c2010-10-18 17:17:23 -07004117/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07004118 * Beginning state, only online event expected.
4119 */
4120static void
4121bfa_rport_sm_uninit(struct bfa_rport_s *rp, enum bfa_rport_event event)
4122{
4123 bfa_trc(rp->bfa, rp->rport_tag);
4124 bfa_trc(rp->bfa, event);
4125
4126 switch (event) {
4127 case BFA_RPORT_SM_CREATE:
4128 bfa_stats(rp, sm_un_cr);
4129 bfa_sm_set_state(rp, bfa_rport_sm_created);
4130 break;
4131
4132 default:
4133 bfa_stats(rp, sm_un_unexp);
4134 bfa_sm_fault(rp->bfa, event);
4135 }
4136}
4137
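/*
 * Rport created - awaiting online event to create the firmware rport.
 */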
4138static void
4139bfa_rport_sm_created(struct bfa_rport_s *rp, enum bfa_rport_event event)
4140{
4141 bfa_trc(rp->bfa, rp->rport_tag);
4142 bfa_trc(rp->bfa, event);
4143
4144 switch (event) {
4145 case BFA_RPORT_SM_ONLINE:
4146 bfa_stats(rp, sm_cr_on);
4147 if (bfa_rport_send_fwcreate(rp))
4148 bfa_sm_set_state(rp, bfa_rport_sm_fwcreate);
4149 else
4150 bfa_sm_set_state(rp, bfa_rport_sm_fwcreate_qfull);
4151 break;
4152
4153 case BFA_RPORT_SM_DELETE:
4154 bfa_stats(rp, sm_cr_del);
4155 bfa_sm_set_state(rp, bfa_rport_sm_uninit);
4156 bfa_rport_free(rp);
4157 break;
4158
4159 case BFA_RPORT_SM_HWFAIL:
4160 bfa_stats(rp, sm_cr_hwf);
4161 bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
4162 break;
4163
4164 default:
4165 bfa_stats(rp, sm_cr_unexp);
4166 bfa_sm_fault(rp->bfa, event);
4167 }
4168}
4169
Jing Huang5fbe25c2010-10-18 17:17:23 -07004170/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07004171 * Waiting for rport create response from firmware.
4172 */
4173static void
4174bfa_rport_sm_fwcreate(struct bfa_rport_s *rp, enum bfa_rport_event event)
4175{
4176 bfa_trc(rp->bfa, rp->rport_tag);
4177 bfa_trc(rp->bfa, event);
4178
4179 switch (event) {
4180 case BFA_RPORT_SM_FWRSP:
4181 bfa_stats(rp, sm_fwc_rsp);
4182 bfa_sm_set_state(rp, bfa_rport_sm_online);
4183 bfa_rport_online_cb(rp);
4184 break;
4185
4186 case BFA_RPORT_SM_DELETE:
4187 bfa_stats(rp, sm_fwc_del);
4188 bfa_sm_set_state(rp, bfa_rport_sm_delete_pending);
4189 break;
4190
4191 case BFA_RPORT_SM_OFFLINE:
4192 bfa_stats(rp, sm_fwc_off);
4193 bfa_sm_set_state(rp, bfa_rport_sm_offline_pending);
4194 break;
4195
4196 case BFA_RPORT_SM_HWFAIL:
4197 bfa_stats(rp, sm_fwc_hwf);
4198 bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
4199 break;
4200
4201 default:
4202 bfa_stats(rp, sm_fwc_unexp);
4203 bfa_sm_fault(rp->bfa, event);
4204 }
4205}
4206
Jing Huang5fbe25c2010-10-18 17:17:23 -07004207/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07004208 * Request queue is full, awaiting queue resume to send create request.
4209 */
4210static void
4211bfa_rport_sm_fwcreate_qfull(struct bfa_rport_s *rp, enum bfa_rport_event event)
4212{
4213 bfa_trc(rp->bfa, rp->rport_tag);
4214 bfa_trc(rp->bfa, event);
4215
4216 switch (event) {
4217 case BFA_RPORT_SM_QRESUME:
4218 bfa_sm_set_state(rp, bfa_rport_sm_fwcreate);
4219 bfa_rport_send_fwcreate(rp);
4220 break;
4221
4222 case BFA_RPORT_SM_DELETE:
4223 bfa_stats(rp, sm_fwc_del);
4224 bfa_sm_set_state(rp, bfa_rport_sm_uninit);
4225 bfa_reqq_wcancel(&rp->reqq_wait);
4226 bfa_rport_free(rp);
4227 break;
4228
4229 case BFA_RPORT_SM_OFFLINE:
4230 bfa_stats(rp, sm_fwc_off);
4231 bfa_sm_set_state(rp, bfa_rport_sm_offline);
4232 bfa_reqq_wcancel(&rp->reqq_wait);
4233 bfa_rport_offline_cb(rp);
4234 break;
4235
4236 case BFA_RPORT_SM_HWFAIL:
4237 bfa_stats(rp, sm_fwc_hwf);
4238 bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
4239 bfa_reqq_wcancel(&rp->reqq_wait);
4240 break;
4241
4242 default:
4243 bfa_stats(rp, sm_fwc_unexp);
4244 bfa_sm_fault(rp->bfa, event);
4245 }
4246}
4247
Jing Huang5fbe25c2010-10-18 17:17:23 -07004248/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07004249 * Online state - normal parking state.
4250 */
4251static void
4252bfa_rport_sm_online(struct bfa_rport_s *rp, enum bfa_rport_event event)
4253{
4254 struct bfi_rport_qos_scn_s *qos_scn;
4255
4256 bfa_trc(rp->bfa, rp->rport_tag);
4257 bfa_trc(rp->bfa, event);
4258
4259 switch (event) {
4260 case BFA_RPORT_SM_OFFLINE:
4261 bfa_stats(rp, sm_on_off);
4262 if (bfa_rport_send_fwdelete(rp))
4263 bfa_sm_set_state(rp, bfa_rport_sm_fwdelete);
4264 else
4265 bfa_sm_set_state(rp, bfa_rport_sm_fwdelete_qfull);
4266 break;
4267
4268 case BFA_RPORT_SM_DELETE:
4269 bfa_stats(rp, sm_on_del);
4270 if (bfa_rport_send_fwdelete(rp))
4271 bfa_sm_set_state(rp, bfa_rport_sm_deleting);
4272 else
4273 bfa_sm_set_state(rp, bfa_rport_sm_deleting_qfull);
4274 break;
4275
4276 case BFA_RPORT_SM_HWFAIL:
4277 bfa_stats(rp, sm_on_hwf);
4278 bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
4279 break;
4280
4281 case BFA_RPORT_SM_SET_SPEED:
4282 bfa_rport_send_fwspeed(rp);
4283 break;
4284
4285 case BFA_RPORT_SM_QOS_SCN:
4286 qos_scn = (struct bfi_rport_qos_scn_s *) rp->event_arg.fw_msg;
4287 rp->qos_attr = qos_scn->new_qos_attr;
4288 bfa_trc(rp->bfa, qos_scn->old_qos_attr.qos_flow_id);
4289 bfa_trc(rp->bfa, qos_scn->new_qos_attr.qos_flow_id);
4290 bfa_trc(rp->bfa, qos_scn->old_qos_attr.qos_priority);
4291 bfa_trc(rp->bfa, qos_scn->new_qos_attr.qos_priority);
4292
4293 qos_scn->old_qos_attr.qos_flow_id =
Jing Huangba816ea2010-10-18 17:10:50 -07004294 be32_to_cpu(qos_scn->old_qos_attr.qos_flow_id);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07004295 qos_scn->new_qos_attr.qos_flow_id =
Jing Huangba816ea2010-10-18 17:10:50 -07004296 be32_to_cpu(qos_scn->new_qos_attr.qos_flow_id);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07004297
4298 if (qos_scn->old_qos_attr.qos_flow_id !=
4299 qos_scn->new_qos_attr.qos_flow_id)
4300 bfa_cb_rport_qos_scn_flowid(rp->rport_drv,
4301 qos_scn->old_qos_attr,
4302 qos_scn->new_qos_attr);
4303 if (qos_scn->old_qos_attr.qos_priority !=
4304 qos_scn->new_qos_attr.qos_priority)
4305 bfa_cb_rport_qos_scn_prio(rp->rport_drv,
4306 qos_scn->old_qos_attr,
4307 qos_scn->new_qos_attr);
4308 break;
4309
4310 default:
4311 bfa_stats(rp, sm_on_unexp);
4312 bfa_sm_fault(rp->bfa, event);
4313 }
4314}
4315
Jing Huang5fbe25c2010-10-18 17:17:23 -07004316/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07004317 * Firmware rport is being deleted - awaiting f/w response.
4318 */
4319static void
4320bfa_rport_sm_fwdelete(struct bfa_rport_s *rp, enum bfa_rport_event event)
4321{
4322 bfa_trc(rp->bfa, rp->rport_tag);
4323 bfa_trc(rp->bfa, event);
4324
4325 switch (event) {
4326 case BFA_RPORT_SM_FWRSP:
4327 bfa_stats(rp, sm_fwd_rsp);
4328 bfa_sm_set_state(rp, bfa_rport_sm_offline);
4329 bfa_rport_offline_cb(rp);
4330 break;
4331
4332 case BFA_RPORT_SM_DELETE:
4333 bfa_stats(rp, sm_fwd_del);
4334 bfa_sm_set_state(rp, bfa_rport_sm_deleting);
4335 break;
4336
4337 case BFA_RPORT_SM_HWFAIL:
4338 bfa_stats(rp, sm_fwd_hwf);
4339 bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
4340 bfa_rport_offline_cb(rp);
4341 break;
4342
4343 default:
4344 bfa_stats(rp, sm_fwd_unexp);
4345 bfa_sm_fault(rp->bfa, event);
4346 }
4347}
4348
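/*
 * Request queue is full, awaiting queue resume to send delete request.
 */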
4349static void
4350bfa_rport_sm_fwdelete_qfull(struct bfa_rport_s *rp, enum bfa_rport_event event)
4351{
4352 bfa_trc(rp->bfa, rp->rport_tag);
4353 bfa_trc(rp->bfa, event);
4354
4355 switch (event) {
4356 case BFA_RPORT_SM_QRESUME:
4357 bfa_sm_set_state(rp, bfa_rport_sm_fwdelete);
4358 bfa_rport_send_fwdelete(rp);
4359 break;
4360
4361 case BFA_RPORT_SM_DELETE:
4362 bfa_stats(rp, sm_fwd_del);
4363 bfa_sm_set_state(rp, bfa_rport_sm_deleting_qfull);
4364 break;
4365
4366 case BFA_RPORT_SM_HWFAIL:
4367 bfa_stats(rp, sm_fwd_hwf);
4368 bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
4369 bfa_reqq_wcancel(&rp->reqq_wait);
4370 bfa_rport_offline_cb(rp);
4371 break;
4372
4373 default:
4374 bfa_stats(rp, sm_fwd_unexp);
4375 bfa_sm_fault(rp->bfa, event);
4376 }
4377}
4378
Jing Huang5fbe25c2010-10-18 17:17:23 -07004379/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07004380 * Offline state.
4381 */
4382static void
4383bfa_rport_sm_offline(struct bfa_rport_s *rp, enum bfa_rport_event event)
4384{
4385 bfa_trc(rp->bfa, rp->rport_tag);
4386 bfa_trc(rp->bfa, event);
4387
4388 switch (event) {
4389 case BFA_RPORT_SM_DELETE:
4390 bfa_stats(rp, sm_off_del);
4391 bfa_sm_set_state(rp, bfa_rport_sm_uninit);
4392 bfa_rport_free(rp);
4393 break;
4394
4395 case BFA_RPORT_SM_ONLINE:
4396 bfa_stats(rp, sm_off_on);
4397 if (bfa_rport_send_fwcreate(rp))
4398 bfa_sm_set_state(rp, bfa_rport_sm_fwcreate);
4399 else
4400 bfa_sm_set_state(rp, bfa_rport_sm_fwcreate_qfull);
4401 break;
4402
4403 case BFA_RPORT_SM_HWFAIL:
4404 bfa_stats(rp, sm_off_hwf);
4405 bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
4406 break;
4407
Krishna Gudipati61ba4392012-08-22 19:52:58 -07004408 case BFA_RPORT_SM_OFFLINE:
4409 bfa_rport_offline_cb(rp);
4410 break;
4411
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07004412 default:
4413 bfa_stats(rp, sm_off_unexp);
4414 bfa_sm_fault(rp->bfa, event);
4415 }
4416}
4417
Jing Huang5fbe25c2010-10-18 17:17:23 -07004418/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07004419 * Rport is deleted, waiting for firmware response to delete.
4420 */
4421static void
4422bfa_rport_sm_deleting(struct bfa_rport_s *rp, enum bfa_rport_event event)
4423{
4424 bfa_trc(rp->bfa, rp->rport_tag);
4425 bfa_trc(rp->bfa, event);
4426
4427 switch (event) {
4428 case BFA_RPORT_SM_FWRSP:
4429 bfa_stats(rp, sm_del_fwrsp);
4430 bfa_sm_set_state(rp, bfa_rport_sm_uninit);
4431 bfa_rport_free(rp);
4432 break;
4433
4434 case BFA_RPORT_SM_HWFAIL:
4435 bfa_stats(rp, sm_del_hwf);
4436 bfa_sm_set_state(rp, bfa_rport_sm_uninit);
4437 bfa_rport_free(rp);
4438 break;
4439
4440 default:
4441 bfa_sm_fault(rp->bfa, event);
4442 }
4443}
4444
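/*
 * Rport is deleted, request queue is full, awaiting queue resume to
 * send the delete request to firmware.
 */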
4445static void
4446bfa_rport_sm_deleting_qfull(struct bfa_rport_s *rp, enum bfa_rport_event event)
4447{
4448 bfa_trc(rp->bfa, rp->rport_tag);
4449 bfa_trc(rp->bfa, event);
4450
4451 switch (event) {
4452 case BFA_RPORT_SM_QRESUME:
4453 bfa_stats(rp, sm_del_fwrsp);
4454 bfa_sm_set_state(rp, bfa_rport_sm_deleting);
4455 bfa_rport_send_fwdelete(rp);
4456 break;
4457
4458 case BFA_RPORT_SM_HWFAIL:
4459 bfa_stats(rp, sm_del_hwf);
4460 bfa_sm_set_state(rp, bfa_rport_sm_uninit);
4461 bfa_reqq_wcancel(&rp->reqq_wait);
4462 bfa_rport_free(rp);
4463 break;
4464
4465 default:
4466 bfa_sm_fault(rp->bfa, event);
4467 }
4468}
4469
Jing Huang5fbe25c2010-10-18 17:17:23 -07004470/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07004471 * Waiting for rport create response from firmware. A delete is pending.
4472 */
4473static void
4474bfa_rport_sm_delete_pending(struct bfa_rport_s *rp,
4475 enum bfa_rport_event event)
4476{
4477 bfa_trc(rp->bfa, rp->rport_tag);
4478 bfa_trc(rp->bfa, event);
4479
4480 switch (event) {
4481 case BFA_RPORT_SM_FWRSP:
4482 bfa_stats(rp, sm_delp_fwrsp);
4483 if (bfa_rport_send_fwdelete(rp))
4484 bfa_sm_set_state(rp, bfa_rport_sm_deleting);
4485 else
4486 bfa_sm_set_state(rp, bfa_rport_sm_deleting_qfull);
4487 break;
4488
4489 case BFA_RPORT_SM_HWFAIL:
4490 bfa_stats(rp, sm_delp_hwf);
4491 bfa_sm_set_state(rp, bfa_rport_sm_uninit);
4492 bfa_rport_free(rp);
4493 break;
4494
4495 default:
4496 bfa_stats(rp, sm_delp_unexp);
4497 bfa_sm_fault(rp->bfa, event);
4498 }
4499}
4500
Jing Huang5fbe25c2010-10-18 17:17:23 -07004501/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07004502 * Waiting for rport create response from firmware. Rport offline is pending.
4503 */
4504static void
4505bfa_rport_sm_offline_pending(struct bfa_rport_s *rp,
4506 enum bfa_rport_event event)
4507{
4508 bfa_trc(rp->bfa, rp->rport_tag);
4509 bfa_trc(rp->bfa, event);
4510
4511 switch (event) {
4512 case BFA_RPORT_SM_FWRSP:
4513 bfa_stats(rp, sm_offp_fwrsp);
4514 if (bfa_rport_send_fwdelete(rp))
4515 bfa_sm_set_state(rp, bfa_rport_sm_fwdelete);
4516 else
4517 bfa_sm_set_state(rp, bfa_rport_sm_fwdelete_qfull);
4518 break;
4519
4520 case BFA_RPORT_SM_DELETE:
4521 bfa_stats(rp, sm_offp_del);
4522 bfa_sm_set_state(rp, bfa_rport_sm_delete_pending);
4523 break;
4524
4525 case BFA_RPORT_SM_HWFAIL:
4526 bfa_stats(rp, sm_offp_hwf);
4527 bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
Krishna Gudipati61ba4392012-08-22 19:52:58 -07004528 bfa_rport_offline_cb(rp);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07004529 break;
4530
4531 default:
4532 bfa_stats(rp, sm_offp_unexp);
4533 bfa_sm_fault(rp->bfa, event);
4534 }
4535}
4536
Jing Huang5fbe25c2010-10-18 17:17:23 -07004537/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07004538 * IOC h/w failed.
4539 */
4540static void
4541bfa_rport_sm_iocdisable(struct bfa_rport_s *rp, enum bfa_rport_event event)
4542{
4543 bfa_trc(rp->bfa, rp->rport_tag);
4544 bfa_trc(rp->bfa, event);
4545
4546 switch (event) {
4547 case BFA_RPORT_SM_OFFLINE:
4548 bfa_stats(rp, sm_iocd_off);
4549 bfa_rport_offline_cb(rp);
4550 break;
4551
4552 case BFA_RPORT_SM_DELETE:
4553 bfa_stats(rp, sm_iocd_del);
4554 bfa_sm_set_state(rp, bfa_rport_sm_uninit);
4555 bfa_rport_free(rp);
4556 break;
4557
4558 case BFA_RPORT_SM_ONLINE:
4559 bfa_stats(rp, sm_iocd_on);
4560 if (bfa_rport_send_fwcreate(rp))
4561 bfa_sm_set_state(rp, bfa_rport_sm_fwcreate);
4562 else
4563 bfa_sm_set_state(rp, bfa_rport_sm_fwcreate_qfull);
4564 break;
4565
4566 case BFA_RPORT_SM_HWFAIL:
4567 break;
4568
4569 default:
4570 bfa_stats(rp, sm_iocd_unexp);
4571 bfa_sm_fault(rp->bfa, event);
4572 }
4573}
4574
4575
4576
Jing Huang5fbe25c2010-10-18 17:17:23 -07004577/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07004578 * bfa_rport_private - BFA rport private functions
4579 */
4580
4581static void
4582__bfa_cb_rport_online(void *cbarg, bfa_boolean_t complete)
4583{
4584 struct bfa_rport_s *rp = cbarg;
4585
4586 if (complete)
4587 bfa_cb_rport_online(rp->rport_drv);
4588}
4589
4590static void
4591__bfa_cb_rport_offline(void *cbarg, bfa_boolean_t complete)
4592{
4593 struct bfa_rport_s *rp = cbarg;
4594
4595 if (complete)
4596 bfa_cb_rport_offline(rp->rport_drv);
4597}
4598
4599static void
4600bfa_rport_qresume(void *cbarg)
4601{
4602 struct bfa_rport_s *rp = cbarg;
4603
4604 bfa_sm_send_event(rp, BFA_RPORT_SM_QRESUME);
4605}
4606
4607static void
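/*
 * Compute kva memory required for the configured number of rports.
 */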
Krishna Gudipati45070252011-06-24 20:24:29 -07004608bfa_rport_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *minfo,
4609 struct bfa_s *bfa)
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07004610{
Krishna Gudipati45070252011-06-24 20:24:29 -07004611 struct bfa_mem_kva_s *rport_kva = BFA_MEM_RPORT_KVA(bfa);
4612
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07004613 if (cfg->fwcfg.num_rports < BFA_RPORT_MIN)
4614 cfg->fwcfg.num_rports = BFA_RPORT_MIN;
4615
Krishna Gudipati45070252011-06-24 20:24:29 -07004616 /* kva memory */
4617 bfa_mem_kva_setup(minfo, rport_kva,
4618 cfg->fwcfg.num_rports * sizeof(struct bfa_rport_s));
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07004619}
4620
4621static void
4622bfa_rport_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
Krishna Gudipati45070252011-06-24 20:24:29 -07004623 struct bfa_pcidev_s *pcidev)
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07004624{
4625 struct bfa_rport_mod_s *mod = BFA_RPORT_MOD(bfa);
4626 struct bfa_rport_s *rp;
4627 u16 i;
4628
4629 INIT_LIST_HEAD(&mod->rp_free_q);
4630 INIT_LIST_HEAD(&mod->rp_active_q);
Krishna Gudipati3fd45982011-06-24 20:24:08 -07004631 INIT_LIST_HEAD(&mod->rp_unused_q);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07004632
Krishna Gudipati45070252011-06-24 20:24:29 -07004633 rp = (struct bfa_rport_s *) bfa_mem_kva_curp(mod);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07004634 mod->rps_list = rp;
4635 mod->num_rports = cfg->fwcfg.num_rports;
4636
Jing Huangd4b671c2010-12-26 21:46:35 -08004637 WARN_ON(!mod->num_rports ||
4638 (mod->num_rports & (mod->num_rports - 1)));
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07004639
4640 for (i = 0; i < mod->num_rports; i++, rp++) {
Jing Huang6a18b162010-10-18 17:08:54 -07004641 memset(rp, 0, sizeof(struct bfa_rport_s));
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07004642 rp->bfa = bfa;
4643 rp->rport_tag = i;
4644 bfa_sm_set_state(rp, bfa_rport_sm_uninit);
4645
Jing Huang5fbe25c2010-10-18 17:17:23 -07004646 /*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07004647		 * rport tag 0 is reserved (unused) and is not added to the free list
4648 */
4649 if (i)
4650 list_add_tail(&rp->qe, &mod->rp_free_q);
4651
4652 bfa_reqq_winit(&rp->reqq_wait, bfa_rport_qresume, rp);
4653 }
4654
Jing Huang5fbe25c2010-10-18 17:17:23 -07004655 /*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07004656 * consume memory
4657 */
Krishna Gudipati45070252011-06-24 20:24:29 -07004658 bfa_mem_kva_curp(mod) = (u8 *) rp;
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07004659}
4660
4661static void
4662bfa_rport_detach(struct bfa_s *bfa)
4663{
4664}
4665
4666static void
4667bfa_rport_start(struct bfa_s *bfa)
4668{
4669}
4670
4671static void
4672bfa_rport_stop(struct bfa_s *bfa)
4673{
4674}
4675
4676static void
4677bfa_rport_iocdisable(struct bfa_s *bfa)
4678{
4679 struct bfa_rport_mod_s *mod = BFA_RPORT_MOD(bfa);
4680 struct bfa_rport_s *rport;
4681 struct list_head *qe, *qen;
4682
Krishna Gudipati3fd45982011-06-24 20:24:08 -07004683 /* Enqueue unused rport resources to free_q */
4684 list_splice_tail_init(&mod->rp_unused_q, &mod->rp_free_q);
4685
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07004686 list_for_each_safe(qe, qen, &mod->rp_active_q) {
4687 rport = (struct bfa_rport_s *) qe;
4688 bfa_sm_send_event(rport, BFA_RPORT_SM_HWFAIL);
4689 }
4690}
4691
4692static struct bfa_rport_s *
4693bfa_rport_alloc(struct bfa_rport_mod_s *mod)
4694{
4695 struct bfa_rport_s *rport;
4696
4697 bfa_q_deq(&mod->rp_free_q, &rport);
4698 if (rport)
4699 list_add_tail(&rport->qe, &mod->rp_active_q);
4700
4701 return rport;
4702}
4703
4704static void
4705bfa_rport_free(struct bfa_rport_s *rport)
4706{
4707 struct bfa_rport_mod_s *mod = BFA_RPORT_MOD(rport->bfa);
4708
Jing Huangd4b671c2010-12-26 21:46:35 -08004709 WARN_ON(!bfa_q_is_on_q(&mod->rp_active_q, rport));
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07004710 list_del(&rport->qe);
4711 list_add_tail(&rport->qe, &mod->rp_free_q);
4712}
4713
4714static bfa_boolean_t
4715bfa_rport_send_fwcreate(struct bfa_rport_s *rp)
4716{
4717 struct bfi_rport_create_req_s *m;
4718
Jing Huang5fbe25c2010-10-18 17:17:23 -07004719 /*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07004720 * check for room in queue to send request now
4721 */
4722 m = bfa_reqq_next(rp->bfa, BFA_REQQ_RPORT);
4723 if (!m) {
4724 bfa_reqq_wait(rp->bfa, BFA_REQQ_RPORT, &rp->reqq_wait);
4725 return BFA_FALSE;
4726 }
4727
4728 bfi_h2i_set(m->mh, BFI_MC_RPORT, BFI_RPORT_H2I_CREATE_REQ,
Krishna Gudipati3fd45982011-06-24 20:24:08 -07004729 bfa_fn_lpu(rp->bfa));
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07004730 m->bfa_handle = rp->rport_tag;
Jing Huangba816ea2010-10-18 17:10:50 -07004731 m->max_frmsz = cpu_to_be16(rp->rport_info.max_frmsz);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07004732 m->pid = rp->rport_info.pid;
Krishna Gudipati3fd45982011-06-24 20:24:08 -07004733 m->lp_fwtag = bfa_lps_get_fwtag(rp->bfa, (u8)rp->rport_info.lp_tag);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07004734 m->local_pid = rp->rport_info.local_pid;
4735 m->fc_class = rp->rport_info.fc_class;
4736 m->vf_en = rp->rport_info.vf_en;
4737 m->vf_id = rp->rport_info.vf_id;
4738 m->cisc = rp->rport_info.cisc;
4739
Jing Huang5fbe25c2010-10-18 17:17:23 -07004740 /*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07004741 * queue I/O message to firmware
4742 */
Krishna Gudipati3fd45982011-06-24 20:24:08 -07004743 bfa_reqq_produce(rp->bfa, BFA_REQQ_RPORT, m->mh);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07004744 return BFA_TRUE;
4745}
4746
4747static bfa_boolean_t
4748bfa_rport_send_fwdelete(struct bfa_rport_s *rp)
4749{
4750 struct bfi_rport_delete_req_s *m;
4751
Jing Huang5fbe25c2010-10-18 17:17:23 -07004752 /*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07004753 * check for room in queue to send request now
4754 */
4755 m = bfa_reqq_next(rp->bfa, BFA_REQQ_RPORT);
4756 if (!m) {
4757 bfa_reqq_wait(rp->bfa, BFA_REQQ_RPORT, &rp->reqq_wait);
4758 return BFA_FALSE;
4759 }
4760
4761 bfi_h2i_set(m->mh, BFI_MC_RPORT, BFI_RPORT_H2I_DELETE_REQ,
Krishna Gudipati3fd45982011-06-24 20:24:08 -07004762 bfa_fn_lpu(rp->bfa));
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07004763 m->fw_handle = rp->fw_handle;
4764
Jing Huang5fbe25c2010-10-18 17:17:23 -07004765 /*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07004766 * queue I/O message to firmware
4767 */
Krishna Gudipati3fd45982011-06-24 20:24:08 -07004768 bfa_reqq_produce(rp->bfa, BFA_REQQ_RPORT, m->mh);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07004769 return BFA_TRUE;
4770}
4771
4772static bfa_boolean_t
4773bfa_rport_send_fwspeed(struct bfa_rport_s *rp)
4774{
4775 struct bfa_rport_speed_req_s *m;
4776
Jing Huang5fbe25c2010-10-18 17:17:23 -07004777 /*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07004778 * check for room in queue to send request now
4779 */
4780 m = bfa_reqq_next(rp->bfa, BFA_REQQ_RPORT);
4781 if (!m) {
4782 bfa_trc(rp->bfa, rp->rport_info.speed);
4783 return BFA_FALSE;
4784 }
4785
4786 bfi_h2i_set(m->mh, BFI_MC_RPORT, BFI_RPORT_H2I_SET_SPEED_REQ,
Krishna Gudipati3fd45982011-06-24 20:24:08 -07004787 bfa_fn_lpu(rp->bfa));
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07004788 m->fw_handle = rp->fw_handle;
4789 m->speed = (u8)rp->rport_info.speed;
4790
Jing Huang5fbe25c2010-10-18 17:17:23 -07004791 /*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07004792 * queue I/O message to firmware
4793 */
Krishna Gudipati3fd45982011-06-24 20:24:08 -07004794 bfa_reqq_produce(rp->bfa, BFA_REQQ_RPORT, m->mh);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07004795 return BFA_TRUE;
4796}
4797
4798
4799
Jing Huang5fbe25c2010-10-18 17:17:23 -07004800/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07004801 * bfa_rport_public
4802 */
4803
Jing Huang5fbe25c2010-10-18 17:17:23 -07004804/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07004805 * Rport interrupt processing.
4806 */
4807void
4808bfa_rport_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
4809{
4810 union bfi_rport_i2h_msg_u msg;
4811 struct bfa_rport_s *rp;
4812
4813 bfa_trc(bfa, m->mhdr.msg_id);
4814
4815 msg.msg = m;
4816
4817 switch (m->mhdr.msg_id) {
4818 case BFI_RPORT_I2H_CREATE_RSP:
4819 rp = BFA_RPORT_FROM_TAG(bfa, msg.create_rsp->bfa_handle);
4820 rp->fw_handle = msg.create_rsp->fw_handle;
4821 rp->qos_attr = msg.create_rsp->qos_attr;
Krishna Gudipati83763d52011-07-20 17:04:03 -07004822 bfa_rport_set_lunmask(bfa, rp);
Jing Huangd4b671c2010-12-26 21:46:35 -08004823 WARN_ON(msg.create_rsp->status != BFA_STATUS_OK);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07004824 bfa_sm_send_event(rp, BFA_RPORT_SM_FWRSP);
4825 break;
4826
4827 case BFI_RPORT_I2H_DELETE_RSP:
4828 rp = BFA_RPORT_FROM_TAG(bfa, msg.delete_rsp->bfa_handle);
Jing Huangd4b671c2010-12-26 21:46:35 -08004829 WARN_ON(msg.delete_rsp->status != BFA_STATUS_OK);
Krishna Gudipati83763d52011-07-20 17:04:03 -07004830 bfa_rport_unset_lunmask(bfa, rp);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07004831 bfa_sm_send_event(rp, BFA_RPORT_SM_FWRSP);
4832 break;
4833
4834 case BFI_RPORT_I2H_QOS_SCN:
4835 rp = BFA_RPORT_FROM_TAG(bfa, msg.qos_scn_evt->bfa_handle);
4836 rp->event_arg.fw_msg = msg.qos_scn_evt;
4837 bfa_sm_send_event(rp, BFA_RPORT_SM_QOS_SCN);
4838 break;
4839
Krishna Gudipatibc0e2c22012-09-21 17:23:59 -07004840 case BFI_RPORT_I2H_LIP_SCN_ONLINE:
4841 bfa_fcport_update_loop_info(BFA_FCPORT_MOD(bfa),
4842 &msg.lip_scn->loop_info);
4843 bfa_cb_rport_scn_online(bfa);
4844 break;
4845
4846 case BFI_RPORT_I2H_LIP_SCN_OFFLINE:
4847 bfa_cb_rport_scn_offline(bfa);
4848 break;
4849
4850 case BFI_RPORT_I2H_NO_DEV:
4851 rp = BFA_RPORT_FROM_TAG(bfa, msg.lip_scn->bfa_handle);
4852 bfa_cb_rport_scn_no_dev(rp->rport_drv);
4853 break;
4854
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07004855 default:
4856 bfa_trc(bfa, m->mhdr.msg_id);
Jing Huangd4b671c2010-12-26 21:46:35 -08004857 WARN_ON(1);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07004858 }
4859}
4860
Krishna Gudipati3fd45982011-06-24 20:24:08 -07004861void
4862bfa_rport_res_recfg(struct bfa_s *bfa, u16 num_rport_fw)
4863{
4864 struct bfa_rport_mod_s *mod = BFA_RPORT_MOD(bfa);
4865 struct list_head *qe;
4866 int i;
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07004867
Krishna Gudipati3fd45982011-06-24 20:24:08 -07004868 for (i = 0; i < (mod->num_rports - num_rport_fw); i++) {
4869 bfa_q_deq_tail(&mod->rp_free_q, &qe);
4870 list_add_tail(qe, &mod->rp_unused_q);
4871 }
4872}
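
/*
 * Note: bfa_rport_res_recfg() trims the pre-allocated rport pool down to
 * what the firmware actually supports by parking the excess elements on
 * rp_unused_q; bfa_rport_iocdisable() splices them back onto rp_free_q
 * when the IOC goes down.  bfa_uf_res_recfg() below applies the same
 * scheme to unsolicited-frame buffers.
 */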
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07004873
Jing Huang5fbe25c2010-10-18 17:17:23 -07004874/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07004875 * bfa_rport_api
4876 */
4877
4878struct bfa_rport_s *
4879bfa_rport_create(struct bfa_s *bfa, void *rport_drv)
4880{
4881 struct bfa_rport_s *rp;
4882
4883 rp = bfa_rport_alloc(BFA_RPORT_MOD(bfa));
4884
4885 if (rp == NULL)
4886 return NULL;
4887
4888 rp->bfa = bfa;
4889 rp->rport_drv = rport_drv;
Maggie Zhangf7f738122010-12-09 19:08:43 -08004890 memset(&rp->stats, 0, sizeof(rp->stats));
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07004891
Jing Huangd4b671c2010-12-26 21:46:35 -08004892 WARN_ON(!bfa_sm_cmp_state(rp, bfa_rport_sm_uninit));
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07004893 bfa_sm_send_event(rp, BFA_RPORT_SM_CREATE);
4894
4895 return rp;
4896}
4897
4898void
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07004899bfa_rport_online(struct bfa_rport_s *rport, struct bfa_rport_info_s *rport_info)
4900{
Jing Huangd4b671c2010-12-26 21:46:35 -08004901 WARN_ON(rport_info->max_frmsz == 0);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07004902
Jing Huang5fbe25c2010-10-18 17:17:23 -07004903 /*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07004904	 * Some JBODs have been seen not to set the PDU size correctly in PLOGI
4905 * responses. Default to minimum size.
4906 */
4907 if (rport_info->max_frmsz == 0) {
4908 bfa_trc(rport->bfa, rport->rport_tag);
4909 rport_info->max_frmsz = FC_MIN_PDUSZ;
4910 }
4911
Jing Huang6a18b162010-10-18 17:08:54 -07004912 rport->rport_info = *rport_info;
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07004913 bfa_sm_send_event(rport, BFA_RPORT_SM_ONLINE);
4914}
4915
4916void
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07004917bfa_rport_speed(struct bfa_rport_s *rport, enum bfa_port_speed speed)
4918{
Jing Huangd4b671c2010-12-26 21:46:35 -08004919 WARN_ON(speed == 0);
4920 WARN_ON(speed == BFA_PORT_SPEED_AUTO);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07004921
Krishna Gudipati61ba4392012-08-22 19:52:58 -07004922 if (rport) {
4923 rport->rport_info.speed = speed;
4924 bfa_sm_send_event(rport, BFA_RPORT_SM_SET_SPEED);
4925 }
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07004926}
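
/*
 * Illustrative sketch (not part of the driver): a typical call sequence
 * into the rport API from an upper-layer consumer.  fcs_rport, remote_pid
 * and lp_tag below are hypothetical names; only the bfa_rport_*() calls
 * and the bfa_rport_info_s fields are taken from this file.
 *
 *	struct bfa_rport_s *rp;
 *	struct bfa_rport_info_s info;
 *
 *	rp = bfa_rport_create(bfa, fcs_rport);
 *	if (rp == NULL)
 *		return;				-- NULL when the pool is empty
 *
 *	memset(&info, 0, sizeof(info));
 *	info.max_frmsz = 2048;			-- PLOGI-negotiated PDU size
 *	info.pid = remote_pid;
 *	info.lp_tag = lp_tag;
 *	bfa_rport_online(rp, &info);		-- eventually sends
 *						   BFI_RPORT_H2I_CREATE_REQ
 *
 *	bfa_rport_speed(rp, BFA_PORT_SPEED_8GBPS);
 */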
4927
Krishna Gudipati83763d52011-07-20 17:04:03 -07004928/* Set Rport LUN Mask */
4929void
4930bfa_rport_set_lunmask(struct bfa_s *bfa, struct bfa_rport_s *rp)
4931{
4932 struct bfa_lps_mod_s *lps_mod = BFA_LPS_MOD(bfa);
4933 wwn_t lp_wwn, rp_wwn;
4934 u8 lp_tag = (u8)rp->rport_info.lp_tag;
4935
4936 rp_wwn = ((struct bfa_fcs_rport_s *)rp->rport_drv)->pwwn;
4937 lp_wwn = (BFA_LPS_FROM_TAG(lps_mod, rp->rport_info.lp_tag))->pwwn;
4938
4939 BFA_LPS_FROM_TAG(lps_mod, rp->rport_info.lp_tag)->lun_mask =
4940 rp->lun_mask = BFA_TRUE;
4941 bfa_fcpim_lunmask_rp_update(bfa, lp_wwn, rp_wwn, rp->rport_tag, lp_tag);
4942}
4943
4944/* Unset Rport LUN mask */
4945void
4946bfa_rport_unset_lunmask(struct bfa_s *bfa, struct bfa_rport_s *rp)
4947{
4948 struct bfa_lps_mod_s *lps_mod = BFA_LPS_MOD(bfa);
4949 wwn_t lp_wwn, rp_wwn;
4950
4951 rp_wwn = ((struct bfa_fcs_rport_s *)rp->rport_drv)->pwwn;
4952 lp_wwn = (BFA_LPS_FROM_TAG(lps_mod, rp->rport_info.lp_tag))->pwwn;
4953
4954 BFA_LPS_FROM_TAG(lps_mod, rp->rport_info.lp_tag)->lun_mask =
4955 rp->lun_mask = BFA_FALSE;
4956 bfa_fcpim_lunmask_rp_update(bfa, lp_wwn, rp_wwn,
4957 BFA_RPORT_TAG_INVALID, BFA_LP_TAG_INVALID);
4958}
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07004959
Jing Huang5fbe25c2010-10-18 17:17:23 -07004960/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07004961 * SGPG related functions
4962 */
4963
Jing Huang5fbe25c2010-10-18 17:17:23 -07004964/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07004965 * Compute and return memory needed by the SGPG module.
4966 */
4967static void
Krishna Gudipati45070252011-06-24 20:24:29 -07004968bfa_sgpg_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *minfo,
4969 struct bfa_s *bfa)
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07004970{
Krishna Gudipati45070252011-06-24 20:24:29 -07004971 struct bfa_sgpg_mod_s *sgpg_mod = BFA_SGPG_MOD(bfa);
4972 struct bfa_mem_kva_s *sgpg_kva = BFA_MEM_SGPG_KVA(bfa);
4973 struct bfa_mem_dma_s *seg_ptr;
4974 u16 nsegs, idx, per_seg_sgpg, num_sgpg;
4975 u32 sgpg_sz = sizeof(struct bfi_sgpg_s);
4976
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07004977 if (cfg->drvcfg.num_sgpgs < BFA_SGPG_MIN)
4978 cfg->drvcfg.num_sgpgs = BFA_SGPG_MIN;
Krishna Gudipati45070252011-06-24 20:24:29 -07004979 else if (cfg->drvcfg.num_sgpgs > BFA_SGPG_MAX)
4980 cfg->drvcfg.num_sgpgs = BFA_SGPG_MAX;
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07004981
Krishna Gudipati45070252011-06-24 20:24:29 -07004982 num_sgpg = cfg->drvcfg.num_sgpgs;
4983
4984 nsegs = BFI_MEM_DMA_NSEGS(num_sgpg, sgpg_sz);
4985 per_seg_sgpg = BFI_MEM_NREQS_SEG(sgpg_sz);
4986
4987 bfa_mem_dma_seg_iter(sgpg_mod, seg_ptr, nsegs, idx) {
4988 if (num_sgpg >= per_seg_sgpg) {
4989 num_sgpg -= per_seg_sgpg;
4990 bfa_mem_dma_setup(minfo, seg_ptr,
4991 per_seg_sgpg * sgpg_sz);
4992 } else
4993 bfa_mem_dma_setup(minfo, seg_ptr,
4994 num_sgpg * sgpg_sz);
4995 }
4996
4997 /* kva memory */
4998 bfa_mem_kva_setup(minfo, sgpg_kva,
4999 cfg->drvcfg.num_sgpgs * sizeof(struct bfa_sgpg_s));
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07005000}
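
/*
 * Note: the DMA requirement above is spread over fixed-size DMA segments.
 * BFI_MEM_DMA_NSEGS() yields how many segments are needed for num_sgpg
 * pages of sgpg_sz bytes each and BFI_MEM_NREQS_SEG() how many pages fit
 * in one segment; every full segment is sized for per_seg_sgpg pages and
 * the last, partial segment for whatever remains.
 */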
5001
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07005002static void
5003bfa_sgpg_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
Krishna Gudipati45070252011-06-24 20:24:29 -07005004 struct bfa_pcidev_s *pcidev)
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07005005{
5006 struct bfa_sgpg_mod_s *mod = BFA_SGPG_MOD(bfa);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07005007 struct bfa_sgpg_s *hsgpg;
5008 struct bfi_sgpg_s *sgpg;
5009 u64 align_len;
Krishna Gudipati45070252011-06-24 20:24:29 -07005010 struct bfa_mem_dma_s *seg_ptr;
5011 u32 sgpg_sz = sizeof(struct bfi_sgpg_s);
5012 u16 i, idx, nsegs, per_seg_sgpg, num_sgpg;
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07005013
5014 union {
5015 u64 pa;
5016 union bfi_addr_u addr;
5017 } sgpg_pa, sgpg_pa_tmp;
5018
5019 INIT_LIST_HEAD(&mod->sgpg_q);
5020 INIT_LIST_HEAD(&mod->sgpg_wait_q);
5021
5022 bfa_trc(bfa, cfg->drvcfg.num_sgpgs);
5023
Krishna Gudipati45070252011-06-24 20:24:29 -07005024 mod->free_sgpgs = mod->num_sgpgs = cfg->drvcfg.num_sgpgs;
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07005025
Krishna Gudipati45070252011-06-24 20:24:29 -07005026 num_sgpg = cfg->drvcfg.num_sgpgs;
5027 nsegs = BFI_MEM_DMA_NSEGS(num_sgpg, sgpg_sz);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07005028
Krishna Gudipati45070252011-06-24 20:24:29 -07005029 /* dma/kva mem claim */
5030 hsgpg = (struct bfa_sgpg_s *) bfa_mem_kva_curp(mod);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07005031
Krishna Gudipati45070252011-06-24 20:24:29 -07005032 bfa_mem_dma_seg_iter(mod, seg_ptr, nsegs, idx) {
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07005033
Krishna Gudipati45070252011-06-24 20:24:29 -07005034 if (!bfa_mem_dma_virt(seg_ptr))
5035 break;
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07005036
Krishna Gudipati45070252011-06-24 20:24:29 -07005037 align_len = BFA_SGPG_ROUNDUP(bfa_mem_dma_phys(seg_ptr)) -
5038 bfa_mem_dma_phys(seg_ptr);
5039
5040 sgpg = (struct bfi_sgpg_s *)
5041 (((u8 *) bfa_mem_dma_virt(seg_ptr)) + align_len);
5042 sgpg_pa.pa = bfa_mem_dma_phys(seg_ptr) + align_len;
5043 WARN_ON(sgpg_pa.pa & (sgpg_sz - 1));
5044
5045 per_seg_sgpg = (seg_ptr->mem_len - (u32)align_len) / sgpg_sz;
5046
5047 for (i = 0; num_sgpg > 0 && i < per_seg_sgpg; i++, num_sgpg--) {
5048 memset(hsgpg, 0, sizeof(*hsgpg));
5049 memset(sgpg, 0, sizeof(*sgpg));
5050
5051 hsgpg->sgpg = sgpg;
5052 sgpg_pa_tmp.pa = bfa_sgaddr_le(sgpg_pa.pa);
5053 hsgpg->sgpg_pa = sgpg_pa_tmp.addr;
5054 list_add_tail(&hsgpg->qe, &mod->sgpg_q);
5055
5056 sgpg++;
5057 hsgpg++;
5058 sgpg_pa.pa += sgpg_sz;
5059 }
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07005060 }
5061
Krishna Gudipati45070252011-06-24 20:24:29 -07005062 bfa_mem_kva_curp(mod) = (u8 *) hsgpg;
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07005063}
5064
5065static void
5066bfa_sgpg_detach(struct bfa_s *bfa)
5067{
5068}
5069
5070static void
5071bfa_sgpg_start(struct bfa_s *bfa)
5072{
5073}
5074
5075static void
5076bfa_sgpg_stop(struct bfa_s *bfa)
5077{
5078}
5079
5080static void
5081bfa_sgpg_iocdisable(struct bfa_s *bfa)
5082{
5083}
5084
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07005085bfa_status_t
5086bfa_sgpg_malloc(struct bfa_s *bfa, struct list_head *sgpg_q, int nsgpgs)
5087{
5088 struct bfa_sgpg_mod_s *mod = BFA_SGPG_MOD(bfa);
5089 struct bfa_sgpg_s *hsgpg;
5090 int i;
5091
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07005092 if (mod->free_sgpgs < nsgpgs)
5093 return BFA_STATUS_ENOMEM;
5094
5095 for (i = 0; i < nsgpgs; i++) {
5096 bfa_q_deq(&mod->sgpg_q, &hsgpg);
Jing Huangd4b671c2010-12-26 21:46:35 -08005097 WARN_ON(!hsgpg);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07005098 list_add_tail(&hsgpg->qe, sgpg_q);
5099 }
5100
5101 mod->free_sgpgs -= nsgpgs;
5102 return BFA_STATUS_OK;
5103}
5104
5105void
5106bfa_sgpg_mfree(struct bfa_s *bfa, struct list_head *sgpg_q, int nsgpg)
5107{
5108 struct bfa_sgpg_mod_s *mod = BFA_SGPG_MOD(bfa);
5109 struct bfa_sgpg_wqe_s *wqe;
5110
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07005111 mod->free_sgpgs += nsgpg;
Jing Huangd4b671c2010-12-26 21:46:35 -08005112 WARN_ON(mod->free_sgpgs > mod->num_sgpgs);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07005113
5114 list_splice_tail_init(sgpg_q, &mod->sgpg_q);
5115
5116 if (list_empty(&mod->sgpg_wait_q))
5117 return;
5118
Jing Huang5fbe25c2010-10-18 17:17:23 -07005119 /*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07005120 * satisfy as many waiting requests as possible
5121 */
5122 do {
5123 wqe = bfa_q_first(&mod->sgpg_wait_q);
5124 if (mod->free_sgpgs < wqe->nsgpg)
5125 nsgpg = mod->free_sgpgs;
5126 else
5127 nsgpg = wqe->nsgpg;
5128 bfa_sgpg_malloc(bfa, &wqe->sgpg_q, nsgpg);
5129 wqe->nsgpg -= nsgpg;
5130 if (wqe->nsgpg == 0) {
5131 list_del(&wqe->qe);
5132 wqe->cbfn(wqe->cbarg);
5133 }
5134 } while (mod->free_sgpgs && !list_empty(&mod->sgpg_wait_q));
5135}
5136
5137void
5138bfa_sgpg_wait(struct bfa_s *bfa, struct bfa_sgpg_wqe_s *wqe, int nsgpg)
5139{
5140 struct bfa_sgpg_mod_s *mod = BFA_SGPG_MOD(bfa);
5141
Jing Huangd4b671c2010-12-26 21:46:35 -08005142 WARN_ON(nsgpg <= 0);
5143 WARN_ON(nsgpg <= mod->free_sgpgs);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07005144
5145 wqe->nsgpg_total = wqe->nsgpg = nsgpg;
5146
Jing Huang5fbe25c2010-10-18 17:17:23 -07005147 /*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07005148 * allocate any left to this one first
5149 */
5150 if (mod->free_sgpgs) {
Jing Huang5fbe25c2010-10-18 17:17:23 -07005151 /*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07005152 * no one else is waiting for SGPG
5153 */
Jing Huangd4b671c2010-12-26 21:46:35 -08005154 WARN_ON(!list_empty(&mod->sgpg_wait_q));
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07005155 list_splice_tail_init(&mod->sgpg_q, &wqe->sgpg_q);
5156 wqe->nsgpg -= mod->free_sgpgs;
5157 mod->free_sgpgs = 0;
5158 }
5159
5160 list_add_tail(&wqe->qe, &mod->sgpg_wait_q);
5161}
5162
5163void
5164bfa_sgpg_wcancel(struct bfa_s *bfa, struct bfa_sgpg_wqe_s *wqe)
5165{
5166 struct bfa_sgpg_mod_s *mod = BFA_SGPG_MOD(bfa);
5167
Jing Huangd4b671c2010-12-26 21:46:35 -08005168 WARN_ON(!bfa_q_is_on_q(&mod->sgpg_wait_q, wqe));
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07005169 list_del(&wqe->qe);
5170
5171 if (wqe->nsgpg_total != wqe->nsgpg)
5172 bfa_sgpg_mfree(bfa, &wqe->sgpg_q,
5173 wqe->nsgpg_total - wqe->nsgpg);
5174}
5175
5176void
5177bfa_sgpg_winit(struct bfa_sgpg_wqe_s *wqe, void (*cbfn) (void *cbarg),
5178 void *cbarg)
5179{
5180 INIT_LIST_HEAD(&wqe->sgpg_q);
5181 wqe->cbfn = cbfn;
5182 wqe->cbarg = cbarg;
5183}
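
/*
 * Illustrative sketch (not part of the driver): how an I/O path might use
 * the SGPG pool with the wait-queue fallback.  my_io, my_io_cb and nsg are
 * hypothetical names.
 *
 *	if (bfa_sgpg_malloc(bfa, &my_io->sgpg_q, nsg) != BFA_STATUS_OK) {
 *		bfa_sgpg_winit(&my_io->sgpg_wqe, my_io_cb, my_io);
 *		bfa_sgpg_wait(bfa, &my_io->sgpg_wqe, nsg);
 *		return;			-- my_io_cb runs once pages free up
 *	}
 *	... build the SG list, and when done:
 *	bfa_sgpg_mfree(bfa, &my_io->sgpg_q, nsg);
 *
 * bfa_sgpg_wait() hands over whatever pages are free right away and keeps
 * the wqe queued for the remainder; bfa_sgpg_wcancel() aborts a pending
 * wait and returns any partially granted pages to the pool.
 */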
5184
Jing Huang5fbe25c2010-10-18 17:17:23 -07005185/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07005186 * UF related functions
5187 */
5188/*
5189 *****************************************************************************
5190 * Internal functions
5191 *****************************************************************************
5192 */
5193static void
5194__bfa_cb_uf_recv(void *cbarg, bfa_boolean_t complete)
5195{
5196 struct bfa_uf_s *uf = cbarg;
5197 struct bfa_uf_mod_s *ufm = BFA_UF_MOD(uf->bfa);
5198
5199 if (complete)
5200 ufm->ufrecv(ufm->cbarg, uf);
5201}
5202
5203static void
Krishna Gudipati45070252011-06-24 20:24:29 -07005204claim_uf_post_msgs(struct bfa_uf_mod_s *ufm)
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07005205{
5206 struct bfi_uf_buf_post_s *uf_bp_msg;
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07005207 u16 i;
5208 u16 buf_len;
5209
Krishna Gudipati45070252011-06-24 20:24:29 -07005210 ufm->uf_buf_posts = (struct bfi_uf_buf_post_s *) bfa_mem_kva_curp(ufm);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07005211 uf_bp_msg = ufm->uf_buf_posts;
5212
5213 for (i = 0, uf_bp_msg = ufm->uf_buf_posts; i < ufm->num_ufs;
5214 i++, uf_bp_msg++) {
Jing Huang6a18b162010-10-18 17:08:54 -07005215 memset(uf_bp_msg, 0, sizeof(struct bfi_uf_buf_post_s));
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07005216
5217 uf_bp_msg->buf_tag = i;
5218 buf_len = sizeof(struct bfa_uf_buf_s);
Jing Huangba816ea2010-10-18 17:10:50 -07005219 uf_bp_msg->buf_len = cpu_to_be16(buf_len);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07005220 bfi_h2i_set(uf_bp_msg->mh, BFI_MC_UF, BFI_UF_H2I_BUF_POST,
Krishna Gudipati3fd45982011-06-24 20:24:08 -07005221 bfa_fn_lpu(ufm->bfa));
Krishna Gudipati85ce9282011-06-13 15:39:36 -07005222 bfa_alen_set(&uf_bp_msg->alen, buf_len, ufm_pbs_pa(ufm, i));
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07005223 }
5224
Jing Huang5fbe25c2010-10-18 17:17:23 -07005225 /*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07005226 * advance pointer beyond consumed memory
5227 */
Krishna Gudipati45070252011-06-24 20:24:29 -07005228 bfa_mem_kva_curp(ufm) = (u8 *) uf_bp_msg;
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07005229}
5230
5231static void
Krishna Gudipati45070252011-06-24 20:24:29 -07005232claim_ufs(struct bfa_uf_mod_s *ufm)
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07005233{
5234 u16 i;
5235 struct bfa_uf_s *uf;
5236
5237 /*
5238 * Claim block of memory for UF list
5239 */
Krishna Gudipati45070252011-06-24 20:24:29 -07005240 ufm->uf_list = (struct bfa_uf_s *) bfa_mem_kva_curp(ufm);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07005241
5242 /*
5243 * Initialize UFs and queue it in UF free queue
5244 */
5245 for (i = 0, uf = ufm->uf_list; i < ufm->num_ufs; i++, uf++) {
Jing Huang6a18b162010-10-18 17:08:54 -07005246 memset(uf, 0, sizeof(struct bfa_uf_s));
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07005247 uf->bfa = ufm->bfa;
5248 uf->uf_tag = i;
Krishna Gudipati45070252011-06-24 20:24:29 -07005249 uf->pb_len = BFA_PER_UF_DMA_SZ;
5250 uf->buf_kva = bfa_mem_get_dmabuf_kva(ufm, i, BFA_PER_UF_DMA_SZ);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07005251 uf->buf_pa = ufm_pbs_pa(ufm, i);
5252 list_add_tail(&uf->qe, &ufm->uf_free_q);
5253 }
5254
Jing Huang5fbe25c2010-10-18 17:17:23 -07005255 /*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07005256 * advance memory pointer
5257 */
Krishna Gudipati45070252011-06-24 20:24:29 -07005258 bfa_mem_kva_curp(ufm) = (u8 *) uf;
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07005259}
5260
5261static void
Krishna Gudipati45070252011-06-24 20:24:29 -07005262uf_mem_claim(struct bfa_uf_mod_s *ufm)
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07005263{
Krishna Gudipati45070252011-06-24 20:24:29 -07005264 claim_ufs(ufm);
5265 claim_uf_post_msgs(ufm);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07005266}
5267
5268static void
Krishna Gudipati45070252011-06-24 20:24:29 -07005269bfa_uf_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *minfo,
5270 struct bfa_s *bfa)
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07005271{
Krishna Gudipati45070252011-06-24 20:24:29 -07005272 struct bfa_uf_mod_s *ufm = BFA_UF_MOD(bfa);
5273 struct bfa_mem_kva_s *uf_kva = BFA_MEM_UF_KVA(bfa);
5274 u32 num_ufs = cfg->fwcfg.num_uf_bufs;
5275 struct bfa_mem_dma_s *seg_ptr;
5276 u16 nsegs, idx, per_seg_uf = 0;
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07005277
Krishna Gudipati45070252011-06-24 20:24:29 -07005278 nsegs = BFI_MEM_DMA_NSEGS(num_ufs, BFA_PER_UF_DMA_SZ);
5279 per_seg_uf = BFI_MEM_NREQS_SEG(BFA_PER_UF_DMA_SZ);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07005280
Krishna Gudipati45070252011-06-24 20:24:29 -07005281 bfa_mem_dma_seg_iter(ufm, seg_ptr, nsegs, idx) {
5282 if (num_ufs >= per_seg_uf) {
5283 num_ufs -= per_seg_uf;
5284 bfa_mem_dma_setup(minfo, seg_ptr,
5285 per_seg_uf * BFA_PER_UF_DMA_SZ);
5286 } else
5287 bfa_mem_dma_setup(minfo, seg_ptr,
5288 num_ufs * BFA_PER_UF_DMA_SZ);
5289 }
5290
5291 /* kva memory */
5292 bfa_mem_kva_setup(minfo, uf_kva, cfg->fwcfg.num_uf_bufs *
5293 (sizeof(struct bfa_uf_s) + sizeof(struct bfi_uf_buf_post_s)));
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07005294}
5295
5296static void
5297bfa_uf_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
Krishna Gudipati45070252011-06-24 20:24:29 -07005298 struct bfa_pcidev_s *pcidev)
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07005299{
5300 struct bfa_uf_mod_s *ufm = BFA_UF_MOD(bfa);
5301
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07005302 ufm->bfa = bfa;
5303 ufm->num_ufs = cfg->fwcfg.num_uf_bufs;
5304 INIT_LIST_HEAD(&ufm->uf_free_q);
5305 INIT_LIST_HEAD(&ufm->uf_posted_q);
Krishna Gudipati3fd45982011-06-24 20:24:08 -07005306 INIT_LIST_HEAD(&ufm->uf_unused_q);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07005307
Krishna Gudipati45070252011-06-24 20:24:29 -07005308 uf_mem_claim(ufm);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07005309}
5310
5311static void
5312bfa_uf_detach(struct bfa_s *bfa)
5313{
5314}
5315
5316static struct bfa_uf_s *
5317bfa_uf_get(struct bfa_uf_mod_s *uf_mod)
5318{
5319 struct bfa_uf_s *uf;
5320
5321 bfa_q_deq(&uf_mod->uf_free_q, &uf);
5322 return uf;
5323}
5324
5325static void
5326bfa_uf_put(struct bfa_uf_mod_s *uf_mod, struct bfa_uf_s *uf)
5327{
5328 list_add_tail(&uf->qe, &uf_mod->uf_free_q);
5329}
5330
5331static bfa_status_t
5332bfa_uf_post(struct bfa_uf_mod_s *ufm, struct bfa_uf_s *uf)
5333{
5334 struct bfi_uf_buf_post_s *uf_post_msg;
5335
5336 uf_post_msg = bfa_reqq_next(ufm->bfa, BFA_REQQ_FCXP);
5337 if (!uf_post_msg)
5338 return BFA_STATUS_FAILED;
5339
Jing Huang6a18b162010-10-18 17:08:54 -07005340 memcpy(uf_post_msg, &ufm->uf_buf_posts[uf->uf_tag],
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07005341 sizeof(struct bfi_uf_buf_post_s));
Krishna Gudipati3fd45982011-06-24 20:24:08 -07005342 bfa_reqq_produce(ufm->bfa, BFA_REQQ_FCXP, uf_post_msg->mh);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07005343
5344 bfa_trc(ufm->bfa, uf->uf_tag);
5345
5346 list_add_tail(&uf->qe, &ufm->uf_posted_q);
5347 return BFA_STATUS_OK;
5348}
5349
5350static void
5351bfa_uf_post_all(struct bfa_uf_mod_s *uf_mod)
5352{
5353 struct bfa_uf_s *uf;
5354
5355 while ((uf = bfa_uf_get(uf_mod)) != NULL) {
5356 if (bfa_uf_post(uf_mod, uf) != BFA_STATUS_OK)
5357 break;
5358 }
5359}
5360
5361static void
5362uf_recv(struct bfa_s *bfa, struct bfi_uf_frm_rcvd_s *m)
5363{
5364 struct bfa_uf_mod_s *ufm = BFA_UF_MOD(bfa);
5365 u16 uf_tag = m->buf_tag;
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07005366 struct bfa_uf_s *uf = &ufm->uf_list[uf_tag];
Krishna Gudipati45070252011-06-24 20:24:29 -07005367 struct bfa_uf_buf_s *uf_buf;
5368 uint8_t *buf;
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07005369 struct fchs_s *fchs;
5370
Krishna Gudipati45070252011-06-24 20:24:29 -07005371 uf_buf = (struct bfa_uf_buf_s *)
5372 bfa_mem_get_dmabuf_kva(ufm, uf_tag, uf->pb_len);
5373 buf = &uf_buf->d[0];
5374
Jing Huangba816ea2010-10-18 17:10:50 -07005375 m->frm_len = be16_to_cpu(m->frm_len);
5376 m->xfr_len = be16_to_cpu(m->xfr_len);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07005377
5378 fchs = (struct fchs_s *)uf_buf;
5379
5380 list_del(&uf->qe); /* dequeue from posted queue */
5381
5382 uf->data_ptr = buf;
5383 uf->data_len = m->xfr_len;
5384
Jing Huangd4b671c2010-12-26 21:46:35 -08005385 WARN_ON(uf->data_len < sizeof(struct fchs_s));
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07005386
5387 if (uf->data_len == sizeof(struct fchs_s)) {
5388 bfa_plog_fchdr(bfa->plog, BFA_PL_MID_HAL_UF, BFA_PL_EID_RX,
5389 uf->data_len, (struct fchs_s *)buf);
5390 } else {
5391 u32 pld_w0 = *((u32 *) (buf + sizeof(struct fchs_s)));
5392 bfa_plog_fchdr_and_pl(bfa->plog, BFA_PL_MID_HAL_UF,
5393 BFA_PL_EID_RX, uf->data_len,
5394 (struct fchs_s *)buf, pld_w0);
5395 }
5396
5397 if (bfa->fcs)
5398 __bfa_cb_uf_recv(uf, BFA_TRUE);
5399 else
5400 bfa_cb_queue(bfa, &uf->hcb_qe, __bfa_cb_uf_recv, uf);
5401}
5402
5403static void
5404bfa_uf_stop(struct bfa_s *bfa)
5405{
5406}
5407
5408static void
5409bfa_uf_iocdisable(struct bfa_s *bfa)
5410{
5411 struct bfa_uf_mod_s *ufm = BFA_UF_MOD(bfa);
5412 struct bfa_uf_s *uf;
5413 struct list_head *qe, *qen;
5414
Krishna Gudipati3fd45982011-06-24 20:24:08 -07005415 /* Enqueue unused uf resources to free_q */
5416 list_splice_tail_init(&ufm->uf_unused_q, &ufm->uf_free_q);
5417
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07005418 list_for_each_safe(qe, qen, &ufm->uf_posted_q) {
5419 uf = (struct bfa_uf_s *) qe;
5420 list_del(&uf->qe);
5421 bfa_uf_put(ufm, uf);
5422 }
5423}
5424
5425static void
5426bfa_uf_start(struct bfa_s *bfa)
5427{
5428 bfa_uf_post_all(BFA_UF_MOD(bfa));
5429}
5430
Jing Huang5fbe25c2010-10-18 17:17:23 -07005431/*
Lucas De Marchi25985ed2011-03-30 22:57:33 -03005432 * Register handler for all unsolicited receive frames.
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07005433 *
5434 * @param[in] bfa BFA instance
5435 * @param[in] ufrecv receive handler function
5436 * @param[in] cbarg receive handler arg
5437 */
5438void
5439bfa_uf_recv_register(struct bfa_s *bfa, bfa_cb_uf_recv_t ufrecv, void *cbarg)
5440{
5441 struct bfa_uf_mod_s *ufm = BFA_UF_MOD(bfa);
5442
5443 ufm->ufrecv = ufrecv;
5444 ufm->cbarg = cbarg;
5445}
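
/*
 * Illustrative sketch (not part of the driver): registering an unsolicited
 * frame handler.  my_uf_recv and my_fcs are hypothetical names; a real
 * consumer such as the FCS layer would register once at start-up.
 *
 *	static void
 *	my_uf_recv(void *cbarg, struct bfa_uf_s *uf)
 *	{
 *		struct fchs_s *fchs = (struct fchs_s *) uf->data_ptr;
 *
 *		... inspect fchs and uf->data_len ...
 *		bfa_uf_free(uf);	-- reposts the buffer to firmware
 *	}
 *
 *	bfa_uf_recv_register(bfa, my_uf_recv, my_fcs);
 */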
5446
Jing Huang5fbe25c2010-10-18 17:17:23 -07005447/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07005448 * Free an unsolicited frame back to BFA.
5449 *
5450 * @param[in] uf unsolicited frame to be freed
5451 *
5452 * @return None
5453 */
5454void
5455bfa_uf_free(struct bfa_uf_s *uf)
5456{
5457 bfa_uf_put(BFA_UF_MOD(uf->bfa), uf);
5458 bfa_uf_post_all(BFA_UF_MOD(uf->bfa));
5459}
5460
5461
5462
Jing Huang5fbe25c2010-10-18 17:17:23 -07005463/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07005464 * uf_pub BFA uf module public functions
5465 */
5466void
5467bfa_uf_isr(struct bfa_s *bfa, struct bfi_msg_s *msg)
5468{
5469 bfa_trc(bfa, msg->mhdr.msg_id);
5470
5471 switch (msg->mhdr.msg_id) {
5472 case BFI_UF_I2H_FRM_RCVD:
5473 uf_recv(bfa, (struct bfi_uf_frm_rcvd_s *) msg);
5474 break;
5475
5476 default:
5477 bfa_trc(bfa, msg->mhdr.msg_id);
Jing Huangd4b671c2010-12-26 21:46:35 -08005478 WARN_ON(1);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07005479 }
5480}
5481
Krishna Gudipati3fd45982011-06-24 20:24:08 -07005482void
5483bfa_uf_res_recfg(struct bfa_s *bfa, u16 num_uf_fw)
5484{
5485 struct bfa_uf_mod_s *mod = BFA_UF_MOD(bfa);
5486 struct list_head *qe;
5487 int i;
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07005488
Krishna Gudipati3fd45982011-06-24 20:24:08 -07005489 for (i = 0; i < (mod->num_ufs - num_uf_fw); i++) {
5490 bfa_q_deq_tail(&mod->uf_free_q, &qe);
5491 list_add_tail(qe, &mod->uf_unused_q);
5492 }
5493}
Krishna Gudipati3d7fc662011-06-24 20:28:17 -07005494
5495/*
Krishna Gudipatie3535462012-09-21 17:26:07 -07005496 * Dport forward declarations
5497 */
5498
5499/*
5500 * BFA DPORT state machine events
5501 */
5502enum bfa_dport_sm_event {
5503 BFA_DPORT_SM_ENABLE = 1, /* dport enable event */
5504 BFA_DPORT_SM_DISABLE = 2, /* dport disable event */
5505 BFA_DPORT_SM_FWRSP = 3, /* fw enable/disable rsp */
5506 BFA_DPORT_SM_QRESUME = 4, /* CQ space available */
5507 BFA_DPORT_SM_HWFAIL = 5, /* IOC h/w failure */
5508};
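
/*
 * State transition summary for the handlers below:
 *
 *   disabled --ENABLE--> enabling (or enabling_qwait while the DIAG
 *   request queue is full) --FWRSP--> enabled --DISABLE--> disabling (or
 *   disabling_qwait) --FWRSP--> disabled.  HWFAIL from any state falls
 *   back to disabled, cancelling a pending queue wait where one exists.
 */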
5509
5510static void bfa_dport_sm_disabled(struct bfa_dport_s *dport,
5511 enum bfa_dport_sm_event event);
5512static void bfa_dport_sm_enabling_qwait(struct bfa_dport_s *dport,
5513 enum bfa_dport_sm_event event);
5514static void bfa_dport_sm_enabling(struct bfa_dport_s *dport,
5515 enum bfa_dport_sm_event event);
5516static void bfa_dport_sm_enabled(struct bfa_dport_s *dport,
5517 enum bfa_dport_sm_event event);
5518static void bfa_dport_sm_disabling_qwait(struct bfa_dport_s *dport,
5519 enum bfa_dport_sm_event event);
5520static void bfa_dport_sm_disabling(struct bfa_dport_s *dport,
5521 enum bfa_dport_sm_event event);
5522static void bfa_dport_qresume(void *cbarg);
5523static void bfa_dport_req_comp(struct bfa_dport_s *dport,
5524 bfi_diag_dport_rsp_t *msg);
5525
5526/*
Krishna Gudipati3d7fc662011-06-24 20:28:17 -07005527 * BFA fcdiag module
5528 */
5529#define BFA_DIAG_QTEST_TOV 1000 /* msec */
5530
5531/*
5532 * Set port diag_busy status based on the loopback lock
5533 */
5534static void
5535bfa_fcdiag_set_busy_status(struct bfa_fcdiag_s *fcdiag)
5536{
5537 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(fcdiag->bfa);
5538
5539 if (fcdiag->lb.lock)
5540 fcport->diag_busy = BFA_TRUE;
5541 else
5542 fcport->diag_busy = BFA_FALSE;
5543}
5544
5545static void
5546bfa_fcdiag_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *meminfo,
5547 struct bfa_s *bfa)
5548{
5549}
5550
5551static void
5552bfa_fcdiag_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
5553 struct bfa_pcidev_s *pcidev)
5554{
5555 struct bfa_fcdiag_s *fcdiag = BFA_FCDIAG_MOD(bfa);
Krishna Gudipatie3535462012-09-21 17:26:07 -07005556 struct bfa_dport_s *dport = &fcdiag->dport;
5557
Krishna Gudipati3d7fc662011-06-24 20:28:17 -07005558 fcdiag->bfa = bfa;
5559 fcdiag->trcmod = bfa->trcmod;
5560	/* The common DIAG attach bfa_diag_attach() does all the memory claims */
Krishna Gudipatie3535462012-09-21 17:26:07 -07005561 dport->bfa = bfa;
5562 bfa_sm_set_state(dport, bfa_dport_sm_disabled);
5563 bfa_reqq_winit(&dport->reqq_wait, bfa_dport_qresume, dport);
5564 dport->cbfn = NULL;
5565 dport->cbarg = NULL;
Krishna Gudipati3d7fc662011-06-24 20:28:17 -07005566}
5567
5568static void
5569bfa_fcdiag_iocdisable(struct bfa_s *bfa)
5570{
5571 struct bfa_fcdiag_s *fcdiag = BFA_FCDIAG_MOD(bfa);
Krishna Gudipatie3535462012-09-21 17:26:07 -07005572 struct bfa_dport_s *dport = &fcdiag->dport;
5573
Krishna Gudipati3d7fc662011-06-24 20:28:17 -07005574 bfa_trc(fcdiag, fcdiag->lb.lock);
5575 if (fcdiag->lb.lock) {
5576 fcdiag->lb.status = BFA_STATUS_IOC_FAILURE;
5577 fcdiag->lb.cbfn(fcdiag->lb.cbarg, fcdiag->lb.status);
5578 fcdiag->lb.lock = 0;
5579 bfa_fcdiag_set_busy_status(fcdiag);
5580 }
Krishna Gudipatie3535462012-09-21 17:26:07 -07005581
5582 bfa_sm_send_event(dport, BFA_DPORT_SM_HWFAIL);
Krishna Gudipati3d7fc662011-06-24 20:28:17 -07005583}
5584
5585static void
5586bfa_fcdiag_detach(struct bfa_s *bfa)
5587{
5588}
5589
5590static void
5591bfa_fcdiag_start(struct bfa_s *bfa)
5592{
5593}
5594
5595static void
5596bfa_fcdiag_stop(struct bfa_s *bfa)
5597{
5598}
5599
5600static void
5601bfa_fcdiag_queuetest_timeout(void *cbarg)
5602{
5603 struct bfa_fcdiag_s *fcdiag = cbarg;
5604 struct bfa_diag_qtest_result_s *res = fcdiag->qtest.result;
5605
5606 bfa_trc(fcdiag, fcdiag->qtest.all);
5607 bfa_trc(fcdiag, fcdiag->qtest.count);
5608
5609 fcdiag->qtest.timer_active = 0;
5610
5611 res->status = BFA_STATUS_ETIMER;
5612 res->count = QTEST_CNT_DEFAULT - fcdiag->qtest.count;
5613 if (fcdiag->qtest.all)
5614 res->queue = fcdiag->qtest.all;
5615
5616 bfa_trc(fcdiag, BFA_STATUS_ETIMER);
5617 fcdiag->qtest.status = BFA_STATUS_ETIMER;
5618 fcdiag->qtest.cbfn(fcdiag->qtest.cbarg, fcdiag->qtest.status);
5619 fcdiag->qtest.lock = 0;
5620}
5621
5622static bfa_status_t
5623bfa_fcdiag_queuetest_send(struct bfa_fcdiag_s *fcdiag)
5624{
5625 u32 i;
5626 struct bfi_diag_qtest_req_s *req;
5627
5628 req = bfa_reqq_next(fcdiag->bfa, fcdiag->qtest.queue);
5629 if (!req)
5630 return BFA_STATUS_DEVBUSY;
5631
5632 /* build host command */
5633 bfi_h2i_set(req->mh, BFI_MC_DIAG, BFI_DIAG_H2I_QTEST,
5634 bfa_fn_lpu(fcdiag->bfa));
5635
5636 for (i = 0; i < BFI_LMSG_PL_WSZ; i++)
5637 req->data[i] = QTEST_PAT_DEFAULT;
5638
5639 bfa_trc(fcdiag, fcdiag->qtest.queue);
5640 /* ring door bell */
5641 bfa_reqq_produce(fcdiag->bfa, fcdiag->qtest.queue, req->mh);
5642 return BFA_STATUS_OK;
5643}
5644
5645static void
5646bfa_fcdiag_queuetest_comp(struct bfa_fcdiag_s *fcdiag,
5647 bfi_diag_qtest_rsp_t *rsp)
5648{
5649 struct bfa_diag_qtest_result_s *res = fcdiag->qtest.result;
5650 bfa_status_t status = BFA_STATUS_OK;
5651 int i;
5652
5653 /* Check timer, should still be active */
5654 if (!fcdiag->qtest.timer_active) {
5655 bfa_trc(fcdiag, fcdiag->qtest.timer_active);
5656 return;
5657 }
5658
5659 /* update count */
5660 fcdiag->qtest.count--;
5661
5662 /* Check result */
5663 for (i = 0; i < BFI_LMSG_PL_WSZ; i++) {
5664 if (rsp->data[i] != ~(QTEST_PAT_DEFAULT)) {
5665 res->status = BFA_STATUS_DATACORRUPTED;
5666 break;
5667 }
5668 }
5669
5670 if (res->status == BFA_STATUS_OK) {
5671 if (fcdiag->qtest.count > 0) {
5672 status = bfa_fcdiag_queuetest_send(fcdiag);
5673 if (status == BFA_STATUS_OK)
5674 return;
5675 else
5676 res->status = status;
5677 } else if (fcdiag->qtest.all > 0 &&
5678 fcdiag->qtest.queue < (BFI_IOC_MAX_CQS - 1)) {
5679 fcdiag->qtest.count = QTEST_CNT_DEFAULT;
5680 fcdiag->qtest.queue++;
5681 status = bfa_fcdiag_queuetest_send(fcdiag);
5682 if (status == BFA_STATUS_OK)
5683 return;
5684 else
5685 res->status = status;
5686 }
5687 }
5688
5689 /* Stop timer when we comp all queue */
5690 if (fcdiag->qtest.timer_active) {
5691 bfa_timer_stop(&fcdiag->qtest.timer);
5692 fcdiag->qtest.timer_active = 0;
5693 }
5694 res->queue = fcdiag->qtest.queue;
5695 res->count = QTEST_CNT_DEFAULT - fcdiag->qtest.count;
5696 bfa_trc(fcdiag, res->count);
5697 bfa_trc(fcdiag, res->status);
5698 fcdiag->qtest.status = res->status;
5699 fcdiag->qtest.cbfn(fcdiag->qtest.cbarg, fcdiag->qtest.status);
5700 fcdiag->qtest.lock = 0;
5701}
5702
5703static void
5704bfa_fcdiag_loopback_comp(struct bfa_fcdiag_s *fcdiag,
5705 struct bfi_diag_lb_rsp_s *rsp)
5706{
5707 struct bfa_diag_loopback_result_s *res = fcdiag->lb.result;
5708
5709 res->numtxmfrm = be32_to_cpu(rsp->res.numtxmfrm);
5710 res->numosffrm = be32_to_cpu(rsp->res.numosffrm);
5711 res->numrcvfrm = be32_to_cpu(rsp->res.numrcvfrm);
5712 res->badfrminf = be32_to_cpu(rsp->res.badfrminf);
5713 res->badfrmnum = be32_to_cpu(rsp->res.badfrmnum);
5714 res->status = rsp->res.status;
5715 fcdiag->lb.status = rsp->res.status;
5716 bfa_trc(fcdiag, fcdiag->lb.status);
5717 fcdiag->lb.cbfn(fcdiag->lb.cbarg, fcdiag->lb.status);
5718 fcdiag->lb.lock = 0;
5719 bfa_fcdiag_set_busy_status(fcdiag);
5720}
5721
5722static bfa_status_t
5723bfa_fcdiag_loopback_send(struct bfa_fcdiag_s *fcdiag,
5724 struct bfa_diag_loopback_s *loopback)
5725{
5726 struct bfi_diag_lb_req_s *lb_req;
5727
5728 lb_req = bfa_reqq_next(fcdiag->bfa, BFA_REQQ_DIAG);
5729 if (!lb_req)
5730 return BFA_STATUS_DEVBUSY;
5731
5732 /* build host command */
5733 bfi_h2i_set(lb_req->mh, BFI_MC_DIAG, BFI_DIAG_H2I_LOOPBACK,
5734 bfa_fn_lpu(fcdiag->bfa));
5735
5736 lb_req->lb_mode = loopback->lb_mode;
5737 lb_req->speed = loopback->speed;
5738 lb_req->loopcnt = loopback->loopcnt;
5739 lb_req->pattern = loopback->pattern;
5740
5741 /* ring door bell */
5742 bfa_reqq_produce(fcdiag->bfa, BFA_REQQ_DIAG, lb_req->mh);
5743
5744 bfa_trc(fcdiag, loopback->lb_mode);
5745 bfa_trc(fcdiag, loopback->speed);
5746 bfa_trc(fcdiag, loopback->loopcnt);
5747 bfa_trc(fcdiag, loopback->pattern);
5748 return BFA_STATUS_OK;
5749}
5750
5751/*
5752 * cpe/rme intr handler
5753 */
5754void
5755bfa_fcdiag_intr(struct bfa_s *bfa, struct bfi_msg_s *msg)
5756{
5757 struct bfa_fcdiag_s *fcdiag = BFA_FCDIAG_MOD(bfa);
5758
5759 switch (msg->mhdr.msg_id) {
5760 case BFI_DIAG_I2H_LOOPBACK:
5761 bfa_fcdiag_loopback_comp(fcdiag,
5762 (struct bfi_diag_lb_rsp_s *) msg);
5763 break;
5764 case BFI_DIAG_I2H_QTEST:
5765 bfa_fcdiag_queuetest_comp(fcdiag, (bfi_diag_qtest_rsp_t *)msg);
5766 break;
Krishna Gudipatie3535462012-09-21 17:26:07 -07005767 case BFI_DIAG_I2H_DPORT:
5768 bfa_dport_req_comp(&fcdiag->dport, (bfi_diag_dport_rsp_t *)msg);
5769 break;
Krishna Gudipati3d7fc662011-06-24 20:28:17 -07005770 default:
5771 bfa_trc(fcdiag, msg->mhdr.msg_id);
5772 WARN_ON(1);
5773 }
5774}
5775
5776/*
5777 * Loopback test
5778 *
5779 * @param[in] *bfa - bfa data struct
5780 * @param[in] opmode - port operation mode
5781 * @param[in] speed - port speed
5782 * @param[in] lpcnt - loop count
5783 * @param[in] *result - ptr to bfa_diag_loopback_result_t data struct
5784 * @param[in] *result - pt to bfa_diag_loopback_result_t data struct
5785 * @param[in] cbfn - callback function
5786 * @param[in] cbarg - callback function arg
5787 *
5788 * @param[out]
5789 */
5790bfa_status_t
5791bfa_fcdiag_loopback(struct bfa_s *bfa, enum bfa_port_opmode opmode,
5792 enum bfa_port_speed speed, u32 lpcnt, u32 pat,
5793 struct bfa_diag_loopback_result_s *result, bfa_cb_diag_t cbfn,
5794 void *cbarg)
5795{
5796 struct bfa_diag_loopback_s loopback;
5797 struct bfa_port_attr_s attr;
5798 bfa_status_t status;
5799 struct bfa_fcdiag_s *fcdiag = BFA_FCDIAG_MOD(bfa);
5800
5801 if (!bfa_iocfc_is_operational(bfa))
5802 return BFA_STATUS_IOC_NON_OP;
5803
5804 /* if port is PBC disabled, return error */
5805 if (bfa_fcport_is_pbcdisabled(bfa)) {
5806 bfa_trc(fcdiag, BFA_STATUS_PBC);
5807 return BFA_STATUS_PBC;
5808 }
5809
5810 if (bfa_fcport_is_disabled(bfa) == BFA_FALSE) {
5811 bfa_trc(fcdiag, opmode);
5812 return BFA_STATUS_PORT_NOT_DISABLED;
5813 }
5814
Krishna Gudipatifb778b02011-07-20 17:01:07 -07005815 /*
5816 * Check if input speed is supported by the port mode
5817 */
5818 if (bfa_ioc_get_type(&bfa->ioc) == BFA_IOC_TYPE_FC) {
5819 if (!(speed == BFA_PORT_SPEED_1GBPS ||
5820 speed == BFA_PORT_SPEED_2GBPS ||
5821 speed == BFA_PORT_SPEED_4GBPS ||
5822 speed == BFA_PORT_SPEED_8GBPS ||
5823 speed == BFA_PORT_SPEED_16GBPS ||
5824 speed == BFA_PORT_SPEED_AUTO)) {
5825 bfa_trc(fcdiag, speed);
5826 return BFA_STATUS_UNSUPP_SPEED;
5827 }
5828 bfa_fcport_get_attr(bfa, &attr);
5829 bfa_trc(fcdiag, attr.speed_supported);
5830 if (speed > attr.speed_supported)
5831 return BFA_STATUS_UNSUPP_SPEED;
5832 } else {
5833 if (speed != BFA_PORT_SPEED_10GBPS) {
5834 bfa_trc(fcdiag, speed);
5835 return BFA_STATUS_UNSUPP_SPEED;
5836 }
5837 }
Krishna Gudipati3d7fc662011-06-24 20:28:17 -07005838
Krishna Gudipatie3535462012-09-21 17:26:07 -07005839 /*
5840 * For CT2, 1G is not supported
5841 */
5842 if ((speed == BFA_PORT_SPEED_1GBPS) &&
5843 (bfa_asic_id_ct2(bfa->ioc.pcidev.device_id))) {
5844 bfa_trc(fcdiag, speed);
5845 return BFA_STATUS_UNSUPP_SPEED;
5846 }
5847
Krishna Gudipati3d7fc662011-06-24 20:28:17 -07005848 /* For Mezz card, port speed entered needs to be checked */
5849 if (bfa_mfg_is_mezz(bfa->ioc.attr->card_type)) {
5850 if (bfa_ioc_get_type(&bfa->ioc) == BFA_IOC_TYPE_FC) {
Krishna Gudipati3d7fc662011-06-24 20:28:17 -07005851 if (!(speed == BFA_PORT_SPEED_1GBPS ||
5852 speed == BFA_PORT_SPEED_2GBPS ||
5853 speed == BFA_PORT_SPEED_4GBPS ||
5854 speed == BFA_PORT_SPEED_8GBPS ||
5855 speed == BFA_PORT_SPEED_16GBPS ||
5856 speed == BFA_PORT_SPEED_AUTO))
5857 return BFA_STATUS_UNSUPP_SPEED;
5858 } else {
5859 if (speed != BFA_PORT_SPEED_10GBPS)
5860 return BFA_STATUS_UNSUPP_SPEED;
5861 }
5862 }
5863
5864 /* check to see if there is another destructive diag cmd running */
5865 if (fcdiag->lb.lock) {
5866 bfa_trc(fcdiag, fcdiag->lb.lock);
5867 return BFA_STATUS_DEVBUSY;
5868 }
5869
5870 fcdiag->lb.lock = 1;
5871 loopback.lb_mode = opmode;
5872 loopback.speed = speed;
5873 loopback.loopcnt = lpcnt;
5874 loopback.pattern = pat;
5875 fcdiag->lb.result = result;
5876 fcdiag->lb.cbfn = cbfn;
5877 fcdiag->lb.cbarg = cbarg;
5878 memset(result, 0, sizeof(struct bfa_diag_loopback_result_s));
5879 bfa_fcdiag_set_busy_status(fcdiag);
5880
5881 /* Send msg to fw */
5882 status = bfa_fcdiag_loopback_send(fcdiag, &loopback);
5883 return status;
5884}
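
/*
 * Illustrative sketch (not part of the driver): a caller driving the
 * loopback test.  my_lb_done, my_ctx, opmode and the loop count/pattern
 * values below are hypothetical.
 *
 *	static void
 *	my_lb_done(void *cbarg, bfa_status_t status)
 *	{
 *		-- fcdiag->lb.result has already been filled in here
 *	}
 *
 *	status = bfa_fcdiag_loopback(bfa, opmode, BFA_PORT_SPEED_8GBPS,
 *				1024, 0xA5A5A5A5, &result,
 *				my_lb_done, my_ctx);
 *
 * The port must be disabled (and not PBC-disabled) first, and only one
 * destructive diag command may run at a time (the lb.lock check above).
 */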
5885
5886/*
5887 * DIAG queue test command
5888 *
5889 * @param[in] *bfa - bfa data struct
5890 * @param[in] force - 1: don't do ioc op checking
5891 * @param[in] queue - queue no. to test
5892 * @param[in] *result - ptr to bfa_diag_qtest_result_t data struct
5893 * @param[in] cbfn - callback function
5894 * @param[in] *cbarg - callback function arg
5895 *
5896 * @param[out]
5897 */
5898bfa_status_t
5899bfa_fcdiag_queuetest(struct bfa_s *bfa, u32 force, u32 queue,
5900 struct bfa_diag_qtest_result_s *result, bfa_cb_diag_t cbfn,
5901 void *cbarg)
5902{
5903 struct bfa_fcdiag_s *fcdiag = BFA_FCDIAG_MOD(bfa);
5904 bfa_status_t status;
5905 bfa_trc(fcdiag, force);
5906 bfa_trc(fcdiag, queue);
5907
5908 if (!force && !bfa_iocfc_is_operational(bfa))
5909 return BFA_STATUS_IOC_NON_OP;
5910
5911 /* check to see if there is another destructive diag cmd running */
5912 if (fcdiag->qtest.lock) {
5913 bfa_trc(fcdiag, fcdiag->qtest.lock);
5914 return BFA_STATUS_DEVBUSY;
5915 }
5916
5917 /* Initialization */
5918 fcdiag->qtest.lock = 1;
5919 fcdiag->qtest.cbfn = cbfn;
5920 fcdiag->qtest.cbarg = cbarg;
5921 fcdiag->qtest.result = result;
5922 fcdiag->qtest.count = QTEST_CNT_DEFAULT;
5923
5924 /* Init test results */
5925 fcdiag->qtest.result->status = BFA_STATUS_OK;
5926 fcdiag->qtest.result->count = 0;
5927
5928 /* send */
5929 if (queue < BFI_IOC_MAX_CQS) {
5930 fcdiag->qtest.result->queue = (u8)queue;
5931 fcdiag->qtest.queue = (u8)queue;
5932 fcdiag->qtest.all = 0;
5933 } else {
5934 fcdiag->qtest.result->queue = 0;
5935 fcdiag->qtest.queue = 0;
5936 fcdiag->qtest.all = 1;
5937 }
5938 status = bfa_fcdiag_queuetest_send(fcdiag);
5939
5940 /* Start a timer */
5941 if (status == BFA_STATUS_OK) {
5942 bfa_timer_start(bfa, &fcdiag->qtest.timer,
5943 bfa_fcdiag_queuetest_timeout, fcdiag,
5944 BFA_DIAG_QTEST_TOV);
5945 fcdiag->qtest.timer_active = 1;
5946 }
5947 return status;
5948}
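
/*
 * Note: passing queue >= BFI_IOC_MAX_CQS selects "all" mode, which starts
 * at CQ 0.  Each passing completion either resends on the same queue while
 * its count lasts or moves on to the next CQ with a fresh count.  A single
 * BFA_DIAG_QTEST_TOV timer covers the run and fails it with
 * BFA_STATUS_ETIMER if responses stop arriving.
 */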
5949
5950/*
5951 * Check whether DIAG loopback (PLB) is running
5952 *
5953 * @param[in] *bfa - bfa data struct
5954 *
5955 * @param[out]
5956 */
5957bfa_status_t
5958bfa_fcdiag_lb_is_running(struct bfa_s *bfa)
5959{
5960 struct bfa_fcdiag_s *fcdiag = BFA_FCDIAG_MOD(bfa);
5961 return fcdiag->lb.lock ? BFA_STATUS_DIAG_BUSY : BFA_STATUS_OK;
5962}
Krishna Gudipatie3535462012-09-21 17:26:07 -07005963
5964/*
5965 * D-port
5966 */
5967static bfa_boolean_t bfa_dport_send_req(struct bfa_dport_s *dport,
5968 enum bfi_dport_req req);
5969static void
5970bfa_cb_fcdiag_dport(struct bfa_dport_s *dport, bfa_status_t bfa_status)
5971{
5972 if (dport->cbfn != NULL) {
5973 dport->cbfn(dport->cbarg, bfa_status);
5974 dport->cbfn = NULL;
5975 dport->cbarg = NULL;
5976 }
5977}
5978
5979static void
5980bfa_dport_sm_disabled(struct bfa_dport_s *dport, enum bfa_dport_sm_event event)
5981{
5982 bfa_trc(dport->bfa, event);
5983
5984 switch (event) {
5985 case BFA_DPORT_SM_ENABLE:
5986 bfa_fcport_dportenable(dport->bfa);
5987 if (bfa_dport_send_req(dport, BFI_DPORT_ENABLE))
5988 bfa_sm_set_state(dport, bfa_dport_sm_enabling);
5989 else
5990 bfa_sm_set_state(dport, bfa_dport_sm_enabling_qwait);
5991 break;
5992
5993 case BFA_DPORT_SM_DISABLE:
5994 /* Already disabled */
5995 break;
5996
5997 case BFA_DPORT_SM_HWFAIL:
5998 /* ignore */
5999 break;
6000
6001 default:
6002 bfa_sm_fault(dport->bfa, event);
6003 }
6004}
6005
6006static void
6007bfa_dport_sm_enabling_qwait(struct bfa_dport_s *dport,
6008 enum bfa_dport_sm_event event)
6009{
6010 bfa_trc(dport->bfa, event);
6011
6012 switch (event) {
6013 case BFA_DPORT_SM_QRESUME:
6014 bfa_sm_set_state(dport, bfa_dport_sm_enabling);
6015 bfa_dport_send_req(dport, BFI_DPORT_ENABLE);
6016 break;
6017
6018 case BFA_DPORT_SM_HWFAIL:
6019 bfa_reqq_wcancel(&dport->reqq_wait);
6020 bfa_sm_set_state(dport, bfa_dport_sm_disabled);
6021 bfa_cb_fcdiag_dport(dport, BFA_STATUS_FAILED);
6022 break;
6023
6024 default:
6025 bfa_sm_fault(dport->bfa, event);
6026 }
6027}
6028
6029static void
6030bfa_dport_sm_enabling(struct bfa_dport_s *dport, enum bfa_dport_sm_event event)
6031{
6032 bfa_trc(dport->bfa, event);
6033
6034 switch (event) {
6035 case BFA_DPORT_SM_FWRSP:
6036 bfa_sm_set_state(dport, bfa_dport_sm_enabled);
6037 break;
6038
6039 case BFA_DPORT_SM_HWFAIL:
6040 bfa_sm_set_state(dport, bfa_dport_sm_disabled);
6041 bfa_cb_fcdiag_dport(dport, BFA_STATUS_FAILED);
6042 break;
6043
6044 default:
6045 bfa_sm_fault(dport->bfa, event);
6046 }
6047}
6048
6049static void
6050bfa_dport_sm_enabled(struct bfa_dport_s *dport, enum bfa_dport_sm_event event)
6051{
6052 bfa_trc(dport->bfa, event);
6053
6054 switch (event) {
6055 case BFA_DPORT_SM_ENABLE:
6056 /* Already enabled */
6057 break;
6058
6059 case BFA_DPORT_SM_DISABLE:
6060 bfa_fcport_dportdisable(dport->bfa);
6061 if (bfa_dport_send_req(dport, BFI_DPORT_DISABLE))
6062 bfa_sm_set_state(dport, bfa_dport_sm_disabling);
6063 else
6064 bfa_sm_set_state(dport, bfa_dport_sm_disabling_qwait);
6065 break;
6066
6067 case BFA_DPORT_SM_HWFAIL:
6068 bfa_sm_set_state(dport, bfa_dport_sm_disabled);
6069 break;
6070
6071 default:
6072 bfa_sm_fault(dport->bfa, event);
6073 }
6074}
6075
6076static void
6077bfa_dport_sm_disabling_qwait(struct bfa_dport_s *dport,
6078 enum bfa_dport_sm_event event)
6079{
6080 bfa_trc(dport->bfa, event);
6081
6082 switch (event) {
6083 case BFA_DPORT_SM_QRESUME:
6084 bfa_sm_set_state(dport, bfa_dport_sm_disabling);
6085 bfa_dport_send_req(dport, BFI_DPORT_DISABLE);
6086 break;
6087
6088 case BFA_DPORT_SM_HWFAIL:
6089 bfa_sm_set_state(dport, bfa_dport_sm_disabled);
6090 bfa_reqq_wcancel(&dport->reqq_wait);
6091 bfa_cb_fcdiag_dport(dport, BFA_STATUS_OK);
6092 break;
6093
6094 default:
6095 bfa_sm_fault(dport->bfa, event);
6096 }
6097}
6098
6099static void
6100bfa_dport_sm_disabling(struct bfa_dport_s *dport, enum bfa_dport_sm_event event)
6101{
6102 bfa_trc(dport->bfa, event);
6103
6104 switch (event) {
6105 case BFA_DPORT_SM_FWRSP:
6106 bfa_sm_set_state(dport, bfa_dport_sm_disabled);
6107 break;
6108
6109 case BFA_DPORT_SM_HWFAIL:
6110 bfa_sm_set_state(dport, bfa_dport_sm_disabled);
6111 bfa_cb_fcdiag_dport(dport, BFA_STATUS_OK);
6112 break;
6113
6114 default:
6115 bfa_sm_fault(dport->bfa, event);
6116 }
6117}
6118
6119
6120static bfa_boolean_t
6121bfa_dport_send_req(struct bfa_dport_s *dport, enum bfi_dport_req req)
6122{
6123 struct bfi_diag_dport_req_s *m;
6124
6125 /*
6126 * Increment message tag before queue check, so that responses to old
6127 * requests are discarded.
6128 */
6129 dport->msgtag++;
6130
6131 /*
6132 * check for room in queue to send request now
6133 */
6134 m = bfa_reqq_next(dport->bfa, BFA_REQQ_DIAG);
6135 if (!m) {
6136 bfa_reqq_wait(dport->bfa, BFA_REQQ_PORT, &dport->reqq_wait);
6137 return BFA_FALSE;
6138 }
6139
6140 bfi_h2i_set(m->mh, BFI_MC_DIAG, BFI_DIAG_H2I_DPORT,
6141 bfa_fn_lpu(dport->bfa));
6142 m->req = req;
6143 m->msgtag = dport->msgtag;
6144
6145 /*
6146 * queue I/O message to firmware
6147 */
6148 bfa_reqq_produce(dport->bfa, BFA_REQQ_DIAG, m->mh);
6149
6150 return BFA_TRUE;
6151}
6152
6153static void
6154bfa_dport_qresume(void *cbarg)
6155{
6156 struct bfa_dport_s *dport = cbarg;
6157
6158 bfa_sm_send_event(dport, BFA_DPORT_SM_QRESUME);
6159}
6160
6161static void
6162bfa_dport_req_comp(struct bfa_dport_s *dport, bfi_diag_dport_rsp_t *msg)
6163{
6164 bfa_sm_send_event(dport, BFA_DPORT_SM_FWRSP);
6165 bfa_cb_fcdiag_dport(dport, msg->status);
6166}
6167
6168/*
6169 * Dport enable
6170 *
6171 * @param[in] *bfa - bfa data struct
6172 */
6173bfa_status_t
6174bfa_dport_enable(struct bfa_s *bfa, bfa_cb_diag_t cbfn, void *cbarg)
6175{
6176 struct bfa_fcdiag_s *fcdiag = BFA_FCDIAG_MOD(bfa);
6177 struct bfa_dport_s *dport = &fcdiag->dport;
6178
6179 /*
6180	 * Dport is not supported on MEZZ cards
6181 */
6182 if (bfa_mfg_is_mezz(dport->bfa->ioc.attr->card_type)) {
6183 bfa_trc(dport->bfa, BFA_STATUS_PBC);
6184 return BFA_STATUS_CMD_NOTSUPP_MEZZ;
6185 }
6186
6187 /*
6188 * Check to see if IOC is down
6189 */
6190 if (!bfa_iocfc_is_operational(bfa))
6191 return BFA_STATUS_IOC_NON_OP;
6192
6193 /* if port is PBC disabled, return error */
6194 if (bfa_fcport_is_pbcdisabled(bfa)) {
6195 bfa_trc(dport->bfa, BFA_STATUS_PBC);
6196 return BFA_STATUS_PBC;
6197 }
6198
6199 /*
6200 * Check if port mode is FC port
6201 */
6202 if (bfa_ioc_get_type(&bfa->ioc) != BFA_IOC_TYPE_FC) {
6203 bfa_trc(dport->bfa, bfa_ioc_get_type(&bfa->ioc));
6204 return BFA_STATUS_CMD_NOTSUPP_CNA;
6205 }
6206
6207 /*
6208 * Check if port is in LOOP mode
6209 */
6210 if ((bfa_fcport_get_cfg_topology(bfa) == BFA_PORT_TOPOLOGY_LOOP) ||
6211 (bfa_fcport_get_topology(bfa) == BFA_PORT_TOPOLOGY_LOOP)) {
6212 bfa_trc(dport->bfa, 0);
6213 return BFA_STATUS_TOPOLOGY_LOOP;
6214 }
6215
6216 /*
6217	 * Check if port is in TRUNK mode
6218 */
6219 if (bfa_fcport_is_trunk_enabled(bfa)) {
6220 bfa_trc(dport->bfa, 0);
6221 return BFA_STATUS_ERROR_TRUNK_ENABLED;
6222 }
6223
6224 /*
6225	 * Check to see if port is disabled or in dport state
6226 */
6227 if ((bfa_fcport_is_disabled(bfa) == BFA_FALSE) &&
6228 (bfa_fcport_is_dport(bfa) == BFA_FALSE)) {
6229 bfa_trc(dport->bfa, 0);
6230 return BFA_STATUS_PORT_NOT_DISABLED;
6231 }
6232
6233 /*
6234 * Check if dport is busy
6235 */
6236 if (bfa_sm_cmp_state(dport, bfa_dport_sm_enabling) ||
6237 bfa_sm_cmp_state(dport, bfa_dport_sm_enabling_qwait) ||
6238 bfa_sm_cmp_state(dport, bfa_dport_sm_disabling) ||
6239 bfa_sm_cmp_state(dport, bfa_dport_sm_disabling_qwait)) {
6240 return BFA_STATUS_DEVBUSY;
6241 }
6242
6243 /*
6244 * Check if dport is already enabled
6245 */
6246 if (bfa_sm_cmp_state(dport, bfa_dport_sm_enabled)) {
6247 bfa_trc(dport->bfa, 0);
6248 return BFA_STATUS_DPORT_ENABLED;
6249 }
6250
6251 dport->cbfn = cbfn;
6252 dport->cbarg = cbarg;
6253
6254 bfa_sm_send_event(dport, BFA_DPORT_SM_ENABLE);
6255 return BFA_STATUS_OK;
6256}
6257
6258/*
6259 * Dport disable
6260 *
6261 * @param[in] *bfa - bfa data struct
6262 */
6263bfa_status_t
6264bfa_dport_disable(struct bfa_s *bfa, bfa_cb_diag_t cbfn, void *cbarg)
6265{
6266 struct bfa_fcdiag_s *fcdiag = BFA_FCDIAG_MOD(bfa);
6267 struct bfa_dport_s *dport = &fcdiag->dport;
6268
6269 if (bfa_ioc_is_disabled(&bfa->ioc))
6270 return BFA_STATUS_IOC_DISABLED;
6271
6272 /* if port is PBC disabled, return error */
6273 if (bfa_fcport_is_pbcdisabled(bfa)) {
6274 bfa_trc(dport->bfa, BFA_STATUS_PBC);
6275 return BFA_STATUS_PBC;
6276 }
6277
6278 /*
6279	 * Check to see if port is disabled or in dport state
6280 */
6281 if ((bfa_fcport_is_disabled(bfa) == BFA_FALSE) &&
6282 (bfa_fcport_is_dport(bfa) == BFA_FALSE)) {
6283 bfa_trc(dport->bfa, 0);
6284 return BFA_STATUS_PORT_NOT_DISABLED;
6285 }
6286
6287 /*
6288 * Check if dport is busy
6289 */
6290 if (bfa_sm_cmp_state(dport, bfa_dport_sm_enabling) ||
6291 bfa_sm_cmp_state(dport, bfa_dport_sm_enabling_qwait) ||
6292 bfa_sm_cmp_state(dport, bfa_dport_sm_disabling) ||
6293 bfa_sm_cmp_state(dport, bfa_dport_sm_disabling_qwait))
6294 return BFA_STATUS_DEVBUSY;
6295
6296 /*
6297 * Check if dport is already disabled
6298 */
6299 if (bfa_sm_cmp_state(dport, bfa_dport_sm_disabled)) {
6300 bfa_trc(dport->bfa, 0);
6301 return BFA_STATUS_DPORT_DISABLED;
6302 }
6303
6304 dport->cbfn = cbfn;
6305 dport->cbarg = cbarg;
6306
6307 bfa_sm_send_event(dport, BFA_DPORT_SM_DISABLE);
6308 return BFA_STATUS_OK;
6309}
6310
6311/*
6312 * Get D-port state
6313 *
6314 * @param[in] *bfa - bfa data struct
6315 */
6316
6317bfa_status_t
6318bfa_dport_get_state(struct bfa_s *bfa, enum bfa_dport_state *state)
6319{
6320 struct bfa_fcdiag_s *fcdiag = BFA_FCDIAG_MOD(bfa);
6321 struct bfa_dport_s *dport = &fcdiag->dport;
6322
6323 if (bfa_sm_cmp_state(dport, bfa_dport_sm_enabled))
6324 *state = BFA_DPORT_ST_ENABLED;
6325 else if (bfa_sm_cmp_state(dport, bfa_dport_sm_enabling) ||
6326 bfa_sm_cmp_state(dport, bfa_dport_sm_enabling_qwait))
6327 *state = BFA_DPORT_ST_ENABLING;
6328 else if (bfa_sm_cmp_state(dport, bfa_dport_sm_disabled))
6329 *state = BFA_DPORT_ST_DISABLED;
6330 else if (bfa_sm_cmp_state(dport, bfa_dport_sm_disabling) ||
6331 bfa_sm_cmp_state(dport, bfa_dport_sm_disabling_qwait))
6332 *state = BFA_DPORT_ST_DISABLING;
6333 else {
6334 bfa_trc(dport->bfa, BFA_STATUS_EINVAL);
6335 return BFA_STATUS_EINVAL;
6336 }
6337 return BFA_STATUS_OK;
6338}
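
/*
 * Illustrative sketch (not part of the driver): exercising the D-port API.
 * my_dport_cb, my_ctx and state are hypothetical names.
 *
 *	static void
 *	my_dport_cb(void *cbarg, bfa_status_t status)
 *	{
 *		-- firmware has completed the enable/disable request
 *	}
 *
 *	enum bfa_dport_state state;
 *
 *	status = bfa_dport_enable(bfa, my_dport_cb, my_ctx);
 *	...
 *	bfa_dport_get_state(bfa, &state);	-- BFA_DPORT_ST_ENABLED, etc.
 *	...
 *	status = bfa_dport_disable(bfa, my_dport_cb, my_ctx);
 *
 * Both calls return BFA_STATUS_DEVBUSY while an earlier request is still
 * in one of the enabling/disabling (or *_qwait) states.
 */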