/*
 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
 * All rights reserved
 * www.brocade.com
 *
 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License (GPL) Version 2 as
 * published by the Free Software Foundation
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */

#include <bfa.h>
#include <bfi/bfi_uf.h>
#include <cs/bfa_debug.h>

BFA_TRC_FILE(HAL, FCXP);
BFA_MODULE(fcxp);

/**
 * forward declarations
 */
static void __bfa_fcxp_send_cbfn(void *cbarg, bfa_boolean_t complete);
static void hal_fcxp_rx_plog(struct bfa_s *bfa, struct bfa_fcxp_s *fcxp,
                             struct bfi_fcxp_send_rsp_s *fcxp_rsp);
static void hal_fcxp_tx_plog(struct bfa_s *bfa, u32 reqlen,
                             struct bfa_fcxp_s *fcxp, struct fchs_s *fchs);
static void bfa_fcxp_qresume(void *cbarg);
static void bfa_fcxp_queue(struct bfa_fcxp_s *fcxp,
                           struct bfi_fcxp_send_req_s *send_req);

/**
 * fcxp_pvt BFA FCXP private functions
 */

static void
claim_fcxp_req_rsp_mem(struct bfa_fcxp_mod_s *mod, struct bfa_meminfo_s *mi)
{
        u8 *dm_kva = NULL;
        u64 dm_pa;
        u32 buf_pool_sz;

        dm_kva = bfa_meminfo_dma_virt(mi);
        dm_pa = bfa_meminfo_dma_phys(mi);

        buf_pool_sz = mod->req_pld_sz * mod->num_fcxps;

        /*
         * Initialize the fcxp req payload list
         */
        mod->req_pld_list_kva = dm_kva;
        mod->req_pld_list_pa = dm_pa;
        dm_kva += buf_pool_sz;
        dm_pa += buf_pool_sz;
        bfa_os_memset(mod->req_pld_list_kva, 0, buf_pool_sz);

        /*
         * Initialize the fcxp rsp payload list
         */
        buf_pool_sz = mod->rsp_pld_sz * mod->num_fcxps;
        mod->rsp_pld_list_kva = dm_kva;
        mod->rsp_pld_list_pa = dm_pa;
        dm_kva += buf_pool_sz;
        dm_pa += buf_pool_sz;
        bfa_os_memset(mod->rsp_pld_list_kva, 0, buf_pool_sz);

        bfa_meminfo_dma_virt(mi) = dm_kva;
        bfa_meminfo_dma_phys(mi) = dm_pa;
}

static void
claim_fcxps_mem(struct bfa_fcxp_mod_s *mod, struct bfa_meminfo_s *mi)
{
        u16 i;
        struct bfa_fcxp_s *fcxp;

        fcxp = (struct bfa_fcxp_s *) bfa_meminfo_kva(mi);
        bfa_os_memset(fcxp, 0, sizeof(struct bfa_fcxp_s) * mod->num_fcxps);

        INIT_LIST_HEAD(&mod->fcxp_free_q);
        INIT_LIST_HEAD(&mod->fcxp_active_q);

        mod->fcxp_list = fcxp;

        for (i = 0; i < mod->num_fcxps; i++) {
                fcxp->fcxp_mod = mod;
                fcxp->fcxp_tag = i;

                list_add_tail(&fcxp->qe, &mod->fcxp_free_q);
                bfa_reqq_winit(&fcxp->reqq_wqe, bfa_fcxp_qresume, fcxp);
                fcxp->reqq_waiting = BFA_FALSE;

                fcxp = fcxp + 1;
        }

        bfa_meminfo_kva(mi) = (void *)fcxp;
}

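/*
 * Illustrative sketch (not part of the driver sources): the two claim
 * routines above carve one contiguous DMA region into fixed-size per-fcxp
 * payload slots indexed by fcxp_tag. That is why bfa_fcxp_get_reqbuf() and
 * bfa_fcxp_get_rspbuf() further below can locate a buffer with plain
 * pointer arithmetic, roughly:
 *
 *      u8 *req_pld = ((u8 *)mod->req_pld_list_kva) + tag * mod->req_pld_sz;
 *      u8 *rsp_pld = ((u8 *)mod->rsp_pld_list_kva) + tag * mod->rsp_pld_sz;
 */
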
static void
bfa_fcxp_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *ndm_len,
                 u32 *dm_len)
{
        u16 num_fcxp_reqs = cfg->fwcfg.num_fcxp_reqs;

        if (num_fcxp_reqs == 0)
                return;

        /*
         * Account for req/rsp payload
         */
        *dm_len += BFA_FCXP_MAX_IBUF_SZ * num_fcxp_reqs;
        if (cfg->drvcfg.min_cfg)
                *dm_len += BFA_FCXP_MAX_IBUF_SZ * num_fcxp_reqs;
        else
                *dm_len += BFA_FCXP_MAX_LBUF_SZ * num_fcxp_reqs;

        /*
         * Account for fcxp structs
         */
        *ndm_len += sizeof(struct bfa_fcxp_s) * num_fcxp_reqs;
}

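/*
 * Worked example (illustrative only; the buffer-size macros come from the
 * driver headers): with num_fcxp_reqs = 64 and min_cfg disabled,
 * bfa_fcxp_meminfo() above reserves
 *
 *      *dm_len  += 64 * BFA_FCXP_MAX_IBUF_SZ;          request payloads
 *      *dm_len  += 64 * BFA_FCXP_MAX_LBUF_SZ;          response payloads
 *      *ndm_len += 64 * sizeof(struct bfa_fcxp_s);     fcxp structures
 *
 * which is exactly what claim_fcxp_req_rsp_mem() and claim_fcxps_mem()
 * consume later from the same bfa_meminfo_s cursors in bfa_fcxp_attach().
 */
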
static void
bfa_fcxp_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
                struct bfa_meminfo_s *meminfo, struct bfa_pcidev_s *pcidev)
{
        struct bfa_fcxp_mod_s *mod = BFA_FCXP_MOD(bfa);

        bfa_os_memset(mod, 0, sizeof(struct bfa_fcxp_mod_s));
        mod->bfa = bfa;
        mod->num_fcxps = cfg->fwcfg.num_fcxp_reqs;

        /**
         * Initialize FCXP request and response payload sizes.
         */
        mod->req_pld_sz = mod->rsp_pld_sz = BFA_FCXP_MAX_IBUF_SZ;
        if (!cfg->drvcfg.min_cfg)
                mod->rsp_pld_sz = BFA_FCXP_MAX_LBUF_SZ;

        INIT_LIST_HEAD(&mod->wait_q);

        claim_fcxp_req_rsp_mem(mod, meminfo);
        claim_fcxps_mem(mod, meminfo);
}

static void
bfa_fcxp_detach(struct bfa_s *bfa)
{
}

static void
bfa_fcxp_start(struct bfa_s *bfa)
{
}

static void
bfa_fcxp_stop(struct bfa_s *bfa)
{
}

static void
bfa_fcxp_iocdisable(struct bfa_s *bfa)
{
        struct bfa_fcxp_mod_s *mod = BFA_FCXP_MOD(bfa);
        struct bfa_fcxp_s *fcxp;
        struct list_head *qe, *qen;

        list_for_each_safe(qe, qen, &mod->fcxp_active_q) {
                fcxp = (struct bfa_fcxp_s *) qe;
                if (fcxp->caller == NULL) {
                        fcxp->send_cbfn(fcxp->caller, fcxp, fcxp->send_cbarg,
                                        BFA_STATUS_IOC_FAILURE, 0, 0, NULL);
                        bfa_fcxp_free(fcxp);
                } else {
                        fcxp->rsp_status = BFA_STATUS_IOC_FAILURE;
                        bfa_cb_queue(bfa, &fcxp->hcb_qe,
                                     __bfa_fcxp_send_cbfn, fcxp);
                }
        }
}

static struct bfa_fcxp_s *
bfa_fcxp_get(struct bfa_fcxp_mod_s *fm)
{
        struct bfa_fcxp_s *fcxp;

        bfa_q_deq(&fm->fcxp_free_q, &fcxp);

        if (fcxp)
                list_add_tail(&fcxp->qe, &fm->fcxp_active_q);

        return fcxp;
}

static void
bfa_fcxp_put(struct bfa_fcxp_s *fcxp)
{
        struct bfa_fcxp_mod_s *mod = fcxp->fcxp_mod;
        struct bfa_fcxp_wqe_s *wqe;

        bfa_q_deq(&mod->wait_q, &wqe);
        if (wqe) {
                bfa_trc(mod->bfa, fcxp->fcxp_tag);
                wqe->alloc_cbfn(wqe->alloc_cbarg, fcxp);
                return;
        }

        bfa_assert(bfa_q_is_on_q(&mod->fcxp_active_q, fcxp));
        list_del(&fcxp->qe);
        list_add_tail(&fcxp->qe, &mod->fcxp_free_q);
}

static void
bfa_fcxp_null_comp(void *bfad_fcxp, struct bfa_fcxp_s *fcxp, void *cbarg,
                   bfa_status_t req_status, u32 rsp_len,
                   u32 resid_len, struct fchs_s *rsp_fchs)
{
        /* discarded fcxp completion */
}

static void
__bfa_fcxp_send_cbfn(void *cbarg, bfa_boolean_t complete)
{
        struct bfa_fcxp_s *fcxp = cbarg;

        if (complete) {
                fcxp->send_cbfn(fcxp->caller, fcxp, fcxp->send_cbarg,
                                fcxp->rsp_status, fcxp->rsp_len,
                                fcxp->residue_len, &fcxp->rsp_fchs);
        } else {
                bfa_fcxp_free(fcxp);
        }
}

static void
hal_fcxp_send_comp(struct bfa_s *bfa, struct bfi_fcxp_send_rsp_s *fcxp_rsp)
{
        struct bfa_fcxp_mod_s *mod = BFA_FCXP_MOD(bfa);
        struct bfa_fcxp_s *fcxp;
        u16 fcxp_tag = bfa_os_ntohs(fcxp_rsp->fcxp_tag);

        bfa_trc(bfa, fcxp_tag);

        fcxp_rsp->rsp_len = bfa_os_ntohl(fcxp_rsp->rsp_len);

        /**
         * @todo f/w should not set residue to non-0 when everything
         *       is received.
         */
        if (fcxp_rsp->req_status == BFA_STATUS_OK)
                fcxp_rsp->residue_len = 0;
        else
                fcxp_rsp->residue_len = bfa_os_ntohl(fcxp_rsp->residue_len);

        fcxp = BFA_FCXP_FROM_TAG(mod, fcxp_tag);

        bfa_assert(fcxp->send_cbfn != NULL);

        hal_fcxp_rx_plog(mod->bfa, fcxp, fcxp_rsp);

        if (fcxp->send_cbfn != NULL) {
                if (fcxp->caller == NULL) {
                        bfa_trc(mod->bfa, fcxp->fcxp_tag);

                        fcxp->send_cbfn(fcxp->caller, fcxp, fcxp->send_cbarg,
                                        fcxp_rsp->req_status,
                                        fcxp_rsp->rsp_len,
                                        fcxp_rsp->residue_len,
                                        &fcxp_rsp->fchs);
                        /*
                         * fcxp automatically freed on return from the callback
                         */
                        bfa_fcxp_free(fcxp);
                } else {
                        bfa_trc(mod->bfa, fcxp->fcxp_tag);
                        fcxp->rsp_status = fcxp_rsp->req_status;
                        fcxp->rsp_len = fcxp_rsp->rsp_len;
                        fcxp->residue_len = fcxp_rsp->residue_len;
                        fcxp->rsp_fchs = fcxp_rsp->fchs;

                        bfa_cb_queue(bfa, &fcxp->hcb_qe,
                                     __bfa_fcxp_send_cbfn, fcxp);
                }
        } else {
                bfa_trc(bfa, fcxp_tag);
        }
}

static void
hal_fcxp_set_local_sges(struct bfi_sge_s *sge, u32 reqlen, u64 req_pa)
{
        union bfi_addr_u sga_zero = { {0} };

        sge->sg_len = reqlen;
        sge->flags = BFI_SGE_DATA_LAST;
        bfa_dma_addr_set(sge[0].sga, req_pa);
        bfa_sge_to_be(sge);
        sge++;

        sge->sga = sga_zero;
        sge->sg_len = reqlen;
        sge->flags = BFI_SGE_PGDLEN;
        bfa_sge_to_be(sge);
}

static void
hal_fcxp_tx_plog(struct bfa_s *bfa, u32 reqlen, struct bfa_fcxp_s *fcxp,
                 struct fchs_s *fchs)
{
        /*
         * TODO: TX ox_id
         */
        if (reqlen > 0) {
                if (fcxp->use_ireqbuf) {
                        u32 pld_w0 =
                                *((u32 *) BFA_FCXP_REQ_PLD(fcxp));

                        bfa_plog_fchdr_and_pl(bfa->plog, BFA_PL_MID_HAL_FCXP,
                                        BFA_PL_EID_TX,
                                        reqlen + sizeof(struct fchs_s),
                                        fchs, pld_w0);
                } else {
                        bfa_plog_fchdr(bfa->plog, BFA_PL_MID_HAL_FCXP,
                                        BFA_PL_EID_TX,
                                        reqlen + sizeof(struct fchs_s),
                                        fchs);
                }
        } else {
                bfa_plog_fchdr(bfa->plog, BFA_PL_MID_HAL_FCXP, BFA_PL_EID_TX,
                                reqlen + sizeof(struct fchs_s), fchs);
        }
}

static void
hal_fcxp_rx_plog(struct bfa_s *bfa, struct bfa_fcxp_s *fcxp,
                 struct bfi_fcxp_send_rsp_s *fcxp_rsp)
{
        if (fcxp_rsp->rsp_len > 0) {
                if (fcxp->use_irspbuf) {
                        u32 pld_w0 =
                                *((u32 *) BFA_FCXP_RSP_PLD(fcxp));

                        bfa_plog_fchdr_and_pl(bfa->plog, BFA_PL_MID_HAL_FCXP,
                                        BFA_PL_EID_RX,
                                        (u16) fcxp_rsp->rsp_len,
                                        &fcxp_rsp->fchs, pld_w0);
                } else {
                        bfa_plog_fchdr(bfa->plog, BFA_PL_MID_HAL_FCXP,
                                        BFA_PL_EID_RX,
                                        (u16) fcxp_rsp->rsp_len,
                                        &fcxp_rsp->fchs);
                }
        } else {
                bfa_plog_fchdr(bfa->plog, BFA_PL_MID_HAL_FCXP, BFA_PL_EID_RX,
                                (u16) fcxp_rsp->rsp_len, &fcxp_rsp->fchs);
        }
}

/**
 * Handler to resume sending fcxp when space is available in the CPE queue.
 */
static void
bfa_fcxp_qresume(void *cbarg)
{
        struct bfa_fcxp_s *fcxp = cbarg;
        struct bfa_s *bfa = fcxp->fcxp_mod->bfa;
        struct bfi_fcxp_send_req_s *send_req;

        fcxp->reqq_waiting = BFA_FALSE;
        send_req = bfa_reqq_next(bfa, BFA_REQQ_FCXP);
        bfa_fcxp_queue(fcxp, send_req);
}

/**
 * Queue fcxp send request to firmware.
 */
static void
bfa_fcxp_queue(struct bfa_fcxp_s *fcxp, struct bfi_fcxp_send_req_s *send_req)
{
        struct bfa_s *bfa = fcxp->fcxp_mod->bfa;
        struct bfa_fcxp_req_info_s *reqi = &fcxp->req_info;
        struct bfa_fcxp_rsp_info_s *rspi = &fcxp->rsp_info;
        struct bfa_rport_s *rport = reqi->bfa_rport;

        bfi_h2i_set(send_req->mh, BFI_MC_FCXP, BFI_FCXP_H2I_SEND_REQ,
                    bfa_lpuid(bfa));

        send_req->fcxp_tag = bfa_os_htons(fcxp->fcxp_tag);
        if (rport) {
                send_req->rport_fw_hndl = rport->fw_handle;
                send_req->max_frmsz =
                        bfa_os_htons(rport->rport_info.max_frmsz);
                if (send_req->max_frmsz == 0)
                        send_req->max_frmsz = bfa_os_htons(FC_MAX_PDUSZ);
        } else {
                send_req->rport_fw_hndl = 0;
                send_req->max_frmsz = bfa_os_htons(FC_MAX_PDUSZ);
        }

        send_req->vf_id = bfa_os_htons(reqi->vf_id);
        send_req->lp_tag = reqi->lp_tag;
        send_req->class = reqi->class;
        send_req->rsp_timeout = rspi->rsp_timeout;
        send_req->cts = reqi->cts;
        send_req->fchs = reqi->fchs;

        send_req->req_len = bfa_os_htonl(reqi->req_tot_len);
        send_req->rsp_maxlen = bfa_os_htonl(rspi->rsp_maxlen);

        /*
         * setup req sgles
         */
        if (fcxp->use_ireqbuf == 1) {
                hal_fcxp_set_local_sges(send_req->req_sge, reqi->req_tot_len,
                                        BFA_FCXP_REQ_PLD_PA(fcxp));
        } else {
                if (fcxp->nreq_sgles > 0) {
                        bfa_assert(fcxp->nreq_sgles == 1);
                        hal_fcxp_set_local_sges(send_req->req_sge,
                                        reqi->req_tot_len,
                                        fcxp->req_sga_cbfn(fcxp->caller, 0));
                } else {
                        bfa_assert(reqi->req_tot_len == 0);
                        hal_fcxp_set_local_sges(send_req->rsp_sge, 0, 0);
                }
        }

        /*
         * setup rsp sgles
         */
        if (fcxp->use_irspbuf == 1) {
                bfa_assert(rspi->rsp_maxlen <= BFA_FCXP_MAX_LBUF_SZ);

                hal_fcxp_set_local_sges(send_req->rsp_sge, rspi->rsp_maxlen,
                                        BFA_FCXP_RSP_PLD_PA(fcxp));

        } else {
                if (fcxp->nrsp_sgles > 0) {
                        bfa_assert(fcxp->nrsp_sgles == 1);
                        hal_fcxp_set_local_sges(send_req->rsp_sge,
                                        rspi->rsp_maxlen,
                                        fcxp->rsp_sga_cbfn(fcxp->caller, 0));
                } else {
                        bfa_assert(rspi->rsp_maxlen == 0);
                        hal_fcxp_set_local_sges(send_req->rsp_sge, 0, 0);
                }
        }

        hal_fcxp_tx_plog(bfa, reqi->req_tot_len, fcxp, &reqi->fchs);

        bfa_reqq_produce(bfa, BFA_REQQ_FCXP);

        bfa_trc(bfa, bfa_reqq_pi(bfa, BFA_REQQ_FCXP));
        bfa_trc(bfa, bfa_reqq_ci(bfa, BFA_REQQ_FCXP));
}


/**
 * hal_fcxp_api BFA FCXP API
 */

/**
 * Allocate an FCXP instance to send a response or to send a request
 * that has a response. Request/response buffers are allocated by caller.
 *
 * @param[in]   bfa             BFA bfa instance
 * @param[in]   nreq_sgles      Number of SG elements required for request
 *                              buffer. 0, if fcxp internal buffers are used.
 *                              Use bfa_fcxp_get_reqbuf() to get the
 *                              internal req buffer.
 * @param[in]   req_sgles       SG elements describing request buffer. Will be
 *                              copied in by BFA and hence can be freed on
 *                              return from this function.
 * @param[in]   get_req_sga     function ptr to be called to get a request SG
 *                              Address (given the sge index).
 * @param[in]   get_req_sglen   function ptr to be called to get a request SG
 *                              len (given the sge index).
 * @param[in]   get_rsp_sga     function ptr to be called to get a response SG
 *                              Address (given the sge index).
 * @param[in]   get_rsp_sglen   function ptr to be called to get a response SG
 *                              len (given the sge index).
 *
 * @return FCXP instance. NULL on failure.
 */
struct bfa_fcxp_s *
bfa_fcxp_alloc(void *caller, struct bfa_s *bfa, int nreq_sgles,
               int nrsp_sgles, bfa_fcxp_get_sgaddr_t req_sga_cbfn,
               bfa_fcxp_get_sglen_t req_sglen_cbfn,
               bfa_fcxp_get_sgaddr_t rsp_sga_cbfn,
               bfa_fcxp_get_sglen_t rsp_sglen_cbfn)
{
        struct bfa_fcxp_s *fcxp = NULL;
        u32 nreq_sgpg, nrsp_sgpg;

        bfa_assert(bfa != NULL);

        fcxp = bfa_fcxp_get(BFA_FCXP_MOD(bfa));
        if (fcxp == NULL)
                return NULL;

        bfa_trc(bfa, fcxp->fcxp_tag);

        fcxp->caller = caller;

        if (nreq_sgles == 0) {
                fcxp->use_ireqbuf = 1;
        } else {
                bfa_assert(req_sga_cbfn != NULL);
                bfa_assert(req_sglen_cbfn != NULL);

                fcxp->use_ireqbuf = 0;
                fcxp->req_sga_cbfn = req_sga_cbfn;
                fcxp->req_sglen_cbfn = req_sglen_cbfn;

                fcxp->nreq_sgles = nreq_sgles;

                /*
                 * alloc required sgpgs
                 */
                if (nreq_sgles > BFI_SGE_INLINE) {
                        nreq_sgpg = BFA_SGPG_NPAGE(nreq_sgles);

                        if (bfa_sgpg_malloc(bfa, &fcxp->req_sgpg_q, nreq_sgpg)
                            != BFA_STATUS_OK) {
                                /*
                                 * TODO
                                 */
                        }
                }
        }

        if (nrsp_sgles == 0) {
                fcxp->use_irspbuf = 1;
        } else {
                bfa_assert(rsp_sga_cbfn != NULL);
                bfa_assert(rsp_sglen_cbfn != NULL);

                fcxp->use_irspbuf = 0;
                fcxp->rsp_sga_cbfn = rsp_sga_cbfn;
                fcxp->rsp_sglen_cbfn = rsp_sglen_cbfn;

                fcxp->nrsp_sgles = nrsp_sgles;
                /*
                 * alloc required sgpgs
                 */
                if (nrsp_sgles > BFI_SGE_INLINE) {
                        nrsp_sgpg = BFA_SGPG_NPAGE(nrsp_sgles);

                        if (bfa_sgpg_malloc
                                (bfa, &fcxp->rsp_sgpg_q, nrsp_sgpg)
                                != BFA_STATUS_OK) {
                                /* bfa_sgpg_wait(bfa, &fcxp->rsp_sgpg_wqe,
                                   nrsp_sgpg); */
                                /*
                                 * TODO
                                 */
                        }
                }
        }

        return fcxp;
}

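/*
 * Usage sketch (illustrative only; my_ctx, my_req_sga, my_req_sglen,
 * my_rsp_sga and my_rsp_sglen are hypothetical caller-side names): a caller
 * that provides its own single-element request/response buffers passes SG
 * accessor callbacks,
 *
 *      fcxp = bfa_fcxp_alloc(my_ctx, bfa, 1, 1,
 *                            my_req_sga, my_req_sglen,
 *                            my_rsp_sga, my_rsp_sglen);
 *      if (fcxp == NULL)
 *              ...     either fail the request or park on
 *                      bfa_fcxp_alloc_wait() until an fcxp is freed
 *
 * A caller that wants the FCXP-internal buffers instead passes 0 SG elements
 * and NULL callbacks; see the sketch after bfa_fcxp_send() below.
 */
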
/**
 * Get the internal request buffer pointer
 *
 * @param[in]   fcxp    BFA fcxp pointer
 *
 * @return              pointer to the internal request buffer
 */
void *
bfa_fcxp_get_reqbuf(struct bfa_fcxp_s *fcxp)
{
        struct bfa_fcxp_mod_s *mod = fcxp->fcxp_mod;
        void *reqbuf;

        bfa_assert(fcxp->use_ireqbuf == 1);
        reqbuf = ((u8 *)mod->req_pld_list_kva) +
                 fcxp->fcxp_tag * mod->req_pld_sz;
        return reqbuf;
}

u32
bfa_fcxp_get_reqbufsz(struct bfa_fcxp_s *fcxp)
{
        struct bfa_fcxp_mod_s *mod = fcxp->fcxp_mod;

        return mod->req_pld_sz;
}

/**
 * Get the internal response buffer pointer
 *
 * @param[in]   fcxp    BFA fcxp pointer
 *
 * @return              pointer to the internal response buffer
 */
void *
bfa_fcxp_get_rspbuf(struct bfa_fcxp_s *fcxp)
{
        struct bfa_fcxp_mod_s *mod = fcxp->fcxp_mod;
        void *rspbuf;

        bfa_assert(fcxp->use_irspbuf == 1);

        rspbuf = ((u8 *)mod->rsp_pld_list_kva) +
                 fcxp->fcxp_tag * mod->rsp_pld_sz;
        return rspbuf;
}

/**
 * Free the BFA FCXP
 *
 * @param[in]   fcxp    BFA fcxp pointer
 *
 * @return              void
 */
void
bfa_fcxp_free(struct bfa_fcxp_s *fcxp)
{
        struct bfa_fcxp_mod_s *mod = fcxp->fcxp_mod;

        bfa_assert(fcxp != NULL);
        bfa_trc(mod->bfa, fcxp->fcxp_tag);
        bfa_fcxp_put(fcxp);
}

/**
 * Send an FCXP request
 *
 * @param[in]   fcxp    BFA fcxp pointer
 * @param[in]   rport   BFA rport pointer. Could be left NULL for WKA rports
 * @param[in]   vf_id   virtual Fabric ID
 * @param[in]   lp_tag  lport tag
 * @param[in]   cts     use Continuous sequence
 * @param[in]   cos     fc Class of Service
 * @param[in]   reqlen  request length, does not include FCHS length
 * @param[in]   fchs    fc Header Pointer. The header content will be copied
 *                      in by BFA.
 *
 * @param[in]   cbfn    call back function to be called on receiving
 *                      the response
 * @param[in]   cbarg   arg for cbfn
 * @param[in]   rsp_maxlen
 *                      maximum length of the expected response payload
 * @param[in]   rsp_timeout
 *                      response timeout
 *
 * @return              void
 */
void
bfa_fcxp_send(struct bfa_fcxp_s *fcxp, struct bfa_rport_s *rport,
              u16 vf_id, u8 lp_tag, bfa_boolean_t cts, enum fc_cos cos,
              u32 reqlen, struct fchs_s *fchs, bfa_cb_fcxp_send_t cbfn,
              void *cbarg, u32 rsp_maxlen, u8 rsp_timeout)
{
        struct bfa_s *bfa = fcxp->fcxp_mod->bfa;
        struct bfa_fcxp_req_info_s *reqi = &fcxp->req_info;
        struct bfa_fcxp_rsp_info_s *rspi = &fcxp->rsp_info;
        struct bfi_fcxp_send_req_s *send_req;

        bfa_trc(bfa, fcxp->fcxp_tag);

        /**
         * setup request/response info
         */
        reqi->bfa_rport = rport;
        reqi->vf_id = vf_id;
        reqi->lp_tag = lp_tag;
        reqi->class = cos;
        rspi->rsp_timeout = rsp_timeout;
        reqi->cts = cts;
        reqi->fchs = *fchs;
        reqi->req_tot_len = reqlen;
        rspi->rsp_maxlen = rsp_maxlen;
        fcxp->send_cbfn = cbfn ? cbfn : bfa_fcxp_null_comp;
        fcxp->send_cbarg = cbarg;

        /**
         * If no room in CPE queue, wait for space in request queue
         */
        send_req = bfa_reqq_next(bfa, BFA_REQQ_FCXP);
        if (!send_req) {
                bfa_trc(bfa, fcxp->fcxp_tag);
                fcxp->reqq_waiting = BFA_TRUE;
                bfa_reqq_wait(bfa, BFA_REQQ_FCXP, &fcxp->reqq_wqe);
                return;
        }

        bfa_fcxp_queue(fcxp, send_req);
}

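/*
 * Usage sketch (illustrative only; rport, vf_id, lp_tag, reqlen, rsp_tov,
 * my_rsp_cbfn and my_cbarg are hypothetical, and FC_CLASS_3 is assumed to be
 * a valid enum fc_cos value in the driver headers). Sending with the
 * FCXP-internal buffers:
 *
 *      fcxp = bfa_fcxp_alloc(NULL, bfa, 0, 0, NULL, NULL, NULL, NULL);
 *      if (fcxp == NULL)
 *              return;
 *
 *      pld = bfa_fcxp_get_reqbuf(fcxp);
 *      ... build the request payload in pld and the FC header in fchs ...
 *
 *      bfa_fcxp_send(fcxp, rport, vf_id, lp_tag, BFA_FALSE, FC_CLASS_3,
 *                    reqlen, &fchs, my_rsp_cbfn, my_cbarg,
 *                    bfa_fcxp_get_maxrsp(bfa), rsp_tov);
 *
 * With caller == NULL the completion path (hal_fcxp_send_comp()) invokes
 * my_rsp_cbfn() and then frees the fcxp automatically.
 */
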
/**
 * Abort a BFA FCXP
 *
 * @param[in]   fcxp    BFA fcxp pointer
 *
 * @return              bfa_status_t
 */
bfa_status_t
bfa_fcxp_abort(struct bfa_fcxp_s *fcxp)
{
        bfa_assert(0);
        return BFA_STATUS_OK;
}

void
bfa_fcxp_alloc_wait(struct bfa_s *bfa, struct bfa_fcxp_wqe_s *wqe,
                    bfa_fcxp_alloc_cbfn_t alloc_cbfn, void *alloc_cbarg)
{
        struct bfa_fcxp_mod_s *mod = BFA_FCXP_MOD(bfa);

        bfa_assert(list_empty(&mod->fcxp_free_q));

        wqe->alloc_cbfn = alloc_cbfn;
        wqe->alloc_cbarg = alloc_cbarg;
        list_add_tail(&wqe->qe, &mod->wait_q);
}

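/*
 * Usage sketch (illustrative only; my_wqe, my_alloc_cb and my_ctx are
 * hypothetical): when bfa_fcxp_alloc() fails because the free queue is
 * empty, a caller may queue a wait element and is handed the next fcxp
 * released by bfa_fcxp_put():
 *
 *      fcxp = bfa_fcxp_alloc(my_ctx, bfa, 0, 0, NULL, NULL, NULL, NULL);
 *      if (fcxp == NULL) {
 *              bfa_fcxp_alloc_wait(bfa, &my_wqe, my_alloc_cb, my_ctx);
 *              return;         resumes in my_alloc_cb(my_ctx, fcxp)
 *      }
 *
 * bfa_fcxp_walloc_cancel() below withdraws such a request before it is
 * served.
 */
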
void
bfa_fcxp_walloc_cancel(struct bfa_s *bfa, struct bfa_fcxp_wqe_s *wqe)
{
        struct bfa_fcxp_mod_s *mod = BFA_FCXP_MOD(bfa);

        bfa_assert(bfa_q_is_on_q(&mod->wait_q, wqe));
        list_del(&wqe->qe);
}

void
bfa_fcxp_discard(struct bfa_fcxp_s *fcxp)
{
        /**
         * If waiting for room in request queue, cancel reqq wait
         * and free fcxp.
         */
        if (fcxp->reqq_waiting) {
                fcxp->reqq_waiting = BFA_FALSE;
                bfa_reqq_wcancel(&fcxp->reqq_wqe);
                bfa_fcxp_free(fcxp);
                return;
        }

        fcxp->send_cbfn = bfa_fcxp_null_comp;
}



/**
 * hal_fcxp_public BFA FCXP public functions
 */

void
bfa_fcxp_isr(struct bfa_s *bfa, struct bfi_msg_s *msg)
{
        switch (msg->mhdr.msg_id) {
        case BFI_FCXP_I2H_SEND_RSP:
                hal_fcxp_send_comp(bfa, (struct bfi_fcxp_send_rsp_s *) msg);
                break;

        default:
                bfa_trc(bfa, msg->mhdr.msg_id);
                bfa_assert(0);
        }
}

u32
bfa_fcxp_get_maxrsp(struct bfa_s *bfa)
{
        struct bfa_fcxp_mod_s *mod = BFA_FCXP_MOD(bfa);

        return mod->rsp_pld_sz;
}
