Krishna Gudipatib85daaf2011-06-13 15:55:11 -07001/*
2 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18#include <linux/uaccess.h>
19#include "bfad_drv.h"
20#include "bfad_im.h"
21#include "bfad_bsg.h"
22
23BFA_TRC_FILE(LDRV, BSG);
24
25/* bfad_im_bsg_get_kobject - increment the bfa refcnt */
26static void
27bfad_im_bsg_get_kobject(struct fc_bsg_job *job)
28{
29 struct Scsi_Host *shost = job->shost;
30 unsigned long flags;
31
32 spin_lock_irqsave(shost->host_lock, flags);
33 __module_get(shost->dma_dev->driver->owner);
34 spin_unlock_irqrestore(shost->host_lock, flags);
35}
36
37/* bfad_im_bsg_put_kobject - decrement the bfa refcnt */
38static void
39bfad_im_bsg_put_kobject(struct fc_bsg_job *job)
40{
41 struct Scsi_Host *shost = job->shost;
42 unsigned long flags;
43
44 spin_lock_irqsave(shost->host_lock, flags);
45 module_put(shost->dma_dev->driver->owner);
46 spin_unlock_irqrestore(shost->host_lock, flags);
47}
48
49int
50bfad_iocmd_ioc_enable(struct bfad_s *bfad, void *cmd)
51{
52 struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
53 int rc = 0;
54 unsigned long flags;
55
56 spin_lock_irqsave(&bfad->bfad_lock, flags);
57 /* If IOC is not in disabled state - return */
58 if (!bfa_ioc_is_disabled(&bfad->bfa.ioc)) {
59 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
60 iocmd->status = BFA_STATUS_IOC_FAILURE;
61 return rc;
62 }
63
64 init_completion(&bfad->enable_comp);
65 bfa_iocfc_enable(&bfad->bfa);
66 iocmd->status = BFA_STATUS_OK;
67 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
68 wait_for_completion(&bfad->enable_comp);
69
70 return rc;
71}
72
73int
74bfad_iocmd_ioc_disable(struct bfad_s *bfad, void *cmd)
75{
76 struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
77 int rc = 0;
78 unsigned long flags;
79
80 spin_lock_irqsave(&bfad->bfad_lock, flags);
81 if (bfad->disable_active) {
82 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
83 return EBUSY;
84 }
85
86 bfad->disable_active = BFA_TRUE;
87 init_completion(&bfad->disable_comp);
88 bfa_iocfc_disable(&bfad->bfa);
89 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
90
91 wait_for_completion(&bfad->disable_comp);
92 bfad->disable_active = BFA_FALSE;
93 iocmd->status = BFA_STATUS_OK;
94
95 return rc;
96}
97
98static int
99bfad_iocmd_ioc_get_info(struct bfad_s *bfad, void *cmd)
100{
101 int i;
102 struct bfa_bsg_ioc_info_s *iocmd = (struct bfa_bsg_ioc_info_s *)cmd;
103 struct bfad_im_port_s *im_port;
104 struct bfa_port_attr_s pattr;
105 unsigned long flags;
106
107 spin_lock_irqsave(&bfad->bfad_lock, flags);
108 bfa_fcport_get_attr(&bfad->bfa, &pattr);
109 iocmd->nwwn = pattr.nwwn;
110 iocmd->pwwn = pattr.pwwn;
111 iocmd->ioc_type = bfa_get_type(&bfad->bfa);
112 iocmd->mac = bfa_get_mac(&bfad->bfa);
113 iocmd->factory_mac = bfa_get_mfg_mac(&bfad->bfa);
114 bfa_get_adapter_serial_num(&bfad->bfa, iocmd->serialnum);
115 iocmd->factorynwwn = pattr.factorynwwn;
116 iocmd->factorypwwn = pattr.factorypwwn;
117 im_port = bfad->pport.im_port;
118 iocmd->host = im_port->shost->host_no;
119 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
120
121 strcpy(iocmd->name, bfad->adapter_name);
122 strcpy(iocmd->port_name, bfad->port_name);
123 strcpy(iocmd->hwpath, bfad->pci_name);
124
125 /* set adapter hw path */
126 strcpy(iocmd->adapter_hwpath, bfad->pci_name);
127 i = strlen(iocmd->adapter_hwpath) - 1;
128 while (iocmd->adapter_hwpath[i] != '.')
129 i--;
130 iocmd->adapter_hwpath[i] = '\0';
131 iocmd->status = BFA_STATUS_OK;
132 return 0;
133}
134
135static int
136bfad_iocmd_ioc_get_attr(struct bfad_s *bfad, void *cmd)
137{
138 struct bfa_bsg_ioc_attr_s *iocmd = (struct bfa_bsg_ioc_attr_s *)cmd;
139 unsigned long flags;
140
141 spin_lock_irqsave(&bfad->bfad_lock, flags);
142 bfa_ioc_get_attr(&bfad->bfa.ioc, &iocmd->ioc_attr);
143 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
144
145 /* fill in driver attr info */
146 strcpy(iocmd->ioc_attr.driver_attr.driver, BFAD_DRIVER_NAME);
147 strncpy(iocmd->ioc_attr.driver_attr.driver_ver,
148 BFAD_DRIVER_VERSION, BFA_VERSION_LEN);
149 strcpy(iocmd->ioc_attr.driver_attr.fw_ver,
150 iocmd->ioc_attr.adapter_attr.fw_ver);
151 strcpy(iocmd->ioc_attr.driver_attr.bios_ver,
152 iocmd->ioc_attr.adapter_attr.optrom_ver);
153
154 /* copy chip rev info first otherwise it will be overwritten */
155 memcpy(bfad->pci_attr.chip_rev, iocmd->ioc_attr.pci_attr.chip_rev,
156 sizeof(bfad->pci_attr.chip_rev));
157 memcpy(&iocmd->ioc_attr.pci_attr, &bfad->pci_attr,
158 sizeof(struct bfa_ioc_pci_attr_s));
159
160 iocmd->status = BFA_STATUS_OK;
161 return 0;
162}
163
164int
165bfad_iocmd_ioc_get_stats(struct bfad_s *bfad, void *cmd)
166{
167 struct bfa_bsg_ioc_stats_s *iocmd = (struct bfa_bsg_ioc_stats_s *)cmd;
168
169 bfa_ioc_get_stats(&bfad->bfa, &iocmd->ioc_stats);
170 iocmd->status = BFA_STATUS_OK;
171 return 0;
172}
173
174int
175bfad_iocmd_ioc_get_fwstats(struct bfad_s *bfad, void *cmd,
176 unsigned int payload_len)
177{
178 struct bfa_bsg_ioc_fwstats_s *iocmd =
179 (struct bfa_bsg_ioc_fwstats_s *)cmd;
180 void *iocmd_bufptr;
181 unsigned long flags;
182
183 if (bfad_chk_iocmd_sz(payload_len,
184 sizeof(struct bfa_bsg_ioc_fwstats_s),
185 sizeof(struct bfa_fw_stats_s)) != BFA_STATUS_OK) {
186 iocmd->status = BFA_STATUS_VERSION_FAIL;
187 goto out;
188 }
189
190 iocmd_bufptr = (char *)iocmd + sizeof(struct bfa_bsg_ioc_fwstats_s);
191 spin_lock_irqsave(&bfad->bfad_lock, flags);
192 iocmd->status = bfa_ioc_fw_stats_get(&bfad->bfa.ioc, iocmd_bufptr);
193 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
194
195 if (iocmd->status != BFA_STATUS_OK) {
196 bfa_trc(bfad, iocmd->status);
197 goto out;
198 }
199out:
200 bfa_trc(bfad, 0x6666);
201 return 0;
202}
203
204int
205bfad_iocmd_iocfc_get_attr(struct bfad_s *bfad, void *cmd)
206{
207 struct bfa_bsg_iocfc_attr_s *iocmd = (struct bfa_bsg_iocfc_attr_s *)cmd;
208
209 iocmd->status = BFA_STATUS_OK;
210 bfa_iocfc_get_attr(&bfad->bfa, &iocmd->iocfc_attr);
211
212 return 0;
213}
214
215int
216bfad_iocmd_iocfc_set_intr(struct bfad_s *bfad, void *cmd)
217{
218 struct bfa_bsg_iocfc_intr_s *iocmd = (struct bfa_bsg_iocfc_intr_s *)cmd;
219 unsigned long flags;
220
221 spin_lock_irqsave(&bfad->bfad_lock, flags);
222 iocmd->status = bfa_iocfc_israttr_set(&bfad->bfa, &iocmd->attr);
223 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
224
225 return 0;
226}
227
228int
229bfad_iocmd_port_enable(struct bfad_s *bfad, void *cmd)
230{
231 struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
232 struct bfad_hal_comp fcomp;
233 unsigned long flags;
234
235 init_completion(&fcomp.comp);
236 spin_lock_irqsave(&bfad->bfad_lock, flags);
237 iocmd->status = bfa_port_enable(&bfad->bfa.modules.port,
238 bfad_hcb_comp, &fcomp);
239 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
240 if (iocmd->status != BFA_STATUS_OK) {
241 bfa_trc(bfad, iocmd->status);
242 return 0;
243 }
244 wait_for_completion(&fcomp.comp);
245 iocmd->status = fcomp.status;
246 return 0;
247}
248
249int
250bfad_iocmd_port_disable(struct bfad_s *bfad, void *cmd)
251{
252 struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
253 struct bfad_hal_comp fcomp;
254 unsigned long flags;
255
256 init_completion(&fcomp.comp);
257 spin_lock_irqsave(&bfad->bfad_lock, flags);
258 iocmd->status = bfa_port_disable(&bfad->bfa.modules.port,
259 bfad_hcb_comp, &fcomp);
260 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
261
262 if (iocmd->status != BFA_STATUS_OK) {
263 bfa_trc(bfad, iocmd->status);
264 return 0;
265 }
266 wait_for_completion(&fcomp.comp);
267 iocmd->status = fcomp.status;
268 return 0;
269}
270
271static int
272bfad_iocmd_port_get_attr(struct bfad_s *bfad, void *cmd)
273{
274 struct bfa_bsg_port_attr_s *iocmd = (struct bfa_bsg_port_attr_s *)cmd;
275 struct bfa_lport_attr_s port_attr;
276 unsigned long flags;
277
278 spin_lock_irqsave(&bfad->bfad_lock, flags);
279 bfa_fcport_get_attr(&bfad->bfa, &iocmd->attr);
280 bfa_fcs_lport_get_attr(&bfad->bfa_fcs.fabric.bport, &port_attr);
281 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
282
283 if (iocmd->attr.topology != BFA_PORT_TOPOLOGY_NONE)
284 iocmd->attr.pid = port_attr.pid;
285 else
286 iocmd->attr.pid = 0;
287
288 iocmd->attr.port_type = port_attr.port_type;
289 iocmd->attr.loopback = port_attr.loopback;
290 iocmd->attr.authfail = port_attr.authfail;
291 strncpy(iocmd->attr.port_symname.symname,
292 port_attr.port_cfg.sym_name.symname,
293 sizeof(port_attr.port_cfg.sym_name.symname));
294
295 iocmd->status = BFA_STATUS_OK;
296 return 0;
297}
298
299int
300bfad_iocmd_port_get_stats(struct bfad_s *bfad, void *cmd,
301 unsigned int payload_len)
302{
303 struct bfa_bsg_port_stats_s *iocmd = (struct bfa_bsg_port_stats_s *)cmd;
304 struct bfad_hal_comp fcomp;
305 void *iocmd_bufptr;
306 unsigned long flags;
307
308 if (bfad_chk_iocmd_sz(payload_len,
309 sizeof(struct bfa_bsg_port_stats_s),
310 sizeof(union bfa_port_stats_u)) != BFA_STATUS_OK) {
311 iocmd->status = BFA_STATUS_VERSION_FAIL;
312 return 0;
313 }
314
315 iocmd_bufptr = (char *)iocmd + sizeof(struct bfa_bsg_port_stats_s);
316
317 init_completion(&fcomp.comp);
318 spin_lock_irqsave(&bfad->bfad_lock, flags);
319 iocmd->status = bfa_port_get_stats(&bfad->bfa.modules.port,
320 iocmd_bufptr, bfad_hcb_comp, &fcomp);
321 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
322 if (iocmd->status != BFA_STATUS_OK) {
323 bfa_trc(bfad, iocmd->status);
324 goto out;
325 }
326
327 wait_for_completion(&fcomp.comp);
328 iocmd->status = fcomp.status;
329out:
330 return 0;
331}
332
333static int
334bfad_iocmd_lport_get_attr(struct bfad_s *bfad, void *cmd)
335{
336 struct bfa_fcs_lport_s *fcs_port;
337 struct bfa_bsg_lport_attr_s *iocmd = (struct bfa_bsg_lport_attr_s *)cmd;
338 unsigned long flags;
339
340 spin_lock_irqsave(&bfad->bfad_lock, flags);
341 fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
342 iocmd->vf_id, iocmd->pwwn);
343 if (fcs_port == NULL) {
344 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
345 iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
346 goto out;
347 }
348
349 bfa_fcs_lport_get_attr(fcs_port, &iocmd->port_attr);
350 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
351 iocmd->status = BFA_STATUS_OK;
352out:
353 return 0;
354}
355
356int
357bfad_iocmd_lport_get_stats(struct bfad_s *bfad, void *cmd)
358{
359 struct bfa_fcs_lport_s *fcs_port;
360 struct bfa_bsg_lport_stats_s *iocmd =
361 (struct bfa_bsg_lport_stats_s *)cmd;
362 unsigned long flags;
363
364 spin_lock_irqsave(&bfad->bfad_lock, flags);
365 fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
366 iocmd->vf_id, iocmd->pwwn);
367 if (fcs_port == NULL) {
368 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
369 iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
370 goto out;
371 }
372
373 bfa_fcs_lport_get_stats(fcs_port, &iocmd->port_stats);
374 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
375 iocmd->status = BFA_STATUS_OK;
376out:
377 return 0;
378}
379
380int
381bfad_iocmd_lport_get_iostats(struct bfad_s *bfad, void *cmd)
382{
383 struct bfa_fcs_lport_s *fcs_port;
384 struct bfa_bsg_lport_iostats_s *iocmd =
385 (struct bfa_bsg_lport_iostats_s *)cmd;
386 unsigned long flags;
387
388 spin_lock_irqsave(&bfad->bfad_lock, flags);
389 fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
390 iocmd->vf_id, iocmd->pwwn);
391 if (fcs_port == NULL) {
392 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
393 iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
394 goto out;
395 }
396
397 bfa_fcpim_port_iostats(&bfad->bfa, &iocmd->iostats,
398 fcs_port->lp_tag);
399 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
400 iocmd->status = BFA_STATUS_OK;
401out:
402 return 0;
403}
404
405int
406bfad_iocmd_lport_get_rports(struct bfad_s *bfad, void *cmd,
407 unsigned int payload_len)
408{
409 struct bfa_bsg_lport_get_rports_s *iocmd =
410 (struct bfa_bsg_lport_get_rports_s *)cmd;
411 struct bfa_fcs_lport_s *fcs_port;
412 unsigned long flags;
413 void *iocmd_bufptr;
414
415 if (iocmd->nrports == 0)
416 return EINVAL;
417
418 if (bfad_chk_iocmd_sz(payload_len,
419 sizeof(struct bfa_bsg_lport_get_rports_s),
420 sizeof(wwn_t) * iocmd->nrports) != BFA_STATUS_OK) {
421 iocmd->status = BFA_STATUS_VERSION_FAIL;
422 return 0;
423 }
424
425 iocmd_bufptr = (char *)iocmd +
426 sizeof(struct bfa_bsg_lport_get_rports_s);
427 spin_lock_irqsave(&bfad->bfad_lock, flags);
428 fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
429 iocmd->vf_id, iocmd->pwwn);
430 if (fcs_port == NULL) {
431 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
432 bfa_trc(bfad, 0);
433 iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
434 goto out;
435 }
436
437 bfa_fcs_lport_get_rports(fcs_port, (wwn_t *)iocmd_bufptr,
438 &iocmd->nrports);
439 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
440 iocmd->status = BFA_STATUS_OK;
441out:
442 return 0;
443}
444
445int
446bfad_iocmd_rport_get_attr(struct bfad_s *bfad, void *cmd)
447{
448 struct bfa_bsg_rport_attr_s *iocmd = (struct bfa_bsg_rport_attr_s *)cmd;
449 struct bfa_fcs_lport_s *fcs_port;
450 struct bfa_fcs_rport_s *fcs_rport;
451 unsigned long flags;
452
453 spin_lock_irqsave(&bfad->bfad_lock, flags);
454 fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
455 iocmd->vf_id, iocmd->pwwn);
456 if (fcs_port == NULL) {
457 bfa_trc(bfad, 0);
458 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
459 iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
460 goto out;
461 }
462
463 fcs_rport = bfa_fcs_rport_lookup(fcs_port, iocmd->rpwwn);
464 if (fcs_rport == NULL) {
465 bfa_trc(bfad, 0);
466 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
467 iocmd->status = BFA_STATUS_UNKNOWN_RWWN;
468 goto out;
469 }
470
471 bfa_fcs_rport_get_attr(fcs_rport, &iocmd->attr);
472 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
473 iocmd->status = BFA_STATUS_OK;
474out:
475 return 0;
476}
477
478static int
479bfad_iocmd_rport_get_addr(struct bfad_s *bfad, void *cmd)
480{
481 struct bfa_bsg_rport_scsi_addr_s *iocmd =
482 (struct bfa_bsg_rport_scsi_addr_s *)cmd;
483 struct bfa_fcs_lport_s *fcs_port;
484 struct bfa_fcs_itnim_s *fcs_itnim;
485 struct bfad_itnim_s *drv_itnim;
486 unsigned long flags;
487
488 spin_lock_irqsave(&bfad->bfad_lock, flags);
489 fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
490 iocmd->vf_id, iocmd->pwwn);
491 if (fcs_port == NULL) {
492 bfa_trc(bfad, 0);
493 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
494 iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
495 goto out;
496 }
497
498 fcs_itnim = bfa_fcs_itnim_lookup(fcs_port, iocmd->rpwwn);
499 if (fcs_itnim == NULL) {
500 bfa_trc(bfad, 0);
501 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
502 iocmd->status = BFA_STATUS_UNKNOWN_RWWN;
503 goto out;
504 }
505
506 drv_itnim = fcs_itnim->itnim_drv;
507
508 if (drv_itnim && drv_itnim->im_port)
509 iocmd->host = drv_itnim->im_port->shost->host_no;
510 else {
511 bfa_trc(bfad, 0);
512 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
513 iocmd->status = BFA_STATUS_UNKNOWN_RWWN;
514 goto out;
515 }
516
517 iocmd->target = drv_itnim->scsi_tgt_id;
518 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
519
520 iocmd->bus = 0;
521 iocmd->lun = 0;
522 iocmd->status = BFA_STATUS_OK;
523out:
524 return 0;
525}
526
527int
528bfad_iocmd_rport_get_stats(struct bfad_s *bfad, void *cmd)
529{
530 struct bfa_bsg_rport_stats_s *iocmd =
531 (struct bfa_bsg_rport_stats_s *)cmd;
532 struct bfa_fcs_lport_s *fcs_port;
533 struct bfa_fcs_rport_s *fcs_rport;
534 unsigned long flags;
535
536 spin_lock_irqsave(&bfad->bfad_lock, flags);
537 fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
538 iocmd->vf_id, iocmd->pwwn);
539 if (fcs_port == NULL) {
540 bfa_trc(bfad, 0);
541 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
542 iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
543 goto out;
544 }
545
546 fcs_rport = bfa_fcs_rport_lookup(fcs_port, iocmd->rpwwn);
547 if (fcs_rport == NULL) {
548 bfa_trc(bfad, 0);
549 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
550 iocmd->status = BFA_STATUS_UNKNOWN_RWWN;
551 goto out;
552 }
553
554 memcpy((void *)&iocmd->stats, (void *)&fcs_rport->stats,
555 sizeof(struct bfa_rport_stats_s));
556 memcpy((void *)&iocmd->stats.hal_stats,
557 (void *)&(bfa_fcs_rport_get_halrport(fcs_rport)->stats),
558 sizeof(struct bfa_rport_hal_stats_s));
559
560 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
561 iocmd->status = BFA_STATUS_OK;
562out:
563 return 0;
564}
565
566static int
567bfad_iocmd_fabric_get_lports(struct bfad_s *bfad, void *cmd,
568 unsigned int payload_len)
569{
570 struct bfa_bsg_fabric_get_lports_s *iocmd =
571 (struct bfa_bsg_fabric_get_lports_s *)cmd;
572 bfa_fcs_vf_t *fcs_vf;
573 uint32_t nports = iocmd->nports;
574 unsigned long flags;
575 void *iocmd_bufptr;
576
577 if (nports == 0) {
578 iocmd->status = BFA_STATUS_EINVAL;
579 goto out;
580 }
581
582 if (bfad_chk_iocmd_sz(payload_len,
583 sizeof(struct bfa_bsg_fabric_get_lports_s),
584 sizeof(wwn_t[iocmd->nports])) != BFA_STATUS_OK) {
585 iocmd->status = BFA_STATUS_VERSION_FAIL;
586 goto out;
587 }
588
589 iocmd_bufptr = (char *)iocmd +
590 sizeof(struct bfa_bsg_fabric_get_lports_s);
591
592 spin_lock_irqsave(&bfad->bfad_lock, flags);
593 fcs_vf = bfa_fcs_vf_lookup(&bfad->bfa_fcs, iocmd->vf_id);
594 if (fcs_vf == NULL) {
595 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
596 iocmd->status = BFA_STATUS_UNKNOWN_VFID;
597 goto out;
598 }
599 bfa_fcs_vf_get_ports(fcs_vf, (wwn_t *)iocmd_bufptr, &nports);
600 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
601
602 iocmd->nports = nports;
603 iocmd->status = BFA_STATUS_OK;
604out:
605 return 0;
606}
607
608int
609bfad_iocmd_fcpim_get_modstats(struct bfad_s *bfad, void *cmd)
610{
611 struct bfa_bsg_fcpim_modstats_s *iocmd =
612 (struct bfa_bsg_fcpim_modstats_s *)cmd;
613 struct bfa_fcpim_s *fcpim = BFA_FCPIM(&bfad->bfa);
614 struct list_head *qe, *qen;
615 struct bfa_itnim_s *itnim;
616 unsigned long flags;
617
618 spin_lock_irqsave(&bfad->bfad_lock, flags);
619 /* accumulate IO stats from itnim */
620 memset((void *)&iocmd->modstats, 0, sizeof(struct bfa_itnim_iostats_s));
621 list_for_each_safe(qe, qen, &fcpim->itnim_q) {
622 itnim = (struct bfa_itnim_s *) qe;
623 bfa_fcpim_add_stats(&iocmd->modstats, &(itnim->stats));
624 }
625 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
626 iocmd->status = BFA_STATUS_OK;
627 return 0;
628}
629
630int
631bfad_iocmd_fcpim_get_del_itn_stats(struct bfad_s *bfad, void *cmd)
632{
633 struct bfa_bsg_fcpim_del_itn_stats_s *iocmd =
634 (struct bfa_bsg_fcpim_del_itn_stats_s *)cmd;
635 struct bfa_fcpim_s *fcpim = BFA_FCPIM(&bfad->bfa);
636 unsigned long flags;
637
638 spin_lock_irqsave(&bfad->bfad_lock, flags);
639 memcpy((void *)&iocmd->modstats, (void *)&fcpim->del_itn_stats,
640 sizeof(struct bfa_fcpim_del_itn_stats_s));
641 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
642
643 iocmd->status = BFA_STATUS_OK;
644 return 0;
645}
646
647static int
648bfad_iocmd_itnim_get_attr(struct bfad_s *bfad, void *cmd)
649{
650 struct bfa_bsg_itnim_attr_s *iocmd = (struct bfa_bsg_itnim_attr_s *)cmd;
651 struct bfa_fcs_lport_s *fcs_port;
652 unsigned long flags;
653
654 spin_lock_irqsave(&bfad->bfad_lock, flags);
655 fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
656 iocmd->vf_id, iocmd->lpwwn);
657 if (!fcs_port)
658 iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
659 else
660 iocmd->status = bfa_fcs_itnim_attr_get(fcs_port,
661 iocmd->rpwwn, &iocmd->attr);
662 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
663 return 0;
664}
665
666static int
667bfad_iocmd_itnim_get_iostats(struct bfad_s *bfad, void *cmd)
668{
669 struct bfa_bsg_itnim_iostats_s *iocmd =
670 (struct bfa_bsg_itnim_iostats_s *)cmd;
671 struct bfa_fcs_lport_s *fcs_port;
672 struct bfa_fcs_itnim_s *itnim;
673 unsigned long flags;
674
675 spin_lock_irqsave(&bfad->bfad_lock, flags);
676 fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
677 iocmd->vf_id, iocmd->lpwwn);
678 if (!fcs_port) {
679 iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
680 bfa_trc(bfad, 0);
681 } else {
682 itnim = bfa_fcs_itnim_lookup(fcs_port, iocmd->rpwwn);
683 if (itnim == NULL)
684 iocmd->status = BFA_STATUS_UNKNOWN_RWWN;
685 else {
686 iocmd->status = BFA_STATUS_OK;
687 memcpy((void *)&iocmd->iostats, (void *)
688 &(bfa_fcs_itnim_get_halitn(itnim)->stats),
689 sizeof(struct bfa_itnim_iostats_s));
690 }
691 }
692 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
693 return 0;
694}
695
696static int
697bfad_iocmd_itnim_get_itnstats(struct bfad_s *bfad, void *cmd)
698{
699 struct bfa_bsg_itnim_itnstats_s *iocmd =
700 (struct bfa_bsg_itnim_itnstats_s *)cmd;
701 struct bfa_fcs_lport_s *fcs_port;
702 struct bfa_fcs_itnim_s *itnim;
703 unsigned long flags;
704
705 spin_lock_irqsave(&bfad->bfad_lock, flags);
706 fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
707 iocmd->vf_id, iocmd->lpwwn);
708 if (!fcs_port) {
709 iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
710 bfa_trc(bfad, 0);
711 } else {
712 itnim = bfa_fcs_itnim_lookup(fcs_port, iocmd->rpwwn);
713 if (itnim == NULL)
714 iocmd->status = BFA_STATUS_UNKNOWN_RWWN;
715 else {
716 iocmd->status = BFA_STATUS_OK;
717 bfa_fcs_itnim_stats_get(fcs_port, iocmd->rpwwn,
718 &iocmd->itnstats);
719 }
720 }
721 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
722 return 0;
723}
724
725int
726bfad_iocmd_fcport_enable(struct bfad_s *bfad, void *cmd)
727{
728 struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
729 unsigned long flags;
730
731 spin_lock_irqsave(&bfad->bfad_lock, flags);
732 iocmd->status = bfa_fcport_enable(&bfad->bfa);
733 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
734
735 return 0;
736}
737
738int
739bfad_iocmd_fcport_disable(struct bfad_s *bfad, void *cmd)
740{
741 struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
742 unsigned long flags;
743
744 spin_lock_irqsave(&bfad->bfad_lock, flags);
745 iocmd->status = bfa_fcport_disable(&bfad->bfa);
746 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
747
748 return 0;
749}
750
751int
752bfad_iocmd_ioc_get_pcifn_cfg(struct bfad_s *bfad, void *cmd)
753{
754 struct bfa_bsg_pcifn_cfg_s *iocmd = (struct bfa_bsg_pcifn_cfg_s *)cmd;
755 struct bfad_hal_comp fcomp;
756 unsigned long flags;
757
758 init_completion(&fcomp.comp);
759 spin_lock_irqsave(&bfad->bfad_lock, flags);
760 iocmd->status = bfa_ablk_query(&bfad->bfa.modules.ablk,
761 &iocmd->pcifn_cfg,
762 bfad_hcb_comp, &fcomp);
763 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
764 if (iocmd->status != BFA_STATUS_OK)
765 goto out;
766
767 wait_for_completion(&fcomp.comp);
768 iocmd->status = fcomp.status;
769out:
770 return 0;
771}
772
773int
774bfad_iocmd_pcifn_create(struct bfad_s *bfad, void *cmd)
775{
776 struct bfa_bsg_pcifn_s *iocmd = (struct bfa_bsg_pcifn_s *)cmd;
777 struct bfad_hal_comp fcomp;
778 unsigned long flags;
779
780 init_completion(&fcomp.comp);
781 spin_lock_irqsave(&bfad->bfad_lock, flags);
782 iocmd->status = bfa_ablk_pf_create(&bfad->bfa.modules.ablk,
783 &iocmd->pcifn_id, iocmd->port,
784 iocmd->pcifn_class, iocmd->bandwidth,
785 bfad_hcb_comp, &fcomp);
786 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
787 if (iocmd->status != BFA_STATUS_OK)
788 goto out;
789
790 wait_for_completion(&fcomp.comp);
791 iocmd->status = fcomp.status;
792out:
793 return 0;
794}
795
796int
797bfad_iocmd_pcifn_delete(struct bfad_s *bfad, void *cmd)
798{
799 struct bfa_bsg_pcifn_s *iocmd = (struct bfa_bsg_pcifn_s *)cmd;
800 struct bfad_hal_comp fcomp;
801 unsigned long flags;
802
803 init_completion(&fcomp.comp);
804 spin_lock_irqsave(&bfad->bfad_lock, flags);
805 iocmd->status = bfa_ablk_pf_delete(&bfad->bfa.modules.ablk,
806 iocmd->pcifn_id,
807 bfad_hcb_comp, &fcomp);
808 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
809 if (iocmd->status != BFA_STATUS_OK)
810 goto out;
811
812 wait_for_completion(&fcomp.comp);
813 iocmd->status = fcomp.status;
814out:
815 return 0;
816}
817
818int
819bfad_iocmd_pcifn_bw(struct bfad_s *bfad, void *cmd)
820{
821 struct bfa_bsg_pcifn_s *iocmd = (struct bfa_bsg_pcifn_s *)cmd;
822 struct bfad_hal_comp fcomp;
823 unsigned long flags;
824
825 init_completion(&fcomp.comp);
826 spin_lock_irqsave(&bfad->bfad_lock, flags);
827 iocmd->status = bfa_ablk_pf_update(&bfad->bfa.modules.ablk,
828 iocmd->pcifn_id, iocmd->bandwidth,
829 bfad_hcb_comp, &fcomp);
830 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
831 bfa_trc(bfad, iocmd->status);
832 if (iocmd->status != BFA_STATUS_OK)
833 goto out;
834
835 wait_for_completion(&fcomp.comp);
836 iocmd->status = fcomp.status;
837 bfa_trc(bfad, iocmd->status);
838out:
839 return 0;
840}
841
842int
843bfad_iocmd_adapter_cfg_mode(struct bfad_s *bfad, void *cmd)
844{
845 struct bfa_bsg_adapter_cfg_mode_s *iocmd =
846 (struct bfa_bsg_adapter_cfg_mode_s *)cmd;
847 struct bfad_hal_comp fcomp;
848 unsigned long flags = 0;
849
850 init_completion(&fcomp.comp);
851 spin_lock_irqsave(&bfad->bfad_lock, flags);
852 iocmd->status = bfa_ablk_adapter_config(&bfad->bfa.modules.ablk,
853 iocmd->cfg.mode, iocmd->cfg.max_pf,
854 iocmd->cfg.max_vf, bfad_hcb_comp, &fcomp);
855 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
856 if (iocmd->status != BFA_STATUS_OK)
857 goto out;
858
859 wait_for_completion(&fcomp.comp);
860 iocmd->status = fcomp.status;
861out:
862 return 0;
863}
864
865int
866bfad_iocmd_port_cfg_mode(struct bfad_s *bfad, void *cmd)
867{
868 struct bfa_bsg_port_cfg_mode_s *iocmd =
869 (struct bfa_bsg_port_cfg_mode_s *)cmd;
870 struct bfad_hal_comp fcomp;
871 unsigned long flags = 0;
872
873 init_completion(&fcomp.comp);
874 spin_lock_irqsave(&bfad->bfad_lock, flags);
875 iocmd->status = bfa_ablk_port_config(&bfad->bfa.modules.ablk,
876 iocmd->instance, iocmd->cfg.mode,
877 iocmd->cfg.max_pf, iocmd->cfg.max_vf,
878 bfad_hcb_comp, &fcomp);
879 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
880 if (iocmd->status != BFA_STATUS_OK)
881 goto out;
882
883 wait_for_completion(&fcomp.comp);
884 iocmd->status = fcomp.status;
885out:
886 return 0;
887}
888
889int
890bfad_iocmd_ablk_optrom(struct bfad_s *bfad, unsigned int cmd, void *pcmd)
891{
892 struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)pcmd;
893 struct bfad_hal_comp fcomp;
894 unsigned long flags;
895
896 init_completion(&fcomp.comp);
897 spin_lock_irqsave(&bfad->bfad_lock, flags);
898 if (cmd == IOCMD_FLASH_ENABLE_OPTROM)
899 iocmd->status = bfa_ablk_optrom_en(&bfad->bfa.modules.ablk,
900 bfad_hcb_comp, &fcomp);
901 else
902 iocmd->status = bfa_ablk_optrom_dis(&bfad->bfa.modules.ablk,
903 bfad_hcb_comp, &fcomp);
904 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
905
906 if (iocmd->status != BFA_STATUS_OK)
907 goto out;
908
909 wait_for_completion(&fcomp.comp);
910 iocmd->status = fcomp.status;
911out:
912 return 0;
913}
914
915int
916bfad_iocmd_faa_enable(struct bfad_s *bfad, void *cmd)
917{
918 struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
919 unsigned long flags;
920 struct bfad_hal_comp fcomp;
921
922 init_completion(&fcomp.comp);
923 iocmd->status = BFA_STATUS_OK;
924 spin_lock_irqsave(&bfad->bfad_lock, flags);
925 iocmd->status = bfa_faa_enable(&bfad->bfa, bfad_hcb_comp, &fcomp);
926 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
927
928 if (iocmd->status != BFA_STATUS_OK)
929 goto out;
930
931 wait_for_completion(&fcomp.comp);
932 iocmd->status = fcomp.status;
933out:
934 return 0;
935}
936
937int
938bfad_iocmd_faa_disable(struct bfad_s *bfad, void *cmd)
939{
940 struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
941 unsigned long flags;
942 struct bfad_hal_comp fcomp;
943
944 init_completion(&fcomp.comp);
945 iocmd->status = BFA_STATUS_OK;
946 spin_lock_irqsave(&bfad->bfad_lock, flags);
947 iocmd->status = bfa_faa_disable(&bfad->bfa, bfad_hcb_comp, &fcomp);
948 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
949
950 if (iocmd->status != BFA_STATUS_OK)
951 goto out;
952
953 wait_for_completion(&fcomp.comp);
954 iocmd->status = fcomp.status;
955out:
956 return 0;
957}
958
959int
960bfad_iocmd_faa_query(struct bfad_s *bfad, void *cmd)
961{
962 struct bfa_bsg_faa_attr_s *iocmd = (struct bfa_bsg_faa_attr_s *)cmd;
963 struct bfad_hal_comp fcomp;
964 unsigned long flags;
965
966 init_completion(&fcomp.comp);
967 iocmd->status = BFA_STATUS_OK;
968 spin_lock_irqsave(&bfad->bfad_lock, flags);
969 iocmd->status = bfa_faa_query(&bfad->bfa, &iocmd->faa_attr,
970 bfad_hcb_comp, &fcomp);
971 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
972
973 if (iocmd->status != BFA_STATUS_OK)
974 goto out;
975
976 wait_for_completion(&fcomp.comp);
977 iocmd->status = fcomp.status;
978out:
979 return 0;
980}
981
982int
983bfad_iocmd_cee_attr(struct bfad_s *bfad, void *cmd, unsigned int payload_len)
984{
985 struct bfa_bsg_cee_attr_s *iocmd =
986 (struct bfa_bsg_cee_attr_s *)cmd;
987 void *iocmd_bufptr;
988 struct bfad_hal_comp cee_comp;
989 unsigned long flags;
990
991 if (bfad_chk_iocmd_sz(payload_len,
992 sizeof(struct bfa_bsg_cee_attr_s),
993 sizeof(struct bfa_cee_attr_s)) != BFA_STATUS_OK) {
994 iocmd->status = BFA_STATUS_VERSION_FAIL;
995 return 0;
996 }
997
998 iocmd_bufptr = (char *)iocmd + sizeof(struct bfa_bsg_cee_attr_s);
999
1000 cee_comp.status = 0;
1001 init_completion(&cee_comp.comp);
1002 mutex_lock(&bfad_mutex);
1003 spin_lock_irqsave(&bfad->bfad_lock, flags);
1004 iocmd->status = bfa_cee_get_attr(&bfad->bfa.modules.cee, iocmd_bufptr,
1005 bfad_hcb_comp, &cee_comp);
1006 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1007 if (iocmd->status != BFA_STATUS_OK) {
1008 mutex_unlock(&bfad_mutex);
1009 bfa_trc(bfad, 0x5555);
1010 goto out;
1011 }
1012 wait_for_completion(&cee_comp.comp);
1013 mutex_unlock(&bfad_mutex);
1014out:
1015 return 0;
1016}
1017
1018int
1019bfad_iocmd_cee_get_stats(struct bfad_s *bfad, void *cmd,
1020 unsigned int payload_len)
1021{
1022 struct bfa_bsg_cee_stats_s *iocmd =
1023 (struct bfa_bsg_cee_stats_s *)cmd;
1024 void *iocmd_bufptr;
1025 struct bfad_hal_comp cee_comp;
1026 unsigned long flags;
1027
1028 if (bfad_chk_iocmd_sz(payload_len,
1029 sizeof(struct bfa_bsg_cee_stats_s),
1030 sizeof(struct bfa_cee_stats_s)) != BFA_STATUS_OK) {
1031 iocmd->status = BFA_STATUS_VERSION_FAIL;
1032 return 0;
1033 }
1034
1035 iocmd_bufptr = (char *)iocmd + sizeof(struct bfa_bsg_cee_stats_s);
1036
1037 cee_comp.status = 0;
1038 init_completion(&cee_comp.comp);
1039 mutex_lock(&bfad_mutex);
1040 spin_lock_irqsave(&bfad->bfad_lock, flags);
1041 iocmd->status = bfa_cee_get_stats(&bfad->bfa.modules.cee, iocmd_bufptr,
1042 bfad_hcb_comp, &cee_comp);
1043 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1044 if (iocmd->status != BFA_STATUS_OK) {
1045 mutex_unlock(&bfad_mutex);
1046 bfa_trc(bfad, 0x5555);
1047 goto out;
1048 }
1049 wait_for_completion(&cee_comp.comp);
1050 mutex_unlock(&bfad_mutex);
1051out:
1052 return 0;
1053}
1054
1055int
1056bfad_iocmd_cee_reset_stats(struct bfad_s *bfad, void *cmd)
1057{
1058 struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
1059 unsigned long flags;
1060
1061 spin_lock_irqsave(&bfad->bfad_lock, flags);
1062 iocmd->status = bfa_cee_reset_stats(&bfad->bfa.modules.cee, NULL, NULL);
1063 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1064 if (iocmd->status != BFA_STATUS_OK)
1065 bfa_trc(bfad, 0x5555);
1066 return 0;
1067}
1068
1069int
1070bfad_iocmd_sfp_media(struct bfad_s *bfad, void *cmd)
1071{
1072 struct bfa_bsg_sfp_media_s *iocmd = (struct bfa_bsg_sfp_media_s *)cmd;
1073 struct bfad_hal_comp fcomp;
1074 unsigned long flags;
1075
1076 init_completion(&fcomp.comp);
1077 spin_lock_irqsave(&bfad->bfad_lock, flags);
1078 iocmd->status = bfa_sfp_media(BFA_SFP_MOD(&bfad->bfa), &iocmd->media,
1079 bfad_hcb_comp, &fcomp);
1080 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1081 bfa_trc(bfad, iocmd->status);
1082 if (iocmd->status != BFA_STATUS_SFP_NOT_READY)
1083 goto out;
1084
1085 wait_for_completion(&fcomp.comp);
1086 iocmd->status = fcomp.status;
1087out:
1088 return 0;
1089}
1090
1091int
1092bfad_iocmd_sfp_speed(struct bfad_s *bfad, void *cmd)
1093{
1094 struct bfa_bsg_sfp_speed_s *iocmd = (struct bfa_bsg_sfp_speed_s *)cmd;
1095 struct bfad_hal_comp fcomp;
1096 unsigned long flags;
1097
1098 init_completion(&fcomp.comp);
1099 spin_lock_irqsave(&bfad->bfad_lock, flags);
1100 iocmd->status = bfa_sfp_speed(BFA_SFP_MOD(&bfad->bfa), iocmd->speed,
1101 bfad_hcb_comp, &fcomp);
1102 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1103 bfa_trc(bfad, iocmd->status);
1104 if (iocmd->status != BFA_STATUS_SFP_NOT_READY)
1105 goto out;
1106 wait_for_completion(&fcomp.comp);
1107 iocmd->status = fcomp.status;
1108out:
1109 return 0;
1110}
1111
1112int
1113bfad_iocmd_flash_get_attr(struct bfad_s *bfad, void *cmd)
1114{
1115 struct bfa_bsg_flash_attr_s *iocmd =
1116 (struct bfa_bsg_flash_attr_s *)cmd;
1117 struct bfad_hal_comp fcomp;
1118 unsigned long flags;
1119
1120 init_completion(&fcomp.comp);
1121 spin_lock_irqsave(&bfad->bfad_lock, flags);
1122 iocmd->status = bfa_flash_get_attr(BFA_FLASH(&bfad->bfa), &iocmd->attr,
1123 bfad_hcb_comp, &fcomp);
1124 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1125 if (iocmd->status != BFA_STATUS_OK)
1126 goto out;
1127 wait_for_completion(&fcomp.comp);
1128 iocmd->status = fcomp.status;
1129out:
1130 return 0;
1131}
1132
1133int
1134bfad_iocmd_flash_erase_part(struct bfad_s *bfad, void *cmd)
1135{
1136 struct bfa_bsg_flash_s *iocmd = (struct bfa_bsg_flash_s *)cmd;
1137 struct bfad_hal_comp fcomp;
1138 unsigned long flags;
1139
1140 init_completion(&fcomp.comp);
1141 spin_lock_irqsave(&bfad->bfad_lock, flags);
1142 iocmd->status = bfa_flash_erase_part(BFA_FLASH(&bfad->bfa), iocmd->type,
1143 iocmd->instance, bfad_hcb_comp, &fcomp);
1144 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1145 if (iocmd->status != BFA_STATUS_OK)
1146 goto out;
1147 wait_for_completion(&fcomp.comp);
1148 iocmd->status = fcomp.status;
1149out:
1150 return 0;
1151}
1152
1153int
1154bfad_iocmd_flash_update_part(struct bfad_s *bfad, void *cmd,
1155 unsigned int payload_len)
1156{
1157 struct bfa_bsg_flash_s *iocmd = (struct bfa_bsg_flash_s *)cmd;
1158 void *iocmd_bufptr;
1159 struct bfad_hal_comp fcomp;
1160 unsigned long flags;
1161
1162 if (bfad_chk_iocmd_sz(payload_len,
1163 sizeof(struct bfa_bsg_flash_s),
1164 iocmd->bufsz) != BFA_STATUS_OK) {
1165 iocmd->status = BFA_STATUS_VERSION_FAIL;
1166 return 0;
1167 }
1168
1169 iocmd_bufptr = (char *)iocmd + sizeof(struct bfa_bsg_flash_s);
1170
1171 init_completion(&fcomp.comp);
1172 spin_lock_irqsave(&bfad->bfad_lock, flags);
1173 iocmd->status = bfa_flash_update_part(BFA_FLASH(&bfad->bfa),
1174 iocmd->type, iocmd->instance, iocmd_bufptr,
1175 iocmd->bufsz, 0, bfad_hcb_comp, &fcomp);
1176 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1177 if (iocmd->status != BFA_STATUS_OK)
1178 goto out;
1179 wait_for_completion(&fcomp.comp);
1180 iocmd->status = fcomp.status;
1181out:
1182 return 0;
1183}
1184
1185int
1186bfad_iocmd_flash_read_part(struct bfad_s *bfad, void *cmd,
1187 unsigned int payload_len)
1188{
1189 struct bfa_bsg_flash_s *iocmd = (struct bfa_bsg_flash_s *)cmd;
1190 struct bfad_hal_comp fcomp;
1191 void *iocmd_bufptr;
1192 unsigned long flags;
1193
1194 if (bfad_chk_iocmd_sz(payload_len,
1195 sizeof(struct bfa_bsg_flash_s),
1196 iocmd->bufsz) != BFA_STATUS_OK) {
1197 iocmd->status = BFA_STATUS_VERSION_FAIL;
1198 return 0;
1199 }
1200
1201 iocmd_bufptr = (char *)iocmd + sizeof(struct bfa_bsg_flash_s);
1202
1203 init_completion(&fcomp.comp);
1204 spin_lock_irqsave(&bfad->bfad_lock, flags);
1205 iocmd->status = bfa_flash_read_part(BFA_FLASH(&bfad->bfa), iocmd->type,
1206 iocmd->instance, iocmd_bufptr, iocmd->bufsz, 0,
1207 bfad_hcb_comp, &fcomp);
1208 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1209 if (iocmd->status != BFA_STATUS_OK)
1210 goto out;
1211 wait_for_completion(&fcomp.comp);
1212 iocmd->status = fcomp.status;
1213out:
1214 return 0;
1215}
1216
1217int
1218bfad_iocmd_diag_temp(struct bfad_s *bfad, void *cmd)
1219{
1220 struct bfa_bsg_diag_get_temp_s *iocmd =
1221 (struct bfa_bsg_diag_get_temp_s *)cmd;
1222 struct bfad_hal_comp fcomp;
1223 unsigned long flags;
1224
1225 init_completion(&fcomp.comp);
1226 spin_lock_irqsave(&bfad->bfad_lock, flags);
1227 iocmd->status = bfa_diag_tsensor_query(BFA_DIAG_MOD(&bfad->bfa),
1228 &iocmd->result, bfad_hcb_comp, &fcomp);
1229 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1230 bfa_trc(bfad, iocmd->status);
1231 if (iocmd->status != BFA_STATUS_OK)
1232 goto out;
1233 wait_for_completion(&fcomp.comp);
1234 iocmd->status = fcomp.status;
1235out:
1236 return 0;
1237}
1238
1239int
1240bfad_iocmd_diag_memtest(struct bfad_s *bfad, void *cmd)
1241{
1242 struct bfa_bsg_diag_memtest_s *iocmd =
1243 (struct bfa_bsg_diag_memtest_s *)cmd;
1244 struct bfad_hal_comp fcomp;
1245 unsigned long flags;
1246
1247 init_completion(&fcomp.comp);
1248 spin_lock_irqsave(&bfad->bfad_lock, flags);
1249 iocmd->status = bfa_diag_memtest(BFA_DIAG_MOD(&bfad->bfa),
1250 &iocmd->memtest, iocmd->pat,
1251 &iocmd->result, bfad_hcb_comp, &fcomp);
1252 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1253 bfa_trc(bfad, iocmd->status);
1254 if (iocmd->status != BFA_STATUS_OK)
1255 goto out;
1256 wait_for_completion(&fcomp.comp);
1257 iocmd->status = fcomp.status;
1258out:
1259 return 0;
1260}
1261
1262int
1263bfad_iocmd_diag_loopback(struct bfad_s *bfad, void *cmd)
1264{
1265 struct bfa_bsg_diag_loopback_s *iocmd =
1266 (struct bfa_bsg_diag_loopback_s *)cmd;
1267 struct bfad_hal_comp fcomp;
1268 unsigned long flags;
1269
1270 init_completion(&fcomp.comp);
1271 spin_lock_irqsave(&bfad->bfad_lock, flags);
1272 iocmd->status = bfa_fcdiag_loopback(&bfad->bfa, iocmd->opmode,
1273 iocmd->speed, iocmd->lpcnt, iocmd->pat,
1274 &iocmd->result, bfad_hcb_comp, &fcomp);
1275 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1276 bfa_trc(bfad, iocmd->status);
1277 if (iocmd->status != BFA_STATUS_OK)
1278 goto out;
1279 wait_for_completion(&fcomp.comp);
1280 iocmd->status = fcomp.status;
1281out:
1282 return 0;
1283}
1284
1285int
1286bfad_iocmd_diag_fwping(struct bfad_s *bfad, void *cmd)
1287{
1288 struct bfa_bsg_diag_fwping_s *iocmd =
1289 (struct bfa_bsg_diag_fwping_s *)cmd;
1290 struct bfad_hal_comp fcomp;
1291 unsigned long flags;
1292
1293 init_completion(&fcomp.comp);
1294 spin_lock_irqsave(&bfad->bfad_lock, flags);
1295 iocmd->status = bfa_diag_fwping(BFA_DIAG_MOD(&bfad->bfa), iocmd->cnt,
1296 iocmd->pattern, &iocmd->result,
1297 bfad_hcb_comp, &fcomp);
1298 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1299 bfa_trc(bfad, iocmd->status);
1300 if (iocmd->status != BFA_STATUS_OK)
1301 goto out;
1302 bfa_trc(bfad, 0x77771);
1303 wait_for_completion(&fcomp.comp);
1304 iocmd->status = fcomp.status;
1305out:
1306 return 0;
1307}
1308
1309int
1310bfad_iocmd_diag_queuetest(struct bfad_s *bfad, void *cmd)
1311{
1312 struct bfa_bsg_diag_qtest_s *iocmd = (struct bfa_bsg_diag_qtest_s *)cmd;
1313 struct bfad_hal_comp fcomp;
1314 unsigned long flags;
1315
1316 init_completion(&fcomp.comp);
1317 spin_lock_irqsave(&bfad->bfad_lock, flags);
1318 iocmd->status = bfa_fcdiag_queuetest(&bfad->bfa, iocmd->force,
1319 iocmd->queue, &iocmd->result,
1320 bfad_hcb_comp, &fcomp);
1321 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1322 if (iocmd->status != BFA_STATUS_OK)
1323 goto out;
1324 wait_for_completion(&fcomp.comp);
1325 iocmd->status = fcomp.status;
1326out:
1327 return 0;
1328}
1329
1330int
1331bfad_iocmd_diag_sfp(struct bfad_s *bfad, void *cmd)
1332{
1333 struct bfa_bsg_sfp_show_s *iocmd =
1334 (struct bfa_bsg_sfp_show_s *)cmd;
1335 struct bfad_hal_comp fcomp;
1336 unsigned long flags;
1337
1338 init_completion(&fcomp.comp);
1339 spin_lock_irqsave(&bfad->bfad_lock, flags);
1340 iocmd->status = bfa_sfp_show(BFA_SFP_MOD(&bfad->bfa), &iocmd->sfp,
1341 bfad_hcb_comp, &fcomp);
1342 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1343 bfa_trc(bfad, iocmd->status);
1344 if (iocmd->status != BFA_STATUS_OK)
1345 goto out;
1346 wait_for_completion(&fcomp.comp);
1347 iocmd->status = fcomp.status;
1348 bfa_trc(bfad, iocmd->status);
1349out:
1350 return 0;
1351}
1352
1353int
1354bfad_iocmd_diag_led(struct bfad_s *bfad, void *cmd)
1355{
1356 struct bfa_bsg_diag_led_s *iocmd = (struct bfa_bsg_diag_led_s *)cmd;
1357 unsigned long flags;
1358
1359 spin_lock_irqsave(&bfad->bfad_lock, flags);
1360 iocmd->status = bfa_diag_ledtest(BFA_DIAG_MOD(&bfad->bfa),
1361 &iocmd->ledtest);
1362 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1363 return 0;
1364}
1365
1366int
1367bfad_iocmd_diag_beacon_lport(struct bfad_s *bfad, void *cmd)
1368{
1369 struct bfa_bsg_diag_beacon_s *iocmd =
1370 (struct bfa_bsg_diag_beacon_s *)cmd;
1371 unsigned long flags;
1372
1373 spin_lock_irqsave(&bfad->bfad_lock, flags);
1374 iocmd->status = bfa_diag_beacon_port(BFA_DIAG_MOD(&bfad->bfa),
1375 iocmd->beacon, iocmd->link_e2e_beacon,
1376 iocmd->second);
1377 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1378 return 0;
1379}
1380
1381int
1382bfad_iocmd_diag_lb_stat(struct bfad_s *bfad, void *cmd)
1383{
1384 struct bfa_bsg_diag_lb_stat_s *iocmd =
1385 (struct bfa_bsg_diag_lb_stat_s *)cmd;
1386 unsigned long flags;
1387
1388 spin_lock_irqsave(&bfad->bfad_lock, flags);
1389 iocmd->status = bfa_fcdiag_lb_is_running(&bfad->bfa);
1390 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1391 bfa_trc(bfad, iocmd->status);
1392
1393 return 0;
1394}
1395
1396int
1397bfad_iocmd_phy_get_attr(struct bfad_s *bfad, void *cmd)
1398{
1399 struct bfa_bsg_phy_attr_s *iocmd =
1400 (struct bfa_bsg_phy_attr_s *)cmd;
1401 struct bfad_hal_comp fcomp;
1402 unsigned long flags;
1403
1404 init_completion(&fcomp.comp);
1405 spin_lock_irqsave(&bfad->bfad_lock, flags);
1406 iocmd->status = bfa_phy_get_attr(BFA_PHY(&bfad->bfa), iocmd->instance,
1407 &iocmd->attr, bfad_hcb_comp, &fcomp);
1408 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1409 if (iocmd->status != BFA_STATUS_OK)
1410 goto out;
1411 wait_for_completion(&fcomp.comp);
1412 iocmd->status = fcomp.status;
1413out:
1414 return 0;
1415}
1416
1417int
1418bfad_iocmd_phy_get_stats(struct bfad_s *bfad, void *cmd)
1419{
1420 struct bfa_bsg_phy_stats_s *iocmd =
1421 (struct bfa_bsg_phy_stats_s *)cmd;
1422 struct bfad_hal_comp fcomp;
1423 unsigned long flags;
1424
1425 init_completion(&fcomp.comp);
1426 spin_lock_irqsave(&bfad->bfad_lock, flags);
1427 iocmd->status = bfa_phy_get_stats(BFA_PHY(&bfad->bfa), iocmd->instance,
1428 &iocmd->stats, bfad_hcb_comp, &fcomp);
1429 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1430 if (iocmd->status != BFA_STATUS_OK)
1431 goto out;
1432 wait_for_completion(&fcomp.comp);
1433 iocmd->status = fcomp.status;
1434out:
1435 return 0;
1436}
1437
1438int
1439bfad_iocmd_phy_read(struct bfad_s *bfad, void *cmd, unsigned int payload_len)
1440{
1441 struct bfa_bsg_phy_s *iocmd = (struct bfa_bsg_phy_s *)cmd;
1442 struct bfad_hal_comp fcomp;
1443 void *iocmd_bufptr;
1444 unsigned long flags;
1445
1446 if (bfad_chk_iocmd_sz(payload_len,
1447 sizeof(struct bfa_bsg_phy_s),
1448 iocmd->bufsz) != BFA_STATUS_OK) {
1449 iocmd->status = BFA_STATUS_VERSION_FAIL;
1450 return 0;
1451 }
1452
1453 iocmd_bufptr = (char *)iocmd + sizeof(struct bfa_bsg_phy_s);
1454 init_completion(&fcomp.comp);
1455 spin_lock_irqsave(&bfad->bfad_lock, flags);
1456 iocmd->status = bfa_phy_read(BFA_PHY(&bfad->bfa),
1457 iocmd->instance, iocmd_bufptr, iocmd->bufsz,
1458 0, bfad_hcb_comp, &fcomp);
1459 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1460 if (iocmd->status != BFA_STATUS_OK)
1461 goto out;
1462 wait_for_completion(&fcomp.comp);
1463 iocmd->status = fcomp.status;
1464 if (iocmd->status != BFA_STATUS_OK)
1465 goto out;
1466out:
1467 return 0;
1468}
1469
1470int
1471bfad_iocmd_phy_update(struct bfad_s *bfad, void *cmd, unsigned int payload_len)
1472{
1473 struct bfa_bsg_phy_s *iocmd = (struct bfa_bsg_phy_s *)cmd;
1474 void *iocmd_bufptr;
1475 struct bfad_hal_comp fcomp;
1476 unsigned long flags;
1477
1478 if (bfad_chk_iocmd_sz(payload_len,
1479 sizeof(struct bfa_bsg_phy_s),
1480 iocmd->bufsz) != BFA_STATUS_OK) {
1481 iocmd->status = BFA_STATUS_VERSION_FAIL;
1482 return 0;
1483 }
1484
1485 iocmd_bufptr = (char *)iocmd + sizeof(struct bfa_bsg_phy_s);
1486 init_completion(&fcomp.comp);
1487 spin_lock_irqsave(&bfad->bfad_lock, flags);
1488 iocmd->status = bfa_phy_update(BFA_PHY(&bfad->bfa),
1489 iocmd->instance, iocmd_bufptr, iocmd->bufsz,
1490 0, bfad_hcb_comp, &fcomp);
1491 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1492 if (iocmd->status != BFA_STATUS_OK)
1493 goto out;
1494 wait_for_completion(&fcomp.comp);
1495 iocmd->status = fcomp.status;
1496out:
1497 return 0;
1498}
1499
1500static int
1501bfad_iocmd_handler(struct bfad_s *bfad, unsigned int cmd, void *iocmd,
1502 unsigned int payload_len)
1503{
1504 int rc = EINVAL;
1505
1506 switch (cmd) {
1507 case IOCMD_IOC_ENABLE:
1508 rc = bfad_iocmd_ioc_enable(bfad, iocmd);
1509 break;
1510 case IOCMD_IOC_DISABLE:
1511 rc = bfad_iocmd_ioc_disable(bfad, iocmd);
1512 break;
1513 case IOCMD_IOC_GET_INFO:
1514 rc = bfad_iocmd_ioc_get_info(bfad, iocmd);
1515 break;
1516 case IOCMD_IOC_GET_ATTR:
1517 rc = bfad_iocmd_ioc_get_attr(bfad, iocmd);
1518 break;
1519 case IOCMD_IOC_GET_STATS:
1520 rc = bfad_iocmd_ioc_get_stats(bfad, iocmd);
1521 break;
1522 case IOCMD_IOC_GET_FWSTATS:
1523 rc = bfad_iocmd_ioc_get_fwstats(bfad, iocmd, payload_len);
1524 break;
1525 case IOCMD_IOCFC_GET_ATTR:
1526 rc = bfad_iocmd_iocfc_get_attr(bfad, iocmd);
1527 break;
1528 case IOCMD_IOCFC_SET_INTR:
1529 rc = bfad_iocmd_iocfc_set_intr(bfad, iocmd);
1530 break;
1531 case IOCMD_PORT_ENABLE:
1532 rc = bfad_iocmd_port_enable(bfad, iocmd);
1533 break;
1534 case IOCMD_PORT_DISABLE:
1535 rc = bfad_iocmd_port_disable(bfad, iocmd);
1536 break;
1537 case IOCMD_PORT_GET_ATTR:
1538 rc = bfad_iocmd_port_get_attr(bfad, iocmd);
1539 break;
1540 case IOCMD_PORT_GET_STATS:
1541 rc = bfad_iocmd_port_get_stats(bfad, iocmd, payload_len);
1542 break;
1543 case IOCMD_LPORT_GET_ATTR:
1544 rc = bfad_iocmd_lport_get_attr(bfad, iocmd);
1545 break;
1546 case IOCMD_LPORT_GET_STATS:
1547 rc = bfad_iocmd_lport_get_stats(bfad, iocmd);
1548 break;
1549 case IOCMD_LPORT_GET_IOSTATS:
1550 rc = bfad_iocmd_lport_get_iostats(bfad, iocmd);
1551 break;
1552 case IOCMD_LPORT_GET_RPORTS:
1553 rc = bfad_iocmd_lport_get_rports(bfad, iocmd, payload_len);
1554 break;
1555 case IOCMD_RPORT_GET_ATTR:
1556 rc = bfad_iocmd_rport_get_attr(bfad, iocmd);
1557 break;
1558 case IOCMD_RPORT_GET_ADDR:
1559 rc = bfad_iocmd_rport_get_addr(bfad, iocmd);
1560 break;
1561 case IOCMD_RPORT_GET_STATS:
1562 rc = bfad_iocmd_rport_get_stats(bfad, iocmd);
1563 break;
1564 case IOCMD_FABRIC_GET_LPORTS:
1565 rc = bfad_iocmd_fabric_get_lports(bfad, iocmd, payload_len);
1566 break;
1567 case IOCMD_FCPIM_MODSTATS:
1568 rc = bfad_iocmd_fcpim_get_modstats(bfad, iocmd);
1569 break;
1570 case IOCMD_FCPIM_DEL_ITN_STATS:
1571 rc = bfad_iocmd_fcpim_get_del_itn_stats(bfad, iocmd);
1572 break;
1573 case IOCMD_ITNIM_GET_ATTR:
1574 rc = bfad_iocmd_itnim_get_attr(bfad, iocmd);
1575 break;
1576 case IOCMD_ITNIM_GET_IOSTATS:
1577 rc = bfad_iocmd_itnim_get_iostats(bfad, iocmd);
1578 break;
1579 case IOCMD_ITNIM_GET_ITNSTATS:
1580 rc = bfad_iocmd_itnim_get_itnstats(bfad, iocmd);
1581 break;
1582 case IOCMD_FCPORT_ENABLE:
1583 rc = bfad_iocmd_fcport_enable(bfad, iocmd);
1584 break;
1585 case IOCMD_FCPORT_DISABLE:
1586 rc = bfad_iocmd_fcport_disable(bfad, iocmd);
1587 break;
1588 case IOCMD_IOC_PCIFN_CFG:
1589 rc = bfad_iocmd_ioc_get_pcifn_cfg(bfad, iocmd);
1590 break;
1591 case IOCMD_PCIFN_CREATE:
1592 rc = bfad_iocmd_pcifn_create(bfad, iocmd);
1593 break;
1594 case IOCMD_PCIFN_DELETE:
1595 rc = bfad_iocmd_pcifn_delete(bfad, iocmd);
1596 break;
1597 case IOCMD_PCIFN_BW:
1598 rc = bfad_iocmd_pcifn_bw(bfad, iocmd);
1599 break;
1600 case IOCMD_ADAPTER_CFG_MODE:
1601 rc = bfad_iocmd_adapter_cfg_mode(bfad, iocmd);
1602 break;
1603 case IOCMD_PORT_CFG_MODE:
1604 rc = bfad_iocmd_port_cfg_mode(bfad, iocmd);
1605 break;
1606 case IOCMD_FLASH_ENABLE_OPTROM:
1607 case IOCMD_FLASH_DISABLE_OPTROM:
1608 rc = bfad_iocmd_ablk_optrom(bfad, cmd, iocmd);
1609 break;
1610 case IOCMD_FAA_ENABLE:
1611 rc = bfad_iocmd_faa_enable(bfad, iocmd);
1612 break;
1613 case IOCMD_FAA_DISABLE:
1614 rc = bfad_iocmd_faa_disable(bfad, iocmd);
1615 break;
1616 case IOCMD_FAA_QUERY:
1617 rc = bfad_iocmd_faa_query(bfad, iocmd);
1618 break;
1619 case IOCMD_CEE_GET_ATTR:
1620 rc = bfad_iocmd_cee_attr(bfad, iocmd, payload_len);
1621 break;
1622 case IOCMD_CEE_GET_STATS:
1623 rc = bfad_iocmd_cee_get_stats(bfad, iocmd, payload_len);
1624 break;
1625 case IOCMD_CEE_RESET_STATS:
1626 rc = bfad_iocmd_cee_reset_stats(bfad, iocmd);
1627 break;
1628 case IOCMD_SFP_MEDIA:
1629 rc = bfad_iocmd_sfp_media(bfad, iocmd);
1630 break;
1631 case IOCMD_SFP_SPEED:
1632 rc = bfad_iocmd_sfp_speed(bfad, iocmd);
1633 break;
1634 case IOCMD_FLASH_GET_ATTR:
1635 rc = bfad_iocmd_flash_get_attr(bfad, iocmd);
1636 break;
1637 case IOCMD_FLASH_ERASE_PART:
1638 rc = bfad_iocmd_flash_erase_part(bfad, iocmd);
1639 break;
1640 case IOCMD_FLASH_UPDATE_PART:
1641 rc = bfad_iocmd_flash_update_part(bfad, iocmd, payload_len);
1642 break;
1643 case IOCMD_FLASH_READ_PART:
1644 rc = bfad_iocmd_flash_read_part(bfad, iocmd, payload_len);
1645 break;
1646 case IOCMD_DIAG_TEMP:
1647 rc = bfad_iocmd_diag_temp(bfad, iocmd);
1648 break;
1649 case IOCMD_DIAG_MEMTEST:
1650 rc = bfad_iocmd_diag_memtest(bfad, iocmd);
1651 break;
1652 case IOCMD_DIAG_LOOPBACK:
1653 rc = bfad_iocmd_diag_loopback(bfad, iocmd);
1654 break;
1655 case IOCMD_DIAG_FWPING:
1656 rc = bfad_iocmd_diag_fwping(bfad, iocmd);
1657 break;
1658 case IOCMD_DIAG_QUEUETEST:
1659 rc = bfad_iocmd_diag_queuetest(bfad, iocmd);
1660 break;
1661 case IOCMD_DIAG_SFP:
1662 rc = bfad_iocmd_diag_sfp(bfad, iocmd);
1663 break;
1664 case IOCMD_DIAG_LED:
1665 rc = bfad_iocmd_diag_led(bfad, iocmd);
1666 break;
1667 case IOCMD_DIAG_BEACON_LPORT:
1668 rc = bfad_iocmd_diag_beacon_lport(bfad, iocmd);
1669 break;
1670 case IOCMD_DIAG_LB_STAT:
1671 rc = bfad_iocmd_diag_lb_stat(bfad, iocmd);
1672 break;
1673 case IOCMD_PHY_GET_ATTR:
1674 rc = bfad_iocmd_phy_get_attr(bfad, iocmd);
1675 break;
1676 case IOCMD_PHY_GET_STATS:
1677 rc = bfad_iocmd_phy_get_stats(bfad, iocmd);
1678 break;
1679 case IOCMD_PHY_UPDATE_FW:
1680 rc = bfad_iocmd_phy_update(bfad, iocmd, payload_len);
1681 break;
1682 case IOCMD_PHY_READ_FW:
1683 rc = bfad_iocmd_phy_read(bfad, iocmd, payload_len);
1684 break;
1685 default:
1686 rc = EINVAL;
1687 break;
1688 }
1689 return -rc;
1690}
1691
1692static int
1693bfad_im_bsg_vendor_request(struct fc_bsg_job *job)
1694{
1695 uint32_t vendor_cmd = job->request->rqst_data.h_vendor.vendor_cmd[0];
1696 struct bfad_im_port_s *im_port =
1697 (struct bfad_im_port_s *) job->shost->hostdata[0];
1698 struct bfad_s *bfad = im_port->bfad;
1699 void *payload_kbuf;
1700 int rc = -EINVAL;
1701
1702 /* Allocate a temp buffer to hold the passed in user space command */
1703 payload_kbuf = kzalloc(job->request_payload.payload_len, GFP_KERNEL);
1704 if (!payload_kbuf) {
1705 rc = -ENOMEM;
1706 goto out;
1707 }
1708
1709 /* Copy the sg_list passed in to a linear buffer: holds the cmnd data */
1710 sg_copy_to_buffer(job->request_payload.sg_list,
1711 job->request_payload.sg_cnt, payload_kbuf,
1712 job->request_payload.payload_len);
1713
1714 /* Invoke IOCMD handler - to handle all the vendor command requests */
1715 rc = bfad_iocmd_handler(bfad, vendor_cmd, payload_kbuf,
1716 job->request_payload.payload_len);
1717 if (rc != BFA_STATUS_OK)
1718 goto error;
1719
1720 /* Copy the response data to the job->reply_payload sg_list */
1721 sg_copy_from_buffer(job->reply_payload.sg_list,
1722 job->reply_payload.sg_cnt,
1723 payload_kbuf,
1724 job->reply_payload.payload_len);
1725
1726 /* free the command buffer */
1727 kfree(payload_kbuf);
1728
1729 /* Fill the BSG job reply data */
1730 job->reply_len = job->reply_payload.payload_len;
1731 job->reply->reply_payload_rcv_len = job->reply_payload.payload_len;
1732 job->reply->result = rc;
1733
1734 job->job_done(job);
1735 return rc;
1736error:
1737 /* free the command buffer */
1738 kfree(payload_kbuf);
1739out:
1740 job->reply->result = rc;
1741 job->reply_len = sizeof(uint32_t);
1742 job->reply->reply_payload_rcv_len = 0;
1743 return rc;
1744}
1745
1746/* FC passthru call backs */
1747u64
1748bfad_fcxp_get_req_sgaddr_cb(void *bfad_fcxp, int sgeid)
1749{
1750 struct bfad_fcxp *drv_fcxp = bfad_fcxp;
1751 struct bfa_sge_s *sge;
1752 u64 addr;
1753
1754 sge = drv_fcxp->req_sge + sgeid;
1755 addr = (u64)(size_t) sge->sg_addr;
1756 return addr;
1757}
1758
1759u32
1760bfad_fcxp_get_req_sglen_cb(void *bfad_fcxp, int sgeid)
1761{
1762 struct bfad_fcxp *drv_fcxp = bfad_fcxp;
1763 struct bfa_sge_s *sge;
1764
1765 sge = drv_fcxp->req_sge + sgeid;
1766 return sge->sg_len;
1767}
1768
1769u64
1770bfad_fcxp_get_rsp_sgaddr_cb(void *bfad_fcxp, int sgeid)
1771{
1772 struct bfad_fcxp *drv_fcxp = bfad_fcxp;
1773 struct bfa_sge_s *sge;
1774 u64 addr;
1775
1776 sge = drv_fcxp->rsp_sge + sgeid;
1777 addr = (u64)(size_t) sge->sg_addr;
1778 return addr;
1779}
1780
1781u32
1782bfad_fcxp_get_rsp_sglen_cb(void *bfad_fcxp, int sgeid)
1783{
1784 struct bfad_fcxp *drv_fcxp = bfad_fcxp;
1785 struct bfa_sge_s *sge;
1786
1787 sge = drv_fcxp->rsp_sge + sgeid;
1788 return sge->sg_len;
1789}
1790
1791void
1792bfad_send_fcpt_cb(void *bfad_fcxp, struct bfa_fcxp_s *fcxp, void *cbarg,
1793 bfa_status_t req_status, u32 rsp_len, u32 resid_len,
1794 struct fchs_s *rsp_fchs)
1795{
1796 struct bfad_fcxp *drv_fcxp = bfad_fcxp;
1797
1798 drv_fcxp->req_status = req_status;
1799 drv_fcxp->rsp_len = rsp_len;
1800
1801 /* bfa_fcxp will be automatically freed by BFA */
1802 drv_fcxp->bfa_fcxp = NULL;
1803 complete(&drv_fcxp->comp);
1804}
1805
1806struct bfad_buf_info *
1807bfad_fcxp_map_sg(struct bfad_s *bfad, void *payload_kbuf,
1808 uint32_t payload_len, uint32_t *num_sgles)
1809{
1810 struct bfad_buf_info *buf_base, *buf_info;
1811 struct bfa_sge_s *sg_table;
1812 int sge_num = 1;
1813
1814 buf_base = kzalloc((sizeof(struct bfad_buf_info) +
1815 sizeof(struct bfa_sge_s)) * sge_num, GFP_KERNEL);
1816 if (!buf_base)
1817 return NULL;
1818
1819 sg_table = (struct bfa_sge_s *) (((uint8_t *)buf_base) +
1820 (sizeof(struct bfad_buf_info) * sge_num));
1821
1822 /* Allocate dma coherent memory */
1823 buf_info = buf_base;
1824 buf_info->size = payload_len;
1825 buf_info->virt = dma_alloc_coherent(&bfad->pcidev->dev, buf_info->size,
1826 &buf_info->phys, GFP_KERNEL);
1827 if (!buf_info->virt)
1828 goto out_free_mem;
1829
1830 /* copy the linear bsg buffer to buf_info */
1831 memset(buf_info->virt, 0, buf_info->size);
1832 memcpy(buf_info->virt, payload_kbuf, buf_info->size);
1833
1834 /*
1835 * Setup SG table
1836 */
1837 sg_table->sg_len = buf_info->size;
1838 sg_table->sg_addr = (void *)(size_t) buf_info->phys;
1839
1840 *num_sgles = sge_num;
1841
1842 return buf_base;
1843
1844out_free_mem:
1845 kfree(buf_base);
1846 return NULL;
1847}
1848
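/*
 * bfad_fcxp_free_mem - undo bfad_fcxp_map_sg(): free each DMA-coherent
 * payload buffer and then the bfad_buf_info/SG table allocation itself.
 */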
1849void
1850bfad_fcxp_free_mem(struct bfad_s *bfad, struct bfad_buf_info *buf_base,
1851 uint32_t num_sgles)
1852{
1853 int i;
1854 struct bfad_buf_info *buf_info = buf_base;
1855
1856 if (buf_base) {
1857 for (i = 0; i < num_sgles; buf_info++, i++) {
1858 if (buf_info->virt != NULL)
1859 dma_free_coherent(&bfad->pcidev->dev,
1860 buf_info->size, buf_info->virt,
1861 buf_info->phys);
1862 }
1863 kfree(buf_base);
1864 }
1865}
1866
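/*
 * bfad_fcxp_bsg_send - allocate an FCXP from BFA and send the ELS/CT
 * pass-through frame; completion is reported via bfad_send_fcpt_cb().
 */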
1867int
1868bfad_fcxp_bsg_send(struct fc_bsg_job *job, struct bfad_fcxp *drv_fcxp,
1869 bfa_bsg_fcpt_t *bsg_fcpt)
1870{
1871 struct bfa_fcxp_s *hal_fcxp;
1872 struct bfad_s *bfad = drv_fcxp->port->bfad;
1873 unsigned long flags;
1874 uint8_t lp_tag;
1875
1876 spin_lock_irqsave(&bfad->bfad_lock, flags);
1877
1878 /* Allocate bfa_fcxp structure */
1879 hal_fcxp = bfa_fcxp_alloc(drv_fcxp, &bfad->bfa,
1880 drv_fcxp->num_req_sgles,
1881 drv_fcxp->num_rsp_sgles,
1882 bfad_fcxp_get_req_sgaddr_cb,
1883 bfad_fcxp_get_req_sglen_cb,
1884 bfad_fcxp_get_rsp_sgaddr_cb,
1885 bfad_fcxp_get_rsp_sglen_cb);
1886 if (!hal_fcxp) {
1887 bfa_trc(bfad, 0);
1888 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1889 return BFA_STATUS_ENOMEM;
1890 }
1891
1892 drv_fcxp->bfa_fcxp = hal_fcxp;
1893
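	/* look up the lport tag from the s_id in the caller-supplied FC header */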
1894 lp_tag = bfa_lps_get_tag_from_pid(&bfad->bfa, bsg_fcpt->fchs.s_id);
1895
1896 bfa_fcxp_send(hal_fcxp, drv_fcxp->bfa_rport, bsg_fcpt->vf_id, lp_tag,
1897 bsg_fcpt->cts, bsg_fcpt->cos,
1898 job->request_payload.payload_len,
1899 &bsg_fcpt->fchs, bfad_send_fcpt_cb, bfad,
1900 job->reply_payload.payload_len, bsg_fcpt->tsecs);
1901
1902 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1903
1904 return BFA_STATUS_OK;
1905}
1906
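/*
 * bfad_im_bsg_els_ct_request - handle FC BSG ELS/CT pass-through requests:
 * copy in the user payload, map request/response buffers for DMA, send the
 * frame and wait for its completion, then copy the response back to the
 * bsg reply payload.
 */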
1907int
1908bfad_im_bsg_els_ct_request(struct fc_bsg_job *job)
1909{
1910 struct bfa_bsg_data *bsg_data;
1911 struct bfad_im_port_s *im_port =
1912 (struct bfad_im_port_s *) job->shost->hostdata[0];
1913 struct bfad_s *bfad = im_port->bfad;
1914 bfa_bsg_fcpt_t *bsg_fcpt;
1915 struct bfad_fcxp *drv_fcxp;
1916 struct bfa_fcs_lport_s *fcs_port;
1917 struct bfa_fcs_rport_s *fcs_rport;
1918 uint32_t command_type = job->request->msgcode;
1919 unsigned long flags;
1920 struct bfad_buf_info *rsp_buf_info;
1921 void *req_kbuf = NULL, *rsp_kbuf = NULL;
1922 int rc = -EINVAL;
1923
1924	job->reply_len = sizeof(uint32_t);	/* At least a uint32_t sized reply_len */
1925 job->reply->reply_payload_rcv_len = 0;
1926
1927 /* Get the payload passed in from userspace */
1928 bsg_data = (struct bfa_bsg_data *) (((char *)job->request) +
1929 sizeof(struct fc_bsg_request));
1930 if (bsg_data == NULL)
1931 goto out;
1932
1933 /*
1934	 * Allocate a kernel buffer for bsg_fcpt and copy_from_user() the
1935	 * payload buffer of size bsg_data->payload_len into it
1936 */
1937 bsg_fcpt = (struct bfa_bsg_fcpt_s *)
1938 kzalloc(bsg_data->payload_len, GFP_KERNEL);
1939 if (!bsg_fcpt)
1940 goto out;
1941
1942 if (copy_from_user((uint8_t *)bsg_fcpt, bsg_data->payload,
1943 bsg_data->payload_len)) {
1944 kfree(bsg_fcpt);
1945 goto out;
1946 }
1947
1948 drv_fcxp = kzalloc(sizeof(struct bfad_fcxp), GFP_KERNEL);
1949 if (drv_fcxp == NULL) {
1950 rc = -ENOMEM;
1951 goto out;
1952 }
1953
1954 spin_lock_irqsave(&bfad->bfad_lock, flags);
1955 fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs, bsg_fcpt->vf_id,
1956 bsg_fcpt->lpwwn);
1957 if (fcs_port == NULL) {
1958 bsg_fcpt->status = BFA_STATUS_UNKNOWN_LWWN;
1959 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1960 goto out_free_mem;
1961 }
1962
1963 /* Check if the port is online before sending FC Passthru cmd */
1964 if (!bfa_fcs_lport_is_online(fcs_port)) {
1965 bsg_fcpt->status = BFA_STATUS_PORT_OFFLINE;
1966 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1967 goto out_free_mem;
1968 }
1969
1970 drv_fcxp->port = fcs_port->bfad_port;
1971
1972	if (drv_fcxp->port->bfad == NULL)
1973 drv_fcxp->port->bfad = bfad;
1974
1975	/* Fetch the bfa_rport only if this command type needs a nexus */
1976 if (command_type == FC_BSG_HST_ELS_NOLOGIN ||
1977 command_type == FC_BSG_HST_CT) {
1978 /* BSG HST commands: no nexus needed */
1979 drv_fcxp->bfa_rport = NULL;
1980
1981 } else if (command_type == FC_BSG_RPT_ELS ||
1982 command_type == FC_BSG_RPT_CT) {
1983 /* BSG RPT commands: nexus needed */
1984 fcs_rport = bfa_fcs_lport_get_rport_by_pwwn(fcs_port,
1985 bsg_fcpt->dpwwn);
1986 if (fcs_rport == NULL) {
1987 bsg_fcpt->status = BFA_STATUS_UNKNOWN_RWWN;
1988 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1989 goto out_free_mem;
1990 }
1991
1992 drv_fcxp->bfa_rport = fcs_rport->bfa_rport;
1993
1994 } else { /* Unknown BSG msgcode; return -EINVAL */
1995 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1996 goto out_free_mem;
1997 }
1998
1999 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
2000
2001 /* allocate memory for req / rsp buffers */
2002 req_kbuf = kzalloc(job->request_payload.payload_len, GFP_KERNEL);
2003 if (!req_kbuf) {
2004 printk(KERN_INFO "bfa %s: fcpt request buffer alloc failed\n",
2005 bfad->pci_name);
2006 rc = -ENOMEM;
2007 goto out_free_mem;
2008 }
2009
2010 rsp_kbuf = kzalloc(job->reply_payload.payload_len, GFP_KERNEL);
2011 if (!rsp_kbuf) {
2012 printk(KERN_INFO "bfa %s: fcpt response buffer alloc failed\n",
2013 bfad->pci_name);
2014 rc = -ENOMEM;
2015 goto out_free_mem;
2016 }
2017
2018 /* map req sg - copy the sg_list passed in to the linear buffer */
2019 sg_copy_to_buffer(job->request_payload.sg_list,
2020 job->request_payload.sg_cnt, req_kbuf,
2021 job->request_payload.payload_len);
2022
2023 drv_fcxp->reqbuf_info = bfad_fcxp_map_sg(bfad, req_kbuf,
2024 job->request_payload.payload_len,
2025 &drv_fcxp->num_req_sgles);
2026 if (!drv_fcxp->reqbuf_info) {
2027 printk(KERN_INFO "bfa %s: fcpt request fcxp_map_sg failed\n",
2028 bfad->pci_name);
2029 rc = -ENOMEM;
2030 goto out_free_mem;
2031 }
2032
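	/* the request SG table immediately follows the bfad_buf_info array */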
2033 drv_fcxp->req_sge = (struct bfa_sge_s *)
2034 (((uint8_t *)drv_fcxp->reqbuf_info) +
2035 (sizeof(struct bfad_buf_info) *
2036 drv_fcxp->num_req_sgles));
2037
2038 /* map rsp sg */
2039 drv_fcxp->rspbuf_info = bfad_fcxp_map_sg(bfad, rsp_kbuf,
2040 job->reply_payload.payload_len,
2041 &drv_fcxp->num_rsp_sgles);
2042 if (!drv_fcxp->rspbuf_info) {
2043 printk(KERN_INFO "bfa %s: fcpt response fcxp_map_sg failed\n",
2044 bfad->pci_name);
2045 rc = -ENOMEM;
2046 goto out_free_mem;
2047 }
2048
2049 rsp_buf_info = (struct bfad_buf_info *)drv_fcxp->rspbuf_info;
2050 drv_fcxp->rsp_sge = (struct bfa_sge_s *)
2051 (((uint8_t *)drv_fcxp->rspbuf_info) +
2052 (sizeof(struct bfad_buf_info) *
2053 drv_fcxp->num_rsp_sgles));
2054
2055 /* fcxp send */
2056 init_completion(&drv_fcxp->comp);
2057 rc = bfad_fcxp_bsg_send(job, drv_fcxp, bsg_fcpt);
2058 if (rc == BFA_STATUS_OK) {
2059 wait_for_completion(&drv_fcxp->comp);
2060 bsg_fcpt->status = drv_fcxp->req_status;
2061 } else {
2062 bsg_fcpt->status = rc;
2063 goto out_free_mem;
2064 }
2065
2066 /* fill the job->reply data */
2067 if (drv_fcxp->req_status == BFA_STATUS_OK) {
2068 job->reply_len = drv_fcxp->rsp_len;
2069 job->reply->reply_payload_rcv_len = drv_fcxp->rsp_len;
2070 job->reply->reply_data.ctels_reply.status = FC_CTELS_STATUS_OK;
2071 } else {
2072 job->reply->reply_payload_rcv_len =
2073 sizeof(struct fc_bsg_ctels_reply);
2074 job->reply_len = sizeof(uint32_t);
2075 job->reply->reply_data.ctels_reply.status =
2076 FC_CTELS_STATUS_REJECT;
2077 }
2078
2079 /* Copy the response data to the reply_payload sg list */
2080 sg_copy_from_buffer(job->reply_payload.sg_list,
2081 job->reply_payload.sg_cnt,
2082 (uint8_t *)rsp_buf_info->virt,
2083 job->reply_payload.payload_len);
2084
2085out_free_mem:
2086 bfad_fcxp_free_mem(bfad, drv_fcxp->rspbuf_info,
2087 drv_fcxp->num_rsp_sgles);
2088 bfad_fcxp_free_mem(bfad, drv_fcxp->reqbuf_info,
2089 drv_fcxp->num_req_sgles);
2090 kfree(req_kbuf);
2091 kfree(rsp_kbuf);
2092
2093	/* Copy the updated bsg_fcpt (including status) back to the user buffer */
2094 if (copy_to_user(bsg_data->payload, (void *) bsg_fcpt,
2095 bsg_data->payload_len))
2096 rc = -EIO;
2097
2098 kfree(bsg_fcpt);
2099 kfree(drv_fcxp);
2100out:
2101 job->reply->result = rc;
2102
2103 if (rc == BFA_STATUS_OK)
2104 job->job_done(job);
2105
2106 return rc;
2107}
2108
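/*
 * bfad_im_bsg_request - entry point for FC BSG requests; dispatch vendor
 * and ELS/CT pass-through message codes to their handlers.
 */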
2109int
2110bfad_im_bsg_request(struct fc_bsg_job *job)
2111{
2112	int rc = BFA_STATUS_OK;
2113
2114	/* Increment the bfa module refcnt while the bsg request is in service */
2115 bfad_im_bsg_get_kobject(job);
2116
2117 switch (job->request->msgcode) {
2118 case FC_BSG_HST_VENDOR:
2119 /* Process BSG HST Vendor requests */
2120 rc = bfad_im_bsg_vendor_request(job);
2121 break;
2122 case FC_BSG_HST_ELS_NOLOGIN:
2123 case FC_BSG_RPT_ELS:
2124 case FC_BSG_HST_CT:
2125 case FC_BSG_RPT_CT:
2126 /* Process BSG ELS/CT commands */
2127 rc = bfad_im_bsg_els_ct_request(job);
2128 break;
2129 default:
2130 job->reply->result = rc = -EINVAL;
2131 job->reply->reply_payload_rcv_len = 0;
2132 break;
2133 }
2134
2135 /* Decrement the bfa module refcnt - on completion of bsg request */
2136 bfad_im_bsg_put_kobject(job);
2137
2138 return rc;
2139}
2140
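/* bfad_im_bsg_timeout - BSG job timeout handler */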
2141int
2142bfad_im_bsg_timeout(struct fc_bsg_job *job)
2143{
2144 /* Don't complete the BSG job request - return -EAGAIN
2145	 * to reset the bsg job timeout: for ELS/CT pass-through we
2146	 * already have a timer to track the request.
2147 */
2148 return -EAGAIN;
2149}