/*
 * Linux network driver for Brocade Converged Network Adapter.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License (GPL) Version 2 as
 * published by the Free Software Foundation
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */
/*
 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
 * All rights reserved
 * www.brocade.com
 */

#include "bfa_ioc.h"
#include "cna.h"
#include "bfi.h"
#include "bfi_ctreg.h"
#include "bfa_defs.h"

/**
 * IOC local definitions
 */

#define bfa_ioc_timer_start(__ioc) \
	mod_timer(&(__ioc)->ioc_timer, jiffies +	\
		msecs_to_jiffies(BFA_IOC_TOV))
#define bfa_ioc_timer_stop(__ioc)	del_timer(&(__ioc)->ioc_timer)

#define bfa_ioc_recovery_timer_start(__ioc) \
	mod_timer(&(__ioc)->ioc_timer, jiffies +	\
		msecs_to_jiffies(BFA_IOC_TOV_RECOVER))

#define bfa_sem_timer_start(__ioc) \
	mod_timer(&(__ioc)->sem_timer, jiffies +	\
		msecs_to_jiffies(BFA_IOC_HWSEM_TOV))
#define bfa_sem_timer_stop(__ioc)	del_timer(&(__ioc)->sem_timer)

#define bfa_hb_timer_start(__ioc) \
	mod_timer(&(__ioc)->hb_timer, jiffies +	\
		msecs_to_jiffies(BFA_IOC_HB_TOV))
#define bfa_hb_timer_stop(__ioc)	del_timer(&(__ioc)->hb_timer)

/**
 * Asic specific macros : see bfa_hw_cb.c and bfa_hw_ct.c for details.
 */

#define bfa_ioc_firmware_lock(__ioc) \
	((__ioc)->ioc_hwif->ioc_firmware_lock(__ioc))
#define bfa_ioc_firmware_unlock(__ioc) \
	((__ioc)->ioc_hwif->ioc_firmware_unlock(__ioc))
#define bfa_ioc_reg_init(__ioc)		((__ioc)->ioc_hwif->ioc_reg_init(__ioc))
#define bfa_ioc_map_port(__ioc)		((__ioc)->ioc_hwif->ioc_map_port(__ioc))
#define bfa_ioc_notify_hbfail(__ioc) \
	((__ioc)->ioc_hwif->ioc_notify_hbfail(__ioc))

#define bfa_ioc_mbox_cmd_pending(__ioc) \
	(!list_empty(&((__ioc)->mbox_mod.cmd_q)) || \
	readl((__ioc)->ioc_regs.hfn_mbox_cmd))

static bool bfa_nw_auto_recover = true;

/*
 * forward declarations
 */
static void bfa_ioc_hw_sem_get(struct bfa_ioc *ioc);
static void bfa_ioc_hw_sem_get_cancel(struct bfa_ioc *ioc);
static void bfa_ioc_hwinit(struct bfa_ioc *ioc, bool force);
static void bfa_ioc_send_enable(struct bfa_ioc *ioc);
static void bfa_ioc_send_disable(struct bfa_ioc *ioc);
static void bfa_ioc_send_getattr(struct bfa_ioc *ioc);
static void bfa_ioc_hb_monitor(struct bfa_ioc *ioc);
static void bfa_ioc_hb_stop(struct bfa_ioc *ioc);
static void bfa_ioc_reset(struct bfa_ioc *ioc, bool force);
static void bfa_ioc_mbox_poll(struct bfa_ioc *ioc);
static void bfa_ioc_mbox_hbfail(struct bfa_ioc *ioc);
static void bfa_ioc_recover(struct bfa_ioc *ioc);
static void bfa_ioc_check_attr_wwns(struct bfa_ioc *ioc);
static void bfa_ioc_disable_comp(struct bfa_ioc *ioc);
static void bfa_ioc_lpu_stop(struct bfa_ioc *ioc);
static void bfa_ioc_boot(struct bfa_ioc *ioc, u32 boot_type,
			u32 boot_param);
static u32 bfa_ioc_smem_pgnum(struct bfa_ioc *ioc, u32 fmaddr);
static u32 bfa_ioc_smem_pgoff(struct bfa_ioc *ioc, u32 fmaddr);
static void bfa_ioc_get_adapter_serial_num(struct bfa_ioc *ioc,
			char *serial_num);
static void bfa_ioc_get_adapter_fw_ver(struct bfa_ioc *ioc,
			char *fw_ver);
static void bfa_ioc_get_pci_chip_rev(struct bfa_ioc *ioc,
			char *chip_rev);
static void bfa_ioc_get_adapter_optrom_ver(struct bfa_ioc *ioc,
			char *optrom_ver);
static void bfa_ioc_get_adapter_manufacturer(struct bfa_ioc *ioc,
			char *manufacturer);
static void bfa_ioc_get_adapter_model(struct bfa_ioc *ioc, char *model);
static u64 bfa_ioc_get_pwwn(struct bfa_ioc *ioc);

/**
 * IOC state machine events
 */
enum ioc_event {
	IOC_E_ENABLE		= 1,	/*!< IOC enable request */
	IOC_E_DISABLE		= 2,	/*!< IOC disable request */
	IOC_E_TIMEOUT		= 3,	/*!< f/w response timeout */
	IOC_E_FWREADY		= 4,	/*!< f/w initialization done */
	IOC_E_FWRSP_GETATTR	= 5,	/*!< IOC get attribute response */
	IOC_E_FWRSP_ENABLE	= 6,	/*!< enable f/w response */
	IOC_E_FWRSP_DISABLE	= 7,	/*!< disable f/w response */
	IOC_E_HBFAIL		= 8,	/*!< heartbeat failure */
	IOC_E_HWERROR		= 9,	/*!< hardware error interrupt */
	IOC_E_SEMLOCKED		= 10,	/*!< h/w semaphore is locked */
	IOC_E_DETACH		= 11,	/*!< driver detach cleanup */
};

bfa_fsm_state_decl(bfa_ioc, reset, struct bfa_ioc, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, fwcheck, struct bfa_ioc, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, mismatch, struct bfa_ioc, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, semwait, struct bfa_ioc, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, hwinit, struct bfa_ioc, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, enabling, struct bfa_ioc, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, getattr, struct bfa_ioc, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, op, struct bfa_ioc, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, initfail, struct bfa_ioc, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, hbfail, struct bfa_ioc, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, disabling, struct bfa_ioc, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, disabled, struct bfa_ioc, enum ioc_event);

static struct bfa_sm_table ioc_sm_table[] = {
	{BFA_SM(bfa_ioc_sm_reset), BFA_IOC_RESET},
	{BFA_SM(bfa_ioc_sm_fwcheck), BFA_IOC_FWMISMATCH},
	{BFA_SM(bfa_ioc_sm_mismatch), BFA_IOC_FWMISMATCH},
	{BFA_SM(bfa_ioc_sm_semwait), BFA_IOC_SEMWAIT},
	{BFA_SM(bfa_ioc_sm_hwinit), BFA_IOC_HWINIT},
	{BFA_SM(bfa_ioc_sm_enabling), BFA_IOC_HWINIT},
	{BFA_SM(bfa_ioc_sm_getattr), BFA_IOC_GETATTR},
	{BFA_SM(bfa_ioc_sm_op), BFA_IOC_OPERATIONAL},
	{BFA_SM(bfa_ioc_sm_initfail), BFA_IOC_INITFAIL},
	{BFA_SM(bfa_ioc_sm_hbfail), BFA_IOC_HBFAIL},
	{BFA_SM(bfa_ioc_sm_disabling), BFA_IOC_DISABLING},
	{BFA_SM(bfa_ioc_sm_disabled), BFA_IOC_DISABLED},
};

/**
 * Reset entry actions -- initialize state machine
 */
static void
bfa_ioc_sm_reset_entry(struct bfa_ioc *ioc)
{
	ioc->retry_count = 0;
	ioc->auto_recover = bfa_nw_auto_recover;
}

/**
 * Beginning state. IOC is in reset state.
 */
static void
bfa_ioc_sm_reset(struct bfa_ioc *ioc, enum ioc_event event)
{
	switch (event) {
	case IOC_E_ENABLE:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_fwcheck);
		break;

	case IOC_E_DISABLE:
		bfa_ioc_disable_comp(ioc);
		break;

	case IOC_E_DETACH:
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}

/**
 * Semaphore should be acquired for version check.
 */
static void
bfa_ioc_sm_fwcheck_entry(struct bfa_ioc *ioc)
{
	bfa_ioc_hw_sem_get(ioc);
}

/**
 * Awaiting h/w semaphore to continue with version check.
 */
static void
bfa_ioc_sm_fwcheck(struct bfa_ioc *ioc, enum ioc_event event)
{
	switch (event) {
	case IOC_E_SEMLOCKED:
		if (bfa_ioc_firmware_lock(ioc)) {
			ioc->retry_count = 0;
			bfa_fsm_set_state(ioc, bfa_ioc_sm_hwinit);
		} else {
			bfa_nw_ioc_hw_sem_release(ioc);
			bfa_fsm_set_state(ioc, bfa_ioc_sm_mismatch);
		}
		break;

	case IOC_E_DISABLE:
		bfa_ioc_disable_comp(ioc);
		/* fall through */

	case IOC_E_DETACH:
		bfa_ioc_hw_sem_get_cancel(ioc);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_reset);
		break;

	case IOC_E_FWREADY:
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}

/**
 * Notify enable completion callback and generate mismatch AEN.
 */
static void
bfa_ioc_sm_mismatch_entry(struct bfa_ioc *ioc)
{
	/**
	 * Provide enable completion callback and AEN notification only once.
	 */
	if (ioc->retry_count == 0)
		ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
	ioc->retry_count++;
	bfa_ioc_timer_start(ioc);
}

/**
 * Awaiting firmware version match.
 */
static void
bfa_ioc_sm_mismatch(struct bfa_ioc *ioc, enum ioc_event event)
{
	switch (event) {
	case IOC_E_TIMEOUT:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_fwcheck);
		break;

	case IOC_E_DISABLE:
		bfa_ioc_disable_comp(ioc);
		/* fall through */

	case IOC_E_DETACH:
		bfa_ioc_timer_stop(ioc);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_reset);
		break;

	case IOC_E_FWREADY:
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}

/**
 * Request for semaphore.
 */
static void
bfa_ioc_sm_semwait_entry(struct bfa_ioc *ioc)
{
	bfa_ioc_hw_sem_get(ioc);
}

/**
 * Awaiting semaphore for h/w initialization.
 */
static void
bfa_ioc_sm_semwait(struct bfa_ioc *ioc, enum ioc_event event)
{
	switch (event) {
	case IOC_E_SEMLOCKED:
		ioc->retry_count = 0;
		bfa_fsm_set_state(ioc, bfa_ioc_sm_hwinit);
		break;

	case IOC_E_DISABLE:
		bfa_ioc_hw_sem_get_cancel(ioc);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}

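/**
 * @brief
 * Entry actions for h/w initialization: arm the IOC timer and
 * reset/initialize the hardware.
 */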
static void
bfa_ioc_sm_hwinit_entry(struct bfa_ioc *ioc)
{
	bfa_ioc_timer_start(ioc);
	bfa_ioc_reset(ioc, false);
}

/**
 * @brief
 * Hardware is being initialized. Interrupts are enabled.
 * Holding hardware semaphore lock.
 */
static void
bfa_ioc_sm_hwinit(struct bfa_ioc *ioc, enum ioc_event event)
{
	switch (event) {
	case IOC_E_FWREADY:
		bfa_ioc_timer_stop(ioc);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_enabling);
		break;

	case IOC_E_HWERROR:
		bfa_ioc_timer_stop(ioc);
		/* fall through */

	case IOC_E_TIMEOUT:
		ioc->retry_count++;
		if (ioc->retry_count < BFA_IOC_HWINIT_MAX) {
			bfa_ioc_timer_start(ioc);
			bfa_ioc_reset(ioc, true);
			break;
		}

		bfa_nw_ioc_hw_sem_release(ioc);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_initfail);
		break;

	case IOC_E_DISABLE:
		bfa_nw_ioc_hw_sem_release(ioc);
		bfa_ioc_timer_stop(ioc);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}

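/**
 * @brief
 * Entry actions for the enabling state: arm the IOC timer and send the
 * enable request to firmware.
 */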
static void
bfa_ioc_sm_enabling_entry(struct bfa_ioc *ioc)
{
	bfa_ioc_timer_start(ioc);
	bfa_ioc_send_enable(ioc);
}

/**
 * Host IOC function is being enabled, awaiting response from firmware.
 * Semaphore is acquired.
 */
static void
bfa_ioc_sm_enabling(struct bfa_ioc *ioc, enum ioc_event event)
{
	switch (event) {
	case IOC_E_FWRSP_ENABLE:
		bfa_ioc_timer_stop(ioc);
		bfa_nw_ioc_hw_sem_release(ioc);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_getattr);
		break;

	case IOC_E_HWERROR:
		bfa_ioc_timer_stop(ioc);
		/* fall through */

	case IOC_E_TIMEOUT:
		ioc->retry_count++;
		if (ioc->retry_count < BFA_IOC_HWINIT_MAX) {
			writel(BFI_IOC_UNINIT,
				ioc->ioc_regs.ioc_fwstate);
			bfa_fsm_set_state(ioc, bfa_ioc_sm_hwinit);
			break;
		}

		bfa_nw_ioc_hw_sem_release(ioc);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_initfail);
		break;

	case IOC_E_DISABLE:
		bfa_ioc_timer_stop(ioc);
		bfa_nw_ioc_hw_sem_release(ioc);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled);
		break;

	case IOC_E_FWREADY:
		bfa_ioc_send_enable(ioc);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}

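/**
 * @brief
 * Entry actions for the getattr state: arm the IOC timer and request the
 * IOC attributes from firmware.
 */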
static void
bfa_ioc_sm_getattr_entry(struct bfa_ioc *ioc)
{
	bfa_ioc_timer_start(ioc);
	bfa_ioc_send_getattr(ioc);
}

/**
 * @brief
 * IOC configuration in progress. Timer is active.
 */
static void
bfa_ioc_sm_getattr(struct bfa_ioc *ioc, enum ioc_event event)
{
	switch (event) {
	case IOC_E_FWRSP_GETATTR:
		bfa_ioc_timer_stop(ioc);
		bfa_ioc_check_attr_wwns(ioc);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_op);
		break;

	case IOC_E_HWERROR:
		bfa_ioc_timer_stop(ioc);
		/* fall through */

	case IOC_E_TIMEOUT:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_initfail);
		break;

	case IOC_E_DISABLE:
		bfa_ioc_timer_stop(ioc);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}

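/**
 * @brief
 * IOC is operational: notify successful enable completion to the driver
 * and start heartbeat monitoring of the firmware.
 */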
static void
bfa_ioc_sm_op_entry(struct bfa_ioc *ioc)
{
	ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_OK);
	bfa_ioc_hb_monitor(ioc);
}

static void
bfa_ioc_sm_op(struct bfa_ioc *ioc, enum ioc_event event)
{
	switch (event) {
	case IOC_E_ENABLE:
		break;

	case IOC_E_DISABLE:
		bfa_ioc_hb_stop(ioc);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
		break;

	case IOC_E_HWERROR:
	case IOC_E_FWREADY:
		/**
		 * Hard error or IOC recovery by other function.
		 * Treat it same as heartbeat failure.
		 */
		bfa_ioc_hb_stop(ioc);
		/* !!! fall through !!! */

	case IOC_E_HBFAIL:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_hbfail);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}

static void
bfa_ioc_sm_disabling_entry(struct bfa_ioc *ioc)
{
	bfa_ioc_timer_start(ioc);
	bfa_ioc_send_disable(ioc);
}

/**
 * IOC is being disabled
 */
static void
bfa_ioc_sm_disabling(struct bfa_ioc *ioc, enum ioc_event event)
{
	switch (event) {
	case IOC_E_FWRSP_DISABLE:
		bfa_ioc_timer_stop(ioc);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled);
		break;

	case IOC_E_HWERROR:
		bfa_ioc_timer_stop(ioc);
		/*
		 * !!! fall through !!!
		 */

	case IOC_E_TIMEOUT:
		writel(BFI_IOC_FAIL, ioc->ioc_regs.ioc_fwstate);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}

/**
 * IOC disable completion entry.
 */
static void
bfa_ioc_sm_disabled_entry(struct bfa_ioc *ioc)
{
	bfa_ioc_disable_comp(ioc);
}

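/**
 * IOC is disabled -- awaiting a re-enable request or driver detach.
 */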
static void
bfa_ioc_sm_disabled(struct bfa_ioc *ioc, enum ioc_event event)
{
	switch (event) {
	case IOC_E_ENABLE:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_semwait);
		break;

	case IOC_E_DISABLE:
		ioc->cbfn->disable_cbfn(ioc->bfa);
		break;

	case IOC_E_FWREADY:
		break;

	case IOC_E_DETACH:
		bfa_ioc_firmware_unlock(ioc);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_reset);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}

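/**
 * Entry actions after initialization failure: report the enable failure
 * to the driver and arm the retry timer.
 */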
static void
bfa_ioc_sm_initfail_entry(struct bfa_ioc *ioc)
{
	ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
	bfa_ioc_timer_start(ioc);
}

/**
 * @brief
 * Hardware initialization failed.
 */
static void
bfa_ioc_sm_initfail(struct bfa_ioc *ioc, enum ioc_event event)
{
	switch (event) {
	case IOC_E_DISABLE:
		bfa_ioc_timer_stop(ioc);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled);
		break;

	case IOC_E_DETACH:
		bfa_ioc_timer_stop(ioc);
		bfa_ioc_firmware_unlock(ioc);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_reset);
		break;

	case IOC_E_TIMEOUT:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_semwait);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}

static void
bfa_ioc_sm_hbfail_entry(struct bfa_ioc *ioc)
{
	struct list_head *qe;
	struct bfa_ioc_hbfail_notify *notify;

	/**
	 * Mark IOC as failed in hardware and stop firmware.
	 */
	bfa_ioc_lpu_stop(ioc);
	writel(BFI_IOC_FAIL, ioc->ioc_regs.ioc_fwstate);

	/**
	 * Notify other functions on HB failure.
	 */
	bfa_ioc_notify_hbfail(ioc);

	/**
	 * Notify driver and common modules registered for notification.
	 */
	ioc->cbfn->hbfail_cbfn(ioc->bfa);
	list_for_each(qe, &ioc->hb_notify_q) {
		notify = (struct bfa_ioc_hbfail_notify *) qe;
		notify->cbfn(notify->cbarg);
	}

	/**
	 * Flush any queued up mailbox requests.
	 */
	bfa_ioc_mbox_hbfail(ioc);

	/**
	 * Trigger auto-recovery after a delay.
	 */
	if (ioc->auto_recover)
		mod_timer(&ioc->ioc_timer, jiffies +
			msecs_to_jiffies(BFA_IOC_TOV_RECOVER));
}

/**
 * @brief
 * IOC heartbeat failure.
 */
static void
bfa_ioc_sm_hbfail(struct bfa_ioc *ioc, enum ioc_event event)
{
	switch (event) {

	case IOC_E_ENABLE:
		ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
		break;

	case IOC_E_DISABLE:
		if (ioc->auto_recover)
			bfa_ioc_timer_stop(ioc);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled);
		break;

	case IOC_E_TIMEOUT:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_semwait);
		break;

	case IOC_E_FWREADY:
		/**
		 * Recovery is already initiated by other function.
		 */
		break;

	case IOC_E_HWERROR:
		/*
		 * HB failure notification, ignore.
		 */
		break;
	default:
		bfa_sm_fault(ioc, event);
	}
}

/**
 * BFA IOC private functions
 */

static void
bfa_ioc_disable_comp(struct bfa_ioc *ioc)
{
	struct list_head *qe;
	struct bfa_ioc_hbfail_notify *notify;

	ioc->cbfn->disable_cbfn(ioc->bfa);

	/**
	 * Notify common modules registered for notification.
	 */
	list_for_each(qe, &ioc->hb_notify_q) {
		notify = (struct bfa_ioc_hbfail_notify *) qe;
		notify->cbfn(notify->cbarg);
	}
}

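/**
 * H/w semaphore timer callback -- retry acquiring the h/w semaphore.
 */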
void
bfa_nw_ioc_sem_timeout(void *ioc_arg)
{
	struct bfa_ioc *ioc = (struct bfa_ioc *) ioc_arg;

	bfa_ioc_hw_sem_get(ioc);
}

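/**
 * Acquire a h/w semaphore register by polling it with a short delay
 * between reads. Returns true if the semaphore was acquired.
 */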
bool
bfa_nw_ioc_sem_get(void __iomem *sem_reg)
{
	u32 r32;
	int cnt = 0;
#define BFA_SEM_SPINCNT	3000

	r32 = readl(sem_reg);

	while (r32 && (cnt < BFA_SEM_SPINCNT)) {
		cnt++;
		udelay(2);
		r32 = readl(sem_reg);
	}

	if (r32 == 0)
		return true;

	BUG_ON(!(cnt < BFA_SEM_SPINCNT));
	return false;
}

void
bfa_nw_ioc_sem_release(void __iomem *sem_reg)
{
	writel(1, sem_reg);
}

static void
bfa_ioc_hw_sem_get(struct bfa_ioc *ioc)
{
	u32 r32;

	/**
	 * First read to the semaphore register will return 0, subsequent reads
	 * will return 1. Semaphore is released by writing 1 to the register
	 */
	r32 = readl(ioc->ioc_regs.ioc_sem_reg);
	if (r32 == 0) {
		bfa_fsm_send_event(ioc, IOC_E_SEMLOCKED);
		return;
	}

	mod_timer(&ioc->sem_timer, jiffies +
		msecs_to_jiffies(BFA_IOC_HWSEM_TOV));
}

void
bfa_nw_ioc_hw_sem_release(struct bfa_ioc *ioc)
{
	writel(1, ioc->ioc_regs.ioc_sem_reg);
}

static void
bfa_ioc_hw_sem_get_cancel(struct bfa_ioc *ioc)
{
	del_timer(&ioc->sem_timer);
}

/**
 * @brief
 * Initialize LPU local memory (aka secondary memory / SRAM)
 */
static void
bfa_ioc_lmem_init(struct bfa_ioc *ioc)
{
	u32 pss_ctl;
	int i;
#define PSS_LMEM_INIT_TIME 10000

	pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg);
	pss_ctl &= ~__PSS_LMEM_RESET;
	pss_ctl |= __PSS_LMEM_INIT_EN;

	/*
	 * i2c workaround 12.5khz clock
	 */
	pss_ctl |= __PSS_I2C_CLK_DIV(3UL);
	writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);

	/**
	 * wait for memory initialization to be complete
	 */
	i = 0;
	do {
		pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg);
		i++;
	} while (!(pss_ctl & __PSS_LMEM_INIT_DONE) && (i < PSS_LMEM_INIT_TIME));

	/**
	 * If memory initialization is not successful, IOC timeout will catch
	 * such failures.
	 */
	BUG_ON(!(pss_ctl & __PSS_LMEM_INIT_DONE));

	pss_ctl &= ~(__PSS_LMEM_INIT_DONE | __PSS_LMEM_INIT_EN);
	writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);
}

static void
bfa_ioc_lpu_start(struct bfa_ioc *ioc)
{
	u32 pss_ctl;

	/**
	 * Take processor out of reset.
	 */
	pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg);
	pss_ctl &= ~__PSS_LPU0_RESET;

	writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);
}

static void
bfa_ioc_lpu_stop(struct bfa_ioc *ioc)
{
	u32 pss_ctl;

	/**
	 * Put processors in reset.
	 */
	pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg);
	pss_ctl |= (__PSS_LPU0_RESET | __PSS_LPU1_RESET);

	writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);
}

/**
 * Get driver and firmware versions.
 */
void
bfa_nw_ioc_fwver_get(struct bfa_ioc *ioc, struct bfi_ioc_image_hdr *fwhdr)
{
	u32 pgnum, pgoff;
	u32 loff = 0;
	int i;
	u32 *fwsig = (u32 *) fwhdr;

	pgnum = bfa_ioc_smem_pgnum(ioc, loff);
	pgoff = bfa_ioc_smem_pgoff(ioc, loff);
	writel(pgnum, ioc->ioc_regs.host_page_num_fn);

	for (i = 0; i < (sizeof(struct bfi_ioc_image_hdr) / sizeof(u32));
	     i++) {
		fwsig[i] =
			swab32(readl((loff) + (ioc->ioc_regs.smem_page_start)));
		loff += sizeof(u32);
	}
}

/**
 * Returns TRUE if same.
 */
bool
bfa_nw_ioc_fwver_cmp(struct bfa_ioc *ioc, struct bfi_ioc_image_hdr *fwhdr)
{
	struct bfi_ioc_image_hdr *drv_fwhdr;
	int i;

	drv_fwhdr = (struct bfi_ioc_image_hdr *)
		bfa_cb_image_get_chunk(BFA_IOC_FWIMG_TYPE(ioc), 0);

	for (i = 0; i < BFI_IOC_MD5SUM_SZ; i++) {
		if (fwhdr->md5sum[i] != drv_fwhdr->md5sum[i])
			return false;
	}

	return true;
}

/**
 * Return true if current running version is valid. Firmware signature and
 * execution context (driver/bios) must match.
 */
static bool
bfa_ioc_fwver_valid(struct bfa_ioc *ioc)
{
	struct bfi_ioc_image_hdr fwhdr, *drv_fwhdr;

	bfa_nw_ioc_fwver_get(ioc, &fwhdr);
	drv_fwhdr = (struct bfi_ioc_image_hdr *)
		bfa_cb_image_get_chunk(BFA_IOC_FWIMG_TYPE(ioc), 0);

	if (fwhdr.signature != drv_fwhdr->signature)
		return false;

	if (fwhdr.exec != drv_fwhdr->exec)
		return false;

	return bfa_nw_ioc_fwver_cmp(ioc, &fwhdr);
}

/**
 * Conditionally flush any pending message from firmware at start.
 */
static void
bfa_ioc_msgflush(struct bfa_ioc *ioc)
{
	u32 r32;

	r32 = readl(ioc->ioc_regs.lpu_mbox_cmd);
	if (r32)
		writel(1, ioc->ioc_regs.lpu_mbox_cmd);
}

/**
 * @img ioc_init_logic.jpg
 */
static void
bfa_ioc_hwinit(struct bfa_ioc *ioc, bool force)
{
	enum bfi_ioc_state ioc_fwstate;
	bool fwvalid;

	ioc_fwstate = readl(ioc->ioc_regs.ioc_fwstate);

	if (force)
		ioc_fwstate = BFI_IOC_UNINIT;

	/**
	 * check if firmware is valid
	 */
	fwvalid = (ioc_fwstate == BFI_IOC_UNINIT) ?
		false : bfa_ioc_fwver_valid(ioc);

	if (!fwvalid) {
		bfa_ioc_boot(ioc, BFI_BOOT_TYPE_NORMAL, ioc->pcidev.device_id);
		return;
	}

	/**
	 * If hardware initialization is in progress (initialized by other IOC),
	 * just wait for an initialization completion interrupt.
	 */
	if (ioc_fwstate == BFI_IOC_INITING) {
		ioc->cbfn->reset_cbfn(ioc->bfa);
		return;
	}

	/**
	 * If IOC function is disabled and firmware version is same,
	 * just re-enable IOC.
	 */
	if (ioc_fwstate == BFI_IOC_DISABLED || ioc_fwstate == BFI_IOC_OP) {
		/**
		 * When using MSI-X any pending firmware ready event should
		 * be flushed. Otherwise MSI-X interrupts are not delivered.
		 */
		bfa_ioc_msgflush(ioc);
		ioc->cbfn->reset_cbfn(ioc->bfa);
		bfa_fsm_send_event(ioc, IOC_E_FWREADY);
		return;
	}

	/**
	 * Initialize the h/w for any other states.
	 */
	bfa_ioc_boot(ioc, BFI_BOOT_TYPE_NORMAL, ioc->pcidev.device_id);
}

void
bfa_nw_ioc_timeout(void *ioc_arg)
{
	struct bfa_ioc *ioc = (struct bfa_ioc *) ioc_arg;

	bfa_fsm_send_event(ioc, IOC_E_TIMEOUT);
}

static void
bfa_ioc_mbox_send(struct bfa_ioc *ioc, void *ioc_msg, int len)
{
	u32 *msgp = (u32 *) ioc_msg;
	u32 i;

	BUG_ON(!(len <= BFI_IOC_MSGLEN_MAX));

	/*
	 * first write msg to mailbox registers
	 */
	for (i = 0; i < len / sizeof(u32); i++)
		writel(cpu_to_le32(msgp[i]),
			ioc->ioc_regs.hfn_mbox + i * sizeof(u32));

	for (; i < BFI_IOC_MSGLEN_MAX / sizeof(u32); i++)
		writel(0, ioc->ioc_regs.hfn_mbox + i * sizeof(u32));

	/*
	 * write 1 to mailbox CMD to trigger LPU event
	 */
	writel(1, ioc->ioc_regs.hfn_mbox_cmd);
	(void) readl(ioc->ioc_regs.hfn_mbox_cmd);
}

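/**
 * Send an IOC enable request (with a timestamp) to firmware over the mailbox.
 */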
static void
bfa_ioc_send_enable(struct bfa_ioc *ioc)
{
	struct bfi_ioc_ctrl_req enable_req;
	struct timeval tv;

	bfi_h2i_set(enable_req.mh, BFI_MC_IOC, BFI_IOC_H2I_ENABLE_REQ,
		    bfa_ioc_portid(ioc));
	enable_req.ioc_class = ioc->ioc_mc;
	do_gettimeofday(&tv);
	enable_req.tv_sec = ntohl(tv.tv_sec);
	bfa_ioc_mbox_send(ioc, &enable_req, sizeof(struct bfi_ioc_ctrl_req));
}

static void
bfa_ioc_send_disable(struct bfa_ioc *ioc)
{
	struct bfi_ioc_ctrl_req disable_req;

	bfi_h2i_set(disable_req.mh, BFI_MC_IOC, BFI_IOC_H2I_DISABLE_REQ,
		    bfa_ioc_portid(ioc));
	bfa_ioc_mbox_send(ioc, &disable_req, sizeof(struct bfi_ioc_ctrl_req));
}

static void
bfa_ioc_send_getattr(struct bfa_ioc *ioc)
{
	struct bfi_ioc_getattr_req attr_req;

	bfi_h2i_set(attr_req.mh, BFI_MC_IOC, BFI_IOC_H2I_GETATTR_REQ,
		    bfa_ioc_portid(ioc));
	bfa_dma_be_addr_set(attr_req.attr_addr, ioc->attr_dma.pa);
	bfa_ioc_mbox_send(ioc, &attr_req, sizeof(attr_req));
}

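/**
 * Heartbeat timer callback. If the firmware heartbeat counter has not
 * advanced since the last check, start IOC recovery; otherwise poll the
 * mailbox and re-arm the timer.
 */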
void
bfa_nw_ioc_hb_check(void *cbarg)
{
	struct bfa_ioc *ioc = cbarg;
	u32 hb_count;

	hb_count = readl(ioc->ioc_regs.heartbeat);
	if (ioc->hb_count == hb_count) {
		pr_crit("Firmware heartbeat failure at %d", hb_count);
		bfa_ioc_recover(ioc);
		return;
	} else {
		ioc->hb_count = hb_count;
	}

	bfa_ioc_mbox_poll(ioc);
	mod_timer(&ioc->hb_timer, jiffies +
		msecs_to_jiffies(BFA_IOC_HB_TOV));
}

static void
bfa_ioc_hb_monitor(struct bfa_ioc *ioc)
{
	ioc->hb_count = readl(ioc->ioc_regs.heartbeat);
	mod_timer(&ioc->hb_timer, jiffies +
		msecs_to_jiffies(BFA_IOC_HB_TOV));
}

static void
bfa_ioc_hb_stop(struct bfa_ioc *ioc)
{
	del_timer(&ioc->hb_timer);
}

/**
 * @brief
 *	Initiate a full firmware download.
 */
static void
bfa_ioc_download_fw(struct bfa_ioc *ioc, u32 boot_type,
		    u32 boot_param)
{
	u32 *fwimg;
	u32 pgnum, pgoff;
	u32 loff = 0;
	u32 chunkno = 0;
	u32 i;

	/**
	 * Initialize LMEM first before code download
	 */
	bfa_ioc_lmem_init(ioc);

	fwimg = bfa_cb_image_get_chunk(BFA_IOC_FWIMG_TYPE(ioc), chunkno);

	pgnum = bfa_ioc_smem_pgnum(ioc, loff);
	pgoff = bfa_ioc_smem_pgoff(ioc, loff);

	writel(pgnum, ioc->ioc_regs.host_page_num_fn);

	for (i = 0; i < bfa_cb_image_get_size(BFA_IOC_FWIMG_TYPE(ioc)); i++) {
		if (BFA_IOC_FLASH_CHUNK_NO(i) != chunkno) {
			chunkno = BFA_IOC_FLASH_CHUNK_NO(i);
			fwimg = bfa_cb_image_get_chunk(BFA_IOC_FWIMG_TYPE(ioc),
					BFA_IOC_FLASH_CHUNK_ADDR(chunkno));
		}

		/**
		 * write smem
		 */
		writel((swab32(fwimg[BFA_IOC_FLASH_OFFSET_IN_CHUNK(i)])),
			((ioc->ioc_regs.smem_page_start) + (loff)));

		loff += sizeof(u32);

		/**
		 * handle page offset wrap around
		 */
		loff = PSS_SMEM_PGOFF(loff);
		if (loff == 0) {
			pgnum++;
			writel(pgnum,
				ioc->ioc_regs.host_page_num_fn);
		}
	}

	writel(bfa_ioc_smem_pgnum(ioc, 0),
		ioc->ioc_regs.host_page_num_fn);

	/*
	 * Set boot type and boot param at the end.
	 */
	writel((swab32(swab32(boot_type))), ((ioc->ioc_regs.smem_page_start)
			+ (BFI_BOOT_TYPE_OFF)));
	writel((swab32(swab32(boot_param))), ((ioc->ioc_regs.smem_page_start)
			+ (BFI_BOOT_PARAM_OFF)));
}

static void
bfa_ioc_reset(struct bfa_ioc *ioc, bool force)
{
	bfa_ioc_hwinit(ioc, force);
}

/**
 * @brief
 * Update BFA configuration from firmware configuration.
 */
static void
bfa_ioc_getattr_reply(struct bfa_ioc *ioc)
{
	struct bfi_ioc_attr *attr = ioc->attr;

	attr->adapter_prop = ntohl(attr->adapter_prop);
	attr->card_type = ntohl(attr->card_type);
	attr->maxfrsize = ntohs(attr->maxfrsize);

	bfa_fsm_send_event(ioc, IOC_E_FWRSP_GETATTR);
}

/**
 * Attach time initialization of mbox logic.
 */
static void
bfa_ioc_mbox_attach(struct bfa_ioc *ioc)
{
	struct bfa_ioc_mbox_mod *mod = &ioc->mbox_mod;
	int mc;

	INIT_LIST_HEAD(&mod->cmd_q);
	for (mc = 0; mc < BFI_MC_MAX; mc++) {
		mod->mbhdlr[mc].cbfn = NULL;
		mod->mbhdlr[mc].cbarg = ioc->bfa;
	}
}

/**
 * Mbox poll timer -- restarts any pending mailbox requests.
 */
static void
bfa_ioc_mbox_poll(struct bfa_ioc *ioc)
{
	struct bfa_ioc_mbox_mod *mod = &ioc->mbox_mod;
	struct bfa_mbox_cmd *cmd;
	u32 stat;

	/**
	 * If no command pending, do nothing
	 */
	if (list_empty(&mod->cmd_q))
		return;

	/**
	 * If previous command is not yet fetched by firmware, do nothing
	 */
	stat = readl(ioc->ioc_regs.hfn_mbox_cmd);
	if (stat)
		return;

	/**
	 * Enqueue command to firmware.
	 */
	bfa_q_deq(&mod->cmd_q, &cmd);
	bfa_ioc_mbox_send(ioc, cmd->msg, sizeof(cmd->msg));
}

/**
 * Cleanup any pending requests.
 */
static void
bfa_ioc_mbox_hbfail(struct bfa_ioc *ioc)
{
	struct bfa_ioc_mbox_mod *mod = &ioc->mbox_mod;
	struct bfa_mbox_cmd *cmd;

	while (!list_empty(&mod->cmd_q))
		bfa_q_deq(&mod->cmd_q, &cmd);
}

/**
 * IOC public
 */
static enum bfa_status
bfa_ioc_pll_init(struct bfa_ioc *ioc)
{
	/*
	 * Hold semaphore so that nobody can access the chip during init.
	 */
	bfa_nw_ioc_sem_get(ioc->ioc_regs.ioc_init_sem_reg);

	bfa_ioc_pll_init_asic(ioc);

	ioc->pllinit = true;
	/*
	 * release semaphore.
	 */
	bfa_nw_ioc_sem_release(ioc->ioc_regs.ioc_init_sem_reg);

	return BFA_STATUS_OK;
}

/**
 * Interface used by diag module to do firmware boot with memory test
 * as the entry vector.
 */
static void
bfa_ioc_boot(struct bfa_ioc *ioc, u32 boot_type, u32 boot_param)
{
	void __iomem *rb;

	bfa_ioc_stats(ioc, ioc_boots);

	if (bfa_ioc_pll_init(ioc) != BFA_STATUS_OK)
		return;

	/**
	 * Initialize IOC state of all functions on a chip reset.
	 */
	rb = ioc->pcidev.pci_bar_kva;
	if (boot_param == BFI_BOOT_TYPE_MEMTEST) {
		writel(BFI_IOC_MEMTEST, (rb + BFA_IOC0_STATE_REG));
		writel(BFI_IOC_MEMTEST, (rb + BFA_IOC1_STATE_REG));
	} else {
		writel(BFI_IOC_INITING, (rb + BFA_IOC0_STATE_REG));
		writel(BFI_IOC_INITING, (rb + BFA_IOC1_STATE_REG));
	}

	bfa_ioc_msgflush(ioc);
	bfa_ioc_download_fw(ioc, boot_type, boot_param);

	/**
	 * Enable interrupts just before starting LPU
	 */
	ioc->cbfn->reset_cbfn(ioc->bfa);
	bfa_ioc_lpu_start(ioc);
}

/**
 * Enable/disable IOC failure auto recovery.
 */
void
bfa_nw_ioc_auto_recover(bool auto_recover)
{
	bfa_nw_auto_recover = auto_recover;
}

static void
bfa_ioc_msgget(struct bfa_ioc *ioc, void *mbmsg)
{
	u32 *msgp = mbmsg;
	u32 r32;
	int i;

	/**
	 * read the MBOX msg
	 */
	for (i = 0; i < (sizeof(union bfi_ioc_i2h_msg_u) / sizeof(u32));
	     i++) {
		r32 = readl(ioc->ioc_regs.lpu_mbox +
			i * sizeof(u32));
		msgp[i] = htonl(r32);
	}

	/**
	 * turn off mailbox interrupt by clearing mailbox status
	 */
	writel(1, ioc->ioc_regs.lpu_mbox_cmd);
	readl(ioc->ioc_regs.lpu_mbox_cmd);
}

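/**
 * Handle firmware responses of the IOC message class by translating them
 * into the corresponding state machine events.
 */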
static void
bfa_ioc_isr(struct bfa_ioc *ioc, struct bfi_mbmsg *m)
{
	union bfi_ioc_i2h_msg_u *msg;

	msg = (union bfi_ioc_i2h_msg_u *) m;

	bfa_ioc_stats(ioc, ioc_isrs);

	switch (msg->mh.msg_id) {
	case BFI_IOC_I2H_HBEAT:
		break;

	case BFI_IOC_I2H_READY_EVENT:
		bfa_fsm_send_event(ioc, IOC_E_FWREADY);
		break;

	case BFI_IOC_I2H_ENABLE_REPLY:
		bfa_fsm_send_event(ioc, IOC_E_FWRSP_ENABLE);
		break;

	case BFI_IOC_I2H_DISABLE_REPLY:
		bfa_fsm_send_event(ioc, IOC_E_FWRSP_DISABLE);
		break;

	case BFI_IOC_I2H_GETATTR_REPLY:
		bfa_ioc_getattr_reply(ioc);
		break;

	default:
		BUG_ON(1);
	}
}

/**
 * IOC attach time initialization and setup.
 *
 * @param[in]	ioc	memory for IOC
 * @param[in]	bfa	driver instance structure
 */
void
bfa_nw_ioc_attach(struct bfa_ioc *ioc, void *bfa, struct bfa_ioc_cbfn *cbfn)
{
	ioc->bfa = bfa;
	ioc->cbfn = cbfn;
	ioc->fcmode = false;
	ioc->pllinit = false;
	ioc->dbg_fwsave_once = true;

	bfa_ioc_mbox_attach(ioc);
	INIT_LIST_HEAD(&ioc->hb_notify_q);

	bfa_fsm_set_state(ioc, bfa_ioc_sm_reset);
}

/**
 * Driver detach time IOC cleanup.
 */
void
bfa_nw_ioc_detach(struct bfa_ioc *ioc)
{
	bfa_fsm_send_event(ioc, IOC_E_DETACH);
}

/**
 * Setup IOC PCI properties.
 *
 * @param[in]	pcidev	PCI device information for this IOC
 */
void
bfa_nw_ioc_pci_init(struct bfa_ioc *ioc, struct bfa_pcidev *pcidev,
		 enum bfi_mclass mc)
{
	ioc->ioc_mc = mc;
	ioc->pcidev = *pcidev;
	ioc->ctdev = bfa_asic_id_ct(ioc->pcidev.device_id);
	ioc->cna = ioc->ctdev && !ioc->fcmode;

	bfa_nw_ioc_set_ct_hwif(ioc);

	bfa_ioc_map_port(ioc);
	bfa_ioc_reg_init(ioc);
}

/**
 * Initialize IOC dma memory
 *
 * @param[in]	dm_kva	kernel virtual address of IOC dma memory
 * @param[in]	dm_pa	physical address of IOC dma memory
 */
void
bfa_nw_ioc_mem_claim(struct bfa_ioc *ioc, u8 *dm_kva, u64 dm_pa)
{
	/**
	 * dma memory for firmware attribute
	 */
	ioc->attr_dma.kva = dm_kva;
	ioc->attr_dma.pa = dm_pa;
	ioc->attr = (struct bfi_ioc_attr *) dm_kva;
}

/**
 * Return size of dma memory required.
 */
u32
bfa_nw_ioc_meminfo(void)
{
	return roundup(sizeof(struct bfi_ioc_attr), BFA_DMA_ALIGN_SZ);
}

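/**
 * Enable the IOC -- kicks the state machine with an enable event.
 */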
void
bfa_nw_ioc_enable(struct bfa_ioc *ioc)
{
	bfa_ioc_stats(ioc, ioc_enables);
	ioc->dbg_fwsave_once = true;

	bfa_fsm_send_event(ioc, IOC_E_ENABLE);
}

void
bfa_nw_ioc_disable(struct bfa_ioc *ioc)
{
	bfa_ioc_stats(ioc, ioc_disables);
	bfa_fsm_send_event(ioc, IOC_E_DISABLE);
}

static u32
bfa_ioc_smem_pgnum(struct bfa_ioc *ioc, u32 fmaddr)
{
	return PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, fmaddr);
}

static u32
bfa_ioc_smem_pgoff(struct bfa_ioc *ioc, u32 fmaddr)
{
	return PSS_SMEM_PGOFF(fmaddr);
}

/**
 * Register mailbox message handler function, to be called by common modules
 */
void
bfa_nw_ioc_mbox_regisr(struct bfa_ioc *ioc, enum bfi_mclass mc,
		    bfa_ioc_mbox_mcfunc_t cbfn, void *cbarg)
{
	struct bfa_ioc_mbox_mod *mod = &ioc->mbox_mod;

	mod->mbhdlr[mc].cbfn = cbfn;
	mod->mbhdlr[mc].cbarg = cbarg;
}

/**
 * Queue a mailbox command request to firmware. Waits if mailbox is busy.
 * Responsibility of caller to serialize
 *
 * @param[in]	ioc	IOC instance
 * @param[in]	cmd	Mailbox command
 */
void
bfa_nw_ioc_mbox_queue(struct bfa_ioc *ioc, struct bfa_mbox_cmd *cmd)
{
	struct bfa_ioc_mbox_mod *mod = &ioc->mbox_mod;
	u32 stat;

	/**
	 * If a previous command is pending, queue new command
	 */
	if (!list_empty(&mod->cmd_q)) {
		list_add_tail(&cmd->qe, &mod->cmd_q);
		return;
	}

	/**
	 * If mailbox is busy, queue command for poll timer
	 */
	stat = readl(ioc->ioc_regs.hfn_mbox_cmd);
	if (stat) {
		list_add_tail(&cmd->qe, &mod->cmd_q);
		return;
	}

	/**
	 * mailbox is free -- queue command to firmware
	 */
	bfa_ioc_mbox_send(ioc, cmd->msg, sizeof(cmd->msg));
}

/**
 * Handle mailbox interrupts
 */
void
bfa_nw_ioc_mbox_isr(struct bfa_ioc *ioc)
{
	struct bfa_ioc_mbox_mod *mod = &ioc->mbox_mod;
	struct bfi_mbmsg m;
	int mc;

	bfa_ioc_msgget(ioc, &m);

	/**
	 * Treat IOC message class as special.
	 */
	mc = m.mh.msg_class;
	if (mc == BFI_MC_IOC) {
		bfa_ioc_isr(ioc, &m);
		return;
	}

	if ((mc >= BFI_MC_MAX) || (mod->mbhdlr[mc].cbfn == NULL))
		return;

	mod->mbhdlr[mc].cbfn(mod->mbhdlr[mc].cbarg, &m);
}

void
bfa_nw_ioc_error_isr(struct bfa_ioc *ioc)
{
	bfa_fsm_send_event(ioc, IOC_E_HWERROR);
}

/**
 * Add to IOC heartbeat failure notification queue. To be used by common
 * modules such as cee, port, diag.
 */
void
bfa_nw_ioc_hbfail_register(struct bfa_ioc *ioc,
			struct bfa_ioc_hbfail_notify *notify)
{
	list_add_tail(&notify->qe, &ioc->hb_notify_q);
}

#define BFA_MFG_NAME "Brocade"
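/**
 * Fill in adapter attributes (serial number, versions, model, WWN/MAC,
 * PCIe properties) from the firmware-reported IOC attributes.
 */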
static void
bfa_ioc_get_adapter_attr(struct bfa_ioc *ioc,
			struct bfa_adapter_attr *ad_attr)
{
	struct bfi_ioc_attr *ioc_attr;

	ioc_attr = ioc->attr;

	bfa_ioc_get_adapter_serial_num(ioc, ad_attr->serial_num);
	bfa_ioc_get_adapter_fw_ver(ioc, ad_attr->fw_ver);
	bfa_ioc_get_adapter_optrom_ver(ioc, ad_attr->optrom_ver);
	bfa_ioc_get_adapter_manufacturer(ioc, ad_attr->manufacturer);
	memcpy(&ad_attr->vpd, &ioc_attr->vpd,
		sizeof(struct bfa_mfg_vpd));

	ad_attr->nports = bfa_ioc_get_nports(ioc);
	ad_attr->max_speed = bfa_ioc_speed_sup(ioc);

	bfa_ioc_get_adapter_model(ioc, ad_attr->model);
	/* For now, model descr uses same model string */
	bfa_ioc_get_adapter_model(ioc, ad_attr->model_descr);

	ad_attr->card_type = ioc_attr->card_type;
	ad_attr->is_mezz = bfa_mfg_is_mezz(ioc_attr->card_type);

	if (BFI_ADAPTER_IS_SPECIAL(ioc_attr->adapter_prop))
		ad_attr->prototype = 1;
	else
		ad_attr->prototype = 0;

	ad_attr->pwwn = bfa_ioc_get_pwwn(ioc);
	ad_attr->mac = bfa_nw_ioc_get_mac(ioc);

	ad_attr->pcie_gen = ioc_attr->pcie_gen;
	ad_attr->pcie_lanes = ioc_attr->pcie_lanes;
	ad_attr->pcie_lanes_orig = ioc_attr->pcie_lanes_orig;
	ad_attr->asic_rev = ioc_attr->asic_rev;

	bfa_ioc_get_pci_chip_rev(ioc, ad_attr->hw_ver);

	ad_attr->cna_capable = ioc->cna;
	ad_attr->trunk_capable = (ad_attr->nports > 1) && !ioc->cna;
}

static enum bfa_ioc_type
bfa_ioc_get_type(struct bfa_ioc *ioc)
{
	if (!ioc->ctdev || ioc->fcmode)
		return BFA_IOC_TYPE_FC;
	else if (ioc->ioc_mc == BFI_MC_IOCFC)
		return BFA_IOC_TYPE_FCoE;
	else if (ioc->ioc_mc == BFI_MC_LL)
		return BFA_IOC_TYPE_LL;
	else {
		BUG_ON(!(ioc->ioc_mc == BFI_MC_LL));
		return BFA_IOC_TYPE_LL;
	}
}

static void
bfa_ioc_get_adapter_serial_num(struct bfa_ioc *ioc, char *serial_num)
{
	memset(serial_num, 0, BFA_ADAPTER_SERIAL_NUM_LEN);
	memcpy(serial_num,
			(void *)ioc->attr->brcd_serialnum,
			BFA_ADAPTER_SERIAL_NUM_LEN);
}

static void
bfa_ioc_get_adapter_fw_ver(struct bfa_ioc *ioc, char *fw_ver)
{
	memset(fw_ver, 0, BFA_VERSION_LEN);
	memcpy(fw_ver, ioc->attr->fw_version, BFA_VERSION_LEN);
}

static void
bfa_ioc_get_pci_chip_rev(struct bfa_ioc *ioc, char *chip_rev)
{
	BUG_ON(!(chip_rev));

	memset(chip_rev, 0, BFA_IOC_CHIP_REV_LEN);

	chip_rev[0] = 'R';
	chip_rev[1] = 'e';
	chip_rev[2] = 'v';
	chip_rev[3] = '-';
	chip_rev[4] = ioc->attr->asic_rev;
	chip_rev[5] = '\0';
}

static void
bfa_ioc_get_adapter_optrom_ver(struct bfa_ioc *ioc, char *optrom_ver)
{
	memset(optrom_ver, 0, BFA_VERSION_LEN);
	memcpy(optrom_ver, ioc->attr->optrom_version,
		BFA_VERSION_LEN);
}

static void
bfa_ioc_get_adapter_manufacturer(struct bfa_ioc *ioc, char *manufacturer)
{
	memset(manufacturer, 0, BFA_ADAPTER_MFG_NAME_LEN);
	memcpy(manufacturer, BFA_MFG_NAME, BFA_ADAPTER_MFG_NAME_LEN);
}

static void
bfa_ioc_get_adapter_model(struct bfa_ioc *ioc, char *model)
{
	struct bfi_ioc_attr *ioc_attr;

	BUG_ON(!(model));
	memset(model, 0, BFA_ADAPTER_MODEL_NAME_LEN);

	ioc_attr = ioc->attr;

	/**
	 * model name
	 */
	snprintf(model, BFA_ADAPTER_MODEL_NAME_LEN, "%s-%u",
		BFA_MFG_NAME, ioc_attr->card_type);
}

static enum bfa_ioc_state
bfa_ioc_get_state(struct bfa_ioc *ioc)
{
	return bfa_sm_to_state(ioc_sm_table, ioc->fsm);
}

void
bfa_nw_ioc_get_attr(struct bfa_ioc *ioc, struct bfa_ioc_attr *ioc_attr)
{
	memset((void *)ioc_attr, 0, sizeof(struct bfa_ioc_attr));

	ioc_attr->state = bfa_ioc_get_state(ioc);
	ioc_attr->port_id = ioc->port_id;

	ioc_attr->ioc_type = bfa_ioc_get_type(ioc);

	bfa_ioc_get_adapter_attr(ioc, &ioc_attr->adapter_attr);

	ioc_attr->pci_attr.device_id = ioc->pcidev.device_id;
	ioc_attr->pci_attr.pcifn = ioc->pcidev.pci_func;
	bfa_ioc_get_pci_chip_rev(ioc, ioc_attr->pci_attr.chip_rev);
}

/**
 * WWN public
 */
static u64
bfa_ioc_get_pwwn(struct bfa_ioc *ioc)
{
	return ioc->attr->pwwn;
}

mac_t
bfa_nw_ioc_get_mac(struct bfa_ioc *ioc)
{
	return ioc->attr->mac;
}

/**
 * Firmware failure detected. Start recovery actions.
 */
static void
bfa_ioc_recover(struct bfa_ioc *ioc)
{
	bfa_ioc_stats(ioc, ioc_hbfails);
	bfa_fsm_send_event(ioc, IOC_E_HBFAIL);
}

static void
bfa_ioc_check_attr_wwns(struct bfa_ioc *ioc)
{
	if (bfa_ioc_get_type(ioc) == BFA_IOC_TYPE_LL)
		return;

}