/*
 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
 * All rights reserved.
 *
 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License (GPL) Version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */
#include "bfa_ioc.h"
#include "bfi_ctreg.h"
#include "bfa_defs.h"
#include "bfa_defs_svc.h"
#include "bfad_drv.h"

BFA_TRC_FILE(CNA, IOC);
/*
 * IOC local definitions
 */
#define BFA_IOC_TOV		3000	/* msecs */
#define BFA_IOC_HWSEM_TOV	500	/* msecs */
#define BFA_IOC_HB_TOV		500	/* msecs */
#define BFA_IOC_HWINIT_MAX	2
#define BFA_IOC_TOV_RECOVER	BFA_IOC_HB_TOV
#define bfa_ioc_timer_start(__ioc)					\
	bfa_timer_begin((__ioc)->timer_mod, &(__ioc)->ioc_timer,	\
			bfa_ioc_timeout, (__ioc), BFA_IOC_TOV)
#define bfa_ioc_timer_stop(__ioc)	bfa_timer_stop(&(__ioc)->ioc_timer)

#define bfa_hb_timer_start(__ioc)					\
	bfa_timer_begin((__ioc)->timer_mod, &(__ioc)->hb_timer,	\
			bfa_ioc_hb_check, (__ioc), BFA_IOC_HB_TOV)
#define bfa_hb_timer_stop(__ioc)	bfa_timer_stop(&(__ioc)->hb_timer)
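
/*
 * Note: the timers armed by the macros above are one-shot soft timers
 * driven by bfa_timer_beat() at the end of this file, which the driver
 * is expected to call periodically; they are not kernel timers. A
 * hedged sketch of the expected driver tick, assuming a hypothetical
 * bfad_s instance with a timer_mod member:
 *
 *	bfa_timer_beat(&bfad->timer_mod);  / * every BFA_TIMER_FREQ msecs * /
 */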
#define BFA_DBG_FWTRC_ENTS	(BFI_IOC_TRC_ENTS)
#define BFA_DBG_FWTRC_LEN					\
	(BFA_DBG_FWTRC_ENTS * sizeof(struct bfa_trc_s) +	\
	 (sizeof(struct bfa_trc_mod_s) -			\
	  BFA_TRC_MAX * sizeof(struct bfa_trc_s)))
#define BFA_DBG_FWTRC_OFF(_fn)	(BFI_IOC_TRC_OFF + BFA_DBG_FWTRC_LEN * (_fn))
/*
 * Asic specific macros : see bfa_hw_cb.c and bfa_hw_ct.c for details.
 */

#define bfa_ioc_firmware_lock(__ioc)			\
			((__ioc)->ioc_hwif->ioc_firmware_lock(__ioc))
#define bfa_ioc_firmware_unlock(__ioc)			\
			((__ioc)->ioc_hwif->ioc_firmware_unlock(__ioc))
#define bfa_ioc_reg_init(__ioc) ((__ioc)->ioc_hwif->ioc_reg_init(__ioc))
#define bfa_ioc_map_port(__ioc) ((__ioc)->ioc_hwif->ioc_map_port(__ioc))
#define bfa_ioc_notify_hbfail(__ioc)			\
			((__ioc)->ioc_hwif->ioc_notify_hbfail(__ioc))
#ifdef BFA_IOC_IS_UEFI
#define bfa_ioc_is_bios_optrom(__ioc)	(0)
#define bfa_ioc_is_uefi(__ioc)		BFA_IOC_IS_UEFI
#else
#define bfa_ioc_is_bios_optrom(__ioc)	\
	(bfa_cb_image_get_size(BFA_IOC_FWIMG_TYPE(__ioc)) < BFA_IOC_FWIMG_MINSZ)
#define bfa_ioc_is_uefi(__ioc)		(0)
#endif
#define bfa_ioc_mbox_cmd_pending(__ioc)		\
			(!list_empty(&((__ioc)->mbox_mod.cmd_q)) || \
			bfa_reg_read((__ioc)->ioc_regs.hfn_mbox_cmd))

bfa_boolean_t bfa_auto_recover = BFA_TRUE;
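
/*
 * bfa_auto_recover is the module-wide default for IOC failure auto
 * recovery. It is latched per-IOC into iocpf->auto_recover at reset
 * entry (see bfa_iocpf_sm_reset_entry() below), and can be changed
 * through bfa_ioc_auto_recover() before IOCs are brought up.
 */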
/*
 * forward declarations
 */
static void bfa_ioc_hw_sem_get(struct bfa_ioc_s *ioc);
static void bfa_ioc_hw_sem_get_cancel(struct bfa_ioc_s *ioc);
static void bfa_ioc_hwinit(struct bfa_ioc_s *ioc, bfa_boolean_t force);
static void bfa_ioc_timeout(void *ioc);
static void bfa_ioc_send_enable(struct bfa_ioc_s *ioc);
static void bfa_ioc_send_disable(struct bfa_ioc_s *ioc);
static void bfa_ioc_send_getattr(struct bfa_ioc_s *ioc);
static void bfa_ioc_hb_monitor(struct bfa_ioc_s *ioc);
static void bfa_ioc_hb_stop(struct bfa_ioc_s *ioc);
static void bfa_ioc_reset(struct bfa_ioc_s *ioc, bfa_boolean_t force);
static void bfa_ioc_mbox_poll(struct bfa_ioc_s *ioc);
static void bfa_ioc_mbox_hbfail(struct bfa_ioc_s *ioc);
static void bfa_ioc_recover(struct bfa_ioc_s *ioc);
static void bfa_ioc_check_attr_wwns(struct bfa_ioc_s *ioc);
static void bfa_ioc_disable_comp(struct bfa_ioc_s *ioc);
static void bfa_ioc_lpu_stop(struct bfa_ioc_s *ioc);
static void bfa_ioc_pf_enabled(struct bfa_ioc_s *ioc);
static void bfa_ioc_pf_disabled(struct bfa_ioc_s *ioc);
static void bfa_ioc_pf_failed(struct bfa_ioc_s *ioc);
static void bfa_ioc_pf_fwmismatch(struct bfa_ioc_s *ioc);
/*
 * IOC state machine definitions/declarations
 */
enum ioc_event {
	IOC_E_RESET		= 1,	/* IOC reset request		*/
	IOC_E_ENABLE		= 2,	/* IOC enable request		*/
	IOC_E_DISABLE		= 3,	/* IOC disable request		*/
	IOC_E_DETACH		= 4,	/* driver detach cleanup	*/
	IOC_E_ENABLED		= 5,	/* f/w enabled			*/
	IOC_E_FWRSP_GETATTR	= 6,	/* IOC get attribute response	*/
	IOC_E_DISABLED		= 7,	/* f/w disabled			*/
	IOC_E_FAILED		= 8,	/* failure notice by iocpf sm	*/
	IOC_E_HBFAIL		= 9,	/* heartbeat failure		*/
	IOC_E_HWERROR		= 10,	/* hardware error interrupt	*/
	IOC_E_TIMEOUT		= 11,	/* timeout			*/
};
bfa_fsm_state_decl(bfa_ioc, uninit, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, reset, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, enabling, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, getattr, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, op, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, initfail, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, fail, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, disabling, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, disabled, struct bfa_ioc_s, enum ioc_event);
static struct bfa_sm_table_s ioc_sm_table[] = {
	{BFA_SM(bfa_ioc_sm_uninit), BFA_IOC_UNINIT},
	{BFA_SM(bfa_ioc_sm_reset), BFA_IOC_RESET},
	{BFA_SM(bfa_ioc_sm_enabling), BFA_IOC_ENABLING},
	{BFA_SM(bfa_ioc_sm_getattr), BFA_IOC_GETATTR},
	{BFA_SM(bfa_ioc_sm_op), BFA_IOC_OPERATIONAL},
	{BFA_SM(bfa_ioc_sm_initfail), BFA_IOC_INITFAIL},
	{BFA_SM(bfa_ioc_sm_fail), BFA_IOC_FAIL},
	{BFA_SM(bfa_ioc_sm_disabling), BFA_IOC_DISABLING},
	{BFA_SM(bfa_ioc_sm_disabled), BFA_IOC_DISABLED},
};
/*
 * IOCPF state machine definitions/declarations
 */

#define bfa_iocpf_timer_start(__ioc)					\
	bfa_timer_begin((__ioc)->timer_mod, &(__ioc)->ioc_timer,	\
			bfa_iocpf_timeout, (__ioc), BFA_IOC_TOV)
#define bfa_iocpf_timer_stop(__ioc)	bfa_timer_stop(&(__ioc)->ioc_timer)

#define bfa_iocpf_recovery_timer_start(__ioc)				\
	bfa_timer_begin((__ioc)->timer_mod, &(__ioc)->ioc_timer,	\
			bfa_iocpf_timeout, (__ioc), BFA_IOC_TOV_RECOVER)

#define bfa_sem_timer_start(__ioc)					\
	bfa_timer_begin((__ioc)->timer_mod, &(__ioc)->sem_timer,	\
			bfa_iocpf_sem_timeout, (__ioc), BFA_IOC_HWSEM_TOV)
#define bfa_sem_timer_stop(__ioc)	bfa_timer_stop(&(__ioc)->sem_timer)
/*
 * Forward declarations for iocpf state machine
 */
static void bfa_iocpf_enable(struct bfa_ioc_s *ioc);
static void bfa_iocpf_disable(struct bfa_ioc_s *ioc);
static void bfa_iocpf_fail(struct bfa_ioc_s *ioc);
static void bfa_iocpf_initfail(struct bfa_ioc_s *ioc);
static void bfa_iocpf_getattrfail(struct bfa_ioc_s *ioc);
static void bfa_iocpf_stop(struct bfa_ioc_s *ioc);
static void bfa_iocpf_timeout(void *ioc_arg);
static void bfa_iocpf_sem_timeout(void *ioc_arg);
/*
 * IOCPF state machine events
 */
enum iocpf_event {
	IOCPF_E_ENABLE		= 1,	/* IOCPF enable request		*/
	IOCPF_E_DISABLE		= 2,	/* IOCPF disable request	*/
	IOCPF_E_STOP		= 3,	/* stop on driver detach	*/
	IOCPF_E_FWREADY		= 4,	/* f/w initialization done	*/
	IOCPF_E_FWRSP_ENABLE	= 5,	/* enable f/w response		*/
	IOCPF_E_FWRSP_DISABLE	= 6,	/* disable f/w response		*/
	IOCPF_E_FAIL		= 7,	/* failure notice by ioc sm	*/
	IOCPF_E_INITFAIL	= 8,	/* init fail notice by ioc sm	*/
	IOCPF_E_GETATTRFAIL	= 9,	/* init fail notice by ioc sm	*/
	IOCPF_E_SEMLOCKED	= 10,	/* h/w semaphore is locked	*/
	IOCPF_E_TIMEOUT		= 11,	/* f/w response timeout		*/
};
/*
 * IOCPF states
 */
enum bfa_iocpf_state {
	BFA_IOCPF_RESET		= 1,	/* IOC is in reset state	*/
	BFA_IOCPF_SEMWAIT	= 2,	/* Waiting for IOC h/w semaphore */
	BFA_IOCPF_HWINIT	= 3,	/* IOC h/w is being initialized	*/
	BFA_IOCPF_READY		= 4,	/* IOCPF is initialized		*/
	BFA_IOCPF_INITFAIL	= 5,	/* IOCPF failed			*/
	BFA_IOCPF_FAIL		= 6,	/* IOCPF failed			*/
	BFA_IOCPF_DISABLING	= 7,	/* IOCPF is being disabled	*/
	BFA_IOCPF_DISABLED	= 8,	/* IOCPF is disabled		*/
	BFA_IOCPF_FWMISMATCH	= 9,	/* IOC f/w different from drivers */
};
bfa_fsm_state_decl(bfa_iocpf, reset, struct bfa_iocpf_s, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, fwcheck, struct bfa_iocpf_s, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, mismatch, struct bfa_iocpf_s, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, semwait, struct bfa_iocpf_s, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, hwinit, struct bfa_iocpf_s, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, enabling, struct bfa_iocpf_s, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, ready, struct bfa_iocpf_s, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, initfail, struct bfa_iocpf_s, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, fail, struct bfa_iocpf_s, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, disabling, struct bfa_iocpf_s, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, disabled, struct bfa_iocpf_s, enum iocpf_event);
static struct bfa_sm_table_s iocpf_sm_table[] = {
	{BFA_SM(bfa_iocpf_sm_reset), BFA_IOCPF_RESET},
	{BFA_SM(bfa_iocpf_sm_fwcheck), BFA_IOCPF_FWMISMATCH},
	{BFA_SM(bfa_iocpf_sm_mismatch), BFA_IOCPF_FWMISMATCH},
	{BFA_SM(bfa_iocpf_sm_semwait), BFA_IOCPF_SEMWAIT},
	{BFA_SM(bfa_iocpf_sm_hwinit), BFA_IOCPF_HWINIT},
	{BFA_SM(bfa_iocpf_sm_enabling), BFA_IOCPF_HWINIT},
	{BFA_SM(bfa_iocpf_sm_ready), BFA_IOCPF_READY},
	{BFA_SM(bfa_iocpf_sm_initfail), BFA_IOCPF_INITFAIL},
	{BFA_SM(bfa_iocpf_sm_fail), BFA_IOCPF_FAIL},
	{BFA_SM(bfa_iocpf_sm_disabling), BFA_IOCPF_DISABLING},
	{BFA_SM(bfa_iocpf_sm_disabled), BFA_IOCPF_DISABLED},
};
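
/*
 * The two tables above map state-handler function pointers back to
 * enum values for external reporting (see bfa_ioc_get_state() below).
 * A minimal sketch of the lookup, assuming the bfa_sm_to_state()
 * helper walks entries until the handler pointer matches:
 *
 *	for (i = 0; sm_tab[i].sm && sm_tab[i].sm != sm; i++)
 *		;
 *	return sm_tab[i].state;
 */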
/*
 * Beginning state. IOC uninit state.
 */

static void
bfa_ioc_sm_uninit_entry(struct bfa_ioc_s *ioc)
{
}

/*
 * IOC is in uninit state.
 */
static void
bfa_ioc_sm_uninit(struct bfa_ioc_s *ioc, enum ioc_event event)
{
	bfa_trc(ioc, event);

	switch (event) {
	case IOC_E_RESET:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_reset);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}
/*
 * Reset entry actions -- initialize state machine
 */
static void
bfa_ioc_sm_reset_entry(struct bfa_ioc_s *ioc)
{
	bfa_fsm_set_state(&ioc->iocpf, bfa_iocpf_sm_reset);
}

/*
 * IOC is in reset state.
 */
static void
bfa_ioc_sm_reset(struct bfa_ioc_s *ioc, enum ioc_event event)
{
	bfa_trc(ioc, event);

	switch (event) {
	case IOC_E_ENABLE:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_enabling);
		break;

	case IOC_E_DISABLE:
		bfa_ioc_disable_comp(ioc);
		break;

	case IOC_E_DETACH:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}
static void
bfa_ioc_sm_enabling_entry(struct bfa_ioc_s *ioc)
{
	bfa_iocpf_enable(ioc);
}

/*
 * Host IOC function is being enabled, awaiting response from firmware.
 * Semaphore is acquired.
 */
static void
bfa_ioc_sm_enabling(struct bfa_ioc_s *ioc, enum ioc_event event)
{
	bfa_trc(ioc, event);

	switch (event) {
	case IOC_E_ENABLED:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_getattr);
		break;

	case IOC_E_FAILED:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_initfail);
		break;

	case IOC_E_HWERROR:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_initfail);
		bfa_iocpf_initfail(ioc);
		break;

	case IOC_E_DISABLE:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
		break;

	case IOC_E_DETACH:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
		bfa_iocpf_stop(ioc);
		break;

	case IOC_E_ENABLE:
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}
static void
bfa_ioc_sm_getattr_entry(struct bfa_ioc_s *ioc)
{
	bfa_ioc_timer_start(ioc);
	bfa_ioc_send_getattr(ioc);
}

/*
 * IOC configuration in progress. Timer is active.
 */
static void
bfa_ioc_sm_getattr(struct bfa_ioc_s *ioc, enum ioc_event event)
{
	bfa_trc(ioc, event);

	switch (event) {
	case IOC_E_FWRSP_GETATTR:
		bfa_ioc_timer_stop(ioc);
		bfa_ioc_check_attr_wwns(ioc);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_op);
		break;

	case IOC_E_FAILED:
		bfa_ioc_timer_stop(ioc);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_initfail);
		break;

	case IOC_E_HWERROR:
		bfa_ioc_timer_stop(ioc);
		/*
		 * !!! fall through !!!
		 */

	case IOC_E_TIMEOUT:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_initfail);
		bfa_iocpf_getattrfail(ioc);
		break;

	case IOC_E_DISABLE:
		bfa_ioc_timer_stop(ioc);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
		break;

	case IOC_E_ENABLE:
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}
static void
bfa_ioc_sm_op_entry(struct bfa_ioc_s *ioc)
{
	struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad;

	ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_OK);
	bfa_ioc_hb_monitor(ioc);
	BFA_LOG(KERN_INFO, bfad, log_level, "IOC enabled\n");
}

static void
bfa_ioc_sm_op(struct bfa_ioc_s *ioc, enum ioc_event event)
{
	bfa_trc(ioc, event);

	switch (event) {
	case IOC_E_ENABLE:
		break;

	case IOC_E_DISABLE:
		bfa_ioc_hb_stop(ioc);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
		break;

	case IOC_E_FAILED:
		bfa_ioc_hb_stop(ioc);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_fail);
		break;

	case IOC_E_HWERROR:
		bfa_ioc_hb_stop(ioc);
		/* !!! fall through !!! */

	case IOC_E_HBFAIL:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_fail);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}
static void
bfa_ioc_sm_disabling_entry(struct bfa_ioc_s *ioc)
{
	struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad;

	bfa_iocpf_disable(ioc);
	BFA_LOG(KERN_INFO, bfad, log_level, "IOC disabled\n");
}

/*
 * IOC is being disabled
 */
static void
bfa_ioc_sm_disabling(struct bfa_ioc_s *ioc, enum ioc_event event)
{
	bfa_trc(ioc, event);

	switch (event) {
	case IOC_E_DISABLED:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled);
		break;

	case IOC_E_HWERROR:
		/*
		 * No state change. Will move to disabled state
		 * after iocpf sm completes failure processing and
		 * moves to disabled state.
		 */
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}
/*
 * IOC disable completion entry.
 */
static void
bfa_ioc_sm_disabled_entry(struct bfa_ioc_s *ioc)
{
	bfa_ioc_disable_comp(ioc);
}

static void
bfa_ioc_sm_disabled(struct bfa_ioc_s *ioc, enum ioc_event event)
{
	bfa_trc(ioc, event);

	switch (event) {
	case IOC_E_ENABLE:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_enabling);
		break;

	case IOC_E_DISABLE:
		ioc->cbfn->disable_cbfn(ioc->bfa);
		break;

	case IOC_E_DETACH:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
		bfa_iocpf_stop(ioc);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}
static void
bfa_ioc_sm_initfail_entry(struct bfa_ioc_s *ioc)
{
	ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
}

/*
 * Hardware initialization failed.
 */
static void
bfa_ioc_sm_initfail(struct bfa_ioc_s *ioc, enum ioc_event event)
{
	bfa_trc(ioc, event);

	switch (event) {
	case IOC_E_ENABLED:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_getattr);
		break;

	case IOC_E_FAILED:
		/*
		 * Initialization failure during iocpf init retry.
		 */
		ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
		break;

	case IOC_E_DISABLE:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
		break;

	case IOC_E_DETACH:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
		bfa_iocpf_stop(ioc);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}
static void
bfa_ioc_sm_fail_entry(struct bfa_ioc_s *ioc)
{
	struct list_head			*qe;
	struct bfa_ioc_hbfail_notify_s	*notify;
	struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad;

	/*
	 * Notify driver and common modules registered for notification.
	 */
	ioc->cbfn->hbfail_cbfn(ioc->bfa);
	list_for_each(qe, &ioc->hb_notify_q) {
		notify = (struct bfa_ioc_hbfail_notify_s *) qe;
		notify->cbfn(notify->cbarg);
	}

	BFA_LOG(KERN_CRIT, bfad, log_level,
		"Heart Beat of IOC has failed\n");
}

/*
 * IOC failure.
 */
static void
bfa_ioc_sm_fail(struct bfa_ioc_s *ioc, enum ioc_event event)
{
	bfa_trc(ioc, event);

	switch (event) {
	case IOC_E_FAILED:
		/*
		 * Initialization failure during iocpf recovery.
		 * !!! Fall through !!!
		 */
	case IOC_E_ENABLE:
		ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
		break;

	case IOC_E_ENABLED:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_getattr);
		break;

	case IOC_E_DISABLE:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
		break;

	case IOC_E_HBFAIL:
		/*
		 * HB failure notification, ignore.
		 */
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}
/*
 * IOCPF State Machine
 */

/*
 * Reset entry actions -- initialize state machine
 */
static void
bfa_iocpf_sm_reset_entry(struct bfa_iocpf_s *iocpf)
{
	iocpf->retry_count = 0;
	iocpf->auto_recover = bfa_auto_recover;
}

/*
 * Beginning state. IOC is in reset state.
 */
static void
bfa_iocpf_sm_reset(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
	struct bfa_ioc_s *ioc = iocpf->ioc;

	bfa_trc(ioc, event);

	switch (event) {
	case IOCPF_E_ENABLE:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fwcheck);
		break;

	case IOCPF_E_STOP:
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}
/*
 * Semaphore should be acquired for version check.
 */
static void
bfa_iocpf_sm_fwcheck_entry(struct bfa_iocpf_s *iocpf)
{
	bfa_ioc_hw_sem_get(iocpf->ioc);
}

/*
 * Awaiting h/w semaphore to continue with version check.
 */
static void
bfa_iocpf_sm_fwcheck(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
	struct bfa_ioc_s *ioc = iocpf->ioc;

	bfa_trc(ioc, event);

	switch (event) {
	case IOCPF_E_SEMLOCKED:
		if (bfa_ioc_firmware_lock(ioc)) {
			iocpf->retry_count = 0;
			bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit);
		} else {
			bfa_ioc_hw_sem_release(ioc);
			bfa_fsm_set_state(iocpf, bfa_iocpf_sm_mismatch);
		}
		break;

	case IOCPF_E_DISABLE:
		bfa_ioc_hw_sem_get_cancel(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
		bfa_ioc_pf_disabled(ioc);
		break;

	case IOCPF_E_STOP:
		bfa_ioc_hw_sem_get_cancel(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}
/*
 * Notify enable completion callback.
 */
static void
bfa_iocpf_sm_mismatch_entry(struct bfa_iocpf_s *iocpf)
{
	/*
	 * Call only the first time sm enters fwmismatch state.
	 */
	if (iocpf->retry_count == 0)
		bfa_ioc_pf_fwmismatch(iocpf->ioc);

	iocpf->retry_count++;
	bfa_iocpf_timer_start(iocpf->ioc);
}

/*
 * Awaiting firmware version match.
 */
static void
bfa_iocpf_sm_mismatch(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
	struct bfa_ioc_s *ioc = iocpf->ioc;

	bfa_trc(ioc, event);

	switch (event) {
	case IOCPF_E_TIMEOUT:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fwcheck);
		break;

	case IOCPF_E_DISABLE:
		bfa_iocpf_timer_stop(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
		bfa_ioc_pf_disabled(ioc);
		break;

	case IOCPF_E_STOP:
		bfa_iocpf_timer_stop(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}
/*
 * Request for semaphore.
 */
static void
bfa_iocpf_sm_semwait_entry(struct bfa_iocpf_s *iocpf)
{
	bfa_ioc_hw_sem_get(iocpf->ioc);
}

/*
 * Awaiting semaphore for h/w initialization.
 */
static void
bfa_iocpf_sm_semwait(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
	struct bfa_ioc_s *ioc = iocpf->ioc;

	bfa_trc(ioc, event);

	switch (event) {
	case IOCPF_E_SEMLOCKED:
		iocpf->retry_count = 0;
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit);
		break;

	case IOCPF_E_DISABLE:
		bfa_ioc_hw_sem_get_cancel(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}
static void
bfa_iocpf_sm_hwinit_entry(struct bfa_iocpf_s *iocpf)
{
	bfa_iocpf_timer_start(iocpf->ioc);
	bfa_ioc_reset(iocpf->ioc, BFA_FALSE);
}

/*
 * Hardware is being initialized. Interrupts are enabled.
 * Holding hardware semaphore lock.
 */
static void
bfa_iocpf_sm_hwinit(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
	struct bfa_ioc_s *ioc = iocpf->ioc;

	bfa_trc(ioc, event);

	switch (event) {
	case IOCPF_E_FWREADY:
		bfa_iocpf_timer_stop(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_enabling);
		break;

	case IOCPF_E_INITFAIL:
		bfa_iocpf_timer_stop(ioc);
		/*
		 * !!! fall through !!!
		 */

	case IOCPF_E_TIMEOUT:
		iocpf->retry_count++;
		if (iocpf->retry_count < BFA_IOC_HWINIT_MAX) {
			bfa_iocpf_timer_start(ioc);
			bfa_ioc_reset(ioc, BFA_TRUE);
			break;
		}

		bfa_ioc_hw_sem_release(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail);

		if (event == IOCPF_E_TIMEOUT)
			bfa_ioc_pf_failed(ioc);
		break;

	case IOCPF_E_DISABLE:
		bfa_ioc_hw_sem_release(ioc);
		bfa_iocpf_timer_stop(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}
static void
bfa_iocpf_sm_enabling_entry(struct bfa_iocpf_s *iocpf)
{
	bfa_iocpf_timer_start(iocpf->ioc);
	bfa_ioc_send_enable(iocpf->ioc);
}

/*
 * Host IOC function is being enabled, awaiting response from firmware.
 * Semaphore is acquired.
 */
static void
bfa_iocpf_sm_enabling(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
	struct bfa_ioc_s *ioc = iocpf->ioc;

	bfa_trc(ioc, event);

	switch (event) {
	case IOCPF_E_FWRSP_ENABLE:
		bfa_iocpf_timer_stop(ioc);
		bfa_ioc_hw_sem_release(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_ready);
		break;

	case IOCPF_E_INITFAIL:
		bfa_iocpf_timer_stop(ioc);
		/*
		 * !!! fall through !!!
		 */

	case IOCPF_E_TIMEOUT:
		iocpf->retry_count++;
		if (iocpf->retry_count < BFA_IOC_HWINIT_MAX) {
			bfa_reg_write(ioc->ioc_regs.ioc_fwstate,
				      BFI_IOC_UNINIT);
			bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit);
			break;
		}

		bfa_ioc_hw_sem_release(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail);

		if (event == IOCPF_E_TIMEOUT)
			bfa_ioc_pf_failed(ioc);
		break;

	case IOCPF_E_DISABLE:
		bfa_iocpf_timer_stop(ioc);
		bfa_ioc_hw_sem_release(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling);
		break;

	case IOCPF_E_FWREADY:
		bfa_ioc_send_enable(ioc);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}
static void
bfa_iocpf_sm_ready_entry(struct bfa_iocpf_s *iocpf)
{
	bfa_ioc_pf_enabled(iocpf->ioc);
}

static void
bfa_iocpf_sm_ready(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
	struct bfa_ioc_s *ioc = iocpf->ioc;

	bfa_trc(ioc, event);

	switch (event) {
	case IOCPF_E_DISABLE:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling);
		break;

	case IOCPF_E_GETATTRFAIL:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail);
		break;

	case IOCPF_E_FAIL:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
		break;

	case IOCPF_E_FWREADY:
		if (bfa_ioc_is_operational(ioc))
			bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
		else
			bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail);

		bfa_ioc_pf_failed(ioc);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}
static void
bfa_iocpf_sm_disabling_entry(struct bfa_iocpf_s *iocpf)
{
	bfa_iocpf_timer_start(iocpf->ioc);
	bfa_ioc_send_disable(iocpf->ioc);
}

/*
 * IOC is being disabled
 */
static void
bfa_iocpf_sm_disabling(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
	struct bfa_ioc_s *ioc = iocpf->ioc;

	bfa_trc(ioc, event);

	switch (event) {
	case IOCPF_E_FWRSP_DISABLE:
	case IOCPF_E_FWREADY:
		bfa_iocpf_timer_stop(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
		break;

	case IOCPF_E_FAIL:
		bfa_iocpf_timer_stop(ioc);
		/*
		 * !!! fall through !!!
		 */

	case IOCPF_E_TIMEOUT:
		bfa_reg_write(ioc->ioc_regs.ioc_fwstate, BFI_IOC_FAIL);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
		break;

	case IOCPF_E_FWRSP_ENABLE:
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}
/*
 * IOC disable completion entry.
 */
static void
bfa_iocpf_sm_disabled_entry(struct bfa_iocpf_s *iocpf)
{
	bfa_ioc_pf_disabled(iocpf->ioc);
}

static void
bfa_iocpf_sm_disabled(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
	struct bfa_ioc_s *ioc = iocpf->ioc;

	bfa_trc(ioc, event);

	switch (event) {
	case IOCPF_E_ENABLE:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_semwait);
		break;

	case IOCPF_E_STOP:
		bfa_ioc_firmware_unlock(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}
static void
bfa_iocpf_sm_initfail_entry(struct bfa_iocpf_s *iocpf)
{
	bfa_iocpf_timer_start(iocpf->ioc);
}

/*
 * Hardware initialization failed.
 */
static void
bfa_iocpf_sm_initfail(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
	struct bfa_ioc_s *ioc = iocpf->ioc;

	bfa_trc(ioc, event);

	switch (event) {
	case IOCPF_E_DISABLE:
		bfa_iocpf_timer_stop(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
		break;

	case IOCPF_E_STOP:
		bfa_iocpf_timer_stop(ioc);
		bfa_ioc_firmware_unlock(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
		break;

	case IOCPF_E_TIMEOUT:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_semwait);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}
static void
bfa_iocpf_sm_fail_entry(struct bfa_iocpf_s *iocpf)
{
	/*
	 * Mark IOC as failed in hardware and stop firmware.
	 */
	bfa_ioc_lpu_stop(iocpf->ioc);
	bfa_reg_write(iocpf->ioc->ioc_regs.ioc_fwstate, BFI_IOC_FAIL);

	/*
	 * Notify other functions on HB failure.
	 */
	bfa_ioc_notify_hbfail(iocpf->ioc);

	/*
	 * Flush any queued up mailbox requests.
	 */
	bfa_ioc_mbox_hbfail(iocpf->ioc);

	if (iocpf->auto_recover)
		bfa_iocpf_recovery_timer_start(iocpf->ioc);
}

/*
 * IOC is in failed state.
 */
static void
bfa_iocpf_sm_fail(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
{
	struct bfa_ioc_s *ioc = iocpf->ioc;

	bfa_trc(ioc, event);

	switch (event) {
	case IOCPF_E_DISABLE:
		if (iocpf->auto_recover)
			bfa_iocpf_timer_stop(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
		break;

	case IOCPF_E_TIMEOUT:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_semwait);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}
/*
 * hal_ioc_pvt BFA IOC private functions
 */

static void
bfa_ioc_disable_comp(struct bfa_ioc_s *ioc)
{
	struct list_head			*qe;
	struct bfa_ioc_hbfail_notify_s	*notify;

	ioc->cbfn->disable_cbfn(ioc->bfa);

	/*
	 * Notify common modules registered for notification.
	 */
	list_for_each(qe, &ioc->hb_notify_q) {
		notify = (struct bfa_ioc_hbfail_notify_s *) qe;
		notify->cbfn(notify->cbarg);
	}
}
bfa_boolean_t
bfa_ioc_sem_get(bfa_os_addr_t sem_reg)
{
	u32 r32;
	int cnt = 0;
#define BFA_SEM_SPINCNT	3000

	r32 = bfa_reg_read(sem_reg);

	while (r32 && (cnt < BFA_SEM_SPINCNT)) {
		cnt++;
		bfa_os_udelay(2);
		r32 = bfa_reg_read(sem_reg);
	}

	if (r32 == 0)
		return BFA_TRUE;

	bfa_assert(cnt < BFA_SEM_SPINCNT);
	return BFA_FALSE;
}

void
bfa_ioc_sem_release(bfa_os_addr_t sem_reg)
{
	bfa_reg_write(sem_reg, 1);
}
static void
bfa_ioc_hw_sem_get(struct bfa_ioc_s *ioc)
{
	u32	r32;

	/*
	 * First read to the semaphore register will return 0, subsequent reads
	 * will return 1. Semaphore is released by writing 1 to the register
	 */
	r32 = bfa_reg_read(ioc->ioc_regs.ioc_sem_reg);
	if (r32 == 0) {
		bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_SEMLOCKED);
		return;
	}

	bfa_sem_timer_start(ioc);
}

void
bfa_ioc_hw_sem_release(struct bfa_ioc_s *ioc)
{
	bfa_reg_write(ioc->ioc_regs.ioc_sem_reg, 1);
}

static void
bfa_ioc_hw_sem_get_cancel(struct bfa_ioc_s *ioc)
{
	bfa_sem_timer_stop(ioc);
}
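
/*
 * Usage note: the h/w semaphore is acquired by a read (0 means acquired)
 * and released by writing 1. bfa_ioc_hw_sem_get() above retries via the
 * sem_timer instead of spinning; bfa_ioc_sem_get() busy-waits. A hedged,
 * illustrative caller pattern for the spinning variant:
 *
 *	if (bfa_ioc_sem_get(ioc->ioc_regs.ioc_init_sem_reg)) {
 *		... touch chip resources shared between functions ...
 *		bfa_ioc_sem_release(ioc->ioc_regs.ioc_init_sem_reg);
 *	}
 */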
/*
 * Initialize LPU local memory (aka secondary memory / SRAM)
 */
static void
bfa_ioc_lmem_init(struct bfa_ioc_s *ioc)
{
	u32	pss_ctl;
	int	i;
#define PSS_LMEM_INIT_TIME  10000

	pss_ctl = bfa_reg_read(ioc->ioc_regs.pss_ctl_reg);
	pss_ctl &= ~__PSS_LMEM_RESET;
	pss_ctl |= __PSS_LMEM_INIT_EN;

	/*
	 * i2c workaround 12.5khz clock
	 */
	pss_ctl |= __PSS_I2C_CLK_DIV(3UL);
	bfa_reg_write(ioc->ioc_regs.pss_ctl_reg, pss_ctl);

	/*
	 * wait for memory initialization to be complete
	 */
	i = 0;
	do {
		pss_ctl = bfa_reg_read(ioc->ioc_regs.pss_ctl_reg);
		i++;
	} while (!(pss_ctl & __PSS_LMEM_INIT_DONE) && (i < PSS_LMEM_INIT_TIME));

	/*
	 * If memory initialization is not successful, IOC timeout will catch
	 * such failures.
	 */
	bfa_assert(pss_ctl & __PSS_LMEM_INIT_DONE);
	bfa_trc(ioc, pss_ctl);

	pss_ctl &= ~(__PSS_LMEM_INIT_DONE | __PSS_LMEM_INIT_EN);
	bfa_reg_write(ioc->ioc_regs.pss_ctl_reg, pss_ctl);
}
static void
bfa_ioc_lpu_start(struct bfa_ioc_s *ioc)
{
	u32	pss_ctl;

	/*
	 * Take processor out of reset.
	 */
	pss_ctl = bfa_reg_read(ioc->ioc_regs.pss_ctl_reg);
	pss_ctl &= ~__PSS_LPU0_RESET;

	bfa_reg_write(ioc->ioc_regs.pss_ctl_reg, pss_ctl);
}

static void
bfa_ioc_lpu_stop(struct bfa_ioc_s *ioc)
{
	u32	pss_ctl;

	/*
	 * Put processors in reset.
	 */
	pss_ctl = bfa_reg_read(ioc->ioc_regs.pss_ctl_reg);
	pss_ctl |= (__PSS_LPU0_RESET | __PSS_LPU1_RESET);

	bfa_reg_write(ioc->ioc_regs.pss_ctl_reg, pss_ctl);
}
/*
 * Get driver and firmware versions.
 */
void
bfa_ioc_fwver_get(struct bfa_ioc_s *ioc, struct bfi_ioc_image_hdr_s *fwhdr)
{
	u32	pgnum, pgoff;
	u32	loff = 0;
	int	i;
	u32	*fwsig = (u32 *) fwhdr;

	pgnum = bfa_ioc_smem_pgnum(ioc, loff);
	pgoff = bfa_ioc_smem_pgoff(ioc, loff);
	bfa_reg_write(ioc->ioc_regs.host_page_num_fn, pgnum);

	for (i = 0; i < (sizeof(struct bfi_ioc_image_hdr_s) / sizeof(u32));
	     i++) {
		fwsig[i] =
			bfa_mem_read(ioc->ioc_regs.smem_page_start, loff);
		loff += sizeof(u32);
	}
}
/*
 * Returns TRUE if same.
 */
bfa_boolean_t
bfa_ioc_fwver_cmp(struct bfa_ioc_s *ioc, struct bfi_ioc_image_hdr_s *fwhdr)
{
	struct bfi_ioc_image_hdr_s *drv_fwhdr;
	int i;

	drv_fwhdr = (struct bfi_ioc_image_hdr_s *)
		bfa_cb_image_get_chunk(BFA_IOC_FWIMG_TYPE(ioc), 0);

	for (i = 0; i < BFI_IOC_MD5SUM_SZ; i++) {
		if (fwhdr->md5sum[i] != drv_fwhdr->md5sum[i]) {
			bfa_trc(ioc, i);
			bfa_trc(ioc, fwhdr->md5sum[i]);
			bfa_trc(ioc, drv_fwhdr->md5sum[i]);
			return BFA_FALSE;
		}
	}

	bfa_trc(ioc, fwhdr->md5sum[0]);
	return BFA_TRUE;
}
/*
 * Return true if current running version is valid. Firmware signature and
 * execution context (driver/bios) must match.
 */
static bfa_boolean_t
bfa_ioc_fwver_valid(struct bfa_ioc_s *ioc, u32 boot_env)
{
	struct bfi_ioc_image_hdr_s fwhdr, *drv_fwhdr;

	/*
	 * If bios/efi boot (flash based) -- return true
	 */
	if (bfa_ioc_is_bios_optrom(ioc))
		return BFA_TRUE;

	bfa_ioc_fwver_get(ioc, &fwhdr);
	drv_fwhdr = (struct bfi_ioc_image_hdr_s *)
		bfa_cb_image_get_chunk(BFA_IOC_FWIMG_TYPE(ioc), 0);

	if (fwhdr.signature != drv_fwhdr->signature) {
		bfa_trc(ioc, fwhdr.signature);
		bfa_trc(ioc, drv_fwhdr->signature);
		return BFA_FALSE;
	}

	if (bfa_os_swap32(fwhdr.param) != boot_env) {
		bfa_trc(ioc, fwhdr.param);
		bfa_trc(ioc, boot_env);
		return BFA_FALSE;
	}

	return bfa_ioc_fwver_cmp(ioc, &fwhdr);
}
/*
 * Conditionally flush any pending message from firmware at start.
 */
static void
bfa_ioc_msgflush(struct bfa_ioc_s *ioc)
{
	u32	r32;

	r32 = bfa_reg_read(ioc->ioc_regs.lpu_mbox_cmd);
	if (r32)
		bfa_reg_write(ioc->ioc_regs.lpu_mbox_cmd, 1);
}
static void
bfa_ioc_hwinit(struct bfa_ioc_s *ioc, bfa_boolean_t force)
{
	enum bfi_ioc_state ioc_fwstate;
	bfa_boolean_t fwvalid;
	u32 boot_type;
	u32 boot_env;

	ioc_fwstate = bfa_reg_read(ioc->ioc_regs.ioc_fwstate);

	if (force)
		ioc_fwstate = BFI_IOC_UNINIT;

	bfa_trc(ioc, ioc_fwstate);

	boot_type = BFI_BOOT_TYPE_NORMAL;
	boot_env = BFI_BOOT_LOADER_OS;

	/*
	 * Flash based firmware boot BIOS env.
	 */
	if (bfa_ioc_is_bios_optrom(ioc)) {
		boot_type = BFI_BOOT_TYPE_FLASH;
		boot_env = BFI_BOOT_LOADER_BIOS;
	}

	/*
	 * Flash based firmware boot UEFI env.
	 */
	if (bfa_ioc_is_uefi(ioc)) {
		boot_type = BFI_BOOT_TYPE_FLASH;
		boot_env = BFI_BOOT_LOADER_UEFI;
	}

	/*
	 * check if firmware is valid
	 */
	fwvalid = (ioc_fwstate == BFI_IOC_UNINIT) ?
		BFA_FALSE : bfa_ioc_fwver_valid(ioc, boot_env);

	if (!fwvalid) {
		bfa_ioc_boot(ioc, boot_type, boot_env);
		return;
	}

	/*
	 * If hardware initialization is in progress (initialized by other IOC),
	 * just wait for an initialization completion interrupt.
	 */
	if (ioc_fwstate == BFI_IOC_INITING) {
		ioc->cbfn->reset_cbfn(ioc->bfa);
		return;
	}

	/*
	 * If IOC function is disabled and firmware version is same,
	 * just re-enable IOC.
	 *
	 * If option rom, IOC must not be in operational state. With
	 * convergence, IOC will be in operational state when 2nd driver
	 * is loaded.
	 */
	if (ioc_fwstate == BFI_IOC_DISABLED ||
	    (!bfa_ioc_is_bios_optrom(ioc) && ioc_fwstate == BFI_IOC_OP)) {

		/*
		 * When using MSI-X any pending firmware ready event should
		 * be flushed. Otherwise MSI-X interrupts are not delivered.
		 */
		bfa_ioc_msgflush(ioc);
		ioc->cbfn->reset_cbfn(ioc->bfa);
		bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_FWREADY);
		return;
	}

	/*
	 * Initialize the h/w for any other states.
	 */
	bfa_ioc_boot(ioc, boot_type, boot_env);
}
static void
bfa_ioc_timeout(void *ioc_arg)
{
	struct bfa_ioc_s  *ioc = (struct bfa_ioc_s *) ioc_arg;

	bfa_trc(ioc, 0);
	bfa_fsm_send_event(ioc, IOC_E_TIMEOUT);
}
void
bfa_ioc_mbox_send(struct bfa_ioc_s *ioc, void *ioc_msg, int len)
{
	u32 *msgp = (u32 *) ioc_msg;
	u32 i;

	bfa_trc(ioc, msgp[0]);
	bfa_trc(ioc, len);

	bfa_assert(len <= BFI_IOC_MSGLEN_MAX);

	/*
	 * first write msg to mailbox registers
	 */
	for (i = 0; i < len / sizeof(u32); i++)
		bfa_reg_write(ioc->ioc_regs.hfn_mbox + i * sizeof(u32),
			      bfa_os_wtole(msgp[i]));

	for (; i < BFI_IOC_MSGLEN_MAX / sizeof(u32); i++)
		bfa_reg_write(ioc->ioc_regs.hfn_mbox + i * sizeof(u32), 0);

	/*
	 * write 1 to mailbox CMD to trigger LPU event
	 */
	bfa_reg_write(ioc->ioc_regs.hfn_mbox_cmd, 1);
	(void) bfa_reg_read(ioc->ioc_regs.hfn_mbox_cmd);
}
static void
bfa_ioc_send_enable(struct bfa_ioc_s *ioc)
{
	struct bfi_ioc_ctrl_req_s enable_req;
	struct bfa_timeval_s tv;

	bfi_h2i_set(enable_req.mh, BFI_MC_IOC, BFI_IOC_H2I_ENABLE_REQ,
		    bfa_ioc_portid(ioc));
	enable_req.ioc_class = ioc->ioc_mc;
	bfa_os_gettimeofday(&tv);
	enable_req.tv_sec = bfa_os_ntohl(tv.tv_sec);
	bfa_ioc_mbox_send(ioc, &enable_req, sizeof(struct bfi_ioc_ctrl_req_s));
}

static void
bfa_ioc_send_disable(struct bfa_ioc_s *ioc)
{
	struct bfi_ioc_ctrl_req_s disable_req;

	bfi_h2i_set(disable_req.mh, BFI_MC_IOC, BFI_IOC_H2I_DISABLE_REQ,
		    bfa_ioc_portid(ioc));
	bfa_ioc_mbox_send(ioc, &disable_req, sizeof(struct bfi_ioc_ctrl_req_s));
}

static void
bfa_ioc_send_getattr(struct bfa_ioc_s *ioc)
{
	struct bfi_ioc_getattr_req_s attr_req;

	bfi_h2i_set(attr_req.mh, BFI_MC_IOC, BFI_IOC_H2I_GETATTR_REQ,
		    bfa_ioc_portid(ioc));
	bfa_dma_be_addr_set(attr_req.attr_addr, ioc->attr_dma.pa);
	bfa_ioc_mbox_send(ioc, &attr_req, sizeof(attr_req));
}
static void
bfa_ioc_hb_check(void *cbarg)
{
	struct bfa_ioc_s  *ioc = cbarg;
	u32	hb_count;

	hb_count = bfa_reg_read(ioc->ioc_regs.heartbeat);
	if (ioc->hb_count == hb_count) {
		printk(KERN_CRIT "Firmware heartbeat failure at %d", hb_count);
		bfa_ioc_recover(ioc);
		return;
	} else {
		ioc->hb_count = hb_count;
	}

	bfa_ioc_mbox_poll(ioc);
	bfa_hb_timer_start(ioc);
}

static void
bfa_ioc_hb_monitor(struct bfa_ioc_s *ioc)
{
	ioc->hb_count = bfa_reg_read(ioc->ioc_regs.heartbeat);
	bfa_hb_timer_start(ioc);
}

static void
bfa_ioc_hb_stop(struct bfa_ioc_s *ioc)
{
	bfa_hb_timer_stop(ioc);
}
/*
 * Initiate a full firmware download.
 */
static void
bfa_ioc_download_fw(struct bfa_ioc_s *ioc, u32 boot_type,
		    u32 boot_env)
{
	u32 *fwimg;
	u32 pgnum, pgoff;
	u32 loff = 0;
	u32 chunkno = 0;
	u32 i;

	/*
	 * Initialize LMEM first before code download
	 */
	bfa_ioc_lmem_init(ioc);

	bfa_trc(ioc, bfa_cb_image_get_size(BFA_IOC_FWIMG_TYPE(ioc)));
	fwimg = bfa_cb_image_get_chunk(BFA_IOC_FWIMG_TYPE(ioc), chunkno);

	pgnum = bfa_ioc_smem_pgnum(ioc, loff);
	pgoff = bfa_ioc_smem_pgoff(ioc, loff);

	bfa_reg_write(ioc->ioc_regs.host_page_num_fn, pgnum);

	for (i = 0; i < bfa_cb_image_get_size(BFA_IOC_FWIMG_TYPE(ioc)); i++) {

		if (BFA_IOC_FLASH_CHUNK_NO(i) != chunkno) {
			chunkno = BFA_IOC_FLASH_CHUNK_NO(i);
			fwimg = bfa_cb_image_get_chunk(BFA_IOC_FWIMG_TYPE(ioc),
					BFA_IOC_FLASH_CHUNK_ADDR(chunkno));
		}

		/*
		 * write smem
		 */
		bfa_mem_write(ioc->ioc_regs.smem_page_start, loff,
			      fwimg[BFA_IOC_FLASH_OFFSET_IN_CHUNK(i)]);

		loff += sizeof(u32);

		/*
		 * handle page offset wrap around
		 */
		loff = PSS_SMEM_PGOFF(loff);
		if (loff == 0) {
			pgnum++;
			bfa_reg_write(ioc->ioc_regs.host_page_num_fn,
				      pgnum);
		}
	}

	bfa_reg_write(ioc->ioc_regs.host_page_num_fn,
		      bfa_ioc_smem_pgnum(ioc, 0));

	/*
	 * Set boot type and boot param at the end.
	 */
	bfa_mem_write(ioc->ioc_regs.smem_page_start, BFI_BOOT_TYPE_OFF,
		      bfa_os_swap32(boot_type));
	bfa_mem_write(ioc->ioc_regs.smem_page_start, BFI_BOOT_LOADER_OFF,
		      bfa_os_swap32(boot_env));
}
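
/*
 * Worked example of the page-wrap arithmetic used above, assuming
 * PSS_SMEM_PGOFF() masks the running offset down to the SMEM page size:
 * once loff crosses a page boundary it wraps to 0, at which point the
 * code bumps pgnum and reprograms host_page_num_fn so the next
 * bfa_mem_write() lands at offset 0 of the following page. The same
 * pattern is repeated by bfa_ioc_smem_read()/bfa_ioc_smem_clr() below.
 */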
static void
bfa_ioc_reset(struct bfa_ioc_s *ioc, bfa_boolean_t force)
{
	bfa_ioc_hwinit(ioc, force);
}

/*
 * Update BFA configuration from firmware configuration.
 */
static void
bfa_ioc_getattr_reply(struct bfa_ioc_s *ioc)
{
	struct bfi_ioc_attr_s	*attr = ioc->attr;

	attr->adapter_prop  = bfa_os_ntohl(attr->adapter_prop);
	attr->card_type     = bfa_os_ntohl(attr->card_type);
	attr->maxfrsize	    = bfa_os_ntohs(attr->maxfrsize);

	bfa_fsm_send_event(ioc, IOC_E_FWRSP_GETATTR);
}
/*
 * Attach time initialization of mbox logic.
 */
static void
bfa_ioc_mbox_attach(struct bfa_ioc_s *ioc)
{
	struct bfa_ioc_mbox_mod_s	*mod = &ioc->mbox_mod;
	int	mc;

	INIT_LIST_HEAD(&mod->cmd_q);
	for (mc = 0; mc < BFI_MC_MAX; mc++) {
		mod->mbhdlr[mc].cbfn = NULL;
		mod->mbhdlr[mc].cbarg = ioc->bfa;
	}
}
/*
 * Mbox poll timer -- restarts any pending mailbox requests.
 */
static void
bfa_ioc_mbox_poll(struct bfa_ioc_s *ioc)
{
	struct bfa_ioc_mbox_mod_s	*mod = &ioc->mbox_mod;
	struct bfa_mbox_cmd_s		*cmd;
	u32				stat;

	/*
	 * If no command pending, do nothing
	 */
	if (list_empty(&mod->cmd_q))
		return;

	/*
	 * If previous command is not yet fetched by firmware, do nothing
	 */
	stat = bfa_reg_read(ioc->ioc_regs.hfn_mbox_cmd);
	if (stat)
		return;

	/*
	 * Enqueue command to firmware.
	 */
	bfa_q_deq(&mod->cmd_q, &cmd);
	bfa_ioc_mbox_send(ioc, cmd->msg, sizeof(cmd->msg));
}
/*
 * Cleanup any pending requests.
 */
static void
bfa_ioc_mbox_hbfail(struct bfa_ioc_s *ioc)
{
	struct bfa_ioc_mbox_mod_s	*mod = &ioc->mbox_mod;
	struct bfa_mbox_cmd_s		*cmd;

	while (!list_empty(&mod->cmd_q))
		bfa_q_deq(&mod->cmd_q, &cmd);
}
/*
 * Read data from SMEM to host through PCI memmap
 *
 * @param[in]	ioc	memory for IOC
 * @param[in]	tbuf	app memory to store data from smem
 * @param[in]	soff	smem offset
 * @param[in]	sz	size of smem in bytes
 */
static bfa_status_t
bfa_ioc_smem_read(struct bfa_ioc_s *ioc, void *tbuf, u32 soff, u32 sz)
{
	u32 pgnum, loff, r32;
	int i, len;
	u32 *buf = tbuf;

	pgnum = bfa_ioc_smem_pgnum(ioc, soff);
	loff = bfa_ioc_smem_pgoff(ioc, soff);
	bfa_trc(ioc, pgnum);
	bfa_trc(ioc, loff);
	bfa_trc(ioc, sz);

	/*
	 * Hold semaphore to serialize pll init and fwtrc.
	 */
	if (BFA_FALSE == bfa_ioc_sem_get(ioc->ioc_regs.ioc_init_sem_reg)) {
		bfa_trc(ioc, 0);
		return BFA_STATUS_FAILED;
	}

	bfa_reg_write(ioc->ioc_regs.host_page_num_fn, pgnum);

	len = sz/sizeof(u32);
	bfa_trc(ioc, len);
	for (i = 0; i < len; i++) {
		r32 = bfa_mem_read(ioc->ioc_regs.smem_page_start, loff);
		buf[i] = bfa_os_ntohl(r32);
		loff += sizeof(u32);

		/*
		 * handle page offset wrap around
		 */
		loff = PSS_SMEM_PGOFF(loff);
		if (loff == 0) {
			pgnum++;
			bfa_reg_write(ioc->ioc_regs.host_page_num_fn, pgnum);
		}
	}
	bfa_reg_write(ioc->ioc_regs.host_page_num_fn,
		      bfa_ioc_smem_pgnum(ioc, 0));

	/*
	 * release semaphore.
	 */
	bfa_ioc_sem_release(ioc->ioc_regs.ioc_init_sem_reg);

	bfa_trc(ioc, pgnum);
	return BFA_STATUS_OK;
}
/*
 * Clear SMEM data from host through PCI memmap
 *
 * @param[in]	ioc	memory for IOC
 * @param[in]	soff	smem offset
 * @param[in]	sz	size of smem in bytes
 */
static bfa_status_t
bfa_ioc_smem_clr(struct bfa_ioc_s *ioc, u32 soff, u32 sz)
{
	int i, len;
	u32 pgnum, loff;

	pgnum = bfa_ioc_smem_pgnum(ioc, soff);
	loff = bfa_ioc_smem_pgoff(ioc, soff);
	bfa_trc(ioc, pgnum);
	bfa_trc(ioc, loff);
	bfa_trc(ioc, sz);

	/*
	 * Hold semaphore to serialize pll init and fwtrc.
	 */
	if (BFA_FALSE == bfa_ioc_sem_get(ioc->ioc_regs.ioc_init_sem_reg)) {
		bfa_trc(ioc, 0);
		return BFA_STATUS_FAILED;
	}

	bfa_reg_write(ioc->ioc_regs.host_page_num_fn, pgnum);

	len = sz/sizeof(u32); /* len in words */
	bfa_trc(ioc, len);
	for (i = 0; i < len; i++) {
		bfa_mem_write(ioc->ioc_regs.smem_page_start, loff, 0);
		loff += sizeof(u32);

		/*
		 * handle page offset wrap around
		 */
		loff = PSS_SMEM_PGOFF(loff);
		if (loff == 0) {
			pgnum++;
			bfa_reg_write(ioc->ioc_regs.host_page_num_fn, pgnum);
		}
	}
	bfa_reg_write(ioc->ioc_regs.host_page_num_fn,
		      bfa_ioc_smem_pgnum(ioc, 0));

	/*
	 * release semaphore.
	 */
	bfa_ioc_sem_release(ioc->ioc_regs.ioc_init_sem_reg);
	bfa_trc(ioc, pgnum);
	return BFA_STATUS_OK;
}
/*
 * hal iocpf to ioc interface
 */
static void
bfa_ioc_pf_enabled(struct bfa_ioc_s *ioc)
{
	bfa_fsm_send_event(ioc, IOC_E_ENABLED);
}

static void
bfa_ioc_pf_disabled(struct bfa_ioc_s *ioc)
{
	bfa_fsm_send_event(ioc, IOC_E_DISABLED);
}

static void
bfa_ioc_pf_failed(struct bfa_ioc_s *ioc)
{
	bfa_fsm_send_event(ioc, IOC_E_FAILED);
}

static void
bfa_ioc_pf_fwmismatch(struct bfa_ioc_s *ioc)
{
	struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad;

	/*
	 * Provide enable completion callback.
	 */
	ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
	BFA_LOG(KERN_WARNING, bfad, log_level,
		"Running firmware version is incompatible "
		"with the driver version\n");
}
bfa_status_t
bfa_ioc_pll_init(struct bfa_ioc_s *ioc)
{
	/*
	 * Hold semaphore so that nobody can access the chip during init.
	 */
	bfa_ioc_sem_get(ioc->ioc_regs.ioc_init_sem_reg);

	bfa_ioc_pll_init_asic(ioc);

	ioc->pllinit = BFA_TRUE;

	/*
	 * release semaphore.
	 */
	bfa_ioc_sem_release(ioc->ioc_regs.ioc_init_sem_reg);

	return BFA_STATUS_OK;
}
/*
 * Interface used by diag module to do firmware boot with memory test
 * as the entry vector.
 */
void
bfa_ioc_boot(struct bfa_ioc_s *ioc, u32 boot_type, u32 boot_env)
{
	bfa_os_addr_t	rb;

	bfa_ioc_stats(ioc, ioc_boots);

	if (bfa_ioc_pll_init(ioc) != BFA_STATUS_OK)
		return;

	/*
	 * Initialize IOC state of all functions on a chip reset.
	 */
	rb = ioc->pcidev.pci_bar_kva;
	if (boot_type == BFI_BOOT_TYPE_MEMTEST) {
		bfa_reg_write((rb + BFA_IOC0_STATE_REG), BFI_IOC_MEMTEST);
		bfa_reg_write((rb + BFA_IOC1_STATE_REG), BFI_IOC_MEMTEST);
	} else {
		bfa_reg_write((rb + BFA_IOC0_STATE_REG), BFI_IOC_INITING);
		bfa_reg_write((rb + BFA_IOC1_STATE_REG), BFI_IOC_INITING);
	}

	bfa_ioc_msgflush(ioc);
	bfa_ioc_download_fw(ioc, boot_type, boot_env);

	/*
	 * Enable interrupts just before starting LPU
	 */
	ioc->cbfn->reset_cbfn(ioc->bfa);
	bfa_ioc_lpu_start(ioc);
}
/*
 * Enable/disable IOC failure auto recovery.
 */
void
bfa_ioc_auto_recover(bfa_boolean_t auto_recover)
{
	bfa_auto_recover = auto_recover;
}

bfa_boolean_t
bfa_ioc_is_operational(struct bfa_ioc_s *ioc)
{
	return bfa_fsm_cmp_state(ioc, bfa_ioc_sm_op);
}

bfa_boolean_t
bfa_ioc_is_initialized(struct bfa_ioc_s *ioc)
{
	u32 r32 = bfa_reg_read(ioc->ioc_regs.ioc_fwstate);

	return ((r32 != BFI_IOC_UNINIT) &&
		(r32 != BFI_IOC_INITING) &&
		(r32 != BFI_IOC_MEMTEST));
}
void
bfa_ioc_msgget(struct bfa_ioc_s *ioc, void *mbmsg)
{
	u32	*msgp = mbmsg;
	u32	r32;
	int	i;

	/*
	 * read the MBOX msg
	 */
	for (i = 0; i < (sizeof(union bfi_ioc_i2h_msg_u) / sizeof(u32));
	     i++) {
		r32 = bfa_reg_read(ioc->ioc_regs.lpu_mbox +
				   i * sizeof(u32));
		msgp[i] = bfa_os_htonl(r32);
	}

	/*
	 * turn off mailbox interrupt by clearing mailbox status
	 */
	bfa_reg_write(ioc->ioc_regs.lpu_mbox_cmd, 1);
	bfa_reg_read(ioc->ioc_regs.lpu_mbox_cmd);
}
void
bfa_ioc_isr(struct bfa_ioc_s *ioc, struct bfi_mbmsg_s *m)
{
	union bfi_ioc_i2h_msg_u	*msg;
	struct bfa_iocpf_s *iocpf = &ioc->iocpf;

	msg = (union bfi_ioc_i2h_msg_u *) m;

	bfa_ioc_stats(ioc, ioc_isrs);

	switch (msg->mh.msg_id) {
	case BFI_IOC_I2H_HBEAT:
		break;

	case BFI_IOC_I2H_READY_EVENT:
		bfa_fsm_send_event(iocpf, IOCPF_E_FWREADY);
		break;

	case BFI_IOC_I2H_ENABLE_REPLY:
		bfa_fsm_send_event(iocpf, IOCPF_E_FWRSP_ENABLE);
		break;

	case BFI_IOC_I2H_DISABLE_REPLY:
		bfa_fsm_send_event(iocpf, IOCPF_E_FWRSP_DISABLE);
		break;

	case BFI_IOC_I2H_GETATTR_REPLY:
		bfa_ioc_getattr_reply(ioc);
		break;

	default:
		bfa_trc(ioc, msg->mh.msg_id);
		bfa_assert(0);
	}
}
/*
 * IOC attach time initialization and setup.
 *
 * @param[in]	ioc	memory for IOC
 * @param[in]	bfa	driver instance structure
 */
void
bfa_ioc_attach(struct bfa_ioc_s *ioc, void *bfa, struct bfa_ioc_cbfn_s *cbfn,
	       struct bfa_timer_mod_s *timer_mod)
{
	ioc->bfa	= bfa;
	ioc->cbfn	= cbfn;
	ioc->timer_mod	= timer_mod;
	ioc->fcmode	= BFA_FALSE;
	ioc->pllinit	= BFA_FALSE;
	ioc->dbg_fwsave_once = BFA_TRUE;
	ioc->iocpf.ioc	= ioc;

	bfa_ioc_mbox_attach(ioc);
	INIT_LIST_HEAD(&ioc->hb_notify_q);

	bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
	bfa_fsm_send_event(ioc, IOC_E_RESET);
}
/*
 * Driver detach time IOC cleanup.
 */
void
bfa_ioc_detach(struct bfa_ioc_s *ioc)
{
	bfa_fsm_send_event(ioc, IOC_E_DETACH);
}

/*
 * Setup IOC PCI properties.
 *
 * @param[in]	pcidev	PCI device information for this IOC
 */
void
bfa_ioc_pci_init(struct bfa_ioc_s *ioc, struct bfa_pcidev_s *pcidev,
		 enum bfi_mclass mc)
{
	ioc->ioc_mc	= mc;
	ioc->pcidev	= *pcidev;
	ioc->ctdev	= bfa_asic_id_ct(ioc->pcidev.device_id);
	ioc->cna	= ioc->ctdev && !ioc->fcmode;

	/*
	 * Set asic specific interfaces. See bfa_ioc_cb.c and bfa_ioc_ct.c
	 */
	if (ioc->ctdev)
		bfa_ioc_set_ct_hwif(ioc);
	else
		bfa_ioc_set_cb_hwif(ioc);

	bfa_ioc_map_port(ioc);
	bfa_ioc_reg_init(ioc);
}
/*
 * Initialize IOC dma memory
 *
 * @param[in]	dm_kva	kernel virtual address of IOC dma memory
 * @param[in]	dm_pa	physical address of IOC dma memory
 */
void
bfa_ioc_mem_claim(struct bfa_ioc_s *ioc, u8 *dm_kva, u64 dm_pa)
{
	/*
	 * dma memory for firmware attribute
	 */
	ioc->attr_dma.kva = dm_kva;
	ioc->attr_dma.pa = dm_pa;
	ioc->attr = (struct bfi_ioc_attr_s *) dm_kva;
}

/*
 * Return size of dma memory required.
 */
u32
bfa_ioc_meminfo(void)
{
	return BFA_ROUNDUP(sizeof(struct bfi_ioc_attr_s), BFA_DMA_ALIGN_SZ);
}
void
bfa_ioc_enable(struct bfa_ioc_s *ioc)
{
	bfa_ioc_stats(ioc, ioc_enables);
	ioc->dbg_fwsave_once = BFA_TRUE;

	bfa_fsm_send_event(ioc, IOC_E_ENABLE);
}

void
bfa_ioc_disable(struct bfa_ioc_s *ioc)
{
	bfa_ioc_stats(ioc, ioc_disables);
	bfa_fsm_send_event(ioc, IOC_E_DISABLE);
}
/*
 * Returns memory required for saving firmware trace in case of crash.
 * Driver must call this interface to allocate memory required for
 * automatic saving of firmware trace. Driver should call
 * bfa_ioc_debug_memclaim() right after bfa_ioc_attach() to setup this
 * trace memory.
 */
int
bfa_ioc_debug_trcsz(bfa_boolean_t auto_recover)
{
	return (auto_recover) ? BFA_DBG_FWTRC_LEN : 0;
}

/*
 * Initialize memory for saving firmware trace. Driver must initialize
 * trace memory before call bfa_ioc_enable().
 */
void
bfa_ioc_debug_memclaim(struct bfa_ioc_s *ioc, void *dbg_fwsave)
{
	ioc->dbg_fwsave	    = dbg_fwsave;
	ioc->dbg_fwsave_len = bfa_ioc_debug_trcsz(ioc->iocpf.auto_recover);
}
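
/*
 * Hedged sketch of the setup order implied by the comments above,
 * with kzalloc shown only as an illustrative allocator:
 *
 *	trcsz = bfa_ioc_debug_trcsz(bfa_auto_recover);
 *	fwsave = kzalloc(trcsz, GFP_KERNEL);	/ * driver-owned buffer * /
 *	bfa_ioc_debug_memclaim(ioc, fwsave);	/ * after bfa_ioc_attach() * /
 *	bfa_ioc_enable(ioc);
 */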
u32
bfa_ioc_smem_pgnum(struct bfa_ioc_s *ioc, u32 fmaddr)
{
	return PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, fmaddr);
}

u32
bfa_ioc_smem_pgoff(struct bfa_ioc_s *ioc, u32 fmaddr)
{
	return PSS_SMEM_PGOFF(fmaddr);
}
/*
 * Register mailbox message handler functions
 *
 * @param[in]	ioc		IOC instance
 * @param[in]	mcfuncs		message class handler functions
 */
void
bfa_ioc_mbox_register(struct bfa_ioc_s *ioc, bfa_ioc_mbox_mcfunc_t *mcfuncs)
{
	struct bfa_ioc_mbox_mod_s	*mod = &ioc->mbox_mod;
	int				mc;

	for (mc = 0; mc < BFI_MC_MAX; mc++)
		mod->mbhdlr[mc].cbfn = mcfuncs[mc];
}

/*
 * Register mailbox message handler function, to be called by common modules
 */
void
bfa_ioc_mbox_regisr(struct bfa_ioc_s *ioc, enum bfi_mclass mc,
		    bfa_ioc_mbox_mcfunc_t cbfn, void *cbarg)
{
	struct bfa_ioc_mbox_mod_s	*mod = &ioc->mbox_mod;

	mod->mbhdlr[mc].cbfn	= cbfn;
	mod->mbhdlr[mc].cbarg	= cbarg;
}
/*
 * Queue a mailbox command request to firmware. Waits if mailbox is busy.
 * Responsibility of caller to serialize.
 *
 * @param[in]	ioc	IOC instance
 * @param[in]	cmd	Mailbox command
 */
void
bfa_ioc_mbox_queue(struct bfa_ioc_s *ioc, struct bfa_mbox_cmd_s *cmd)
{
	struct bfa_ioc_mbox_mod_s	*mod = &ioc->mbox_mod;
	u32				stat;

	/*
	 * If a previous command is pending, queue new command
	 */
	if (!list_empty(&mod->cmd_q)) {
		list_add_tail(&cmd->qe, &mod->cmd_q);
		return;
	}

	/*
	 * If mailbox is busy, queue command for poll timer
	 */
	stat = bfa_reg_read(ioc->ioc_regs.hfn_mbox_cmd);
	if (stat) {
		list_add_tail(&cmd->qe, &mod->cmd_q);
		return;
	}

	/*
	 * mailbox is free -- queue command to firmware
	 */
	bfa_ioc_mbox_send(ioc, cmd->msg, sizeof(cmd->msg));
}
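
/*
 * Illustrative (hedged) caller sketch, mirroring bfa_ioc_send_fwsync()
 * later in this file: build the request in cmd.msg, then queue it.
 * bfa_ioc_mbox_queue() either writes it to the mailbox immediately or
 * leaves it on cmd_q for bfa_ioc_mbox_poll() to retry from the
 * heartbeat timer.
 *
 *	struct bfa_mbox_cmd_s cmd;
 *	struct bfi_ioc_ctrl_req_s *req =
 *			(struct bfi_ioc_ctrl_req_s *) cmd.msg;
 *
 *	bfi_h2i_set(req->mh, BFI_MC_IOC, BFI_IOC_H2I_DBG_SYNC,
 *		    bfa_ioc_portid(ioc));
 *	bfa_ioc_mbox_queue(ioc, &cmd);
 */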
/*
 * Handle mailbox interrupts
 */
void
bfa_ioc_mbox_isr(struct bfa_ioc_s *ioc)
{
	struct bfa_ioc_mbox_mod_s	*mod = &ioc->mbox_mod;
	struct bfi_mbmsg_s		m;
	int				mc;

	bfa_ioc_msgget(ioc, &m);

	/*
	 * Treat IOC message class as special.
	 */
	mc = m.mh.msg_class;
	if (mc == BFI_MC_IOC) {
		bfa_ioc_isr(ioc, &m);
		return;
	}

	if ((mc >= BFI_MC_MAX) || (mod->mbhdlr[mc].cbfn == NULL))
		return;

	mod->mbhdlr[mc].cbfn(mod->mbhdlr[mc].cbarg, &m);
}
void
bfa_ioc_error_isr(struct bfa_ioc_s *ioc)
{
	bfa_fsm_send_event(ioc, IOC_E_HWERROR);
}

void
bfa_ioc_set_fcmode(struct bfa_ioc_s *ioc)
{
	ioc->fcmode  = BFA_TRUE;
	ioc->port_id = bfa_ioc_pcifn(ioc);
}
/*
 * return true if IOC is disabled
 */
bfa_boolean_t
bfa_ioc_is_disabled(struct bfa_ioc_s *ioc)
{
	return bfa_fsm_cmp_state(ioc, bfa_ioc_sm_disabling) ||
		bfa_fsm_cmp_state(ioc, bfa_ioc_sm_disabled);
}

/*
 * return true if IOC firmware is different.
 */
bfa_boolean_t
bfa_ioc_fw_mismatch(struct bfa_ioc_s *ioc)
{
	return bfa_fsm_cmp_state(ioc, bfa_ioc_sm_reset) ||
		bfa_fsm_cmp_state(&ioc->iocpf, bfa_iocpf_sm_fwcheck) ||
		bfa_fsm_cmp_state(&ioc->iocpf, bfa_iocpf_sm_mismatch);
}
#define bfa_ioc_state_disabled(__sm)		\
	(((__sm) == BFI_IOC_UNINIT) ||		\
	 ((__sm) == BFI_IOC_INITING) ||		\
	 ((__sm) == BFI_IOC_HWINIT) ||		\
	 ((__sm) == BFI_IOC_DISABLED) ||	\
	 ((__sm) == BFI_IOC_FAIL) ||		\
	 ((__sm) == BFI_IOC_CFG_DISABLED))
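
/*
 * Note: "disabled" here is adapter-scoped: the macro accepts every
 * firmware state that is not actively operational (uninit, initing,
 * hwinit, disabled, fail, cfg-disabled), which is what the
 * whole-adapter check below relies on when it inspects both IOCs.
 */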
/*
 * Check if adapter is disabled -- both IOCs should be in a disabled
 * state.
 */
bfa_boolean_t
bfa_ioc_adapter_is_disabled(struct bfa_ioc_s *ioc)
{
	u32	ioc_state;
	bfa_os_addr_t	rb = ioc->pcidev.pci_bar_kva;

	if (!bfa_fsm_cmp_state(ioc, bfa_ioc_sm_disabled))
		return BFA_FALSE;

	ioc_state = bfa_reg_read(rb + BFA_IOC0_STATE_REG);
	if (!bfa_ioc_state_disabled(ioc_state))
		return BFA_FALSE;

	if (ioc->pcidev.device_id != BFA_PCI_DEVICE_ID_FC_8G1P) {
		ioc_state = bfa_reg_read(rb + BFA_IOC1_STATE_REG);
		if (!bfa_ioc_state_disabled(ioc_state))
			return BFA_FALSE;
	}

	return BFA_TRUE;
}
/*
 * Add to IOC heartbeat failure notification queue. To be used by common
 * modules such as cee, port, diag.
 */
void
bfa_ioc_hbfail_register(struct bfa_ioc_s *ioc,
			struct bfa_ioc_hbfail_notify_s *notify)
{
	list_add_tail(&notify->qe, &ioc->hb_notify_q);
}
#define BFA_MFG_NAME "Brocade"
void
bfa_ioc_get_adapter_attr(struct bfa_ioc_s *ioc,
			 struct bfa_adapter_attr_s *ad_attr)
{
	struct bfi_ioc_attr_s	*ioc_attr;

	ioc_attr = ioc->attr;

	bfa_ioc_get_adapter_serial_num(ioc, ad_attr->serial_num);
	bfa_ioc_get_adapter_fw_ver(ioc, ad_attr->fw_ver);
	bfa_ioc_get_adapter_optrom_ver(ioc, ad_attr->optrom_ver);
	bfa_ioc_get_adapter_manufacturer(ioc, ad_attr->manufacturer);
	bfa_os_memcpy(&ad_attr->vpd, &ioc_attr->vpd,
		      sizeof(struct bfa_mfg_vpd_s));

	ad_attr->nports = bfa_ioc_get_nports(ioc);
	ad_attr->max_speed = bfa_ioc_speed_sup(ioc);

	bfa_ioc_get_adapter_model(ioc, ad_attr->model);
	/* For now, model descr uses same model string */
	bfa_ioc_get_adapter_model(ioc, ad_attr->model_descr);

	ad_attr->card_type = ioc_attr->card_type;
	ad_attr->is_mezz = bfa_mfg_is_mezz(ioc_attr->card_type);

	if (BFI_ADAPTER_IS_SPECIAL(ioc_attr->adapter_prop))
		ad_attr->prototype = 1;
	else
		ad_attr->prototype = 0;

	ad_attr->pwwn = bfa_ioc_get_pwwn(ioc);
	ad_attr->mac  = bfa_ioc_get_mac(ioc);

	ad_attr->pcie_gen = ioc_attr->pcie_gen;
	ad_attr->pcie_lanes = ioc_attr->pcie_lanes;
	ad_attr->pcie_lanes_orig = ioc_attr->pcie_lanes_orig;
	ad_attr->asic_rev = ioc_attr->asic_rev;

	bfa_ioc_get_pci_chip_rev(ioc, ad_attr->hw_ver);

	ad_attr->cna_capable = ioc->cna;
	ad_attr->trunk_capable = (ad_attr->nports > 1) && !ioc->cna;
}
enum bfa_ioc_type_e
bfa_ioc_get_type(struct bfa_ioc_s *ioc)
{
	if (!ioc->ctdev || ioc->fcmode)
		return BFA_IOC_TYPE_FC;
	else if (ioc->ioc_mc == BFI_MC_IOCFC)
		return BFA_IOC_TYPE_FCoE;
	else if (ioc->ioc_mc == BFI_MC_LL)
		return BFA_IOC_TYPE_LL;
	else {
		bfa_assert(ioc->ioc_mc == BFI_MC_LL);
		return BFA_IOC_TYPE_LL;
	}
}
void
bfa_ioc_get_adapter_serial_num(struct bfa_ioc_s *ioc, char *serial_num)
{
	bfa_os_memset((void *)serial_num, 0, BFA_ADAPTER_SERIAL_NUM_LEN);
	bfa_os_memcpy((void *)serial_num,
			(void *)ioc->attr->brcd_serialnum,
			BFA_ADAPTER_SERIAL_NUM_LEN);
}

void
bfa_ioc_get_adapter_fw_ver(struct bfa_ioc_s *ioc, char *fw_ver)
{
	bfa_os_memset((void *)fw_ver, 0, BFA_VERSION_LEN);
	bfa_os_memcpy(fw_ver, ioc->attr->fw_version, BFA_VERSION_LEN);
}

void
bfa_ioc_get_pci_chip_rev(struct bfa_ioc_s *ioc, char *chip_rev)
{
	bfa_assert(chip_rev);

	bfa_os_memset((void *)chip_rev, 0, BFA_IOC_CHIP_REV_LEN);

	chip_rev[0] = 'R';
	chip_rev[1] = 'e';
	chip_rev[2] = 'v';
	chip_rev[3] = '-';
	chip_rev[4] = ioc->attr->asic_rev;
	chip_rev[5] = '\0';
}

void
bfa_ioc_get_adapter_optrom_ver(struct bfa_ioc_s *ioc, char *optrom_ver)
{
	bfa_os_memset((void *)optrom_ver, 0, BFA_VERSION_LEN);
	bfa_os_memcpy(optrom_ver, ioc->attr->optrom_version,
		      BFA_VERSION_LEN);
}

void
bfa_ioc_get_adapter_manufacturer(struct bfa_ioc_s *ioc, char *manufacturer)
{
	bfa_os_memset((void *)manufacturer, 0, BFA_ADAPTER_MFG_NAME_LEN);
	bfa_os_memcpy(manufacturer, BFA_MFG_NAME, BFA_ADAPTER_MFG_NAME_LEN);
}

void
bfa_ioc_get_adapter_model(struct bfa_ioc_s *ioc, char *model)
{
	struct bfi_ioc_attr_s	*ioc_attr;

	bfa_assert(model);
	bfa_os_memset((void *)model, 0, BFA_ADAPTER_MODEL_NAME_LEN);

	ioc_attr = ioc->attr;

	/*
	 * model name
	 */
	bfa_os_snprintf(model, BFA_ADAPTER_MODEL_NAME_LEN, "%s-%u",
			BFA_MFG_NAME, ioc_attr->card_type);
}
enum bfa_ioc_state
bfa_ioc_get_state(struct bfa_ioc_s *ioc)
{
	enum bfa_iocpf_state iocpf_st;
	enum bfa_ioc_state ioc_st = bfa_sm_to_state(ioc_sm_table, ioc->fsm);

	if (ioc_st == BFA_IOC_ENABLING ||
	    ioc_st == BFA_IOC_FAIL || ioc_st == BFA_IOC_INITFAIL) {

		iocpf_st = bfa_sm_to_state(iocpf_sm_table, ioc->iocpf.fsm);

		switch (iocpf_st) {
		case BFA_IOCPF_SEMWAIT:
			ioc_st = BFA_IOC_SEMWAIT;
			break;

		case BFA_IOCPF_HWINIT:
			ioc_st = BFA_IOC_HWINIT;
			break;

		case BFA_IOCPF_FWMISMATCH:
			ioc_st = BFA_IOC_FWMISMATCH;
			break;

		case BFA_IOCPF_FAIL:
			ioc_st = BFA_IOC_FAIL;
			break;

		case BFA_IOCPF_INITFAIL:
			ioc_st = BFA_IOC_INITFAIL;
			break;

		default:
			break;
		}
	}

	return ioc_st;
}
void
bfa_ioc_get_attr(struct bfa_ioc_s *ioc, struct bfa_ioc_attr_s *ioc_attr)
{
	bfa_os_memset((void *)ioc_attr, 0, sizeof(struct bfa_ioc_attr_s));

	ioc_attr->state = bfa_ioc_get_state(ioc);
	ioc_attr->port_id = ioc->port_id;

	ioc_attr->ioc_type = bfa_ioc_get_type(ioc);

	bfa_ioc_get_adapter_attr(ioc, &ioc_attr->adapter_attr);

	ioc_attr->pci_attr.device_id = ioc->pcidev.device_id;
	ioc_attr->pci_attr.pcifn = ioc->pcidev.pci_func;
	bfa_ioc_get_pci_chip_rev(ioc, ioc_attr->pci_attr.chip_rev);
}
wwn_t
bfa_ioc_get_pwwn(struct bfa_ioc_s *ioc)
{
	return ioc->attr->pwwn;
}

wwn_t
bfa_ioc_get_nwwn(struct bfa_ioc_s *ioc)
{
	return ioc->attr->nwwn;
}

u64
bfa_ioc_get_adid(struct bfa_ioc_s *ioc)
{
	return ioc->attr->mfg_pwwn;
}

mac_t
bfa_ioc_get_mac(struct bfa_ioc_s *ioc)
{
	/*
	 * Check the IOC type and return the appropriate MAC
	 */
	if (bfa_ioc_get_type(ioc) == BFA_IOC_TYPE_FCoE)
		return ioc->attr->fcoe_mac;
	else
		return ioc->attr->mac;
}

wwn_t
bfa_ioc_get_mfg_pwwn(struct bfa_ioc_s *ioc)
{
	return ioc->attr->mfg_pwwn;
}

wwn_t
bfa_ioc_get_mfg_nwwn(struct bfa_ioc_s *ioc)
{
	return ioc->attr->mfg_nwwn;
}

mac_t
bfa_ioc_get_mfg_mac(struct bfa_ioc_s *ioc)
{
	mac_t	m;

	m = ioc->attr->mfg_mac;
	if (bfa_mfg_is_old_wwn_mac_model(ioc->attr->card_type))
		m.mac[MAC_ADDRLEN - 1] += bfa_ioc_pcifn(ioc);
	else
		bfa_mfg_increment_wwn_mac(&(m.mac[MAC_ADDRLEN-3]),
			bfa_ioc_pcifn(ioc));

	return m;
}

bfa_boolean_t
bfa_ioc_get_fcmode(struct bfa_ioc_s *ioc)
{
	return ioc->fcmode || !bfa_asic_id_ct(ioc->pcidev.device_id);
}
/*
 * Retrieve saved firmware trace from a prior IOC failure.
 */
bfa_status_t
bfa_ioc_debug_fwsave(struct bfa_ioc_s *ioc, void *trcdata, int *trclen)
{
	int	tlen;

	if (ioc->dbg_fwsave_len == 0)
		return BFA_STATUS_ENOFSAVE;

	tlen = *trclen;
	if (tlen > ioc->dbg_fwsave_len)
		tlen = ioc->dbg_fwsave_len;

	bfa_os_memcpy(trcdata, ioc->dbg_fwsave, tlen);
	*trclen = tlen;
	return BFA_STATUS_OK;
}
/*
 * Clear saved firmware trace
 */
void
bfa_ioc_debug_fwsave_clear(struct bfa_ioc_s *ioc)
{
	ioc->dbg_fwsave_once = BFA_TRUE;
}

/*
 * Retrieve saved firmware trace from a prior IOC failure.
 */
bfa_status_t
bfa_ioc_debug_fwtrc(struct bfa_ioc_s *ioc, void *trcdata, int *trclen)
{
	u32 loff = BFA_DBG_FWTRC_OFF(bfa_ioc_portid(ioc));
	int tlen;
	bfa_status_t status;

	bfa_trc(ioc, *trclen);

	tlen = *trclen;
	if (tlen > BFA_DBG_FWTRC_LEN)
		tlen = BFA_DBG_FWTRC_LEN;

	status = bfa_ioc_smem_read(ioc, trcdata, loff, tlen);
	*trclen = tlen;
	return status;
}
static void
bfa_ioc_send_fwsync(struct bfa_ioc_s *ioc)
{
	struct bfa_mbox_cmd_s cmd;
	struct bfi_ioc_ctrl_req_s *req = (struct bfi_ioc_ctrl_req_s *) cmd.msg;

	bfi_h2i_set(req->mh, BFI_MC_IOC, BFI_IOC_H2I_DBG_SYNC,
		    bfa_ioc_portid(ioc));
	req->ioc_class = ioc->ioc_mc;
	bfa_ioc_mbox_queue(ioc, &cmd);
}

static void
bfa_ioc_fwsync(struct bfa_ioc_s *ioc)
{
	u32 fwsync_iter = 1000;

	bfa_ioc_send_fwsync(ioc);

	/*
	 * After sending a fw sync mbox command wait for it to
	 * take effect.  We will not wait for a response because
	 *    1. fw_sync mbox cmd doesn't have a response.
	 *    2. Even if we implement that, interrupts might not
	 *	 be enabled when we call this function.
	 * So, just keep checking if any mbox cmd is pending, and
	 * after waiting for a reasonable amount of time, go ahead.
	 * It is possible that fw has crashed and the mbox command
	 * is never acknowledged.
	 */
	while (bfa_ioc_mbox_cmd_pending(ioc) && fwsync_iter > 0)
		fwsync_iter--;
}
/*
 * Dump firmware smem
 */
bfa_status_t
bfa_ioc_debug_fwcore(struct bfa_ioc_s *ioc, void *buf,
		     u32 *offset, int *buflen)
{
	u32 loff;
	int dlen;
	bfa_status_t status;
	u32 smem_len = BFA_IOC_FW_SMEM_SIZE(ioc);

	if (*offset >= smem_len) {
		*offset = *buflen = 0;
		return BFA_STATUS_EINVAL;
	}

	loff = *offset;
	dlen = *buflen;

	/*
	 * First smem read, sync smem before proceeding
	 * No need to sync before reading every chunk.
	 */
	if (loff == 0)
		bfa_ioc_fwsync(ioc);

	if ((loff + dlen) >= smem_len)
		dlen = smem_len - loff;

	status = bfa_ioc_smem_read(ioc, buf, loff, dlen);

	if (status != BFA_STATUS_OK) {
		*offset = *buflen = 0;
		return status;
	}

	*offset += dlen;

	if (*offset >= smem_len)
		*offset = 0;

	*buflen = dlen;

	return status;
}
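
/*
 * Hedged usage sketch: callers can dump the full SMEM by looping until
 * the returned offset wraps to zero (buf and its size are caller-chosen):
 *
 *	u32 off = 0;
 *	int len;
 *
 *	do {
 *		len = sizeof(buf);
 *		if (bfa_ioc_debug_fwcore(ioc, buf, &off, &len) !=
 *		    BFA_STATUS_OK)
 *			break;
 *		... consume len bytes ...
 *	} while (off != 0);
 */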
/*
 * Firmware statistics
 */
bfa_status_t
bfa_ioc_fw_stats_get(struct bfa_ioc_s *ioc, void *stats)
{
	u32 loff = BFI_IOC_FWSTATS_OFF + \
		BFI_IOC_FWSTATS_SZ * (bfa_ioc_portid(ioc));
	int tlen;
	bfa_status_t status;

	if (ioc->stats_busy) {
		bfa_trc(ioc, ioc->stats_busy);
		return BFA_STATUS_DEVBUSY;
	}
	ioc->stats_busy = BFA_TRUE;

	tlen = sizeof(struct bfa_fw_stats_s);
	status = bfa_ioc_smem_read(ioc, stats, loff, tlen);

	ioc->stats_busy = BFA_FALSE;
	return status;
}

bfa_status_t
bfa_ioc_fw_stats_clear(struct bfa_ioc_s *ioc)
{
	u32 loff = BFI_IOC_FWSTATS_OFF + \
		BFI_IOC_FWSTATS_SZ * (bfa_ioc_portid(ioc));
	int tlen;
	bfa_status_t status;

	if (ioc->stats_busy) {
		bfa_trc(ioc, ioc->stats_busy);
		return BFA_STATUS_DEVBUSY;
	}
	ioc->stats_busy = BFA_TRUE;

	tlen = sizeof(struct bfa_fw_stats_s);
	status = bfa_ioc_smem_clr(ioc, loff, tlen);

	ioc->stats_busy = BFA_FALSE;
	return status;
}
/*
 * Save firmware trace if configured.
 */
static void
bfa_ioc_debug_save(struct bfa_ioc_s *ioc)
{
	int	tlen;

	if (ioc->dbg_fwsave_len) {
		tlen = ioc->dbg_fwsave_len;
		bfa_ioc_debug_fwtrc(ioc, ioc->dbg_fwsave, &tlen);
	}
}

/*
 * Firmware failure detected. Start recovery actions.
 */
static void
bfa_ioc_recover(struct bfa_ioc_s *ioc)
{
	if (ioc->dbg_fwsave_once) {
		ioc->dbg_fwsave_once = BFA_FALSE;
		bfa_ioc_debug_save(ioc);
	}

	bfa_ioc_stats(ioc, ioc_hbfails);
	bfa_fsm_send_event(ioc, IOC_E_HBFAIL);
}
static void
bfa_ioc_check_attr_wwns(struct bfa_ioc_s *ioc)
{
	if (bfa_ioc_get_type(ioc) == BFA_IOC_TYPE_LL)
		return;
}
/*
 * hal_iocpf_pvt BFA IOC PF private functions
 */

static void
bfa_iocpf_enable(struct bfa_ioc_s *ioc)
{
	bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_ENABLE);
}

static void
bfa_iocpf_disable(struct bfa_ioc_s *ioc)
{
	bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_DISABLE);
}

static void
bfa_iocpf_fail(struct bfa_ioc_s *ioc)
{
	bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_FAIL);
}

static void
bfa_iocpf_initfail(struct bfa_ioc_s *ioc)
{
	bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_INITFAIL);
}

static void
bfa_iocpf_getattrfail(struct bfa_ioc_s *ioc)
{
	bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_GETATTRFAIL);
}

static void
bfa_iocpf_stop(struct bfa_ioc_s *ioc)
{
	bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_STOP);
}

static void
bfa_iocpf_timeout(void *ioc_arg)
{
	struct bfa_ioc_s  *ioc = (struct bfa_ioc_s *) ioc_arg;

	bfa_trc(ioc, 0);
	bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_TIMEOUT);
}

static void
bfa_iocpf_sem_timeout(void *ioc_arg)
{
	struct bfa_ioc_s  *ioc = (struct bfa_ioc_s *) ioc_arg;

	bfa_ioc_hw_sem_get(ioc);
}
/*
 * bfa timer function
 */
void
bfa_timer_init(struct bfa_timer_mod_s *mod)
{
	INIT_LIST_HEAD(&mod->timer_q);
}
void
bfa_timer_beat(struct bfa_timer_mod_s *mod)
{
	struct list_head *qh = &mod->timer_q;
	struct list_head *qe, *qe_next;
	struct bfa_timer_s *elem;
	struct list_head timedout_q;

	INIT_LIST_HEAD(&timedout_q);

	qe = bfa_q_next(qh);

	while (qe != qh) {
		qe_next = bfa_q_next(qe);

		elem = (struct bfa_timer_s *) qe;
		if (elem->timeout <= BFA_TIMER_FREQ) {
			elem->timeout = 0;
			list_del(&elem->qe);
			list_add_tail(&elem->qe, &timedout_q);
		} else {
			elem->timeout -= BFA_TIMER_FREQ;
		}

		qe = qe_next;	/* go to next elem */
	}

	/*
	 * Pop all the timeout entries
	 */
	while (!list_empty(&timedout_q)) {
		bfa_q_deq(&timedout_q, &elem);
		elem->timercb(elem->arg);
	}
}
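
/*
 * Hedged usage note: bfa_timer_beat() is meant to be driven every
 * BFA_TIMER_FREQ msecs from the driver's periodic tick, under the same
 * lock that protects bfa_timer_begin()/bfa_timer_stop(), e.g. from a
 * (hypothetical) bfad timer callback:
 *
 *	spin_lock_irqsave(&bfad->bfad_lock, flags);
 *	bfa_timer_beat(&bfad->timer_mod);
 *	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
 */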
/*
 * Should be called with lock protection
 */
void
bfa_timer_begin(struct bfa_timer_mod_s *mod, struct bfa_timer_s *timer,
		void (*timercb) (void *), void *arg, unsigned int timeout)
{
	bfa_assert(timercb != NULL);
	bfa_assert(!bfa_q_is_on_q(&mod->timer_q, timer));

	timer->timeout = timeout;
	timer->timercb = timercb;
	timer->arg = arg;

	list_add_tail(&timer->qe, &mod->timer_q);
}
/*
 * Should be called with lock protection
 */
void
bfa_timer_stop(struct bfa_timer_s *timer)
{
	bfa_assert(!list_empty(&timer->qe));

	list_del(&timer->qe);
}