/*
 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License (GPL) Version 2 as
 * published by the Free Software Foundation
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */

#include "bfi_ctreg.h"
#include "bfa_defs_svc.h"

BFA_TRC_FILE(CNA, IOC);

/*
 * IOC local definitions
 */
#define BFA_IOC_TOV		3000	/* msecs */
#define BFA_IOC_HWSEM_TOV	500	/* msecs */
#define BFA_IOC_HB_TOV		500	/* msecs */
#define BFA_IOC_HWINIT_MAX	2
#define BFA_IOC_TOV_RECOVER	BFA_IOC_HB_TOV

#define bfa_ioc_timer_start(__ioc)					\
	bfa_timer_begin((__ioc)->timer_mod, &(__ioc)->ioc_timer,	\
			bfa_ioc_timeout, (__ioc), BFA_IOC_TOV)
#define bfa_ioc_timer_stop(__ioc)	bfa_timer_stop(&(__ioc)->ioc_timer)

#define bfa_hb_timer_start(__ioc)					\
	bfa_timer_begin((__ioc)->timer_mod, &(__ioc)->hb_timer,	\
			bfa_ioc_hb_check, (__ioc), BFA_IOC_HB_TOV)
#define bfa_hb_timer_stop(__ioc)	bfa_timer_stop(&(__ioc)->hb_timer)

#define BFA_DBG_FWTRC_ENTS	(BFI_IOC_TRC_ENTS)
#define BFA_DBG_FWTRC_LEN					\
	(BFA_DBG_FWTRC_ENTS * sizeof(struct bfa_trc_s) +	\
	 (sizeof(struct bfa_trc_mod_s) -			\
	  BFA_TRC_MAX * sizeof(struct bfa_trc_s)))
#define BFA_DBG_FWTRC_OFF(_fn)	(BFI_IOC_TRC_OFF + BFA_DBG_FWTRC_LEN * (_fn))
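/*
 * Layout note: BFA_DBG_FWTRC_LEN is one trace control header (a
 * bfa_trc_mod_s less its statically sized BFA_TRC_MAX entry array)
 * plus BFI_IOC_TRC_ENTS trace entries; BFA_DBG_FWTRC_OFF() lays the
 * per-function save areas out back to back from BFI_IOC_TRC_OFF.
 */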
/*
 * Asic specific macros : see bfa_hw_cb.c and bfa_hw_ct.c for details.
 */
#define bfa_ioc_firmware_lock(__ioc)			\
	((__ioc)->ioc_hwif->ioc_firmware_lock(__ioc))
#define bfa_ioc_firmware_unlock(__ioc)			\
	((__ioc)->ioc_hwif->ioc_firmware_unlock(__ioc))
#define bfa_ioc_reg_init(__ioc) ((__ioc)->ioc_hwif->ioc_reg_init(__ioc))
#define bfa_ioc_map_port(__ioc) ((__ioc)->ioc_hwif->ioc_map_port(__ioc))
#define bfa_ioc_notify_hbfail(__ioc)			\
	((__ioc)->ioc_hwif->ioc_notify_hbfail(__ioc))
#ifdef BFA_IOC_IS_UEFI
#define bfa_ioc_is_bios_optrom(__ioc)	(0)
#define bfa_ioc_is_uefi(__ioc)		BFA_IOC_IS_UEFI
#else
#define bfa_ioc_is_bios_optrom(__ioc)	\
	(bfa_cb_image_get_size(BFA_IOC_FWIMG_TYPE(__ioc)) < BFA_IOC_FWIMG_MINSZ)
#define bfa_ioc_is_uefi(__ioc)		(0)
#endif
#define bfa_ioc_mbox_cmd_pending(__ioc)		\
	(!list_empty(&((__ioc)->mbox_mod.cmd_q)) || \
	 readl((__ioc)->ioc_regs.hfn_mbox_cmd))
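/*
 * Illustrative sketch, not a driver interface: a command counts as
 * "pending" while it still sits on the driver-side queue or the h/w
 * mailbox command register is set. bfa_ioc_fwsync() below drains with
 * this same check; the helper name, the spin bound and the use of
 * udelay() (from <linux/delay.h>) here are assumptions.
 */
static inline void
bfa_ioc_mbox_drain_sketch(struct bfa_ioc_s *ioc)
{
	int spins = 1000;	/* arbitrary bound */

	while (bfa_ioc_mbox_cmd_pending(ioc) && spins-- > 0)
		udelay(20);	/* poll until queue and h/w mailbox drain */
}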
bfa_boolean_t bfa_auto_recover = BFA_TRUE;

/*
 * forward declarations
 */
static void bfa_ioc_hw_sem_get(struct bfa_ioc_s *ioc);
static void bfa_ioc_hw_sem_get_cancel(struct bfa_ioc_s *ioc);
static void bfa_ioc_hwinit(struct bfa_ioc_s *ioc, bfa_boolean_t force);
static void bfa_ioc_timeout(void *ioc);
static void bfa_ioc_send_enable(struct bfa_ioc_s *ioc);
static void bfa_ioc_send_disable(struct bfa_ioc_s *ioc);
static void bfa_ioc_send_getattr(struct bfa_ioc_s *ioc);
static void bfa_ioc_hb_monitor(struct bfa_ioc_s *ioc);
static void bfa_ioc_hb_stop(struct bfa_ioc_s *ioc);
static void bfa_ioc_reset(struct bfa_ioc_s *ioc, bfa_boolean_t force);
static void bfa_ioc_mbox_poll(struct bfa_ioc_s *ioc);
static void bfa_ioc_mbox_hbfail(struct bfa_ioc_s *ioc);
static void bfa_ioc_recover(struct bfa_ioc_s *ioc);
static void bfa_ioc_check_attr_wwns(struct bfa_ioc_s *ioc);
static void bfa_ioc_disable_comp(struct bfa_ioc_s *ioc);
static void bfa_ioc_lpu_stop(struct bfa_ioc_s *ioc);
static void bfa_ioc_pf_enabled(struct bfa_ioc_s *ioc);
static void bfa_ioc_pf_disabled(struct bfa_ioc_s *ioc);
static void bfa_ioc_pf_failed(struct bfa_ioc_s *ioc);
static void bfa_ioc_pf_fwmismatch(struct bfa_ioc_s *ioc);

/*
 * IOC state machine definitions/declarations
 */
enum ioc_event {
	IOC_E_RESET		= 1,	/* IOC reset request */
	IOC_E_ENABLE		= 2,	/* IOC enable request */
	IOC_E_DISABLE		= 3,	/* IOC disable request */
	IOC_E_DETACH		= 4,	/* driver detach cleanup */
	IOC_E_ENABLED		= 5,	/* f/w enabled */
	IOC_E_FWRSP_GETATTR	= 6,	/* IOC get attribute response */
	IOC_E_DISABLED		= 7,	/* f/w disabled */
	IOC_E_FAILED		= 8,	/* failure notice by iocpf sm */
	IOC_E_HBFAIL		= 9,	/* heartbeat failure */
	IOC_E_HWERROR		= 10,	/* hardware error interrupt */
	IOC_E_TIMEOUT		= 11,	/* timeout */
};
bfa_fsm_state_decl(bfa_ioc, uninit, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, reset, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, enabling, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, getattr, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, op, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, initfail, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, fail, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, disabling, struct bfa_ioc_s, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, disabled, struct bfa_ioc_s, enum ioc_event);
static struct bfa_sm_table_s ioc_sm_table[] = {
	{BFA_SM(bfa_ioc_sm_uninit), BFA_IOC_UNINIT},
	{BFA_SM(bfa_ioc_sm_reset), BFA_IOC_RESET},
	{BFA_SM(bfa_ioc_sm_enabling), BFA_IOC_ENABLING},
	{BFA_SM(bfa_ioc_sm_getattr), BFA_IOC_GETATTR},
	{BFA_SM(bfa_ioc_sm_op), BFA_IOC_OPERATIONAL},
	{BFA_SM(bfa_ioc_sm_initfail), BFA_IOC_INITFAIL},
	{BFA_SM(bfa_ioc_sm_fail), BFA_IOC_FAIL},
	{BFA_SM(bfa_ioc_sm_disabling), BFA_IOC_DISABLING},
	{BFA_SM(bfa_ioc_sm_disabled), BFA_IOC_DISABLED},
};
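/*
 * ioc_sm_table is searched by bfa_sm_to_state() to map the current
 * state handler to an externally visible BFA_IOC_xxx state, e.g.
 * bfa_sm_to_state(ioc_sm_table, ioc->fsm) in bfa_ioc_get_state() below.
 */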
/*
 * IOCPF state machine definitions/declarations
 */
#define bfa_iocpf_timer_start(__ioc)					\
	bfa_timer_begin((__ioc)->timer_mod, &(__ioc)->ioc_timer,	\
			bfa_iocpf_timeout, (__ioc), BFA_IOC_TOV)
#define bfa_iocpf_timer_stop(__ioc)	bfa_timer_stop(&(__ioc)->ioc_timer)

#define bfa_iocpf_recovery_timer_start(__ioc)				\
	bfa_timer_begin((__ioc)->timer_mod, &(__ioc)->ioc_timer,	\
			bfa_iocpf_timeout, (__ioc), BFA_IOC_TOV_RECOVER)

#define bfa_sem_timer_start(__ioc)					\
	bfa_timer_begin((__ioc)->timer_mod, &(__ioc)->sem_timer,	\
			bfa_iocpf_sem_timeout, (__ioc), BFA_IOC_HWSEM_TOV)
#define bfa_sem_timer_stop(__ioc)	bfa_timer_stop(&(__ioc)->sem_timer)
/*
 * Forward declarations for the iocpf state machine
 */
static void bfa_iocpf_enable(struct bfa_ioc_s *ioc);
static void bfa_iocpf_disable(struct bfa_ioc_s *ioc);
static void bfa_iocpf_fail(struct bfa_ioc_s *ioc);
static void bfa_iocpf_initfail(struct bfa_ioc_s *ioc);
static void bfa_iocpf_getattrfail(struct bfa_ioc_s *ioc);
static void bfa_iocpf_stop(struct bfa_ioc_s *ioc);
static void bfa_iocpf_timeout(void *ioc_arg);
static void bfa_iocpf_sem_timeout(void *ioc_arg);

/*
 * IOCPF state machine events
 */
enum iocpf_event {
	IOCPF_E_ENABLE		= 1,	/* IOCPF enable request */
	IOCPF_E_DISABLE		= 2,	/* IOCPF disable request */
	IOCPF_E_STOP		= 3,	/* stop on driver detach */
	IOCPF_E_FWREADY		= 4,	/* f/w initialization done */
	IOCPF_E_FWRSP_ENABLE	= 5,	/* enable f/w response */
	IOCPF_E_FWRSP_DISABLE	= 6,	/* disable f/w response */
	IOCPF_E_FAIL		= 7,	/* failure notice by ioc sm */
	IOCPF_E_INITFAIL	= 8,	/* init fail notice by ioc sm */
	IOCPF_E_GETATTRFAIL	= 9,	/* getattr fail notice by ioc sm */
	IOCPF_E_SEMLOCKED	= 10,	/* h/w semaphore is locked */
	IOCPF_E_TIMEOUT		= 11,	/* f/w response timeout */
};
enum bfa_iocpf_state {
	BFA_IOCPF_RESET		= 1,	/* IOC is in reset state */
	BFA_IOCPF_SEMWAIT	= 2,	/* Waiting for IOC h/w semaphore */
	BFA_IOCPF_HWINIT	= 3,	/* IOC h/w is being initialized */
	BFA_IOCPF_READY		= 4,	/* IOCPF is initialized */
	BFA_IOCPF_INITFAIL	= 5,	/* IOCPF initialization failed */
	BFA_IOCPF_FAIL		= 6,	/* IOCPF failed */
	BFA_IOCPF_DISABLING	= 7,	/* IOCPF is being disabled */
	BFA_IOCPF_DISABLED	= 8,	/* IOCPF is disabled */
	BFA_IOCPF_FWMISMATCH	= 9,	/* IOC f/w different from driver's */
};
bfa_fsm_state_decl(bfa_iocpf, reset, struct bfa_iocpf_s, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, fwcheck, struct bfa_iocpf_s, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, mismatch, struct bfa_iocpf_s, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, semwait, struct bfa_iocpf_s, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, hwinit, struct bfa_iocpf_s, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, enabling, struct bfa_iocpf_s, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, ready, struct bfa_iocpf_s, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, initfail, struct bfa_iocpf_s, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, fail, struct bfa_iocpf_s, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, disabling, struct bfa_iocpf_s, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, disabled, struct bfa_iocpf_s, enum iocpf_event);
static struct bfa_sm_table_s iocpf_sm_table[] = {
	{BFA_SM(bfa_iocpf_sm_reset), BFA_IOCPF_RESET},
	{BFA_SM(bfa_iocpf_sm_fwcheck), BFA_IOCPF_FWMISMATCH},
	{BFA_SM(bfa_iocpf_sm_mismatch), BFA_IOCPF_FWMISMATCH},
	{BFA_SM(bfa_iocpf_sm_semwait), BFA_IOCPF_SEMWAIT},
	{BFA_SM(bfa_iocpf_sm_hwinit), BFA_IOCPF_HWINIT},
	{BFA_SM(bfa_iocpf_sm_enabling), BFA_IOCPF_HWINIT},
	{BFA_SM(bfa_iocpf_sm_ready), BFA_IOCPF_READY},
	{BFA_SM(bfa_iocpf_sm_initfail), BFA_IOCPF_INITFAIL},
	{BFA_SM(bfa_iocpf_sm_fail), BFA_IOCPF_FAIL},
	{BFA_SM(bfa_iocpf_sm_disabling), BFA_IOCPF_DISABLING},
	{BFA_SM(bfa_iocpf_sm_disabled), BFA_IOCPF_DISABLED},
};
/*
 * Beginning state. IOC uninit state.
 */
bfa_ioc_sm_uninit_entry(struct bfa_ioc_s *ioc)

/*
 * IOC is in uninit state.
 */
bfa_ioc_sm_uninit(struct bfa_ioc_s *ioc, enum ioc_event event)
		bfa_fsm_set_state(ioc, bfa_ioc_sm_reset);

		bfa_sm_fault(ioc, event);

/*
 * Reset entry actions -- initialize state machine
 */
bfa_ioc_sm_reset_entry(struct bfa_ioc_s *ioc)
	bfa_fsm_set_state(&ioc->iocpf, bfa_iocpf_sm_reset);

/*
 * IOC is in reset state.
 */
bfa_ioc_sm_reset(struct bfa_ioc_s *ioc, enum ioc_event event)
		bfa_fsm_set_state(ioc, bfa_ioc_sm_enabling);

		bfa_ioc_disable_comp(ioc);

		bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);

		bfa_sm_fault(ioc, event);

bfa_ioc_sm_enabling_entry(struct bfa_ioc_s *ioc)
	bfa_iocpf_enable(ioc);

/*
 * Host IOC function is being enabled, awaiting response from firmware.
 * Semaphore is acquired.
 */
bfa_ioc_sm_enabling(struct bfa_ioc_s *ioc, enum ioc_event event)
		bfa_fsm_set_state(ioc, bfa_ioc_sm_getattr);

		bfa_fsm_set_state(ioc, bfa_ioc_sm_initfail);

		bfa_fsm_set_state(ioc, bfa_ioc_sm_initfail);
		bfa_iocpf_initfail(ioc);

		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);

		bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);

		bfa_sm_fault(ioc, event);

bfa_ioc_sm_getattr_entry(struct bfa_ioc_s *ioc)
	bfa_ioc_timer_start(ioc);
	bfa_ioc_send_getattr(ioc);

/*
 * IOC configuration in progress. Timer is active.
 */
bfa_ioc_sm_getattr(struct bfa_ioc_s *ioc, enum ioc_event event)
	case IOC_E_FWRSP_GETATTR:
		bfa_ioc_timer_stop(ioc);
		bfa_ioc_check_attr_wwns(ioc);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_op);

		bfa_ioc_timer_stop(ioc);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_initfail);

		bfa_ioc_timer_stop(ioc);

		bfa_fsm_set_state(ioc, bfa_ioc_sm_initfail);
		bfa_iocpf_getattrfail(ioc);

		bfa_ioc_timer_stop(ioc);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);

		bfa_sm_fault(ioc, event);

bfa_ioc_sm_op_entry(struct bfa_ioc_s *ioc)
	struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad;

	ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_OK);
	bfa_ioc_hb_monitor(ioc);
	BFA_LOG(KERN_INFO, bfad, log_level, "IOC enabled\n");

bfa_ioc_sm_op(struct bfa_ioc_s *ioc, enum ioc_event event)
		bfa_ioc_hb_stop(ioc);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);

		bfa_ioc_hb_stop(ioc);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_fail);

		bfa_ioc_hb_stop(ioc);
		/* !!! fall through !!! */

		bfa_fsm_set_state(ioc, bfa_ioc_sm_fail);

		bfa_sm_fault(ioc, event);

bfa_ioc_sm_disabling_entry(struct bfa_ioc_s *ioc)
	struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad;

	bfa_iocpf_disable(ioc);
	BFA_LOG(KERN_INFO, bfad, log_level, "IOC disabled\n");

/*
 * IOC is being disabled
 */
bfa_ioc_sm_disabling(struct bfa_ioc_s *ioc, enum ioc_event event)
		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled);

		/*
		 * No state change. Will move to disabled state
		 * after iocpf sm completes failure processing and
		 * moves to disabled state.
		 */

		bfa_sm_fault(ioc, event);

/*
 * IOC disable completion entry.
 */
bfa_ioc_sm_disabled_entry(struct bfa_ioc_s *ioc)
	bfa_ioc_disable_comp(ioc);

bfa_ioc_sm_disabled(struct bfa_ioc_s *ioc, enum ioc_event event)
		bfa_fsm_set_state(ioc, bfa_ioc_sm_enabling);

		ioc->cbfn->disable_cbfn(ioc->bfa);

		bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);

		bfa_sm_fault(ioc, event);

bfa_ioc_sm_initfail_entry(struct bfa_ioc_s *ioc)
	ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);

/*
 * Hardware initialization failed.
 */
bfa_ioc_sm_initfail(struct bfa_ioc_s *ioc, enum ioc_event event)
		bfa_fsm_set_state(ioc, bfa_ioc_sm_getattr);

		/*
		 * Initialization failure during iocpf init retry.
		 */
		ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);

		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);

		bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);

		bfa_sm_fault(ioc, event);

bfa_ioc_sm_fail_entry(struct bfa_ioc_s *ioc)
	struct list_head *qe;
	struct bfa_ioc_hbfail_notify_s *notify;
	struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad;

	/*
	 * Notify driver and common modules registered for notification.
	 */
	ioc->cbfn->hbfail_cbfn(ioc->bfa);
	list_for_each(qe, &ioc->hb_notify_q) {
		notify = (struct bfa_ioc_hbfail_notify_s *) qe;
		notify->cbfn(notify->cbarg);

	BFA_LOG(KERN_CRIT, bfad, log_level,
		"Heart Beat of IOC has failed\n");

bfa_ioc_sm_fail(struct bfa_ioc_s *ioc, enum ioc_event event)
		/*
		 * Initialization failure during iocpf recovery.
		 * !!! Fall through !!!
		 */
		ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);

		bfa_fsm_set_state(ioc, bfa_ioc_sm_getattr);

		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);

		/*
		 * HB failure notification, ignore.
		 */

		bfa_sm_fault(ioc, event);

/*
 * IOCPF State Machine
 */

/*
 * Reset entry actions -- initialize state machine
 */
bfa_iocpf_sm_reset_entry(struct bfa_iocpf_s *iocpf)
	iocpf->retry_count = 0;
	iocpf->auto_recover = bfa_auto_recover;

/*
 * Beginning state. IOC is in reset state.
 */
bfa_iocpf_sm_reset(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
	struct bfa_ioc_s *ioc = iocpf->ioc;

		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fwcheck);

		bfa_sm_fault(ioc, event);

/*
 * Semaphore should be acquired for version check.
 */
bfa_iocpf_sm_fwcheck_entry(struct bfa_iocpf_s *iocpf)
	bfa_ioc_hw_sem_get(iocpf->ioc);

/*
 * Awaiting h/w semaphore to continue with version check.
 */
bfa_iocpf_sm_fwcheck(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
	struct bfa_ioc_s *ioc = iocpf->ioc;

	case IOCPF_E_SEMLOCKED:
		if (bfa_ioc_firmware_lock(ioc)) {
			iocpf->retry_count = 0;
			bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit);
			bfa_ioc_hw_sem_release(ioc);
			bfa_fsm_set_state(iocpf, bfa_iocpf_sm_mismatch);

	case IOCPF_E_DISABLE:
		bfa_ioc_hw_sem_get_cancel(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
		bfa_ioc_pf_disabled(ioc);

		bfa_ioc_hw_sem_get_cancel(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);

		bfa_sm_fault(ioc, event);

/*
 * Notify enable completion callback.
 */
bfa_iocpf_sm_mismatch_entry(struct bfa_iocpf_s *iocpf)
	/*
	 * Call only the first time sm enters fwmismatch state.
	 */
	if (iocpf->retry_count == 0)
		bfa_ioc_pf_fwmismatch(iocpf->ioc);

	iocpf->retry_count++;
	bfa_iocpf_timer_start(iocpf->ioc);

/*
 * Awaiting firmware version match.
 */
bfa_iocpf_sm_mismatch(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
	struct bfa_ioc_s *ioc = iocpf->ioc;

	case IOCPF_E_TIMEOUT:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fwcheck);

	case IOCPF_E_DISABLE:
		bfa_iocpf_timer_stop(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
		bfa_ioc_pf_disabled(ioc);

		bfa_iocpf_timer_stop(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);

		bfa_sm_fault(ioc, event);

/*
 * Request for semaphore.
 */
bfa_iocpf_sm_semwait_entry(struct bfa_iocpf_s *iocpf)
	bfa_ioc_hw_sem_get(iocpf->ioc);
/*
 * Awaiting semaphore for h/w initialization.
 */
bfa_iocpf_sm_semwait(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
	struct bfa_ioc_s *ioc = iocpf->ioc;

	case IOCPF_E_SEMLOCKED:
		iocpf->retry_count = 0;
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit);

	case IOCPF_E_DISABLE:
		bfa_ioc_hw_sem_get_cancel(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);

		bfa_sm_fault(ioc, event);

bfa_iocpf_sm_hwinit_entry(struct bfa_iocpf_s *iocpf)
	bfa_iocpf_timer_start(iocpf->ioc);
	bfa_ioc_reset(iocpf->ioc, BFA_FALSE);

/*
 * Hardware is being initialized. Interrupts are enabled.
 * Holding hardware semaphore lock.
 */
bfa_iocpf_sm_hwinit(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
	struct bfa_ioc_s *ioc = iocpf->ioc;

	case IOCPF_E_FWREADY:
		bfa_iocpf_timer_stop(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_enabling);

	case IOCPF_E_INITFAIL:
		bfa_iocpf_timer_stop(ioc);
		/*
		 * !!! fall through !!!
		 */

	case IOCPF_E_TIMEOUT:
		iocpf->retry_count++;
		if (iocpf->retry_count < BFA_IOC_HWINIT_MAX) {
			bfa_iocpf_timer_start(ioc);
			bfa_ioc_reset(ioc, BFA_TRUE);

		bfa_ioc_hw_sem_release(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail);

		if (event == IOCPF_E_TIMEOUT)
			bfa_ioc_pf_failed(ioc);

	case IOCPF_E_DISABLE:
		bfa_ioc_hw_sem_release(ioc);
		bfa_iocpf_timer_stop(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);

		bfa_sm_fault(ioc, event);

bfa_iocpf_sm_enabling_entry(struct bfa_iocpf_s *iocpf)
	bfa_iocpf_timer_start(iocpf->ioc);
	bfa_ioc_send_enable(iocpf->ioc);

/*
 * Host IOC function is being enabled, awaiting response from firmware.
 * Semaphore is acquired.
 */
bfa_iocpf_sm_enabling(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
	struct bfa_ioc_s *ioc = iocpf->ioc;

	case IOCPF_E_FWRSP_ENABLE:
		bfa_iocpf_timer_stop(ioc);
		bfa_ioc_hw_sem_release(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_ready);

	case IOCPF_E_INITFAIL:
		bfa_iocpf_timer_stop(ioc);
		/*
		 * !!! fall through !!!
		 */

	case IOCPF_E_TIMEOUT:
		iocpf->retry_count++;
		if (iocpf->retry_count < BFA_IOC_HWINIT_MAX) {
			writel(BFI_IOC_UNINIT, ioc->ioc_regs.ioc_fwstate);
			bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit);

		bfa_ioc_hw_sem_release(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail);

		if (event == IOCPF_E_TIMEOUT)
			bfa_ioc_pf_failed(ioc);

	case IOCPF_E_DISABLE:
		bfa_iocpf_timer_stop(ioc);
		bfa_ioc_hw_sem_release(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling);

	case IOCPF_E_FWREADY:
		bfa_ioc_send_enable(ioc);

		bfa_sm_fault(ioc, event);

bfa_iocpf_sm_ready_entry(struct bfa_iocpf_s *iocpf)
	bfa_ioc_pf_enabled(iocpf->ioc);

bfa_iocpf_sm_ready(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
	struct bfa_ioc_s *ioc = iocpf->ioc;

	case IOCPF_E_DISABLE:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling);

	case IOCPF_E_GETATTRFAIL:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail);

		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);

	case IOCPF_E_FWREADY:
		if (bfa_ioc_is_operational(ioc))
			bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
			bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail);

		bfa_ioc_pf_failed(ioc);

		bfa_sm_fault(ioc, event);

bfa_iocpf_sm_disabling_entry(struct bfa_iocpf_s *iocpf)
	bfa_iocpf_timer_start(iocpf->ioc);
	bfa_ioc_send_disable(iocpf->ioc);

/*
 * IOC is being disabled
 */
bfa_iocpf_sm_disabling(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
	struct bfa_ioc_s *ioc = iocpf->ioc;

	case IOCPF_E_FWRSP_DISABLE:
	case IOCPF_E_FWREADY:
		bfa_iocpf_timer_stop(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);

		bfa_iocpf_timer_stop(ioc);
		/*
		 * !!! fall through !!!
		 */

	case IOCPF_E_TIMEOUT:
		writel(BFI_IOC_FAIL, ioc->ioc_regs.ioc_fwstate);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);

	case IOCPF_E_FWRSP_ENABLE:

		bfa_sm_fault(ioc, event);

/*
 * IOC disable completion entry.
 */
bfa_iocpf_sm_disabled_entry(struct bfa_iocpf_s *iocpf)
	bfa_ioc_pf_disabled(iocpf->ioc);

bfa_iocpf_sm_disabled(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
	struct bfa_ioc_s *ioc = iocpf->ioc;

		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_semwait);

		bfa_ioc_firmware_unlock(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);

		bfa_sm_fault(ioc, event);

bfa_iocpf_sm_initfail_entry(struct bfa_iocpf_s *iocpf)
	bfa_iocpf_timer_start(iocpf->ioc);

/*
 * Hardware initialization failed.
 */
bfa_iocpf_sm_initfail(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
	struct bfa_ioc_s *ioc = iocpf->ioc;

	bfa_trc(ioc, event);

	case IOCPF_E_DISABLE:
		bfa_iocpf_timer_stop(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);

		bfa_iocpf_timer_stop(ioc);
		bfa_ioc_firmware_unlock(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);

	case IOCPF_E_TIMEOUT:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_semwait);

		bfa_sm_fault(ioc, event);

bfa_iocpf_sm_fail_entry(struct bfa_iocpf_s *iocpf)
	/*
	 * Mark IOC as failed in hardware and stop firmware.
	 */
	bfa_ioc_lpu_stop(iocpf->ioc);
	writel(BFI_IOC_FAIL, iocpf->ioc->ioc_regs.ioc_fwstate);

	/*
	 * Notify other functions on HB failure.
	 */
	bfa_ioc_notify_hbfail(iocpf->ioc);

	/*
	 * Flush any queued up mailbox requests.
	 */
	bfa_ioc_mbox_hbfail(iocpf->ioc);

	if (iocpf->auto_recover)
		bfa_iocpf_recovery_timer_start(iocpf->ioc);

/*
 * IOC is in failed state.
 */
bfa_iocpf_sm_fail(struct bfa_iocpf_s *iocpf, enum iocpf_event event)
	struct bfa_ioc_s *ioc = iocpf->ioc;

	bfa_trc(ioc, event);

	case IOCPF_E_DISABLE:
		if (iocpf->auto_recover)
			bfa_iocpf_timer_stop(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);

	case IOCPF_E_TIMEOUT:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_semwait);

		bfa_sm_fault(ioc, event);

/*
 * hal_ioc_pvt BFA IOC private functions
 */
bfa_ioc_disable_comp(struct bfa_ioc_s *ioc)
	struct list_head *qe;
	struct bfa_ioc_hbfail_notify_s *notify;

	ioc->cbfn->disable_cbfn(ioc->bfa);

	/*
	 * Notify common modules registered for notification.
	 */
	list_for_each(qe, &ioc->hb_notify_q) {
		notify = (struct bfa_ioc_hbfail_notify_s *) qe;
		notify->cbfn(notify->cbarg);

bfa_ioc_sem_get(void __iomem *sem_reg)
#define BFA_SEM_SPINCNT	3000

	r32 = readl(sem_reg);

	while (r32 && (cnt < BFA_SEM_SPINCNT)) {
		r32 = readl(sem_reg);

	bfa_assert(cnt < BFA_SEM_SPINCNT);

bfa_ioc_sem_release(void __iomem *sem_reg)

bfa_ioc_hw_sem_get(struct bfa_ioc_s *ioc)
	/*
	 * First read to the semaphore register will return 0, subsequent reads
	 * will return 1. Semaphore is released by writing 1 to the register
	 */
	r32 = readl(ioc->ioc_regs.ioc_sem_reg);
		bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_SEMLOCKED);

	bfa_sem_timer_start(ioc);

bfa_ioc_hw_sem_release(struct bfa_ioc_s *ioc)
	writel(1, ioc->ioc_regs.ioc_sem_reg);

bfa_ioc_hw_sem_get_cancel(struct bfa_ioc_s *ioc)
	bfa_sem_timer_stop(ioc);
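/*
 * Usage sketch (illustrative only, not a driver interface): the
 * semaphore register locks on read -- the first read returns 0 (lock
 * taken) and later reads return 1 until the holder writes 1 back,
 * which is exactly what the get/release pair above implements.
 */
static inline void
bfa_ioc_hw_sem_sketch(struct bfa_ioc_s *ioc)
{
	u32 r32 = readl(ioc->ioc_regs.ioc_sem_reg);

	if (r32 == 0) {
		/* lock acquired -- do the protected work, then release */
		bfa_ioc_hw_sem_release(ioc);
	}
}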
/*
 * Initialize LPU local memory (aka secondary memory / SRAM)
 */
bfa_ioc_lmem_init(struct bfa_ioc_s *ioc)
#define PSS_LMEM_INIT_TIME	10000

	pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg);
	pss_ctl &= ~__PSS_LMEM_RESET;
	pss_ctl |= __PSS_LMEM_INIT_EN;
	/*
	 * i2c workaround: 12.5 kHz clock
	 */
	pss_ctl |= __PSS_I2C_CLK_DIV(3UL);
	writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);

	/*
	 * wait for memory initialization to be complete
	 */
		pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg);
	} while (!(pss_ctl & __PSS_LMEM_INIT_DONE) && (i < PSS_LMEM_INIT_TIME));
	/*
	 * If memory initialization is not successful, IOC timeout will catch
	 * such failures.
	 */
	bfa_assert(pss_ctl & __PSS_LMEM_INIT_DONE);
	bfa_trc(ioc, pss_ctl);

	pss_ctl &= ~(__PSS_LMEM_INIT_DONE | __PSS_LMEM_INIT_EN);
	writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);

bfa_ioc_lpu_start(struct bfa_ioc_s *ioc)
	/*
	 * Take processor out of reset.
	 */
	pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg);
	pss_ctl &= ~__PSS_LPU0_RESET;

	writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);

bfa_ioc_lpu_stop(struct bfa_ioc_s *ioc)
	/*
	 * Put processors in reset.
	 */
	pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg);
	pss_ctl |= (__PSS_LPU0_RESET | __PSS_LPU1_RESET);

	writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);

/*
 * Get driver and firmware versions.
 */
bfa_ioc_fwver_get(struct bfa_ioc_s *ioc, struct bfi_ioc_image_hdr_s *fwhdr)
	u32 *fwsig = (u32 *) fwhdr;

	pgnum = bfa_ioc_smem_pgnum(ioc, loff);
	pgoff = bfa_ioc_smem_pgoff(ioc, loff);
	writel(pgnum, ioc->ioc_regs.host_page_num_fn);

	for (i = 0; i < (sizeof(struct bfi_ioc_image_hdr_s) / sizeof(u32));
		bfa_mem_read(ioc->ioc_regs.smem_page_start, loff);
		loff += sizeof(u32);
/*
 * Returns TRUE if the firmware versions are the same.
 */
bfa_ioc_fwver_cmp(struct bfa_ioc_s *ioc, struct bfi_ioc_image_hdr_s *fwhdr)
	struct bfi_ioc_image_hdr_s *drv_fwhdr;

	drv_fwhdr = (struct bfi_ioc_image_hdr_s *)
		bfa_cb_image_get_chunk(BFA_IOC_FWIMG_TYPE(ioc), 0);

	for (i = 0; i < BFI_IOC_MD5SUM_SZ; i++) {
		if (fwhdr->md5sum[i] != drv_fwhdr->md5sum[i]) {
			bfa_trc(ioc, fwhdr->md5sum[i]);
			bfa_trc(ioc, drv_fwhdr->md5sum[i]);

	bfa_trc(ioc, fwhdr->md5sum[0]);

/*
 * Return true if current running version is valid. Firmware signature and
 * execution context (driver/bios) must match.
 */
static bfa_boolean_t
bfa_ioc_fwver_valid(struct bfa_ioc_s *ioc, u32 boot_env)
	struct bfi_ioc_image_hdr_s fwhdr, *drv_fwhdr;

	/*
	 * If bios/efi boot (flash based) -- return true
	 */
	if (bfa_ioc_is_bios_optrom(ioc))

	bfa_ioc_fwver_get(ioc, &fwhdr);
	drv_fwhdr = (struct bfi_ioc_image_hdr_s *)
		bfa_cb_image_get_chunk(BFA_IOC_FWIMG_TYPE(ioc), 0);

	if (fwhdr.signature != drv_fwhdr->signature) {
		bfa_trc(ioc, fwhdr.signature);
		bfa_trc(ioc, drv_fwhdr->signature);

	if (swab32(fwhdr.param) != boot_env) {
		bfa_trc(ioc, fwhdr.param);
		bfa_trc(ioc, boot_env);

	return bfa_ioc_fwver_cmp(ioc, &fwhdr);

/*
 * Conditionally flush any pending message from firmware at start.
 */
bfa_ioc_msgflush(struct bfa_ioc_s *ioc)
	r32 = readl(ioc->ioc_regs.lpu_mbox_cmd);
		writel(1, ioc->ioc_regs.lpu_mbox_cmd);

bfa_ioc_hwinit(struct bfa_ioc_s *ioc, bfa_boolean_t force)
	enum bfi_ioc_state ioc_fwstate;
	bfa_boolean_t fwvalid;

	ioc_fwstate = readl(ioc->ioc_regs.ioc_fwstate);
		ioc_fwstate = BFI_IOC_UNINIT;

	bfa_trc(ioc, ioc_fwstate);

	boot_type = BFI_BOOT_TYPE_NORMAL;
	boot_env = BFI_BOOT_LOADER_OS;

	/*
	 * Flash based firmware boot BIOS env.
	 */
	if (bfa_ioc_is_bios_optrom(ioc)) {
		boot_type = BFI_BOOT_TYPE_FLASH;
		boot_env = BFI_BOOT_LOADER_BIOS;

	/*
	 * Flash based firmware boot UEFI env.
	 */
	if (bfa_ioc_is_uefi(ioc)) {
		boot_type = BFI_BOOT_TYPE_FLASH;
		boot_env = BFI_BOOT_LOADER_UEFI;

	/*
	 * check if firmware is valid
	 */
	fwvalid = (ioc_fwstate == BFI_IOC_UNINIT) ?
		BFA_FALSE : bfa_ioc_fwver_valid(ioc, boot_env);

		bfa_ioc_boot(ioc, boot_type, boot_env);

	/*
	 * If hardware initialization is in progress (initialized by other IOC),
	 * just wait for an initialization completion interrupt.
	 */
	if (ioc_fwstate == BFI_IOC_INITING) {
		ioc->cbfn->reset_cbfn(ioc->bfa);
	/*
	 * If IOC function is disabled and firmware version is same,
	 * just re-enable IOC.
	 *
	 * If option rom, IOC must not be in operational state. With
	 * convergence, IOC will be in operational state when 2nd driver
	 * is loaded.
	 */
	if (ioc_fwstate == BFI_IOC_DISABLED ||
	    (!bfa_ioc_is_bios_optrom(ioc) && ioc_fwstate == BFI_IOC_OP)) {
		/*
		 * When using MSI-X any pending firmware ready event should
		 * be flushed. Otherwise MSI-X interrupts are not delivered.
		 */
		bfa_ioc_msgflush(ioc);
		ioc->cbfn->reset_cbfn(ioc->bfa);
		bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_FWREADY);

	/*
	 * Initialize the h/w for any other states.
	 */
	bfa_ioc_boot(ioc, boot_type, boot_env);

bfa_ioc_timeout(void *ioc_arg)
	struct bfa_ioc_s *ioc = (struct bfa_ioc_s *) ioc_arg;

	bfa_fsm_send_event(ioc, IOC_E_TIMEOUT);

bfa_ioc_mbox_send(struct bfa_ioc_s *ioc, void *ioc_msg, int len)
	u32 *msgp = (u32 *) ioc_msg;

	bfa_trc(ioc, msgp[0]);
	bfa_assert(len <= BFI_IOC_MSGLEN_MAX);

	/*
	 * first write msg to mailbox registers
	 */
	for (i = 0; i < len / sizeof(u32); i++)
		writel(cpu_to_le32(msgp[i]),
		       ioc->ioc_regs.hfn_mbox + i * sizeof(u32));

	for (; i < BFI_IOC_MSGLEN_MAX / sizeof(u32); i++)
		writel(0, ioc->ioc_regs.hfn_mbox + i * sizeof(u32));

	/*
	 * write 1 to mailbox CMD to trigger LPU event
	 */
	writel(1, ioc->ioc_regs.hfn_mbox_cmd);
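	/* the dummy read back flushes the posted doorbell write to the LPU */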
	(void) readl(ioc->ioc_regs.hfn_mbox_cmd);

bfa_ioc_send_enable(struct bfa_ioc_s *ioc)
	struct bfi_ioc_ctrl_req_s enable_req;
	struct bfa_timeval_s tv;

	bfi_h2i_set(enable_req.mh, BFI_MC_IOC, BFI_IOC_H2I_ENABLE_REQ,
		    bfa_ioc_portid(ioc));
	enable_req.ioc_class = ioc->ioc_mc;
	bfa_os_gettimeofday(&tv);
	enable_req.tv_sec = be32_to_cpu(tv.tv_sec);
	bfa_ioc_mbox_send(ioc, &enable_req, sizeof(struct bfi_ioc_ctrl_req_s));

bfa_ioc_send_disable(struct bfa_ioc_s *ioc)
	struct bfi_ioc_ctrl_req_s disable_req;

	bfi_h2i_set(disable_req.mh, BFI_MC_IOC, BFI_IOC_H2I_DISABLE_REQ,
		    bfa_ioc_portid(ioc));
	bfa_ioc_mbox_send(ioc, &disable_req, sizeof(struct bfi_ioc_ctrl_req_s));

bfa_ioc_send_getattr(struct bfa_ioc_s *ioc)
	struct bfi_ioc_getattr_req_s attr_req;

	bfi_h2i_set(attr_req.mh, BFI_MC_IOC, BFI_IOC_H2I_GETATTR_REQ,
		    bfa_ioc_portid(ioc));
	bfa_dma_be_addr_set(attr_req.attr_addr, ioc->attr_dma.pa);
	bfa_ioc_mbox_send(ioc, &attr_req, sizeof(attr_req));
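/*
 * Heartbeat check: firmware increments ioc_regs.heartbeat on every
 * tick. The driver samples the register every BFA_IOC_HB_TOV msecs; an
 * unchanged count means the firmware is hung and recovery is started.
 */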
bfa_ioc_hb_check(void *cbarg)
	struct bfa_ioc_s *ioc = cbarg;

	hb_count = readl(ioc->ioc_regs.heartbeat);
	if (ioc->hb_count == hb_count) {
		printk(KERN_CRIT "Firmware heartbeat failure at %d", hb_count);
		bfa_ioc_recover(ioc);
		ioc->hb_count = hb_count;

	bfa_ioc_mbox_poll(ioc);
	bfa_hb_timer_start(ioc);

bfa_ioc_hb_monitor(struct bfa_ioc_s *ioc)
	ioc->hb_count = readl(ioc->ioc_regs.heartbeat);
	bfa_hb_timer_start(ioc);

bfa_ioc_hb_stop(struct bfa_ioc_s *ioc)
	bfa_hb_timer_stop(ioc);
/*
 * Initiate a full firmware download.
 */
bfa_ioc_download_fw(struct bfa_ioc_s *ioc, u32 boot_type,
	/*
	 * Initialize LMEM first before code download
	 */
	bfa_ioc_lmem_init(ioc);

	bfa_trc(ioc, bfa_cb_image_get_size(BFA_IOC_FWIMG_TYPE(ioc)));
	fwimg = bfa_cb_image_get_chunk(BFA_IOC_FWIMG_TYPE(ioc), chunkno);

	pgnum = bfa_ioc_smem_pgnum(ioc, loff);
	pgoff = bfa_ioc_smem_pgoff(ioc, loff);

	writel(pgnum, ioc->ioc_regs.host_page_num_fn);

	for (i = 0; i < bfa_cb_image_get_size(BFA_IOC_FWIMG_TYPE(ioc)); i++) {
		if (BFA_IOC_FLASH_CHUNK_NO(i) != chunkno) {
			chunkno = BFA_IOC_FLASH_CHUNK_NO(i);
			fwimg = bfa_cb_image_get_chunk(BFA_IOC_FWIMG_TYPE(ioc),
					BFA_IOC_FLASH_CHUNK_ADDR(chunkno));

		bfa_mem_write(ioc->ioc_regs.smem_page_start, loff,
			      fwimg[BFA_IOC_FLASH_OFFSET_IN_CHUNK(i)]);

		loff += sizeof(u32);

		/*
		 * handle page offset wrap around
		 */
		loff = PSS_SMEM_PGOFF(loff);
			writel(pgnum, ioc->ioc_regs.host_page_num_fn);

	writel(bfa_ioc_smem_pgnum(ioc, 0), ioc->ioc_regs.host_page_num_fn);

	/*
	 * Set boot type and boot param at the end.
	 */
	bfa_mem_write(ioc->ioc_regs.smem_page_start, BFI_BOOT_TYPE_OFF,
	bfa_mem_write(ioc->ioc_regs.smem_page_start, BFI_BOOT_LOADER_OFF,

bfa_ioc_reset(struct bfa_ioc_s *ioc, bfa_boolean_t force)
	bfa_ioc_hwinit(ioc, force);

/*
 * Update BFA configuration from firmware configuration.
 */
bfa_ioc_getattr_reply(struct bfa_ioc_s *ioc)
	struct bfi_ioc_attr_s *attr = ioc->attr;

	attr->adapter_prop = be32_to_cpu(attr->adapter_prop);
	attr->card_type = be32_to_cpu(attr->card_type);
	attr->maxfrsize = be16_to_cpu(attr->maxfrsize);

	bfa_fsm_send_event(ioc, IOC_E_FWRSP_GETATTR);

/*
 * Attach time initialization of mbox logic.
 */
bfa_ioc_mbox_attach(struct bfa_ioc_s *ioc)
	struct bfa_ioc_mbox_mod_s *mod = &ioc->mbox_mod;

	INIT_LIST_HEAD(&mod->cmd_q);
	for (mc = 0; mc < BFI_MC_MAX; mc++) {
		mod->mbhdlr[mc].cbfn = NULL;
		mod->mbhdlr[mc].cbarg = ioc->bfa;

/*
 * Mbox poll timer -- restarts any pending mailbox requests.
 */
bfa_ioc_mbox_poll(struct bfa_ioc_s *ioc)
	struct bfa_ioc_mbox_mod_s *mod = &ioc->mbox_mod;
	struct bfa_mbox_cmd_s *cmd;

	/*
	 * If no command pending, do nothing
	 */
	if (list_empty(&mod->cmd_q))

	/*
	 * If previous command is not yet fetched by firmware, do nothing
	 */
	stat = readl(ioc->ioc_regs.hfn_mbox_cmd);

	/*
	 * Enqueue command to firmware.
	 */
	bfa_q_deq(&mod->cmd_q, &cmd);
	bfa_ioc_mbox_send(ioc, cmd->msg, sizeof(cmd->msg));

/*
 * Cleanup any pending requests.
 */
bfa_ioc_mbox_hbfail(struct bfa_ioc_s *ioc)
	struct bfa_ioc_mbox_mod_s *mod = &ioc->mbox_mod;
	struct bfa_mbox_cmd_s *cmd;

	while (!list_empty(&mod->cmd_q))
		bfa_q_deq(&mod->cmd_q, &cmd);

/*
 * Read data from SMEM to host through PCI memmap
 *
 * @param[in]	ioc	memory for IOC
 * @param[in]	tbuf	app memory to store data from smem
 * @param[in]	soff	smem offset
 * @param[in]	sz	size of smem in bytes
 */
bfa_ioc_smem_read(struct bfa_ioc_s *ioc, void *tbuf, u32 soff, u32 sz)
	u32 pgnum, loff, r32;

	pgnum = bfa_ioc_smem_pgnum(ioc, soff);
	loff = bfa_ioc_smem_pgoff(ioc, soff);
	bfa_trc(ioc, pgnum);

	/*
	 * Hold semaphore to serialize pll init and fwtrc.
	 */
	if (BFA_FALSE == bfa_ioc_sem_get(ioc->ioc_regs.ioc_init_sem_reg)) {
		return BFA_STATUS_FAILED;

	writel(pgnum, ioc->ioc_regs.host_page_num_fn);

	len = sz/sizeof(u32);
	for (i = 0; i < len; i++) {
		r32 = bfa_mem_read(ioc->ioc_regs.smem_page_start, loff);
		buf[i] = be32_to_cpu(r32);
		loff += sizeof(u32);

		/*
		 * handle page offset wrap around
		 */
		loff = PSS_SMEM_PGOFF(loff);
			writel(pgnum, ioc->ioc_regs.host_page_num_fn);

	writel(bfa_ioc_smem_pgnum(ioc, 0), ioc->ioc_regs.host_page_num_fn);

	/*
	 * release semaphore.
	 */
	bfa_ioc_sem_release(ioc->ioc_regs.ioc_init_sem_reg);

	bfa_trc(ioc, pgnum);
	return BFA_STATUS_OK;

/*
 * Clear SMEM data from host through PCI memmap
 *
 * @param[in]	ioc	memory for IOC
 * @param[in]	soff	smem offset
 * @param[in]	sz	size of smem in bytes
 */
bfa_ioc_smem_clr(struct bfa_ioc_s *ioc, u32 soff, u32 sz)
	pgnum = bfa_ioc_smem_pgnum(ioc, soff);
	loff = bfa_ioc_smem_pgoff(ioc, soff);
	bfa_trc(ioc, pgnum);

	/*
	 * Hold semaphore to serialize pll init and fwtrc.
	 */
	if (BFA_FALSE == bfa_ioc_sem_get(ioc->ioc_regs.ioc_init_sem_reg)) {
		return BFA_STATUS_FAILED;

	writel(pgnum, ioc->ioc_regs.host_page_num_fn);

	len = sz/sizeof(u32); /* len in words */
	for (i = 0; i < len; i++) {
		bfa_mem_write(ioc->ioc_regs.smem_page_start, loff, 0);
		loff += sizeof(u32);

		/*
		 * handle page offset wrap around
		 */
		loff = PSS_SMEM_PGOFF(loff);
			writel(pgnum, ioc->ioc_regs.host_page_num_fn);

	writel(bfa_ioc_smem_pgnum(ioc, 0), ioc->ioc_regs.host_page_num_fn);

	/*
	 * release semaphore.
	 */
	bfa_ioc_sem_release(ioc->ioc_regs.ioc_init_sem_reg);
	bfa_trc(ioc, pgnum);
	return BFA_STATUS_OK;

/*
 * hal iocpf to ioc interface
 */
bfa_ioc_pf_enabled(struct bfa_ioc_s *ioc)
	bfa_fsm_send_event(ioc, IOC_E_ENABLED);

bfa_ioc_pf_disabled(struct bfa_ioc_s *ioc)
	bfa_fsm_send_event(ioc, IOC_E_DISABLED);

bfa_ioc_pf_failed(struct bfa_ioc_s *ioc)
	bfa_fsm_send_event(ioc, IOC_E_FAILED);

bfa_ioc_pf_fwmismatch(struct bfa_ioc_s *ioc)
	struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad;

	/*
	 * Provide enable completion callback.
	 */
	ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
	BFA_LOG(KERN_WARNING, bfad, log_level,
		"Running firmware version is incompatible "
		"with the driver version\n");

bfa_ioc_pll_init(struct bfa_ioc_s *ioc)
	/*
	 * Hold semaphore so that nobody can access the chip during init.
	 */
	bfa_ioc_sem_get(ioc->ioc_regs.ioc_init_sem_reg);

	bfa_ioc_pll_init_asic(ioc);

	ioc->pllinit = BFA_TRUE;

	/*
	 * release semaphore.
	 */
	bfa_ioc_sem_release(ioc->ioc_regs.ioc_init_sem_reg);

	return BFA_STATUS_OK;

/*
 * Interface used by diag module to do firmware boot with memory test
 * as the entry vector.
 */
bfa_ioc_boot(struct bfa_ioc_s *ioc, u32 boot_type, u32 boot_env)
	bfa_ioc_stats(ioc, ioc_boots);

	if (bfa_ioc_pll_init(ioc) != BFA_STATUS_OK)

	/*
	 * Initialize IOC state of all functions on a chip reset.
	 */
	rb = ioc->pcidev.pci_bar_kva;
	if (boot_type == BFI_BOOT_TYPE_MEMTEST) {
		writel(BFI_IOC_MEMTEST, (rb + BFA_IOC0_STATE_REG));
		writel(BFI_IOC_MEMTEST, (rb + BFA_IOC1_STATE_REG));
		writel(BFI_IOC_INITING, (rb + BFA_IOC0_STATE_REG));
		writel(BFI_IOC_INITING, (rb + BFA_IOC1_STATE_REG));

	bfa_ioc_msgflush(ioc);
	bfa_ioc_download_fw(ioc, boot_type, boot_env);

	/*
	 * Enable interrupts just before starting LPU
	 */
	ioc->cbfn->reset_cbfn(ioc->bfa);
	bfa_ioc_lpu_start(ioc);

/*
 * Enable/disable IOC failure auto recovery.
 */
bfa_ioc_auto_recover(bfa_boolean_t auto_recover)
	bfa_auto_recover = auto_recover;

bfa_ioc_is_operational(struct bfa_ioc_s *ioc)
	return bfa_fsm_cmp_state(ioc, bfa_ioc_sm_op);

bfa_ioc_is_initialized(struct bfa_ioc_s *ioc)
	u32 r32 = readl(ioc->ioc_regs.ioc_fwstate);

	return ((r32 != BFI_IOC_UNINIT) &&
		(r32 != BFI_IOC_INITING) &&
		(r32 != BFI_IOC_MEMTEST));

bfa_ioc_msgget(struct bfa_ioc_s *ioc, void *mbmsg)
	for (i = 0; i < (sizeof(union bfi_ioc_i2h_msg_u) / sizeof(u32));
		r32 = readl(ioc->ioc_regs.lpu_mbox +
		msgp[i] = cpu_to_be32(r32);

	/*
	 * turn off mailbox interrupt by clearing mailbox status
	 */
	writel(1, ioc->ioc_regs.lpu_mbox_cmd);
	readl(ioc->ioc_regs.lpu_mbox_cmd);

bfa_ioc_isr(struct bfa_ioc_s *ioc, struct bfi_mbmsg_s *m)
	union bfi_ioc_i2h_msg_u *msg;
	struct bfa_iocpf_s *iocpf = &ioc->iocpf;

	msg = (union bfi_ioc_i2h_msg_u *) m;

	bfa_ioc_stats(ioc, ioc_isrs);

	switch (msg->mh.msg_id) {
	case BFI_IOC_I2H_HBEAT:

	case BFI_IOC_I2H_READY_EVENT:
		bfa_fsm_send_event(iocpf, IOCPF_E_FWREADY);

	case BFI_IOC_I2H_ENABLE_REPLY:
		bfa_fsm_send_event(iocpf, IOCPF_E_FWRSP_ENABLE);

	case BFI_IOC_I2H_DISABLE_REPLY:
		bfa_fsm_send_event(iocpf, IOCPF_E_FWRSP_DISABLE);

	case BFI_IOC_I2H_GETATTR_REPLY:
		bfa_ioc_getattr_reply(ioc);

		bfa_trc(ioc, msg->mh.msg_id);

/*
 * IOC attach time initialization and setup.
 *
 * @param[in]	ioc	memory for IOC
 * @param[in]	bfa	driver instance structure
 */
bfa_ioc_attach(struct bfa_ioc_s *ioc, void *bfa, struct bfa_ioc_cbfn_s *cbfn,
	       struct bfa_timer_mod_s *timer_mod)
	ioc->timer_mod = timer_mod;
	ioc->fcmode = BFA_FALSE;
	ioc->pllinit = BFA_FALSE;
	ioc->dbg_fwsave_once = BFA_TRUE;
	ioc->iocpf.ioc = ioc;

	bfa_ioc_mbox_attach(ioc);
	INIT_LIST_HEAD(&ioc->hb_notify_q);

	bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
	bfa_fsm_send_event(ioc, IOC_E_RESET);

/*
 * Driver detach time IOC cleanup.
 */
bfa_ioc_detach(struct bfa_ioc_s *ioc)
	bfa_fsm_send_event(ioc, IOC_E_DETACH);

/*
 * Setup IOC PCI properties.
 *
 * @param[in]	pcidev	PCI device information for this IOC
 */
bfa_ioc_pci_init(struct bfa_ioc_s *ioc, struct bfa_pcidev_s *pcidev,
	ioc->pcidev = *pcidev;
	ioc->ctdev = bfa_asic_id_ct(ioc->pcidev.device_id);
	ioc->cna = ioc->ctdev && !ioc->fcmode;

	/*
	 * Set asic specific interfaces. See bfa_ioc_cb.c and bfa_ioc_ct.c
	 */
		bfa_ioc_set_ct_hwif(ioc);
		bfa_ioc_set_cb_hwif(ioc);

	bfa_ioc_map_port(ioc);
	bfa_ioc_reg_init(ioc);

/*
 * Initialize IOC dma memory
 *
 * @param[in]	dm_kva	kernel virtual address of IOC dma memory
 * @param[in]	dm_pa	physical address of IOC dma memory
 */
bfa_ioc_mem_claim(struct bfa_ioc_s *ioc, u8 *dm_kva, u64 dm_pa)
	/*
	 * dma memory for firmware attribute
	 */
	ioc->attr_dma.kva = dm_kva;
	ioc->attr_dma.pa = dm_pa;
	ioc->attr = (struct bfi_ioc_attr_s *) dm_kva;

/*
 * Return size of dma memory required.
 */
bfa_ioc_meminfo(void)
	return BFA_ROUNDUP(sizeof(struct bfi_ioc_attr_s), BFA_DMA_ALIGN_SZ);
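/*
 * Typical bring-up order (illustrative, inferred from the interfaces
 * above): bfa_ioc_attach(), then bfa_ioc_pci_init(), then
 * bfa_ioc_mem_claim() with bfa_ioc_meminfo() bytes of DMA-able memory,
 * and finally bfa_ioc_enable().
 */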
bfa_ioc_enable(struct bfa_ioc_s *ioc)
	bfa_ioc_stats(ioc, ioc_enables);
	ioc->dbg_fwsave_once = BFA_TRUE;

	bfa_fsm_send_event(ioc, IOC_E_ENABLE);

bfa_ioc_disable(struct bfa_ioc_s *ioc)
	bfa_ioc_stats(ioc, ioc_disables);
	bfa_fsm_send_event(ioc, IOC_E_DISABLE);
/*
 * Returns memory required for saving firmware trace in case of crash.
 * Driver must call this interface to allocate memory required for
 * automatic saving of firmware trace. Driver should call
 * bfa_ioc_debug_memclaim() right after bfa_ioc_attach() to set up this
 * trace memory.
 */
bfa_ioc_debug_trcsz(bfa_boolean_t auto_recover)
	return (auto_recover) ? BFA_DBG_FWTRC_LEN : 0;

/*
 * Initialize memory for saving firmware trace. Driver must initialize
 * trace memory before calling bfa_ioc_enable().
 */
bfa_ioc_debug_memclaim(struct bfa_ioc_s *ioc, void *dbg_fwsave)
	ioc->dbg_fwsave = dbg_fwsave;
	ioc->dbg_fwsave_len = bfa_ioc_debug_trcsz(ioc->iocpf.auto_recover);
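/*
 * Illustrative sketch: size and claim the trace save area right after
 * attach. kzalloc()/GFP_KERNEL are assumptions about the caller's
 * context, not requirements of this interface.
 *
 *	trcsz = bfa_ioc_debug_trcsz(bfa_auto_recover);
 *	if (trcsz)
 *		bfa_ioc_debug_memclaim(ioc, kzalloc(trcsz, GFP_KERNEL));
 */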
bfa_ioc_smem_pgnum(struct bfa_ioc_s *ioc, u32 fmaddr)
	return PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, fmaddr);

bfa_ioc_smem_pgoff(struct bfa_ioc_s *ioc, u32 fmaddr)
	return PSS_SMEM_PGOFF(fmaddr);

/*
 * Register mailbox message handler functions
 *
 * @param[in]	ioc		IOC instance
 * @param[in]	mcfuncs		message class handler functions
 */
bfa_ioc_mbox_register(struct bfa_ioc_s *ioc, bfa_ioc_mbox_mcfunc_t *mcfuncs)
	struct bfa_ioc_mbox_mod_s *mod = &ioc->mbox_mod;

	for (mc = 0; mc < BFI_MC_MAX; mc++)
		mod->mbhdlr[mc].cbfn = mcfuncs[mc];

/*
 * Register mailbox message handler function, to be called by common modules
 */
bfa_ioc_mbox_regisr(struct bfa_ioc_s *ioc, enum bfi_mclass mc,
		    bfa_ioc_mbox_mcfunc_t cbfn, void *cbarg)
	struct bfa_ioc_mbox_mod_s *mod = &ioc->mbox_mod;

	mod->mbhdlr[mc].cbfn = cbfn;
	mod->mbhdlr[mc].cbarg = cbarg;
/*
 * Queue a mailbox command request to firmware. Waits if mailbox is busy.
 * It is the caller's responsibility to serialize.
 *
 * @param[in]	ioc	IOC instance
 * @param[in]	cmd	Mailbox command
 */
bfa_ioc_mbox_queue(struct bfa_ioc_s *ioc, struct bfa_mbox_cmd_s *cmd)
	struct bfa_ioc_mbox_mod_s *mod = &ioc->mbox_mod;

	/*
	 * If a previous command is pending, queue new command
	 */
	if (!list_empty(&mod->cmd_q)) {
		list_add_tail(&cmd->qe, &mod->cmd_q);

	/*
	 * If mailbox is busy, queue command for poll timer
	 */
	stat = readl(ioc->ioc_regs.hfn_mbox_cmd);
		list_add_tail(&cmd->qe, &mod->cmd_q);

	/*
	 * mailbox is free -- queue command to firmware
	 */
	bfa_ioc_mbox_send(ioc, cmd->msg, sizeof(cmd->msg));
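/*
 * Usage note: callers own the bfa_mbox_cmd_s, build the request in
 * cmd->msg and queue it; bfa_ioc_send_fwsync() below follows exactly
 * this pattern with a BFI_IOC_H2I_DBG_SYNC request.
 */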
/*
 * Handle mailbox interrupts
 */
bfa_ioc_mbox_isr(struct bfa_ioc_s *ioc)
	struct bfa_ioc_mbox_mod_s *mod = &ioc->mbox_mod;
	struct bfi_mbmsg_s m;

	bfa_ioc_msgget(ioc, &m);

	/*
	 * Treat IOC message class as special.
	 */
	mc = m.mh.msg_class;
	if (mc == BFI_MC_IOC) {
		bfa_ioc_isr(ioc, &m);

	if ((mc > BFI_MC_MAX) || (mod->mbhdlr[mc].cbfn == NULL))

	mod->mbhdlr[mc].cbfn(mod->mbhdlr[mc].cbarg, &m);

bfa_ioc_error_isr(struct bfa_ioc_s *ioc)
	bfa_fsm_send_event(ioc, IOC_E_HWERROR);

bfa_ioc_set_fcmode(struct bfa_ioc_s *ioc)
	ioc->fcmode = BFA_TRUE;
	ioc->port_id = bfa_ioc_pcifn(ioc);

/*
 * return true if IOC is disabled
 */
bfa_ioc_is_disabled(struct bfa_ioc_s *ioc)
	return bfa_fsm_cmp_state(ioc, bfa_ioc_sm_disabling) ||
		bfa_fsm_cmp_state(ioc, bfa_ioc_sm_disabled);

/*
 * return true if IOC firmware is different.
 */
bfa_ioc_fw_mismatch(struct bfa_ioc_s *ioc)
	return bfa_fsm_cmp_state(ioc, bfa_ioc_sm_reset) ||
		bfa_fsm_cmp_state(&ioc->iocpf, bfa_iocpf_sm_fwcheck) ||
		bfa_fsm_cmp_state(&ioc->iocpf, bfa_iocpf_sm_mismatch);

#define bfa_ioc_state_disabled(__sm)		\
	(((__sm) == BFI_IOC_UNINIT) ||		\
	 ((__sm) == BFI_IOC_INITING) ||		\
	 ((__sm) == BFI_IOC_HWINIT) ||		\
	 ((__sm) == BFI_IOC_DISABLED) ||	\
	 ((__sm) == BFI_IOC_FAIL) ||		\
	 ((__sm) == BFI_IOC_CFG_DISABLED))
/*
 * Check if adapter is disabled -- both IOCs should be in a disabled
 * state.
 */
bfa_ioc_adapter_is_disabled(struct bfa_ioc_s *ioc)
	void __iomem *rb = ioc->pcidev.pci_bar_kva;

	if (!bfa_fsm_cmp_state(ioc, bfa_ioc_sm_disabled))

	ioc_state = readl(rb + BFA_IOC0_STATE_REG);
	if (!bfa_ioc_state_disabled(ioc_state))

	if (ioc->pcidev.device_id != BFA_PCI_DEVICE_ID_FC_8G1P) {
		ioc_state = readl(rb + BFA_IOC1_STATE_REG);
		if (!bfa_ioc_state_disabled(ioc_state))

/*
 * Add to IOC heartbeat failure notification queue. To be used by common
 * modules such as cee, port, diag.
 */
bfa_ioc_hbfail_register(struct bfa_ioc_s *ioc,
			struct bfa_ioc_hbfail_notify_s *notify)
	list_add_tail(&notify->qe, &ioc->hb_notify_q);

#define BFA_MFG_NAME "Brocade"

bfa_ioc_get_adapter_attr(struct bfa_ioc_s *ioc,
			 struct bfa_adapter_attr_s *ad_attr)
	struct bfi_ioc_attr_s *ioc_attr;

	ioc_attr = ioc->attr;

	bfa_ioc_get_adapter_serial_num(ioc, ad_attr->serial_num);
	bfa_ioc_get_adapter_fw_ver(ioc, ad_attr->fw_ver);
	bfa_ioc_get_adapter_optrom_ver(ioc, ad_attr->optrom_ver);
	bfa_ioc_get_adapter_manufacturer(ioc, ad_attr->manufacturer);
	memcpy(&ad_attr->vpd, &ioc_attr->vpd,
	       sizeof(struct bfa_mfg_vpd_s));

	ad_attr->nports = bfa_ioc_get_nports(ioc);
	ad_attr->max_speed = bfa_ioc_speed_sup(ioc);

	bfa_ioc_get_adapter_model(ioc, ad_attr->model);
	/* For now, model descr uses same model string */
	bfa_ioc_get_adapter_model(ioc, ad_attr->model_descr);

	ad_attr->card_type = ioc_attr->card_type;
	ad_attr->is_mezz = bfa_mfg_is_mezz(ioc_attr->card_type);

	if (BFI_ADAPTER_IS_SPECIAL(ioc_attr->adapter_prop))
		ad_attr->prototype = 1;
		ad_attr->prototype = 0;

	ad_attr->pwwn = bfa_ioc_get_pwwn(ioc);
	ad_attr->mac = bfa_ioc_get_mac(ioc);

	ad_attr->pcie_gen = ioc_attr->pcie_gen;
	ad_attr->pcie_lanes = ioc_attr->pcie_lanes;
	ad_attr->pcie_lanes_orig = ioc_attr->pcie_lanes_orig;
	ad_attr->asic_rev = ioc_attr->asic_rev;

	bfa_ioc_get_pci_chip_rev(ioc, ad_attr->hw_ver);

	ad_attr->cna_capable = ioc->cna;
	ad_attr->trunk_capable = (ad_attr->nports > 1) && !ioc->cna;

bfa_ioc_get_type(struct bfa_ioc_s *ioc)
	if (!ioc->ctdev || ioc->fcmode)
		return BFA_IOC_TYPE_FC;
	else if (ioc->ioc_mc == BFI_MC_IOCFC)
		return BFA_IOC_TYPE_FCoE;
	else if (ioc->ioc_mc == BFI_MC_LL)
		return BFA_IOC_TYPE_LL;
		bfa_assert(ioc->ioc_mc == BFI_MC_LL);
		return BFA_IOC_TYPE_LL;

bfa_ioc_get_adapter_serial_num(struct bfa_ioc_s *ioc, char *serial_num)
	memset((void *)serial_num, 0, BFA_ADAPTER_SERIAL_NUM_LEN);
	memcpy((void *)serial_num,
	       (void *)ioc->attr->brcd_serialnum,
	       BFA_ADAPTER_SERIAL_NUM_LEN);

bfa_ioc_get_adapter_fw_ver(struct bfa_ioc_s *ioc, char *fw_ver)
	memset((void *)fw_ver, 0, BFA_VERSION_LEN);
	memcpy(fw_ver, ioc->attr->fw_version, BFA_VERSION_LEN);

bfa_ioc_get_pci_chip_rev(struct bfa_ioc_s *ioc, char *chip_rev)
	bfa_assert(chip_rev);

	memset((void *)chip_rev, 0, BFA_IOC_CHIP_REV_LEN);

	chip_rev[4] = ioc->attr->asic_rev;

bfa_ioc_get_adapter_optrom_ver(struct bfa_ioc_s *ioc, char *optrom_ver)
	memset((void *)optrom_ver, 0, BFA_VERSION_LEN);
	memcpy(optrom_ver, ioc->attr->optrom_version,

bfa_ioc_get_adapter_manufacturer(struct bfa_ioc_s *ioc, char *manufacturer)
	memset((void *)manufacturer, 0, BFA_ADAPTER_MFG_NAME_LEN);
	memcpy(manufacturer, BFA_MFG_NAME, BFA_ADAPTER_MFG_NAME_LEN);

bfa_ioc_get_adapter_model(struct bfa_ioc_s *ioc, char *model)
	struct bfi_ioc_attr_s *ioc_attr;

	memset((void *)model, 0, BFA_ADAPTER_MODEL_NAME_LEN);

	ioc_attr = ioc->attr;

	snprintf(model, BFA_ADAPTER_MODEL_NAME_LEN, "%s-%u",
		 BFA_MFG_NAME, ioc_attr->card_type);

bfa_ioc_get_state(struct bfa_ioc_s *ioc)
	enum bfa_iocpf_state iocpf_st;
	enum bfa_ioc_state ioc_st = bfa_sm_to_state(ioc_sm_table, ioc->fsm);

	if (ioc_st == BFA_IOC_ENABLING ||
	    ioc_st == BFA_IOC_FAIL || ioc_st == BFA_IOC_INITFAIL) {

		iocpf_st = bfa_sm_to_state(iocpf_sm_table, ioc->iocpf.fsm);

		case BFA_IOCPF_SEMWAIT:
			ioc_st = BFA_IOC_SEMWAIT;

		case BFA_IOCPF_HWINIT:
			ioc_st = BFA_IOC_HWINIT;

		case BFA_IOCPF_FWMISMATCH:
			ioc_st = BFA_IOC_FWMISMATCH;

		case BFA_IOCPF_FAIL:
			ioc_st = BFA_IOC_FAIL;

		case BFA_IOCPF_INITFAIL:
			ioc_st = BFA_IOC_INITFAIL;

bfa_ioc_get_attr(struct bfa_ioc_s *ioc, struct bfa_ioc_attr_s *ioc_attr)
	memset((void *)ioc_attr, 0, sizeof(struct bfa_ioc_attr_s));

	ioc_attr->state = bfa_ioc_get_state(ioc);
	ioc_attr->port_id = ioc->port_id;

	ioc_attr->ioc_type = bfa_ioc_get_type(ioc);

	bfa_ioc_get_adapter_attr(ioc, &ioc_attr->adapter_attr);

	ioc_attr->pci_attr.device_id = ioc->pcidev.device_id;
	ioc_attr->pci_attr.pcifn = ioc->pcidev.pci_func;
	bfa_ioc_get_pci_chip_rev(ioc, ioc_attr->pci_attr.chip_rev);

bfa_ioc_get_pwwn(struct bfa_ioc_s *ioc)
	return ioc->attr->pwwn;

bfa_ioc_get_nwwn(struct bfa_ioc_s *ioc)
	return ioc->attr->nwwn;

bfa_ioc_get_adid(struct bfa_ioc_s *ioc)
	return ioc->attr->mfg_pwwn;

bfa_ioc_get_mac(struct bfa_ioc_s *ioc)
	/*
	 * Check the IOC type and return the appropriate MAC
	 */
	if (bfa_ioc_get_type(ioc) == BFA_IOC_TYPE_FCoE)
		return ioc->attr->fcoe_mac;
		return ioc->attr->mac;

bfa_ioc_get_mfg_pwwn(struct bfa_ioc_s *ioc)
	return ioc->attr->mfg_pwwn;

bfa_ioc_get_mfg_nwwn(struct bfa_ioc_s *ioc)
	return ioc->attr->mfg_nwwn;

bfa_ioc_get_mfg_mac(struct bfa_ioc_s *ioc)
	m = ioc->attr->mfg_mac;
	if (bfa_mfg_is_old_wwn_mac_model(ioc->attr->card_type))
		m.mac[MAC_ADDRLEN - 1] += bfa_ioc_pcifn(ioc);
		bfa_mfg_increment_wwn_mac(&(m.mac[MAC_ADDRLEN-3]),
					  bfa_ioc_pcifn(ioc));

bfa_ioc_get_fcmode(struct bfa_ioc_s *ioc)
	return ioc->fcmode || !bfa_asic_id_ct(ioc->pcidev.device_id);

/*
 * Retrieve saved firmware trace from a prior IOC failure.
 */
bfa_ioc_debug_fwsave(struct bfa_ioc_s *ioc, void *trcdata, int *trclen)
	if (ioc->dbg_fwsave_len == 0)
		return BFA_STATUS_ENOFSAVE;

	if (tlen > ioc->dbg_fwsave_len)
		tlen = ioc->dbg_fwsave_len;

	memcpy(trcdata, ioc->dbg_fwsave, tlen);

	return BFA_STATUS_OK;

/*
 * Clear saved firmware trace
 */
bfa_ioc_debug_fwsave_clear(struct bfa_ioc_s *ioc)
	ioc->dbg_fwsave_once = BFA_TRUE;

/*
 * Retrieve saved firmware trace from a prior IOC failure.
 */
bfa_ioc_debug_fwtrc(struct bfa_ioc_s *ioc, void *trcdata, int *trclen)
	u32 loff = BFA_DBG_FWTRC_OFF(bfa_ioc_portid(ioc));
	bfa_status_t status;

	bfa_trc(ioc, *trclen);

	if (tlen > BFA_DBG_FWTRC_LEN)
		tlen = BFA_DBG_FWTRC_LEN;

	status = bfa_ioc_smem_read(ioc, trcdata, loff, tlen);

bfa_ioc_send_fwsync(struct bfa_ioc_s *ioc)
	struct bfa_mbox_cmd_s cmd;
	struct bfi_ioc_ctrl_req_s *req = (struct bfi_ioc_ctrl_req_s *) cmd.msg;

	bfi_h2i_set(req->mh, BFI_MC_IOC, BFI_IOC_H2I_DBG_SYNC,
		    bfa_ioc_portid(ioc));
	req->ioc_class = ioc->ioc_mc;
	bfa_ioc_mbox_queue(ioc, &cmd);

bfa_ioc_fwsync(struct bfa_ioc_s *ioc)
	u32 fwsync_iter = 1000;

	bfa_ioc_send_fwsync(ioc);

	/*
	 * After sending a fw sync mbox command wait for it to
	 * take effect. We will not wait for a response because
	 *    1. fw_sync mbox cmd doesn't have a response.
	 *    2. Even if we implement that, interrupts might not
	 *	 be enabled when we call this function.
	 * So, just keep checking if any mbox cmd is pending, and
	 * after waiting for a reasonable amount of time, go ahead.
	 * It is possible that fw has crashed and the mbox command
	 * is never acknowledged.
	 */
	while (bfa_ioc_mbox_cmd_pending(ioc) && fwsync_iter > 0)

/*
 * Dump firmware smem
 */
bfa_ioc_debug_fwcore(struct bfa_ioc_s *ioc, void *buf,
		     u32 *offset, int *buflen)
	bfa_status_t status;
	u32 smem_len = BFA_IOC_FW_SMEM_SIZE(ioc);

	if (*offset >= smem_len) {
		*offset = *buflen = 0;
		return BFA_STATUS_EINVAL;

	/*
	 * First smem read, sync smem before proceeding
	 * No need to sync before reading every chunk.
	 */
		bfa_ioc_fwsync(ioc);

	if ((loff + dlen) >= smem_len)
		dlen = smem_len - loff;

	status = bfa_ioc_smem_read(ioc, buf, loff, dlen);

	if (status != BFA_STATUS_OK) {
		*offset = *buflen = 0;

	if (*offset >= smem_len)

/*
 * Firmware statistics
 */
bfa_ioc_fw_stats_get(struct bfa_ioc_s *ioc, void *stats)
	u32 loff = BFI_IOC_FWSTATS_OFF + \
		BFI_IOC_FWSTATS_SZ * (bfa_ioc_portid(ioc));
	bfa_status_t status;

	if (ioc->stats_busy) {
		bfa_trc(ioc, ioc->stats_busy);
		return BFA_STATUS_DEVBUSY;

	ioc->stats_busy = BFA_TRUE;

	tlen = sizeof(struct bfa_fw_stats_s);
	status = bfa_ioc_smem_read(ioc, stats, loff, tlen);

	ioc->stats_busy = BFA_FALSE;

bfa_ioc_fw_stats_clear(struct bfa_ioc_s *ioc)
	u32 loff = BFI_IOC_FWSTATS_OFF + \
		BFI_IOC_FWSTATS_SZ * (bfa_ioc_portid(ioc));
	bfa_status_t status;

	if (ioc->stats_busy) {
		bfa_trc(ioc, ioc->stats_busy);
		return BFA_STATUS_DEVBUSY;

	ioc->stats_busy = BFA_TRUE;

	tlen = sizeof(struct bfa_fw_stats_s);
	status = bfa_ioc_smem_clr(ioc, loff, tlen);

	ioc->stats_busy = BFA_FALSE;

/*
 * Save firmware trace if configured.
 */
bfa_ioc_debug_save(struct bfa_ioc_s *ioc)
	if (ioc->dbg_fwsave_len) {
		tlen = ioc->dbg_fwsave_len;
		bfa_ioc_debug_fwtrc(ioc, ioc->dbg_fwsave, &tlen);

/*
 * Firmware failure detected. Start recovery actions.
 */
bfa_ioc_recover(struct bfa_ioc_s *ioc)
	if (ioc->dbg_fwsave_once) {
		ioc->dbg_fwsave_once = BFA_FALSE;
		bfa_ioc_debug_save(ioc);

	bfa_ioc_stats(ioc, ioc_hbfails);
	bfa_fsm_send_event(ioc, IOC_E_HBFAIL);

bfa_ioc_check_attr_wwns(struct bfa_ioc_s *ioc)
	if (bfa_ioc_get_type(ioc) == BFA_IOC_TYPE_LL)

/*
 * hal_iocpf_pvt BFA IOC PF private functions
 */
bfa_iocpf_enable(struct bfa_ioc_s *ioc)
	bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_ENABLE);

bfa_iocpf_disable(struct bfa_ioc_s *ioc)
	bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_DISABLE);

bfa_iocpf_fail(struct bfa_ioc_s *ioc)
	bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_FAIL);

bfa_iocpf_initfail(struct bfa_ioc_s *ioc)
	bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_INITFAIL);

bfa_iocpf_getattrfail(struct bfa_ioc_s *ioc)
	bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_GETATTRFAIL);

bfa_iocpf_stop(struct bfa_ioc_s *ioc)
	bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_STOP);

bfa_iocpf_timeout(void *ioc_arg)
	struct bfa_ioc_s *ioc = (struct bfa_ioc_s *) ioc_arg;

	bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_TIMEOUT);

bfa_iocpf_sem_timeout(void *ioc_arg)
	struct bfa_ioc_s *ioc = (struct bfa_ioc_s *) ioc_arg;

	bfa_ioc_hw_sem_get(ioc);
/*
 * BFA timer functions
 */
bfa_timer_init(struct bfa_timer_mod_s *mod)
	INIT_LIST_HEAD(&mod->timer_q);

bfa_timer_beat(struct bfa_timer_mod_s *mod)
	struct list_head *qh = &mod->timer_q;
	struct list_head *qe, *qe_next;
	struct bfa_timer_s *elem;
	struct list_head timedout_q;

	INIT_LIST_HEAD(&timedout_q);

	qe = bfa_q_next(qh);
		qe_next = bfa_q_next(qe);

		elem = (struct bfa_timer_s *) qe;
		if (elem->timeout <= BFA_TIMER_FREQ) {
			list_del(&elem->qe);
			list_add_tail(&elem->qe, &timedout_q);
			elem->timeout -= BFA_TIMER_FREQ;

		qe = qe_next;	/* go to next elem */

	/*
	 * Pop all the timeout entries
	 */
	while (!list_empty(&timedout_q)) {
		bfa_q_deq(&timedout_q, &elem);
		elem->timercb(elem->arg);

/*
 * Should be called with lock protection
 */
bfa_timer_begin(struct bfa_timer_mod_s *mod, struct bfa_timer_s *timer,
		void (*timercb) (void *), void *arg, unsigned int timeout)
	bfa_assert(timercb != NULL);
	bfa_assert(!bfa_q_is_on_q(&mod->timer_q, timer));

	timer->timeout = timeout;
	timer->timercb = timercb;

	list_add_tail(&timer->qe, &mod->timer_q);

/*
 * Should be called with lock protection
 */
bfa_timer_stop(struct bfa_timer_s *timer)
	bfa_assert(!list_empty(&timer->qe));

	list_del(&timer->qe);
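/*
 * Usage sketch (illustrative): arm a one-shot callback and cancel it.
 * The bfa_ioc_timer_start()/bfa_ioc_timer_stop() macros at the top of
 * this file wrap this same pattern.
 *
 *	bfa_timer_begin(mod, &timer, my_cb, my_arg, BFA_IOC_TOV);
 *	...
 *	bfa_timer_stop(&timer);
 *
 * my_cb/my_arg are hypothetical caller-supplied names; the callback
 * fires from bfa_timer_beat() once the timeout has elapsed.
 */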