/*
 * Linux network driver for Brocade Converged Network Adapter.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License (GPL) Version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */
/*
 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
 */

#include "bfa_ioc.h"
#include "bfi_ctreg.h"
/**
 * IOC local definitions
 */
#define bfa_ioc_timer_start(__ioc) \
	mod_timer(&(__ioc)->ioc_timer, jiffies +	\
			msecs_to_jiffies(BFA_IOC_TOV))
#define bfa_ioc_timer_stop(__ioc)	del_timer(&(__ioc)->ioc_timer)

#define bfa_ioc_recovery_timer_start(__ioc) \
	mod_timer(&(__ioc)->ioc_timer, jiffies +	\
			msecs_to_jiffies(BFA_IOC_TOV_RECOVER))

#define bfa_sem_timer_start(__ioc) \
	mod_timer(&(__ioc)->sem_timer, jiffies +	\
			msecs_to_jiffies(BFA_IOC_HWSEM_TOV))
#define bfa_sem_timer_stop(__ioc)	del_timer(&(__ioc)->sem_timer)

#define bfa_hb_timer_start(__ioc) \
	mod_timer(&(__ioc)->hb_timer, jiffies +	\
			msecs_to_jiffies(BFA_IOC_HB_TOV))
#define bfa_hb_timer_stop(__ioc)	del_timer(&(__ioc)->hb_timer)
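
/*
 * Summary of the timers driven by the macros above: ioc_timer paces
 * state-machine timeouts (and the delayed auto-recovery in the hbfail
 * state), sem_timer re-polls the hardware semaphore, and hb_timer paces
 * the firmware heartbeat check.
 */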
/**
 * Asic specific macros : see bfa_hw_cb.c and bfa_hw_ct.c for details.
 */
#define bfa_ioc_firmware_lock(__ioc)		\
			((__ioc)->ioc_hwif->ioc_firmware_lock(__ioc))
#define bfa_ioc_firmware_unlock(__ioc)		\
			((__ioc)->ioc_hwif->ioc_firmware_unlock(__ioc))
#define bfa_ioc_reg_init(__ioc) ((__ioc)->ioc_hwif->ioc_reg_init(__ioc))
#define bfa_ioc_map_port(__ioc) ((__ioc)->ioc_hwif->ioc_map_port(__ioc))
#define bfa_ioc_notify_hbfail(__ioc)		\
			((__ioc)->ioc_hwif->ioc_notify_hbfail(__ioc))

#define bfa_ioc_is_optrom(__ioc)	\
	(bfa_cb_image_get_size(BFA_IOC_FWIMG_TYPE(__ioc)) < BFA_IOC_FWIMG_MINSZ)
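
/*
 * If the firmware image compiled into the driver is smaller than
 * BFA_IOC_FWIMG_MINSZ, the adapter is assumed to boot from its flash
 * (option ROM) image rather than a host-downloaded one; see the
 * BFI_BOOT_TYPE_FLASH handling in bfa_ioc_download_fw().
 */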
#define bfa_ioc_mbox_cmd_pending(__ioc)		\
	(!list_empty(&((__ioc)->mbox_mod.cmd_q)) || \
			readl((__ioc)->ioc_regs.hfn_mbox_cmd))
bool bfa_nw_auto_recover = true;
/*
 * forward declarations
 */
static void bfa_ioc_hw_sem_get(struct bfa_ioc *ioc);
static void bfa_ioc_hw_sem_get_cancel(struct bfa_ioc *ioc);
static void bfa_ioc_hwinit(struct bfa_ioc *ioc, bool force);
static void bfa_ioc_send_enable(struct bfa_ioc *ioc);
static void bfa_ioc_send_disable(struct bfa_ioc *ioc);
static void bfa_ioc_send_getattr(struct bfa_ioc *ioc);
static void bfa_ioc_hb_monitor(struct bfa_ioc *ioc);
static void bfa_ioc_hb_stop(struct bfa_ioc *ioc);
static void bfa_ioc_reset(struct bfa_ioc *ioc, bool force);
static void bfa_ioc_mbox_poll(struct bfa_ioc *ioc);
static void bfa_ioc_mbox_hbfail(struct bfa_ioc *ioc);
static void bfa_ioc_recover(struct bfa_ioc *ioc);
static void bfa_ioc_check_attr_wwns(struct bfa_ioc *ioc);
static void bfa_ioc_disable_comp(struct bfa_ioc *ioc);
static void bfa_ioc_lpu_stop(struct bfa_ioc *ioc);
static void bfa_ioc_boot(struct bfa_ioc *ioc, u32 boot_type,
			u32 boot_param);
static u32 bfa_ioc_smem_pgnum(struct bfa_ioc *ioc, u32 fmaddr);
static u32 bfa_ioc_smem_pgoff(struct bfa_ioc *ioc, u32 fmaddr);
static void bfa_ioc_get_adapter_serial_num(struct bfa_ioc *ioc,
			char *serial_num);
static void bfa_ioc_get_adapter_fw_ver(struct bfa_ioc *ioc,
			char *fw_ver);
static void bfa_ioc_get_pci_chip_rev(struct bfa_ioc *ioc,
			char *chip_rev);
static void bfa_ioc_get_adapter_optrom_ver(struct bfa_ioc *ioc,
			char *optrom_ver);
static void bfa_ioc_get_adapter_manufacturer(struct bfa_ioc *ioc,
			char *manufacturer);
static void bfa_ioc_get_adapter_model(struct bfa_ioc *ioc, char *model);
static u64 bfa_ioc_get_pwwn(struct bfa_ioc *ioc);
static mac_t bfa_ioc_get_mfg_mac(struct bfa_ioc *ioc);
/**
 * IOC state machine events
 */
enum ioc_event {
	IOC_E_ENABLE		= 1,	/*!< IOC enable request		*/
	IOC_E_DISABLE		= 2,	/*!< IOC disable request	*/
	IOC_E_TIMEOUT		= 3,	/*!< f/w response timeout	*/
	IOC_E_FWREADY		= 4,	/*!< f/w initialization done	*/
	IOC_E_FWRSP_GETATTR	= 5,	/*!< IOC get attribute response	*/
	IOC_E_FWRSP_ENABLE	= 6,	/*!< enable f/w response	*/
	IOC_E_FWRSP_DISABLE	= 7,	/*!< disable f/w response	*/
	IOC_E_HBFAIL		= 8,	/*!< heartbeat failure		*/
	IOC_E_HWERROR		= 9,	/*!< hardware error interrupt	*/
	IOC_E_SEMLOCKED		= 10,	/*!< h/w semaphore is locked	*/
	IOC_E_DETACH		= 11,	/*!< driver detach cleanup	*/
};
bfa_fsm_state_decl(bfa_ioc, reset, struct bfa_ioc, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, fwcheck, struct bfa_ioc, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, mismatch, struct bfa_ioc, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, semwait, struct bfa_ioc, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, hwinit, struct bfa_ioc, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, enabling, struct bfa_ioc, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, getattr, struct bfa_ioc, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, op, struct bfa_ioc, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, initfail, struct bfa_ioc, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, hbfail, struct bfa_ioc, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, disabling, struct bfa_ioc, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, disabled, struct bfa_ioc, enum ioc_event);
static struct bfa_sm_table ioc_sm_table[] = {
	{BFA_SM(bfa_ioc_sm_reset), BFA_IOC_RESET},
	{BFA_SM(bfa_ioc_sm_fwcheck), BFA_IOC_FWMISMATCH},
	{BFA_SM(bfa_ioc_sm_mismatch), BFA_IOC_FWMISMATCH},
	{BFA_SM(bfa_ioc_sm_semwait), BFA_IOC_SEMWAIT},
	{BFA_SM(bfa_ioc_sm_hwinit), BFA_IOC_HWINIT},
	{BFA_SM(bfa_ioc_sm_enabling), BFA_IOC_HWINIT},
	{BFA_SM(bfa_ioc_sm_getattr), BFA_IOC_GETATTR},
	{BFA_SM(bfa_ioc_sm_op), BFA_IOC_OPERATIONAL},
	{BFA_SM(bfa_ioc_sm_initfail), BFA_IOC_INITFAIL},
	{BFA_SM(bfa_ioc_sm_hbfail), BFA_IOC_HBFAIL},
	{BFA_SM(bfa_ioc_sm_disabling), BFA_IOC_DISABLING},
	{BFA_SM(bfa_ioc_sm_disabled), BFA_IOC_DISABLED},
};
/**
 * Reset entry actions -- initialize state machine
 */
static void
bfa_ioc_sm_reset_entry(struct bfa_ioc *ioc)
{
	ioc->retry_count = 0;
	ioc->auto_recover = bfa_nw_auto_recover;
}
/**
 * Beginning state. IOC is in reset state.
 */
static void
bfa_ioc_sm_reset(struct bfa_ioc *ioc, enum ioc_event event)
{
	switch (event) {
	case IOC_E_ENABLE:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_fwcheck);
		break;

	case IOC_E_DISABLE:
		bfa_ioc_disable_comp(ioc);
		break;

	case IOC_E_DETACH:
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}
/**
 * Semaphore should be acquired for version check.
 */
static void
bfa_ioc_sm_fwcheck_entry(struct bfa_ioc *ioc)
{
	bfa_ioc_hw_sem_get(ioc);
}
/**
 * Awaiting h/w semaphore to continue with version check.
 */
static void
bfa_ioc_sm_fwcheck(struct bfa_ioc *ioc, enum ioc_event event)
{
	switch (event) {
	case IOC_E_SEMLOCKED:
		if (bfa_ioc_firmware_lock(ioc)) {
			ioc->retry_count = 0;
			bfa_fsm_set_state(ioc, bfa_ioc_sm_hwinit);
		} else {
			bfa_nw_ioc_hw_sem_release(ioc);
			bfa_fsm_set_state(ioc, bfa_ioc_sm_mismatch);
		}
		break;

	case IOC_E_DISABLE:
		bfa_ioc_disable_comp(ioc);
		/* fall through */

	case IOC_E_DETACH:
		bfa_ioc_hw_sem_get_cancel(ioc);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_reset);
		break;

	case IOC_E_FWREADY:
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}
/**
 * Notify enable completion callback and generate mismatch AEN.
 */
static void
bfa_ioc_sm_mismatch_entry(struct bfa_ioc *ioc)
{
	/**
	 * Provide enable completion callback and AEN notification only once.
	 */
	if (ioc->retry_count == 0)
		ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
	ioc->retry_count++;
	bfa_ioc_timer_start(ioc);
}
/**
 * Awaiting firmware version match.
 */
static void
bfa_ioc_sm_mismatch(struct bfa_ioc *ioc, enum ioc_event event)
{
	switch (event) {
	case IOC_E_TIMEOUT:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_fwcheck);
		break;

	case IOC_E_DISABLE:
		bfa_ioc_disable_comp(ioc);
		/* fall through */

	case IOC_E_DETACH:
		bfa_ioc_timer_stop(ioc);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_reset);
		break;

	case IOC_E_FWREADY:
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}
/**
 * Request for semaphore.
 */
static void
bfa_ioc_sm_semwait_entry(struct bfa_ioc *ioc)
{
	bfa_ioc_hw_sem_get(ioc);
}
/**
 * Awaiting semaphore for h/w initialization.
 */
static void
bfa_ioc_sm_semwait(struct bfa_ioc *ioc, enum ioc_event event)
{
	switch (event) {
	case IOC_E_SEMLOCKED:
		ioc->retry_count = 0;
		bfa_fsm_set_state(ioc, bfa_ioc_sm_hwinit);
		break;

	case IOC_E_DISABLE:
		bfa_ioc_hw_sem_get_cancel(ioc);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}
static void
bfa_ioc_sm_hwinit_entry(struct bfa_ioc *ioc)
{
	bfa_ioc_timer_start(ioc);
	bfa_ioc_reset(ioc, false);
}
/**
 * Hardware is being initialized. Interrupts are enabled.
 * Holding hardware semaphore lock.
 */
static void
bfa_ioc_sm_hwinit(struct bfa_ioc *ioc, enum ioc_event event)
{
	switch (event) {
	case IOC_E_FWREADY:
		bfa_ioc_timer_stop(ioc);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_enabling);
		break;

	case IOC_E_HWERROR:
		bfa_ioc_timer_stop(ioc);
		/* fall through */

	case IOC_E_TIMEOUT:
		ioc->retry_count++;
		if (ioc->retry_count < BFA_IOC_HWINIT_MAX) {
			bfa_ioc_timer_start(ioc);
			bfa_ioc_reset(ioc, true);
			break;
		}

		bfa_nw_ioc_hw_sem_release(ioc);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_initfail);
		break;

	case IOC_E_DISABLE:
		bfa_nw_ioc_hw_sem_release(ioc);
		bfa_ioc_timer_stop(ioc);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}
static void
bfa_ioc_sm_enabling_entry(struct bfa_ioc *ioc)
{
	bfa_ioc_timer_start(ioc);
	bfa_ioc_send_enable(ioc);
}
/**
 * Host IOC function is being enabled, awaiting response from firmware.
 * Semaphore is acquired.
 */
static void
bfa_ioc_sm_enabling(struct bfa_ioc *ioc, enum ioc_event event)
{
	switch (event) {
	case IOC_E_FWRSP_ENABLE:
		bfa_ioc_timer_stop(ioc);
		bfa_nw_ioc_hw_sem_release(ioc);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_getattr);
		break;

	case IOC_E_HWERROR:
		bfa_ioc_timer_stop(ioc);
		/* fall through */

	case IOC_E_TIMEOUT:
		ioc->retry_count++;
		if (ioc->retry_count < BFA_IOC_HWINIT_MAX) {
			writel(BFI_IOC_UNINIT,
				ioc->ioc_regs.ioc_fwstate);
			bfa_fsm_set_state(ioc, bfa_ioc_sm_hwinit);
			break;
		}

		bfa_nw_ioc_hw_sem_release(ioc);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_initfail);
		break;

	case IOC_E_DISABLE:
		bfa_ioc_timer_stop(ioc);
		bfa_nw_ioc_hw_sem_release(ioc);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled);
		break;

	case IOC_E_FWREADY:
		bfa_ioc_send_enable(ioc);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}
static void
bfa_ioc_sm_getattr_entry(struct bfa_ioc *ioc)
{
	bfa_ioc_timer_start(ioc);
	bfa_ioc_send_getattr(ioc);
}
/**
 * IOC configuration in progress. Timer is active.
 */
static void
bfa_ioc_sm_getattr(struct bfa_ioc *ioc, enum ioc_event event)
{
	switch (event) {
	case IOC_E_FWRSP_GETATTR:
		bfa_ioc_timer_stop(ioc);
		bfa_ioc_check_attr_wwns(ioc);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_op);
		break;

	case IOC_E_HWERROR:
		bfa_ioc_timer_stop(ioc);
		/* fall through */

	case IOC_E_TIMEOUT:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_initfail);
		break;

	case IOC_E_DISABLE:
		bfa_ioc_timer_stop(ioc);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}
static void
bfa_ioc_sm_op_entry(struct bfa_ioc *ioc)
{
	ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_OK);
	bfa_ioc_hb_monitor(ioc);
}
static void
bfa_ioc_sm_op(struct bfa_ioc *ioc, enum ioc_event event)
{
	switch (event) {
	case IOC_E_ENABLE:
		break;

	case IOC_E_DISABLE:
		bfa_ioc_hb_stop(ioc);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
		break;

	case IOC_E_HWERROR:
	case IOC_E_FWREADY:
		/**
		 * Hard error or IOC recovery by other function.
		 * Treat it same as heartbeat failure.
		 */
		bfa_ioc_hb_stop(ioc);
		/* !!! fall through !!! */

	case IOC_E_HBFAIL:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_hbfail);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}
static void
bfa_ioc_sm_disabling_entry(struct bfa_ioc *ioc)
{
	bfa_ioc_timer_start(ioc);
	bfa_ioc_send_disable(ioc);
}
/**
 * IOC is being disabled
 */
static void
bfa_ioc_sm_disabling(struct bfa_ioc *ioc, enum ioc_event event)
{
	switch (event) {
	case IOC_E_FWRSP_DISABLE:
		bfa_ioc_timer_stop(ioc);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled);
		break;

	case IOC_E_HWERROR:
		bfa_ioc_timer_stop(ioc);
		/*
		 * !!! fall through !!!
		 */

	case IOC_E_TIMEOUT:
		writel(BFI_IOC_FAIL, ioc->ioc_regs.ioc_fwstate);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}
/**
 * IOC disable completion entry.
 */
static void
bfa_ioc_sm_disabled_entry(struct bfa_ioc *ioc)
{
	bfa_ioc_disable_comp(ioc);
}
static void
bfa_ioc_sm_disabled(struct bfa_ioc *ioc, enum ioc_event event)
{
	switch (event) {
	case IOC_E_ENABLE:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_semwait);
		break;

	case IOC_E_DISABLE:
		ioc->cbfn->disable_cbfn(ioc->bfa);
		break;

	case IOC_E_FWREADY:
		break;

	case IOC_E_DETACH:
		bfa_ioc_firmware_unlock(ioc);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_reset);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}
static void
bfa_ioc_sm_initfail_entry(struct bfa_ioc *ioc)
{
	ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
	bfa_ioc_timer_start(ioc);
}
/**
 * Hardware initialization failed.
 */
static void
bfa_ioc_sm_initfail(struct bfa_ioc *ioc, enum ioc_event event)
{
	switch (event) {
	case IOC_E_DISABLE:
		bfa_ioc_timer_stop(ioc);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled);
		break;

	case IOC_E_DETACH:
		bfa_ioc_timer_stop(ioc);
		bfa_ioc_firmware_unlock(ioc);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_reset);
		break;

	case IOC_E_TIMEOUT:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_semwait);
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}
static void
bfa_ioc_sm_hbfail_entry(struct bfa_ioc *ioc)
{
	struct list_head *qe;
	struct bfa_ioc_hbfail_notify *notify;

	/**
	 * Mark IOC as failed in hardware and stop firmware.
	 */
	bfa_ioc_lpu_stop(ioc);
	writel(BFI_IOC_FAIL, ioc->ioc_regs.ioc_fwstate);

	/**
	 * Notify other functions on HB failure.
	 */
	bfa_ioc_notify_hbfail(ioc);

	/**
	 * Notify driver and common modules registered for notification.
	 */
	ioc->cbfn->hbfail_cbfn(ioc->bfa);
	list_for_each(qe, &ioc->hb_notify_q) {
		notify = (struct bfa_ioc_hbfail_notify *) qe;
		notify->cbfn(notify->cbarg);
	}

	/**
	 * Flush any queued up mailbox requests.
	 */
	bfa_ioc_mbox_hbfail(ioc);

	/**
	 * Trigger auto-recovery after a delay.
	 */
	if (ioc->auto_recover)
		mod_timer(&ioc->ioc_timer, jiffies +
			msecs_to_jiffies(BFA_IOC_TOV_RECOVER));
}
/**
 * IOC heartbeat failure.
 */
static void
bfa_ioc_sm_hbfail(struct bfa_ioc *ioc, enum ioc_event event)
{
	switch (event) {

	case IOC_E_ENABLE:
		ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
		break;

	case IOC_E_DISABLE:
		if (ioc->auto_recover)
			bfa_ioc_timer_stop(ioc);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled);
		break;

	case IOC_E_TIMEOUT:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_semwait);
		break;

	case IOC_E_FWREADY:
		/**
		 * Recovery is already initiated by other function.
		 */
		break;

	case IOC_E_HWERROR:
		/*
		 * HB failure notification, ignore.
		 */
		break;

	default:
		bfa_sm_fault(ioc, event);
	}
}
/**
 * BFA IOC private functions
 */
static void
bfa_ioc_disable_comp(struct bfa_ioc *ioc)
{
	struct list_head *qe;
	struct bfa_ioc_hbfail_notify *notify;

	ioc->cbfn->disable_cbfn(ioc->bfa);

	/**
	 * Notify common modules registered for notification.
	 */
	list_for_each(qe, &ioc->hb_notify_q) {
		notify = (struct bfa_ioc_hbfail_notify *) qe;
		notify->cbfn(notify->cbarg);
	}
}
void
bfa_nw_ioc_sem_timeout(void *ioc_arg)
{
	struct bfa_ioc *ioc = (struct bfa_ioc *) ioc_arg;

	bfa_ioc_hw_sem_get(ioc);
}
bool
bfa_nw_ioc_sem_get(void __iomem *sem_reg)
{
	u32 r32;
	int cnt = 0;
#define BFA_SEM_SPINCNT	3000

	r32 = readl(sem_reg);

	while (r32 && (cnt < BFA_SEM_SPINCNT)) {
		cnt++;
		udelay(2);
		r32 = readl(sem_reg);
	}

	if (r32 == 0)
		return true;

	BUG_ON(!(cnt < BFA_SEM_SPINCNT));
	return false;
}
void
bfa_nw_ioc_sem_release(void __iomem *sem_reg)
{
	writel(1, sem_reg);
}
static void
bfa_ioc_hw_sem_get(struct bfa_ioc *ioc)
{
	u32 r32;

	/**
	 * First read to the semaphore register will return 0, subsequent reads
	 * will return 1. Semaphore is released by writing 1 to the register.
	 */
	r32 = readl(ioc->ioc_regs.ioc_sem_reg);
	if (r32 == 0) {
		bfa_fsm_send_event(ioc, IOC_E_SEMLOCKED);
		return;
	}

	mod_timer(&ioc->sem_timer, jiffies +
		msecs_to_jiffies(BFA_IOC_HWSEM_TOV));
}
void
bfa_nw_ioc_hw_sem_release(struct bfa_ioc *ioc)
{
	writel(1, ioc->ioc_regs.ioc_sem_reg);
}
static void
bfa_ioc_hw_sem_get_cancel(struct bfa_ioc *ioc)
{
	del_timer(&ioc->sem_timer);
}
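
/*
 * Hardware semaphore protocol, as used above: the first read of
 * ioc_sem_reg returns 0 and grants ownership to that reader; subsequent
 * reads return 1, and those callers retry from sem_timer every
 * BFA_IOC_HWSEM_TOV ms until the owner releases the semaphore by writing
 * 1 back to the register.
 */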
/**
 * Initialize LPU local memory (aka secondary memory / SRAM)
 */
static void
bfa_ioc_lmem_init(struct bfa_ioc *ioc)
{
	u32 pss_ctl;
	int i;
#define PSS_LMEM_INIT_TIME  10000

	pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg);
	pss_ctl &= ~__PSS_LMEM_RESET;
	pss_ctl |= __PSS_LMEM_INIT_EN;

	/*
	 * i2c workaround 12.5khz clock
	 */
	pss_ctl |= __PSS_I2C_CLK_DIV(3UL);
	writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);

	/**
	 * wait for memory initialization to be complete
	 */
	i = 0;
	do {
		pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg);
		i++;
	} while (!(pss_ctl & __PSS_LMEM_INIT_DONE) && (i < PSS_LMEM_INIT_TIME));

	/**
	 * If memory initialization is not successful, IOC timeout will catch
	 * such failures.
	 */
	BUG_ON(!(pss_ctl & __PSS_LMEM_INIT_DONE));

	pss_ctl &= ~(__PSS_LMEM_INIT_DONE | __PSS_LMEM_INIT_EN);
	writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);
}
static void
bfa_ioc_lpu_start(struct bfa_ioc *ioc)
{
	u32 pss_ctl;

	/**
	 * Take processor out of reset.
	 */
	pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg);
	pss_ctl &= ~__PSS_LPU0_RESET;

	writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);
}
static void
bfa_ioc_lpu_stop(struct bfa_ioc *ioc)
{
	u32 pss_ctl;

	/**
	 * Put processors in reset.
	 */
	pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg);
	pss_ctl |= (__PSS_LPU0_RESET | __PSS_LPU1_RESET);

	writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);
}
/**
 * Get driver and firmware versions.
 */
void
bfa_nw_ioc_fwver_get(struct bfa_ioc *ioc, struct bfi_ioc_image_hdr *fwhdr)
{
	u32 pgnum, pgoff;
	u32 loff = 0;
	int i;
	u32 *fwsig = (u32 *) fwhdr;

	pgnum = bfa_ioc_smem_pgnum(ioc, loff);
	pgoff = bfa_ioc_smem_pgoff(ioc, loff);
	writel(pgnum, ioc->ioc_regs.host_page_num_fn);

	for (i = 0; i < (sizeof(struct bfi_ioc_image_hdr) / sizeof(u32));
	     i++) {
		fwsig[i] =
			swab32(readl((loff) + (ioc->ioc_regs.smem_page_start)));
		loff += sizeof(u32);
	}
}
/**
 * Returns TRUE if same.
 */
bool
bfa_nw_ioc_fwver_cmp(struct bfa_ioc *ioc, struct bfi_ioc_image_hdr *fwhdr)
{
	struct bfi_ioc_image_hdr *drv_fwhdr;
	int i;

	drv_fwhdr = (struct bfi_ioc_image_hdr *)
		bfa_cb_image_get_chunk(BFA_IOC_FWIMG_TYPE(ioc), 0);

	for (i = 0; i < BFI_IOC_MD5SUM_SZ; i++) {
		if (fwhdr->md5sum[i] != drv_fwhdr->md5sum[i])
			return false;
	}

	return true;
}
/**
 * Return true if current running version is valid. Firmware signature and
 * execution context (driver/bios) must match.
 */
static bool
bfa_ioc_fwver_valid(struct bfa_ioc *ioc)
{
	struct bfi_ioc_image_hdr fwhdr, *drv_fwhdr;

	/**
	 * If bios/efi boot (flash based) -- return true
	 */
	if (bfa_ioc_is_optrom(ioc))
		return true;

	bfa_nw_ioc_fwver_get(ioc, &fwhdr);
	drv_fwhdr = (struct bfi_ioc_image_hdr *)
		bfa_cb_image_get_chunk(BFA_IOC_FWIMG_TYPE(ioc), 0);

	if (fwhdr.signature != drv_fwhdr->signature)
		return false;

	if (fwhdr.exec != drv_fwhdr->exec)
		return false;

	return bfa_nw_ioc_fwver_cmp(ioc, &fwhdr);
}
/**
 * Conditionally flush any pending message from firmware at start.
 */
static void
bfa_ioc_msgflush(struct bfa_ioc *ioc)
{
	u32 r32;

	r32 = readl(ioc->ioc_regs.lpu_mbox_cmd);
	if (r32)
		writel(1, ioc->ioc_regs.lpu_mbox_cmd);
}
/**
 * @img ioc_init_logic.jpg
 */
static void
bfa_ioc_hwinit(struct bfa_ioc *ioc, bool force)
{
	enum bfi_ioc_state ioc_fwstate;
	bool fwvalid;

	ioc_fwstate = readl(ioc->ioc_regs.ioc_fwstate);

	if (force)
		ioc_fwstate = BFI_IOC_UNINIT;

	/**
	 * check if firmware is valid
	 */
	fwvalid = (ioc_fwstate == BFI_IOC_UNINIT) ?
		false : bfa_ioc_fwver_valid(ioc);

	if (!fwvalid) {
		bfa_ioc_boot(ioc, BFI_BOOT_TYPE_NORMAL, ioc->pcidev.device_id);
		return;
	}

	/**
	 * If hardware initialization is in progress (initialized by other IOC),
	 * just wait for an initialization completion interrupt.
	 */
	if (ioc_fwstate == BFI_IOC_INITING) {
		ioc->cbfn->reset_cbfn(ioc->bfa);
		return;
	}

	/**
	 * If IOC function is disabled and firmware version is same,
	 * just re-enable IOC.
	 *
	 * If option rom, IOC must not be in operational state. With
	 * convergence, IOC will be in operational state when 2nd driver
	 * is loaded.
	 */
	if (ioc_fwstate == BFI_IOC_DISABLED ||
	    (!bfa_ioc_is_optrom(ioc) && ioc_fwstate == BFI_IOC_OP)) {
		/**
		 * When using MSI-X any pending firmware ready event should
		 * be flushed. Otherwise MSI-X interrupts are not delivered.
		 */
		bfa_ioc_msgflush(ioc);
		ioc->cbfn->reset_cbfn(ioc->bfa);
		bfa_fsm_send_event(ioc, IOC_E_FWREADY);
		return;
	}

	/**
	 * Initialize the h/w for any other states.
	 */
	bfa_ioc_boot(ioc, BFI_BOOT_TYPE_NORMAL, ioc->pcidev.device_id);
}
void
bfa_nw_ioc_timeout(void *ioc_arg)
{
	struct bfa_ioc *ioc = (struct bfa_ioc *) ioc_arg;

	bfa_fsm_send_event(ioc, IOC_E_TIMEOUT);
}
static void
bfa_ioc_mbox_send(struct bfa_ioc *ioc, void *ioc_msg, int len)
{
	u32 *msgp = (u32 *) ioc_msg;
	u32 i;

	BUG_ON(!(len <= BFI_IOC_MSGLEN_MAX));

	/*
	 * first write msg to mailbox registers
	 */
	for (i = 0; i < len / sizeof(u32); i++)
		writel(cpu_to_le32(msgp[i]),
			ioc->ioc_regs.hfn_mbox + i * sizeof(u32));

	for (; i < BFI_IOC_MSGLEN_MAX / sizeof(u32); i++)
		writel(0, ioc->ioc_regs.hfn_mbox + i * sizeof(u32));

	/*
	 * write 1 to mailbox CMD to trigger LPU event
	 */
	writel(1, ioc->ioc_regs.hfn_mbox_cmd);
	(void) readl(ioc->ioc_regs.hfn_mbox_cmd);
}
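
/*
 * The trailing readl() of hfn_mbox_cmd above is a read-back that flushes
 * the posted mailbox writes to the adapter before the caller continues.
 */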
static void
bfa_ioc_send_enable(struct bfa_ioc *ioc)
{
	struct bfi_ioc_ctrl_req enable_req;
	struct timeval tv;

	bfi_h2i_set(enable_req.mh, BFI_MC_IOC, BFI_IOC_H2I_ENABLE_REQ,
		    bfa_ioc_portid(ioc));
	enable_req.ioc_class = ioc->ioc_mc;
	do_gettimeofday(&tv);
	enable_req.tv_sec = ntohl(tv.tv_sec);
	bfa_ioc_mbox_send(ioc, &enable_req, sizeof(struct bfi_ioc_ctrl_req));
}
static void
bfa_ioc_send_disable(struct bfa_ioc *ioc)
{
	struct bfi_ioc_ctrl_req disable_req;

	bfi_h2i_set(disable_req.mh, BFI_MC_IOC, BFI_IOC_H2I_DISABLE_REQ,
		    bfa_ioc_portid(ioc));
	bfa_ioc_mbox_send(ioc, &disable_req, sizeof(struct bfi_ioc_ctrl_req));
}
static void
bfa_ioc_send_getattr(struct bfa_ioc *ioc)
{
	struct bfi_ioc_getattr_req attr_req;

	bfi_h2i_set(attr_req.mh, BFI_MC_IOC, BFI_IOC_H2I_GETATTR_REQ,
		    bfa_ioc_portid(ioc));
	bfa_dma_be_addr_set(attr_req.attr_addr, ioc->attr_dma.pa);
	bfa_ioc_mbox_send(ioc, &attr_req, sizeof(attr_req));
}
void
bfa_nw_ioc_hb_check(void *cbarg)
{
	struct bfa_ioc *ioc = cbarg;
	u32 hb_count;

	hb_count = readl(ioc->ioc_regs.heartbeat);
	if (ioc->hb_count == hb_count) {
		pr_crit("Firmware heartbeat failure at %d", hb_count);
		bfa_ioc_recover(ioc);
		return;
	} else {
		ioc->hb_count = hb_count;
	}

	bfa_ioc_mbox_poll(ioc);
	mod_timer(&ioc->hb_timer, jiffies +
		msecs_to_jiffies(BFA_IOC_HB_TOV));
}
static void
bfa_ioc_hb_monitor(struct bfa_ioc *ioc)
{
	ioc->hb_count = readl(ioc->ioc_regs.heartbeat);
	mod_timer(&ioc->hb_timer, jiffies +
		msecs_to_jiffies(BFA_IOC_HB_TOV));
}
static void
bfa_ioc_hb_stop(struct bfa_ioc *ioc)
{
	del_timer(&ioc->hb_timer);
}
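
/*
 * Heartbeat monitoring: firmware keeps incrementing the heartbeat
 * register while it is healthy. bfa_nw_ioc_hb_check() samples the
 * register every BFA_IOC_HB_TOV ms; an unchanged count is treated as a
 * firmware failure and kicks bfa_ioc_recover(). The same tick also runs
 * the mailbox poll so queued commands get another chance to be sent.
 */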
/**
 * Initiate a full firmware download.
 */
static void
bfa_ioc_download_fw(struct bfa_ioc *ioc, u32 boot_type,
		    u32 boot_param)
{
	u32 *fwimg;
	u32 pgnum, pgoff;
	u32 loff = 0;
	u32 chunkno = 0;
	u32 i;

	/**
	 * Initialize LMEM first before code download
	 */
	bfa_ioc_lmem_init(ioc);

	/**
	 * Flash based firmware boot
	 */
	if (bfa_ioc_is_optrom(ioc))
		boot_type = BFI_BOOT_TYPE_FLASH;
	fwimg = bfa_cb_image_get_chunk(BFA_IOC_FWIMG_TYPE(ioc), chunkno);

	pgnum = bfa_ioc_smem_pgnum(ioc, loff);
	pgoff = bfa_ioc_smem_pgoff(ioc, loff);

	writel(pgnum, ioc->ioc_regs.host_page_num_fn);

	for (i = 0; i < bfa_cb_image_get_size(BFA_IOC_FWIMG_TYPE(ioc)); i++) {
		if (BFA_IOC_FLASH_CHUNK_NO(i) != chunkno) {
			chunkno = BFA_IOC_FLASH_CHUNK_NO(i);
			fwimg = bfa_cb_image_get_chunk(BFA_IOC_FWIMG_TYPE(ioc),
					BFA_IOC_FLASH_CHUNK_ADDR(chunkno));
		}

		/**
		 * write smem
		 */
		writel((swab32(fwimg[BFA_IOC_FLASH_OFFSET_IN_CHUNK(i)])),
			((ioc->ioc_regs.smem_page_start) + (loff)));

		loff += sizeof(u32);

		/**
		 * handle page offset wrap around
		 */
		loff = PSS_SMEM_PGOFF(loff);
		if (loff == 0) {
			pgnum++;
			writel(pgnum,
				ioc->ioc_regs.host_page_num_fn);
		}
	}

	writel(bfa_ioc_smem_pgnum(ioc, 0),
		ioc->ioc_regs.host_page_num_fn);

	/*
	 * Set boot type and boot param at the end.
	 */
	writel((swab32(swab32(boot_type))), ((ioc->ioc_regs.smem_page_start)
			+ (BFI_BOOT_TYPE_OFF)));
	writel((swab32(swab32(boot_param))), ((ioc->ioc_regs.smem_page_start)
			+ (BFI_BOOT_PARAM_OFF)));
}
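
/*
 * Note on the download loop above: SMEM is reached through a sliding page
 * window, so the offset is wrapped with PSS_SMEM_PGOFF() and
 * host_page_num_fn is advanced each time the offset wraps back to 0.
 */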
static void
bfa_ioc_reset(struct bfa_ioc *ioc, bool force)
{
	bfa_ioc_hwinit(ioc, force);
}
/**
 * Update BFA configuration from firmware configuration.
 */
static void
bfa_ioc_getattr_reply(struct bfa_ioc *ioc)
{
	struct bfi_ioc_attr *attr = ioc->attr;

	attr->adapter_prop = ntohl(attr->adapter_prop);
	attr->card_type = ntohl(attr->card_type);
	attr->maxfrsize = ntohs(attr->maxfrsize);

	bfa_fsm_send_event(ioc, IOC_E_FWRSP_GETATTR);
}
/**
 * Attach time initialization of mbox logic.
 */
static void
bfa_ioc_mbox_attach(struct bfa_ioc *ioc)
{
	struct bfa_ioc_mbox_mod *mod = &ioc->mbox_mod;
	int mc;

	INIT_LIST_HEAD(&mod->cmd_q);
	for (mc = 0; mc < BFI_MC_MAX; mc++) {
		mod->mbhdlr[mc].cbfn = NULL;
		mod->mbhdlr[mc].cbarg = ioc->bfa;
	}
}
/**
 * Mbox poll timer -- restarts any pending mailbox requests.
 */
static void
bfa_ioc_mbox_poll(struct bfa_ioc *ioc)
{
	struct bfa_ioc_mbox_mod *mod = &ioc->mbox_mod;
	struct bfa_mbox_cmd *cmd;
	u32 stat;

	/**
	 * If no command pending, do nothing
	 */
	if (list_empty(&mod->cmd_q))
		return;

	/**
	 * If previous command is not yet fetched by firmware, do nothing
	 */
	stat = readl(ioc->ioc_regs.hfn_mbox_cmd);
	if (stat)
		return;

	/**
	 * Enqueue command to firmware.
	 */
	bfa_q_deq(&mod->cmd_q, &cmd);
	bfa_ioc_mbox_send(ioc, cmd->msg, sizeof(cmd->msg));
}
/**
 * Cleanup any pending requests.
 */
static void
bfa_ioc_mbox_hbfail(struct bfa_ioc *ioc)
{
	struct bfa_ioc_mbox_mod *mod = &ioc->mbox_mod;
	struct bfa_mbox_cmd *cmd;

	while (!list_empty(&mod->cmd_q))
		bfa_q_deq(&mod->cmd_q, &cmd);
}
static enum bfa_status
bfa_ioc_pll_init(struct bfa_ioc *ioc)
{
	/*
	 * Hold semaphore so that nobody can access the chip during init.
	 */
	bfa_nw_ioc_sem_get(ioc->ioc_regs.ioc_init_sem_reg);

	bfa_ioc_pll_init_asic(ioc);

	ioc->pllinit = true;
	/*
	 * release semaphore.
	 */
	bfa_nw_ioc_sem_release(ioc->ioc_regs.ioc_init_sem_reg);

	return BFA_STATUS_OK;
}
/**
 * Interface used by diag module to do firmware boot with memory test
 * as the entry vector.
 */
static void
bfa_ioc_boot(struct bfa_ioc *ioc, u32 boot_type, u32 boot_param)
{
	void __iomem *rb;

	bfa_ioc_stats(ioc, ioc_boots);

	if (bfa_ioc_pll_init(ioc) != BFA_STATUS_OK)
		return;

	/**
	 * Initialize IOC state of all functions on a chip reset.
	 */
	rb = ioc->pcidev.pci_bar_kva;
	if (boot_param == BFI_BOOT_TYPE_MEMTEST) {
		writel(BFI_IOC_MEMTEST, (rb + BFA_IOC0_STATE_REG));
		writel(BFI_IOC_MEMTEST, (rb + BFA_IOC1_STATE_REG));
	} else {
		writel(BFI_IOC_INITING, (rb + BFA_IOC0_STATE_REG));
		writel(BFI_IOC_INITING, (rb + BFA_IOC1_STATE_REG));
	}

	bfa_ioc_msgflush(ioc);
	bfa_ioc_download_fw(ioc, boot_type, boot_param);

	/**
	 * Enable interrupts just before starting LPU
	 */
	ioc->cbfn->reset_cbfn(ioc->bfa);
	bfa_ioc_lpu_start(ioc);
}
/**
 * Enable/disable IOC failure auto recovery.
 */
void
bfa_nw_ioc_auto_recover(bool auto_recover)
{
	bfa_nw_auto_recover = auto_recover;
}
bool
bfa_nw_ioc_is_operational(struct bfa_ioc *ioc)
{
	return bfa_fsm_cmp_state(ioc, bfa_ioc_sm_op);
}
static void
bfa_ioc_msgget(struct bfa_ioc *ioc, void *mbmsg)
{
	u32 *msgp = mbmsg;
	u32 r32;
	int i;

	for (i = 0; i < (sizeof(union bfi_ioc_i2h_msg_u) / sizeof(u32));
	     i++) {
		r32 = readl(ioc->ioc_regs.lpu_mbox +
				i * sizeof(u32));
		msgp[i] = htonl(r32);
	}

	/**
	 * turn off mailbox interrupt by clearing mailbox status
	 */
	writel(1, ioc->ioc_regs.lpu_mbox_cmd);
	readl(ioc->ioc_regs.lpu_mbox_cmd);
}
static void
bfa_ioc_isr(struct bfa_ioc *ioc, struct bfi_mbmsg *m)
{
	union bfi_ioc_i2h_msg_u *msg;

	msg = (union bfi_ioc_i2h_msg_u *) m;

	bfa_ioc_stats(ioc, ioc_isrs);

	switch (msg->mh.msg_id) {
	case BFI_IOC_I2H_HBEAT:
		break;

	case BFI_IOC_I2H_READY_EVENT:
		bfa_fsm_send_event(ioc, IOC_E_FWREADY);
		break;

	case BFI_IOC_I2H_ENABLE_REPLY:
		bfa_fsm_send_event(ioc, IOC_E_FWRSP_ENABLE);
		break;

	case BFI_IOC_I2H_DISABLE_REPLY:
		bfa_fsm_send_event(ioc, IOC_E_FWRSP_DISABLE);
		break;

	case BFI_IOC_I2H_GETATTR_REPLY:
		bfa_ioc_getattr_reply(ioc);
		break;

	default:
		BUG_ON(1);
	}
}
/**
 * IOC attach time initialization and setup.
 *
 * @param[in]	ioc	memory for IOC
 * @param[in]	bfa	driver instance structure
 */
void
bfa_nw_ioc_attach(struct bfa_ioc *ioc, void *bfa, struct bfa_ioc_cbfn *cbfn)
{
	ioc->bfa = bfa;
	ioc->cbfn = cbfn;
	ioc->fcmode = false;
	ioc->pllinit = false;
	ioc->dbg_fwsave_once = true;

	bfa_ioc_mbox_attach(ioc);
	INIT_LIST_HEAD(&ioc->hb_notify_q);

	bfa_fsm_set_state(ioc, bfa_ioc_sm_reset);
}
/**
 * Driver detach time IOC cleanup.
 */
void
bfa_nw_ioc_detach(struct bfa_ioc *ioc)
{
	bfa_fsm_send_event(ioc, IOC_E_DETACH);
}
/**
 * Setup IOC PCI properties.
 *
 * @param[in]	pcidev	PCI device information for this IOC
 */
void
bfa_nw_ioc_pci_init(struct bfa_ioc *ioc, struct bfa_pcidev *pcidev,
		enum bfi_mclass mc)
{
	ioc->ioc_mc = mc;
	ioc->pcidev = *pcidev;
	ioc->ctdev = bfa_asic_id_ct(ioc->pcidev.device_id);
	ioc->cna = ioc->ctdev && !ioc->fcmode;

	bfa_nw_ioc_set_ct_hwif(ioc);

	bfa_ioc_map_port(ioc);
	bfa_ioc_reg_init(ioc);
}
/**
 * Initialize IOC dma memory
 *
 * @param[in]	dm_kva	kernel virtual address of IOC dma memory
 * @param[in]	dm_pa	physical address of IOC dma memory
 */
void
bfa_nw_ioc_mem_claim(struct bfa_ioc *ioc, u8 *dm_kva, u64 dm_pa)
{
	/**
	 * dma memory for firmware attribute
	 */
	ioc->attr_dma.kva = dm_kva;
	ioc->attr_dma.pa = dm_pa;
	ioc->attr = (struct bfi_ioc_attr *) dm_kva;
}
/**
 * Return size of dma memory required.
 */
u32
bfa_nw_ioc_meminfo(void)
{
	return roundup(sizeof(struct bfi_ioc_attr), BFA_DMA_ALIGN_SZ);
}
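
/*
 * Typical attach-time sequence for a caller (an illustrative sketch with
 * hypothetical names, not lifted verbatim from the bnad code): allocate
 * bfa_nw_ioc_meminfo() bytes of coherent DMA memory and hand it to the
 * IOC before enabling it, e.g.
 *
 *	bfa_nw_ioc_attach(ioc, bnad, &ioc_cbfn);
 *	bfa_nw_ioc_pci_init(ioc, &pcidev, BFI_MC_LL);
 *	bfa_nw_ioc_mem_claim(ioc, dma_kva, dma_pa);
 *	bfa_nw_ioc_enable(ioc);
 */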
void
bfa_nw_ioc_enable(struct bfa_ioc *ioc)
{
	bfa_ioc_stats(ioc, ioc_enables);
	ioc->dbg_fwsave_once = true;

	bfa_fsm_send_event(ioc, IOC_E_ENABLE);
}
void
bfa_nw_ioc_disable(struct bfa_ioc *ioc)
{
	bfa_ioc_stats(ioc, ioc_disables);
	bfa_fsm_send_event(ioc, IOC_E_DISABLE);
}
static u32
bfa_ioc_smem_pgnum(struct bfa_ioc *ioc, u32 fmaddr)
{
	return PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, fmaddr);
}
static u32
bfa_ioc_smem_pgoff(struct bfa_ioc *ioc, u32 fmaddr)
{
	return PSS_SMEM_PGOFF(fmaddr);
}
/**
 * Register mailbox message handler function, to be called by common modules
 */
void
bfa_nw_ioc_mbox_regisr(struct bfa_ioc *ioc, enum bfi_mclass mc,
		    bfa_ioc_mbox_mcfunc_t cbfn, void *cbarg)
{
	struct bfa_ioc_mbox_mod *mod = &ioc->mbox_mod;

	mod->mbhdlr[mc].cbfn = cbfn;
	mod->mbhdlr[mc].cbarg = cbarg;
}
/**
 * Queue a mailbox command request to firmware. Waits if mailbox is busy.
 * Serialization is the caller's responsibility.
 *
 * @param[in]	ioc	IOC instance
 * @param[in]	cmd	Mailbox command
 */
void
bfa_nw_ioc_mbox_queue(struct bfa_ioc *ioc, struct bfa_mbox_cmd *cmd)
{
	struct bfa_ioc_mbox_mod *mod = &ioc->mbox_mod;
	u32 stat;

	/**
	 * If a previous command is pending, queue new command
	 */
	if (!list_empty(&mod->cmd_q)) {
		list_add_tail(&cmd->qe, &mod->cmd_q);
		return;
	}

	/**
	 * If mailbox is busy, queue command for poll timer
	 */
	stat = readl(ioc->ioc_regs.hfn_mbox_cmd);
	if (stat) {
		list_add_tail(&cmd->qe, &mod->cmd_q);
		return;
	}

	/**
	 * mailbox is free -- queue command to firmware
	 */
	bfa_ioc_mbox_send(ioc, cmd->msg, sizeof(cmd->msg));
}
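
/*
 * Illustrative caller pattern (hypothetical, not lifted from bnad): the
 * caller builds a BFI request in cmd->msg (e.g. with bfi_h2i_set()) and
 * queues it here; the command goes out immediately if the mailbox is
 * idle, otherwise it waits on cmd_q until bfa_ioc_mbox_poll(), run from
 * the heartbeat tick, can send it.
 */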
/**
 * Handle mailbox interrupts
 */
void
bfa_nw_ioc_mbox_isr(struct bfa_ioc *ioc)
{
	struct bfa_ioc_mbox_mod *mod = &ioc->mbox_mod;
	struct bfi_mbmsg m;
	int mc;

	bfa_ioc_msgget(ioc, &m);

	/**
	 * Treat IOC message class as special.
	 */
	mc = m.mh.msg_class;
	if (mc == BFI_MC_IOC) {
		bfa_ioc_isr(ioc, &m);
		return;
	}

	if ((mc > BFI_MC_MAX) || (mod->mbhdlr[mc].cbfn == NULL))
		return;

	mod->mbhdlr[mc].cbfn(mod->mbhdlr[mc].cbarg, &m);
}
void
bfa_nw_ioc_error_isr(struct bfa_ioc *ioc)
{
	bfa_fsm_send_event(ioc, IOC_E_HWERROR);
}
/**
 * Add to IOC heartbeat failure notification queue. To be used by common
 * modules such as cee, port, diag.
 */
void
bfa_nw_ioc_hbfail_register(struct bfa_ioc *ioc,
			struct bfa_ioc_hbfail_notify *notify)
{
	list_add_tail(&notify->qe, &ioc->hb_notify_q);
}
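
/*
 * Illustrative registration (hypothetical names): a common module fills
 * a bfa_ioc_hbfail_notify with its callback and argument and registers
 * it once at setup; the callback then runs from bfa_ioc_sm_hbfail_entry()
 * when a heartbeat failure is declared.
 *
 *	static struct bfa_ioc_hbfail_notify hb_notify;
 *
 *	hb_notify.cbfn = my_hbfail_cb;
 *	hb_notify.cbarg = my_dev;
 *	bfa_nw_ioc_hbfail_register(ioc, &hb_notify);
 */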
#define BFA_MFG_NAME "Brocade"
static void
bfa_ioc_get_adapter_attr(struct bfa_ioc *ioc,
			 struct bfa_adapter_attr *ad_attr)
{
	struct bfi_ioc_attr *ioc_attr;

	ioc_attr = ioc->attr;

	bfa_ioc_get_adapter_serial_num(ioc, ad_attr->serial_num);
	bfa_ioc_get_adapter_fw_ver(ioc, ad_attr->fw_ver);
	bfa_ioc_get_adapter_optrom_ver(ioc, ad_attr->optrom_ver);
	bfa_ioc_get_adapter_manufacturer(ioc, ad_attr->manufacturer);
	memcpy(&ad_attr->vpd, &ioc_attr->vpd,
		sizeof(struct bfa_mfg_vpd));

	ad_attr->nports = bfa_ioc_get_nports(ioc);
	ad_attr->max_speed = bfa_ioc_speed_sup(ioc);

	bfa_ioc_get_adapter_model(ioc, ad_attr->model);
	/* For now, model descr uses same model string */
	bfa_ioc_get_adapter_model(ioc, ad_attr->model_descr);

	ad_attr->card_type = ioc_attr->card_type;
	ad_attr->is_mezz = bfa_mfg_is_mezz(ioc_attr->card_type);

	if (BFI_ADAPTER_IS_SPECIAL(ioc_attr->adapter_prop))
		ad_attr->prototype = 1;
	else
		ad_attr->prototype = 0;

	ad_attr->pwwn = bfa_ioc_get_pwwn(ioc);
	ad_attr->mac = bfa_nw_ioc_get_mac(ioc);

	ad_attr->pcie_gen = ioc_attr->pcie_gen;
	ad_attr->pcie_lanes = ioc_attr->pcie_lanes;
	ad_attr->pcie_lanes_orig = ioc_attr->pcie_lanes_orig;
	ad_attr->asic_rev = ioc_attr->asic_rev;

	bfa_ioc_get_pci_chip_rev(ioc, ad_attr->hw_ver);

	ad_attr->cna_capable = ioc->cna;
	ad_attr->trunk_capable = (ad_attr->nports > 1) && !ioc->cna;
}
static enum bfa_ioc_type
bfa_ioc_get_type(struct bfa_ioc *ioc)
{
	if (!ioc->ctdev || ioc->fcmode)
		return BFA_IOC_TYPE_FC;
	else if (ioc->ioc_mc == BFI_MC_IOCFC)
		return BFA_IOC_TYPE_FCoE;
	else if (ioc->ioc_mc == BFI_MC_LL)
		return BFA_IOC_TYPE_LL;
	else {
		BUG_ON(!(ioc->ioc_mc == BFI_MC_LL));
		return BFA_IOC_TYPE_LL;
	}
}
static void
bfa_ioc_get_adapter_serial_num(struct bfa_ioc *ioc, char *serial_num)
{
	memset(serial_num, 0, BFA_ADAPTER_SERIAL_NUM_LEN);
	memcpy(serial_num,
			(void *)ioc->attr->brcd_serialnum,
			BFA_ADAPTER_SERIAL_NUM_LEN);
}
static void
bfa_ioc_get_adapter_fw_ver(struct bfa_ioc *ioc, char *fw_ver)
{
	memset(fw_ver, 0, BFA_VERSION_LEN);
	memcpy(fw_ver, ioc->attr->fw_version, BFA_VERSION_LEN);
}
static void
bfa_ioc_get_pci_chip_rev(struct bfa_ioc *ioc, char *chip_rev)
{
	BUG_ON(!(chip_rev));

	memset(chip_rev, 0, BFA_IOC_CHIP_REV_LEN);

	chip_rev[0] = 'R';
	chip_rev[1] = 'e';
	chip_rev[2] = 'v';
	chip_rev[3] = '-';
	chip_rev[4] = ioc->attr->asic_rev;
	chip_rev[5] = '\0';
}
static void
bfa_ioc_get_adapter_optrom_ver(struct bfa_ioc *ioc, char *optrom_ver)
{
	memset(optrom_ver, 0, BFA_VERSION_LEN);
	memcpy(optrom_ver, ioc->attr->optrom_version,
		BFA_VERSION_LEN);
}
static void
bfa_ioc_get_adapter_manufacturer(struct bfa_ioc *ioc, char *manufacturer)
{
	memset(manufacturer, 0, BFA_ADAPTER_MFG_NAME_LEN);
	memcpy(manufacturer, BFA_MFG_NAME, BFA_ADAPTER_MFG_NAME_LEN);
}
static void
bfa_ioc_get_adapter_model(struct bfa_ioc *ioc, char *model)
{
	struct bfi_ioc_attr *ioc_attr;

	BUG_ON(!(model));
	memset(model, 0, BFA_ADAPTER_MODEL_NAME_LEN);

	ioc_attr = ioc->attr;

	/**
	 * model name
	 */
	snprintf(model, BFA_ADAPTER_MODEL_NAME_LEN, "%s-%u",
		BFA_MFG_NAME, ioc_attr->card_type);
}
static enum bfa_ioc_state
bfa_ioc_get_state(struct bfa_ioc *ioc)
{
	return bfa_sm_to_state(ioc_sm_table, ioc->fsm);
}
void
bfa_nw_ioc_get_attr(struct bfa_ioc *ioc, struct bfa_ioc_attr *ioc_attr)
{
	memset((void *)ioc_attr, 0, sizeof(struct bfa_ioc_attr));

	ioc_attr->state = bfa_ioc_get_state(ioc);
	ioc_attr->port_id = ioc->port_id;

	ioc_attr->ioc_type = bfa_ioc_get_type(ioc);

	bfa_ioc_get_adapter_attr(ioc, &ioc_attr->adapter_attr);

	ioc_attr->pci_attr.device_id = ioc->pcidev.device_id;
	ioc_attr->pci_attr.pcifn = ioc->pcidev.pci_func;
	bfa_ioc_get_pci_chip_rev(ioc, ioc_attr->pci_attr.chip_rev);
}
static u64
bfa_ioc_get_pwwn(struct bfa_ioc *ioc)
{
	return ioc->attr->pwwn;
}
mac_t
bfa_nw_ioc_get_mac(struct bfa_ioc *ioc)
{
	/*
	 * Currently mfg mac is used as FCoE enode mac (not configured by PBC)
	 */
	if (bfa_ioc_get_type(ioc) == BFA_IOC_TYPE_FCoE)
		return bfa_ioc_get_mfg_mac(ioc);
	else
		return ioc->attr->mac;
}
static mac_t
bfa_ioc_get_mfg_mac(struct bfa_ioc *ioc)
{
	mac_t m;

	m = ioc->attr->mfg_mac;
	if (bfa_mfg_is_old_wwn_mac_model(ioc->attr->card_type))
		m.mac[MAC_ADDRLEN - 1] += bfa_ioc_pcifn(ioc);
	else
		bfa_mfg_increment_wwn_mac(&(m.mac[MAC_ADDRLEN-3]),
			bfa_ioc_pcifn(ioc));

	return m;
}
/**
 * Firmware failure detected. Start recovery actions.
 */
static void
bfa_ioc_recover(struct bfa_ioc *ioc)
{
	bfa_ioc_stats(ioc, ioc_hbfails);
	bfa_fsm_send_event(ioc, IOC_E_HBFAIL);
}
static void
bfa_ioc_check_attr_wwns(struct bfa_ioc *ioc)
{
	if (bfa_ioc_get_type(ioc) == BFA_IOC_TYPE_LL)
		return;
}