* Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
* Linux driver for Brocade Fibre Channel Host Bus Adapter.
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License (GPL) Version 2 as
* published by the Free Software Foundation
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
#include "bfa_modules.h"
#include "bfi_ctreg.h"
BFA_TRC_FILE(HAL, CORE);
* BFA IOC FC related definitions
* IOC local definitions
#define BFA_IOCFC_TOV 5000 /* msecs */
BFA_IOCFC_ACT_NONE = 0,
BFA_IOCFC_ACT_INIT = 1,
BFA_IOCFC_ACT_STOP = 2,
BFA_IOCFC_ACT_DISABLE = 3,
#define DEF_CFG_NUM_FABRICS 1
#define DEF_CFG_NUM_LPORTS 256
#define DEF_CFG_NUM_CQS 4
#define DEF_CFG_NUM_IOIM_REQS (BFA_IOIM_MAX)
#define DEF_CFG_NUM_TSKIM_REQS 128
#define DEF_CFG_NUM_FCXP_REQS 64
#define DEF_CFG_NUM_UF_BUFS 64
#define DEF_CFG_NUM_RPORTS 1024
#define DEF_CFG_NUM_ITNIMS (DEF_CFG_NUM_RPORTS)
#define DEF_CFG_NUM_TINS 256
#define DEF_CFG_NUM_SGPGS 2048
#define DEF_CFG_NUM_REQQ_ELEMS 256
#define DEF_CFG_NUM_RSPQ_ELEMS 64
#define DEF_CFG_NUM_SBOOT_TGTS 16
#define DEF_CFG_NUM_SBOOT_LUNS 16
* forward declaration for IOC FC functions
static void bfa_iocfc_enable_cbfn(void *bfa_arg, enum bfa_status status);
static void bfa_iocfc_disable_cbfn(void *bfa_arg);
static void bfa_iocfc_hbfail_cbfn(void *bfa_arg);
static void bfa_iocfc_reset_cbfn(void *bfa_arg);
static struct bfa_ioc_cbfn_s bfa_iocfc_cbfn;
* BFA Interrupt handling functions
bfa_msix_errint(struct bfa_s *bfa, u32 intr)
bfa_ioc_error_isr(&bfa->ioc);
bfa_msix_lpu(struct bfa_s *bfa)
bfa_ioc_mbox_isr(&bfa->ioc);
bfa_reqq_resume(struct bfa_s *bfa, int qid)
struct list_head *waitq, *qe, *qen;
struct bfa_reqq_wait_s *wqe;
waitq = bfa_reqq(bfa, qid);
list_for_each_safe(qe, qen, waitq) {
* Callback only as long as there is room in request queue
if (bfa_reqq_full(bfa, qid))
wqe = (struct bfa_reqq_wait_s *) qe;
wqe->qresume(wqe->cbarg);
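/*
 * Illustrative sketch (not part of this file) of how a sub-module typically
 * parks a request when its request queue is full and is later resumed by
 * bfa_reqq_resume() above. It assumes the bfa_reqq_next()/bfa_reqq_winit()/
 * bfa_reqq_wait() helpers declared in bfa.h; the foo_* names are hypothetical.
 *
 *	static void foo_qresume(void *cbarg)
 *	{
 *		struct foo_s *foo = cbarg;
 *
 *		foo_send_request(foo);	-- retry now that queue space exists
 *	}
 *
 *	if (!bfa_reqq_next(bfa, qid)) {
 *		bfa_reqq_winit(&foo->reqq_wait, foo_qresume, foo);
 *		bfa_reqq_wait(bfa, qid, &foo->reqq_wait);
 *		return;
 *	}
 */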
bfa_msix_all(struct bfa_s *bfa, int vec)
bfa_intx(struct bfa_s *bfa)
intr = bfa_reg_read(bfa->iocfc.bfa_regs.intr_status);
* RME completion queue interrupt
qintr = intr & __HFN_INT_RME_MASK;
bfa_reg_write(bfa->iocfc.bfa_regs.intr_status, qintr);
for (queue = 0; queue < BFI_IOC_MAX_CQS_ASIC; queue++) {
if (intr & (__HFN_INT_RME_Q0 << queue))
bfa_msix_rspq(bfa, queue & (BFI_IOC_MAX_CQS - 1));
* CPE completion queue interrupt
qintr = intr & __HFN_INT_CPE_MASK;
bfa_reg_write(bfa->iocfc.bfa_regs.intr_status, qintr);
for (queue = 0; queue < BFI_IOC_MAX_CQS_ASIC; queue++) {
if (intr & (__HFN_INT_CPE_Q0 << queue))
bfa_msix_reqq(bfa, queue & (BFI_IOC_MAX_CQS - 1));
bfa_msix_lpu_err(bfa, intr);
bfa_intx_enable(struct bfa_s *bfa)
bfa_reg_write(bfa->iocfc.bfa_regs.intr_mask, bfa->iocfc.intr_mask);
bfa_intx_disable(struct bfa_s *bfa)
bfa_reg_write(bfa->iocfc.bfa_regs.intr_mask, -1L);
bfa_isr_enable(struct bfa_s *bfa)
int pci_func = bfa_ioc_pcifn(&bfa->ioc);
bfa_trc(bfa, pci_func);
bfa_msix_install(bfa);
intr_unmask = (__HFN_INT_ERR_EMC | __HFN_INT_ERR_LPU0 |
__HFN_INT_ERR_LPU1 | __HFN_INT_ERR_PSS |
intr_unmask |= (__HFN_INT_CPE_Q0 | __HFN_INT_CPE_Q1 |
__HFN_INT_CPE_Q2 | __HFN_INT_CPE_Q3 |
__HFN_INT_RME_Q0 | __HFN_INT_RME_Q1 |
__HFN_INT_RME_Q2 | __HFN_INT_RME_Q3 |
__HFN_INT_MBOX_LPU0);
intr_unmask |= (__HFN_INT_CPE_Q4 | __HFN_INT_CPE_Q5 |
__HFN_INT_CPE_Q6 | __HFN_INT_CPE_Q7 |
__HFN_INT_RME_Q4 | __HFN_INT_RME_Q5 |
__HFN_INT_RME_Q6 | __HFN_INT_RME_Q7 |
__HFN_INT_MBOX_LPU1);
bfa_reg_write(bfa->iocfc.bfa_regs.intr_status, intr_unmask);
bfa_reg_write(bfa->iocfc.bfa_regs.intr_mask, ~intr_unmask);
bfa->iocfc.intr_mask = ~intr_unmask;
bfa_isr_mode_set(bfa, bfa->msix.nvecs != 0);
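/*
 * Note on the mask arithmetic above: intr_unmask collects the sources that
 * should stay enabled -- the error bits for every function plus, per PCI
 * function, either the CPE/RME Q0-Q3 and LPU0 mailbox bits or the Q4-Q7 and
 * LPU1 bits. Writing intr_unmask to intr_status clears any stale events,
 * writing its complement to intr_mask leaves exactly those sources unmasked
 * (a set mask bit disables a source), and the complement is cached in
 * iocfc.intr_mask so bfa_intx_enable() can restore it later.
 */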
bfa_isr_disable(struct bfa_s *bfa)
bfa_isr_mode_set(bfa, BFA_FALSE);
bfa_reg_write(bfa->iocfc.bfa_regs.intr_mask, -1L);
bfa_msix_uninstall(bfa);
bfa_msix_reqq(struct bfa_s *bfa, int qid)
struct list_head *waitq;
qid &= (BFI_IOC_MAX_CQS - 1);
bfa->iocfc.hwif.hw_reqq_ack(bfa, qid);
* Resume any pending requests in the corresponding reqq.
waitq = bfa_reqq(bfa, qid);
if (!list_empty(waitq))
bfa_reqq_resume(bfa, qid);
bfa_isr_unhandled(struct bfa_s *bfa, struct bfi_msg_s *m)
bfa_trc(bfa, m->mhdr.msg_class);
bfa_trc(bfa, m->mhdr.msg_id);
bfa_trc(bfa, m->mhdr.mtag.i2htok);
bfa_trc_stop(bfa->trcmod);
bfa_msix_rspq(struct bfa_s *bfa, int qid)
struct list_head *waitq;
bfa_trc_fp(bfa, qid);
qid &= (BFI_IOC_MAX_CQS - 1);
bfa->iocfc.hwif.hw_rspq_ack(bfa, qid);
ci = bfa_rspq_ci(bfa, qid);
pi = bfa_rspq_pi(bfa, qid);
if (bfa->rme_process) {
m = bfa_rspq_elem(bfa, qid, ci);
bfa_assert_fp(m->mhdr.msg_class < BFI_MC_MAX);
bfa_isrs[m->mhdr.msg_class] (bfa, m);
CQ_INCR(ci, bfa->iocfc.cfg.drvcfg.num_rspq_elems);
bfa_rspq_ci(bfa, qid) = pi;
bfa_reg_write(bfa->iocfc.bfa_regs.rme_q_ci[qid], pi);
* Resume any pending requests in the corresponding reqq.
waitq = bfa_reqq(bfa, qid);
if (!list_empty(waitq))
bfa_reqq_resume(bfa, qid);
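/*
 * Sketch of the circular-queue walk elided above (illustrative; CQ_INCR is
 * assumed to advance an index and wrap it at the queue depth):
 *
 *	ci = bfa_rspq_ci(bfa, qid);	-- consumer index, owned by the driver
 *	pi = bfa_rspq_pi(bfa, qid);	-- producer index, advanced by firmware
 *	while (ci != pi) {
 *		m = bfa_rspq_elem(bfa, qid, ci);
 *		bfa_isrs[m->mhdr.msg_class](bfa, m);
 *		CQ_INCR(ci, bfa->iocfc.cfg.drvcfg.num_rspq_elems);
 *	}
 *
 * Afterwards the shadow CI is set to PI and the new CI is acknowledged to
 * the hardware through rme_q_ci[qid]; when rme_process is false the dispatch
 * loop is skipped and the pending messages are simply discarded.
 */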
bfa_msix_lpu_err(struct bfa_s *bfa, int vec)
u32 intr, curr_value;
intr = bfa_reg_read(bfa->iocfc.bfa_regs.intr_status);
if (intr & (__HFN_INT_MBOX_LPU0 | __HFN_INT_MBOX_LPU1))
intr &= (__HFN_INT_ERR_EMC | __HFN_INT_ERR_LPU0 |
__HFN_INT_ERR_LPU1 | __HFN_INT_ERR_PSS | __HFN_INT_LL_HALT);
if (intr & __HFN_INT_LL_HALT) {
* If LL_HALT bit is set then FW Init Halt LL Port
* Register needs to be cleared as well so Interrupt
* Status Register will be cleared.
curr_value = bfa_reg_read(bfa->ioc.ioc_regs.ll_halt);
curr_value &= ~__FW_INIT_HALT_P;
bfa_reg_write(bfa->ioc.ioc_regs.ll_halt, curr_value);
if (intr & __HFN_INT_ERR_PSS) {
* ERR_PSS bit needs to be cleared as well in case
* interrupts are shared, so the driver's interrupt handler is
* still called even though it is already masked out.
curr_value = bfa_reg_read(
bfa->ioc.ioc_regs.pss_err_status_reg);
curr_value &= __PSS_ERR_STATUS_SET;
bfa_reg_write(bfa->ioc.ioc_regs.pss_err_status_reg,
bfa_reg_write(bfa->iocfc.bfa_regs.intr_status, intr);
bfa_msix_errint(bfa, intr);
bfa_isr_bind(enum bfi_mclass mc, bfa_isr_func_t isr_func)
bfa_isrs[mc] = isr_func;
* BFA IOC FC related functions
* hal_ioc_pvt BFA IOC private functions
bfa_iocfc_cqs_sz(struct bfa_iocfc_cfg_s *cfg, u32 *dm_len)
int i, per_reqq_sz, per_rspq_sz;
per_reqq_sz = BFA_ROUNDUP((cfg->drvcfg.num_reqq_elems * BFI_LMSG_SZ),
per_rspq_sz = BFA_ROUNDUP((cfg->drvcfg.num_rspq_elems * BFI_LMSG_SZ),
for (i = 0; i < cfg->fwcfg.num_cqs; i++) {
*dm_len = *dm_len + per_reqq_sz;
*dm_len = *dm_len + per_rspq_sz;
* Calculate Shadow CI/PI size
for (i = 0; i < cfg->fwcfg.num_cqs; i++)
*dm_len += (2 * BFA_CACHELINE_SZ);
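/*
 * Worked example of the sizing above, assuming the default configuration
 * (4 CQs, 256 request and 64 response elements per CQ) and a 128-byte
 * BFI_LMSG_SZ (the real value comes from the BFI headers): per_reqq_sz =
 * 256 * 128 = 32768 bytes and per_rspq_sz = 64 * 128 = 8192 bytes, both
 * already cacheline multiples, so the queues consume 4 * (32768 + 8192) =
 * 163840 bytes of DMA memory plus two cachelines per CQ for the shadow
 * CI/PI words.
 */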
bfa_iocfc_fw_cfg_sz(struct bfa_iocfc_cfg_s *cfg, u32 *dm_len)
BFA_ROUNDUP(sizeof(struct bfi_iocfc_cfg_s), BFA_CACHELINE_SZ);
BFA_ROUNDUP(sizeof(struct bfi_iocfc_cfgrsp_s),
* Use the Mailbox interface to send BFI_IOCFC_H2I_CFG_REQ
bfa_iocfc_send_cfg(void *bfa_arg)
struct bfa_s *bfa = bfa_arg;
struct bfa_iocfc_s *iocfc = &bfa->iocfc;
struct bfi_iocfc_cfg_req_s cfg_req;
struct bfi_iocfc_cfg_s *cfg_info = iocfc->cfginfo;
struct bfa_iocfc_cfg_s *cfg = &iocfc->cfg;
bfa_assert(cfg->fwcfg.num_cqs <= BFI_IOC_MAX_CQS);
bfa_trc(bfa, cfg->fwcfg.num_cqs);
bfa_iocfc_reset_queues(bfa);
* initialize IOC configuration info
cfg_info->endian_sig = BFI_IOC_ENDIAN_SIG;
cfg_info->num_cqs = cfg->fwcfg.num_cqs;
bfa_dma_be_addr_set(cfg_info->cfgrsp_addr, iocfc->cfgrsp_dma.pa);
* dma map REQ and RSP circular queues and shadow pointers
for (i = 0; i < cfg->fwcfg.num_cqs; i++) {
bfa_dma_be_addr_set(cfg_info->req_cq_ba[i],
iocfc->req_cq_ba[i].pa);
bfa_dma_be_addr_set(cfg_info->req_shadow_ci[i],
iocfc->req_cq_shadow_ci[i].pa);
cfg_info->req_cq_elems[i] =
bfa_os_htons(cfg->drvcfg.num_reqq_elems);
bfa_dma_be_addr_set(cfg_info->rsp_cq_ba[i],
iocfc->rsp_cq_ba[i].pa);
bfa_dma_be_addr_set(cfg_info->rsp_shadow_pi[i],
iocfc->rsp_cq_shadow_pi[i].pa);
cfg_info->rsp_cq_elems[i] =
bfa_os_htons(cfg->drvcfg.num_rspq_elems);
* Enable interrupt coalescing if it is driver init path
* and not ioc disable/enable path.
cfg_info->intr_attr.coalesce = BFA_TRUE;
iocfc->cfgdone = BFA_FALSE;
* dma map IOC configuration itself
bfi_h2i_set(cfg_req.mh, BFI_MC_IOCFC, BFI_IOCFC_H2I_CFG_REQ,
bfa_dma_be_addr_set(cfg_req.ioc_cfg_dma_addr, iocfc->cfg_info.pa);
bfa_ioc_mbox_send(&bfa->ioc, &cfg_req,
sizeof(struct bfi_iocfc_cfg_req_s));
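/*
 * The firmware answers this request with a BFI_IOCFC_I2H_CFG_REPLY mailbox
 * message; it is delivered through bfa_iocfc_isr() below, which calls
 * bfa_iocfc_cfgrsp() to pick up the firmware-adjusted resource counts and
 * then start the sub-modules.
 */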
bfa_iocfc_init_mem(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
struct bfa_pcidev_s *pcidev)
struct bfa_iocfc_s *iocfc = &bfa->iocfc;
iocfc->action = BFA_IOCFC_ACT_NONE;
bfa_os_assign(iocfc->cfg, *cfg);
* Initialize chip specific handlers.
if (bfa_asic_id_ct(bfa_ioc_devid(&bfa->ioc))) {
iocfc->hwif.hw_reginit = bfa_hwct_reginit;
iocfc->hwif.hw_reqq_ack = bfa_hwct_reqq_ack;
iocfc->hwif.hw_rspq_ack = bfa_hwct_rspq_ack;
iocfc->hwif.hw_msix_init = bfa_hwct_msix_init;
iocfc->hwif.hw_msix_install = bfa_hwct_msix_install;
iocfc->hwif.hw_msix_uninstall = bfa_hwct_msix_uninstall;
iocfc->hwif.hw_isr_mode_set = bfa_hwct_isr_mode_set;
iocfc->hwif.hw_msix_getvecs = bfa_hwct_msix_getvecs;
iocfc->hwif.hw_msix_get_rme_range = bfa_hwct_msix_get_rme_range;
iocfc->hwif.hw_reginit = bfa_hwcb_reginit;
iocfc->hwif.hw_reqq_ack = bfa_hwcb_reqq_ack;
iocfc->hwif.hw_rspq_ack = bfa_hwcb_rspq_ack;
iocfc->hwif.hw_msix_init = bfa_hwcb_msix_init;
iocfc->hwif.hw_msix_install = bfa_hwcb_msix_install;
iocfc->hwif.hw_msix_uninstall = bfa_hwcb_msix_uninstall;
iocfc->hwif.hw_isr_mode_set = bfa_hwcb_isr_mode_set;
iocfc->hwif.hw_msix_getvecs = bfa_hwcb_msix_getvecs;
iocfc->hwif.hw_msix_get_rme_range = bfa_hwcb_msix_get_rme_range;
iocfc->hwif.hw_reginit(bfa);
bfa_iocfc_mem_claim(struct bfa_s *bfa, struct bfa_iocfc_cfg_s *cfg,
struct bfa_meminfo_s *meminfo)
int i, per_reqq_sz, per_rspq_sz;
struct bfa_iocfc_s *iocfc = &bfa->iocfc;
dm_kva = bfa_meminfo_dma_virt(meminfo);
dm_pa = bfa_meminfo_dma_phys(meminfo);
* First allocate dma memory for IOC.
bfa_ioc_mem_claim(&bfa->ioc, dm_kva, dm_pa);
dm_kva += bfa_ioc_meminfo();
dm_pa += bfa_ioc_meminfo();
* Claim DMA-able memory for the request/response queues and for the shadow CI/PI pointers.
per_reqq_sz = BFA_ROUNDUP((cfg->drvcfg.num_reqq_elems * BFI_LMSG_SZ),
per_rspq_sz = BFA_ROUNDUP((cfg->drvcfg.num_rspq_elems * BFI_LMSG_SZ),
for (i = 0; i < cfg->fwcfg.num_cqs; i++) {
iocfc->req_cq_ba[i].kva = dm_kva;
iocfc->req_cq_ba[i].pa = dm_pa;
bfa_os_memset(dm_kva, 0, per_reqq_sz);
dm_kva += per_reqq_sz;
dm_pa += per_reqq_sz;
iocfc->rsp_cq_ba[i].kva = dm_kva;
iocfc->rsp_cq_ba[i].pa = dm_pa;
bfa_os_memset(dm_kva, 0, per_rspq_sz);
dm_kva += per_rspq_sz;
dm_pa += per_rspq_sz;
for (i = 0; i < cfg->fwcfg.num_cqs; i++) {
iocfc->req_cq_shadow_ci[i].kva = dm_kva;
iocfc->req_cq_shadow_ci[i].pa = dm_pa;
dm_kva += BFA_CACHELINE_SZ;
dm_pa += BFA_CACHELINE_SZ;
iocfc->rsp_cq_shadow_pi[i].kva = dm_kva;
iocfc->rsp_cq_shadow_pi[i].pa = dm_pa;
dm_kva += BFA_CACHELINE_SZ;
dm_pa += BFA_CACHELINE_SZ;
* Claim DMA-able memory for the config info page
bfa->iocfc.cfg_info.kva = dm_kva;
bfa->iocfc.cfg_info.pa = dm_pa;
bfa->iocfc.cfginfo = (struct bfi_iocfc_cfg_s *) dm_kva;
dm_kva += BFA_ROUNDUP(sizeof(struct bfi_iocfc_cfg_s), BFA_CACHELINE_SZ);
dm_pa += BFA_ROUNDUP(sizeof(struct bfi_iocfc_cfg_s), BFA_CACHELINE_SZ);
* Claim DMA-able memory for the config response
bfa->iocfc.cfgrsp_dma.kva = dm_kva;
bfa->iocfc.cfgrsp_dma.pa = dm_pa;
bfa->iocfc.cfgrsp = (struct bfi_iocfc_cfgrsp_s *) dm_kva;
BFA_ROUNDUP(sizeof(struct bfi_iocfc_cfgrsp_s),
dm_pa += BFA_ROUNDUP(sizeof(struct bfi_iocfc_cfgrsp_s),
bfa_meminfo_dma_virt(meminfo) = dm_kva;
bfa_meminfo_dma_phys(meminfo) = dm_pa;
dbgsz = bfa_ioc_debug_trcsz(bfa_auto_recover);
bfa_ioc_debug_memclaim(&bfa->ioc, bfa_meminfo_kva(meminfo));
bfa_meminfo_kva(meminfo) += dbgsz;
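/*
 * Resulting DMA layout claimed above, in order: the IOC private area
 * (bfa_ioc_meminfo() bytes), one request and one response ring per CQ
 * (per_reqq_sz/per_rspq_sz bytes each), one cacheline per ring for the
 * shadow CI and PI words, and finally the bfi_iocfc_cfg_s and
 * bfi_iocfc_cfgrsp_s pages, each rounded up to a cacheline. The debug
 * trace buffer, when its size is non-zero, is claimed from the KVA pool
 * rather than from DMA memory.
 */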
* Start BFA submodules.
bfa_iocfc_start_submod(struct bfa_s *bfa)
bfa->rme_process = BFA_TRUE;
for (i = 0; hal_mods[i]; i++)
hal_mods[i]->start(bfa);
* Disable BFA submodules.
bfa_iocfc_disable_submod(struct bfa_s *bfa)
for (i = 0; hal_mods[i]; i++)
hal_mods[i]->iocdisable(bfa);
bfa_iocfc_init_cb(void *bfa_arg, bfa_boolean_t complete)
struct bfa_s *bfa = bfa_arg;
if (bfa->iocfc.cfgdone)
bfa_cb_init(bfa->bfad, BFA_STATUS_OK);
bfa_cb_init(bfa->bfad, BFA_STATUS_FAILED);
if (bfa->iocfc.cfgdone)
bfa->iocfc.action = BFA_IOCFC_ACT_NONE;
bfa_iocfc_stop_cb(void *bfa_arg, bfa_boolean_t compl)
struct bfa_s *bfa = bfa_arg;
struct bfad_s *bfad = bfa->bfad;
complete(&bfad->comp);
bfa->iocfc.action = BFA_IOCFC_ACT_NONE;
bfa_iocfc_disable_cb(void *bfa_arg, bfa_boolean_t compl)
struct bfa_s *bfa = bfa_arg;
struct bfad_s *bfad = bfa->bfad;
complete(&bfad->disable_comp);
* Update BFA configuration from firmware configuration.
bfa_iocfc_cfgrsp(struct bfa_s *bfa)
struct bfa_iocfc_s *iocfc = &bfa->iocfc;
struct bfi_iocfc_cfgrsp_s *cfgrsp = iocfc->cfgrsp;
struct bfa_iocfc_fwcfg_s *fwcfg = &cfgrsp->fwcfg;
fwcfg->num_cqs = fwcfg->num_cqs; /* no byte swap needed, unlike the 16-bit counts below */
fwcfg->num_ioim_reqs = bfa_os_ntohs(fwcfg->num_ioim_reqs);
fwcfg->num_tskim_reqs = bfa_os_ntohs(fwcfg->num_tskim_reqs);
fwcfg->num_fcxp_reqs = bfa_os_ntohs(fwcfg->num_fcxp_reqs);
fwcfg->num_uf_bufs = bfa_os_ntohs(fwcfg->num_uf_bufs);
fwcfg->num_rports = bfa_os_ntohs(fwcfg->num_rports);
iocfc->cfgdone = BFA_TRUE;
* Configuration is complete - initialize/start submodules
bfa_fcport_init(bfa);
if (iocfc->action == BFA_IOCFC_ACT_INIT)
bfa_cb_queue(bfa, &iocfc->init_hcb_qe, bfa_iocfc_init_cb, bfa);
bfa_iocfc_start_submod(bfa);
bfa_iocfc_reset_queues(struct bfa_s *bfa)
for (q = 0; q < BFI_IOC_MAX_CQS; q++) {
bfa_reqq_ci(bfa, q) = 0;
bfa_reqq_pi(bfa, q) = 0;
bfa_rspq_ci(bfa, q) = 0;
bfa_rspq_pi(bfa, q) = 0;
* IOC enable request is complete
bfa_iocfc_enable_cbfn(void *bfa_arg, enum bfa_status status)
struct bfa_s *bfa = bfa_arg;
if (status != BFA_STATUS_OK) {
bfa_isr_disable(bfa);
if (bfa->iocfc.action == BFA_IOCFC_ACT_INIT)
bfa_cb_queue(bfa, &bfa->iocfc.init_hcb_qe,
bfa_iocfc_init_cb, bfa);
bfa_iocfc_send_cfg(bfa);
* IOC disable request is complete
bfa_iocfc_disable_cbfn(void *bfa_arg)
struct bfa_s *bfa = bfa_arg;
bfa_isr_disable(bfa);
bfa_iocfc_disable_submod(bfa);
if (bfa->iocfc.action == BFA_IOCFC_ACT_STOP)
bfa_cb_queue(bfa, &bfa->iocfc.stop_hcb_qe, bfa_iocfc_stop_cb,
bfa_assert(bfa->iocfc.action == BFA_IOCFC_ACT_DISABLE);
bfa_cb_queue(bfa, &bfa->iocfc.dis_hcb_qe, bfa_iocfc_disable_cb,
* Notify sub-modules of hardware failure.
bfa_iocfc_hbfail_cbfn(void *bfa_arg)
struct bfa_s *bfa = bfa_arg;
bfa->rme_process = BFA_FALSE;
bfa_isr_disable(bfa);
bfa_iocfc_disable_submod(bfa);
if (bfa->iocfc.action == BFA_IOCFC_ACT_INIT)
bfa_cb_queue(bfa, &bfa->iocfc.init_hcb_qe, bfa_iocfc_init_cb,
* Actions on chip-reset completion.
bfa_iocfc_reset_cbfn(void *bfa_arg)
struct bfa_s *bfa = bfa_arg;
bfa_iocfc_reset_queues(bfa);
* Query IOC memory requirement information.
bfa_iocfc_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *km_len,
/* dma memory for IOC */
*dm_len += bfa_ioc_meminfo();
bfa_iocfc_fw_cfg_sz(cfg, dm_len);
bfa_iocfc_cqs_sz(cfg, dm_len);
*km_len += bfa_ioc_debug_trcsz(bfa_auto_recover);
* Attach and initialize the IOC FC module.
bfa_iocfc_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
struct bfa_meminfo_s *meminfo, struct bfa_pcidev_s *pcidev)
struct bfa_ioc_s *ioc = &bfa->ioc;
bfa_iocfc_cbfn.enable_cbfn = bfa_iocfc_enable_cbfn;
bfa_iocfc_cbfn.disable_cbfn = bfa_iocfc_disable_cbfn;
bfa_iocfc_cbfn.hbfail_cbfn = bfa_iocfc_hbfail_cbfn;
bfa_iocfc_cbfn.reset_cbfn = bfa_iocfc_reset_cbfn;
ioc->trcmod = bfa->trcmod;
bfa_ioc_attach(&bfa->ioc, bfa, &bfa_iocfc_cbfn, &bfa->timer_mod);
* Set FC mode for BFA_PCI_DEVICE_ID_CT_FC.
if (pcidev->device_id == BFA_PCI_DEVICE_ID_CT_FC)
bfa_ioc_set_fcmode(&bfa->ioc);
bfa_ioc_pci_init(&bfa->ioc, pcidev, BFI_MC_IOCFC);
bfa_ioc_mbox_register(&bfa->ioc, bfa_mbox_isrs);
bfa_iocfc_init_mem(bfa, bfad, cfg, pcidev);
bfa_iocfc_mem_claim(bfa, cfg, meminfo);
bfa_timer_init(&bfa->timer_mod);
INIT_LIST_HEAD(&bfa->comp_q);
for (i = 0; i < BFI_IOC_MAX_CQS; i++)
INIT_LIST_HEAD(&bfa->reqq_waitq[i]);
* Detach the IOC FC module.
bfa_iocfc_detach(struct bfa_s *bfa)
bfa_ioc_detach(&bfa->ioc);
* Initialize the IOC FC module and enable the IOC.
bfa_iocfc_init(struct bfa_s *bfa)
bfa->iocfc.action = BFA_IOCFC_ACT_INIT;
bfa_ioc_enable(&bfa->ioc);
* IOC start called from bfa_start(). Called to start IOC operations
* at driver instantiation for this instance.
bfa_iocfc_start(struct bfa_s *bfa)
if (bfa->iocfc.cfgdone)
bfa_iocfc_start_submod(bfa);
* IOC stop called from bfa_stop(). Called only when the driver is unloaded
bfa_iocfc_stop(struct bfa_s *bfa)
bfa->iocfc.action = BFA_IOCFC_ACT_STOP;
bfa->rme_process = BFA_FALSE;
bfa_ioc_disable(&bfa->ioc);
bfa_iocfc_isr(void *bfaarg, struct bfi_mbmsg_s *m)
struct bfa_s *bfa = bfaarg;
struct bfa_iocfc_s *iocfc = &bfa->iocfc;
union bfi_iocfc_i2h_msg_u *msg;
msg = (union bfi_iocfc_i2h_msg_u *) m;
bfa_trc(bfa, msg->mh.msg_id);
switch (msg->mh.msg_id) {
case BFI_IOCFC_I2H_CFG_REPLY:
iocfc->cfg_reply = &msg->cfg_reply;
bfa_iocfc_cfgrsp(bfa);
case BFI_IOCFC_I2H_UPDATEQ_RSP:
iocfc->updateq_cbfn(iocfc->updateq_cbarg, BFA_STATUS_OK);
bfa_adapter_get_attr(struct bfa_s *bfa, struct bfa_adapter_attr_s *ad_attr)
bfa_ioc_get_adapter_attr(&bfa->ioc, ad_attr);
bfa_adapter_get_id(struct bfa_s *bfa)
return bfa_ioc_get_adid(&bfa->ioc);
bfa_iocfc_get_attr(struct bfa_s *bfa, struct bfa_iocfc_attr_s *attr)
struct bfa_iocfc_s *iocfc = &bfa->iocfc;
attr->intr_attr.coalesce = iocfc->cfginfo->intr_attr.coalesce;
attr->intr_attr.delay = iocfc->cfginfo->intr_attr.delay ?
bfa_os_ntohs(iocfc->cfginfo->intr_attr.delay) :
bfa_os_ntohs(iocfc->cfgrsp->intr_attr.delay);
attr->intr_attr.latency = iocfc->cfginfo->intr_attr.latency ?
bfa_os_ntohs(iocfc->cfginfo->intr_attr.latency) :
bfa_os_ntohs(iocfc->cfgrsp->intr_attr.latency);
attr->config = iocfc->cfg;
bfa_iocfc_israttr_set(struct bfa_s *bfa, struct bfa_iocfc_intr_attr_s *attr)
struct bfa_iocfc_s *iocfc = &bfa->iocfc;
struct bfi_iocfc_set_intr_req_s *m;
iocfc->cfginfo->intr_attr.coalesce = attr->coalesce;
iocfc->cfginfo->intr_attr.delay = bfa_os_htons(attr->delay);
iocfc->cfginfo->intr_attr.latency = bfa_os_htons(attr->latency);
if (!bfa_iocfc_is_operational(bfa))
return BFA_STATUS_OK;
m = bfa_reqq_next(bfa, BFA_REQQ_IOC);
return BFA_STATUS_DEVBUSY;
bfi_h2i_set(m->mh, BFI_MC_IOCFC, BFI_IOCFC_H2I_SET_INTR_REQ,
m->coalesce = iocfc->cfginfo->intr_attr.coalesce;
m->delay = iocfc->cfginfo->intr_attr.delay;
m->latency = iocfc->cfginfo->intr_attr.latency;
bfa_trc(bfa, attr->delay);
bfa_trc(bfa, attr->latency);
bfa_reqq_produce(bfa, BFA_REQQ_IOC);
return BFA_STATUS_OK;
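/*
 * Illustrative use of bfa_iocfc_israttr_set() with hypothetical values
 * (the field names follow struct bfa_iocfc_intr_attr_s as used above):
 *
 *	struct bfa_iocfc_intr_attr_s attr = {
 *		.coalesce = BFA_TRUE,
 *		.delay = 1125,
 *		.latency = 225,
 *	};
 *
 *	if (bfa_iocfc_israttr_set(bfa, &attr) != BFA_STATUS_OK)
 *		-- IOC request queue was full, retry later
 *
 * When the IOC is not yet operational the values are only cached in
 * cfginfo, which is handed to firmware with the next configuration request.
 */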
bfa_iocfc_set_snsbase(struct bfa_s *bfa, u64 snsbase_pa)
struct bfa_iocfc_s *iocfc = &bfa->iocfc;
iocfc->cfginfo->sense_buf_len = (BFI_IOIM_SNSLEN - 1);
bfa_dma_be_addr_set(iocfc->cfginfo->ioim_snsbase, snsbase_pa);
* Enable IOC after it is disabled.
bfa_iocfc_enable(struct bfa_s *bfa)
bfa_plog_str(bfa->plog, BFA_PL_MID_HAL, BFA_PL_EID_MISC, 0,
bfa_ioc_enable(&bfa->ioc);
bfa_iocfc_disable(struct bfa_s *bfa)
bfa_plog_str(bfa->plog, BFA_PL_MID_HAL, BFA_PL_EID_MISC, 0,
bfa->iocfc.action = BFA_IOCFC_ACT_DISABLE;
bfa->rme_process = BFA_FALSE;
bfa_ioc_disable(&bfa->ioc);
bfa_iocfc_is_operational(struct bfa_s *bfa)
return bfa_ioc_is_operational(&bfa->ioc) && bfa->iocfc.cfgdone;
* Return boot target port wwns -- read from boot information in flash.
bfa_iocfc_get_bootwwns(struct bfa_s *bfa, u8 *nwwns, wwn_t *wwns)
struct bfa_iocfc_s *iocfc = &bfa->iocfc;
struct bfi_iocfc_cfgrsp_s *cfgrsp = iocfc->cfgrsp;
if (cfgrsp->pbc_cfg.boot_enabled && cfgrsp->pbc_cfg.nbluns) {
bfa_trc(bfa, cfgrsp->pbc_cfg.nbluns);
*nwwns = cfgrsp->pbc_cfg.nbluns;
for (i = 0; i < cfgrsp->pbc_cfg.nbluns; i++)
wwns[i] = cfgrsp->pbc_cfg.blun[i].tgt_pwwn;
*nwwns = cfgrsp->bootwwns.nwwns;
memcpy(wwns, cfgrsp->bootwwns.wwn, sizeof(cfgrsp->bootwwns.wwn));
bfa_iocfc_get_pbc_boot_cfg(struct bfa_s *bfa, struct bfa_boot_pbc_s *pbcfg)
struct bfa_iocfc_s *iocfc = &bfa->iocfc;
struct bfi_iocfc_cfgrsp_s *cfgrsp = iocfc->cfgrsp;
pbcfg->enable = cfgrsp->pbc_cfg.boot_enabled;
pbcfg->nbluns = cfgrsp->pbc_cfg.nbluns;
pbcfg->speed = cfgrsp->pbc_cfg.port_speed;
memcpy(pbcfg->pblun, cfgrsp->pbc_cfg.blun, sizeof(pbcfg->pblun));
bfa_iocfc_get_pbc_vports(struct bfa_s *bfa, struct bfi_pbc_vport_s *pbc_vport)
struct bfa_iocfc_s *iocfc = &bfa->iocfc;
struct bfi_iocfc_cfgrsp_s *cfgrsp = iocfc->cfgrsp;
memcpy(pbc_vport, cfgrsp->pbc_cfg.vport, sizeof(cfgrsp->pbc_cfg.vport));
return cfgrsp->pbc_cfg.nvports;
* Use this function to query the memory requirement of the BFA library.
* This function needs to be called before bfa_attach() to get the
* memory required of the BFA layer for a given driver configuration.
* This call will fail if the cap is out of range compared to the pre-defined
* values within the BFA library.
* @param[in] cfg - pointer to bfa_ioc_cfg_t. Driver layer should indicate
* its configuration in this structure.
* The default values for struct bfa_iocfc_cfg_s can be
* fetched using bfa_cfg_get_default() API.
* If cap's boundary check fails, the library will use
* the default bfa_cap_t values (and log a warning msg).
* @param[out] meminfo - pointer to bfa_meminfo_t. This content
* indicates the memory type (see bfa_mem_type_t) and
* amount of memory required.
* Driver should allocate the memory, populate the
* starting address for each block and provide the same
* structure as input parameter to the bfa_attach() call.
* Special Considerations: @note
bfa_cfg_get_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *meminfo)
u32 km_len = 0, dm_len = 0;
bfa_assert((cfg != NULL) && (meminfo != NULL));
bfa_os_memset((void *)meminfo, 0, sizeof(struct bfa_meminfo_s));
meminfo->meminfo[BFA_MEM_TYPE_KVA - 1].mem_type =
meminfo->meminfo[BFA_MEM_TYPE_DMA - 1].mem_type =
bfa_iocfc_meminfo(cfg, &km_len, &dm_len);
for (i = 0; hal_mods[i]; i++)
hal_mods[i]->meminfo(cfg, &km_len, &dm_len);
dm_len += bfa_port_meminfo();
meminfo->meminfo[BFA_MEM_TYPE_KVA - 1].mem_len = km_len;
meminfo->meminfo[BFA_MEM_TYPE_DMA - 1].mem_len = dm_len;
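/*
 * Typical driver-side usage of the call above (a minimal sketch; allocation
 * and error handling belong to the caller, and the override shown is just an
 * example):
 *
 *	struct bfa_iocfc_cfg_s cfg;
 *	struct bfa_meminfo_s meminfo;
 *	int i;
 *
 *	bfa_cfg_get_default(&cfg);
 *	cfg.fwcfg.num_ioim_reqs = 512;	-- optional override of a default
 *	bfa_cfg_get_meminfo(&cfg, &meminfo);
 *	for (i = 0; i < BFA_MEM_TYPE_MAX; i++) {
 *		-- allocate meminfo.meminfo[i].mem_len bytes of the requested
 *		-- type (kernel virtual vs. DMA-able) and record the starting
 *		-- addresses in meminfo.meminfo[i] before calling bfa_attach()
 *	}
 */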
* Use this function to attach the driver instance to the BFA
* library. This function will not trigger any HW initialization
* process (which will be done in the bfa_init() call).
* This call will fail if the cap is out of range compared to
* the pre-defined values within the BFA library.
* @param[out] bfa Pointer to bfa_t.
* @param[in] bfad Opaque handle back to the driver's IOC structure
* @param[in] cfg Pointer to bfa_ioc_cfg_t. Should be same structure
* that was used in bfa_cfg_get_meminfo().
* @param[in] meminfo Pointer to bfa_meminfo_t. The driver should
* use the bfa_cfg_get_meminfo() call to
* find the memory blocks required, allocate the
* required memory and provide the starting addresses.
* @param[in] pcidev pointer to struct bfa_pcidev_s
* Special Considerations:
bfa_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
struct bfa_meminfo_s *meminfo, struct bfa_pcidev_s *pcidev)
struct bfa_mem_elem_s *melem;
bfa->fcs = BFA_FALSE;
bfa_assert((cfg != NULL) && (meminfo != NULL));
* initialize all memory pointers for iterative allocation
for (i = 0; i < BFA_MEM_TYPE_MAX; i++) {
melem = meminfo->meminfo + i;
melem->kva_curp = melem->kva;
melem->dma_curp = melem->dma;
bfa_iocfc_attach(bfa, bfad, cfg, meminfo, pcidev);
for (i = 0; hal_mods[i]; i++)
hal_mods[i]->attach(bfa, bfad, cfg, meminfo, pcidev);
bfa_com_port_attach(bfa, meminfo);
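/*
 * A rough sketch of the attach/init sequence the OS driver is expected to
 * follow once the memory blocks are populated (locking and interrupt
 * registration omitted; ordering per the kernel-doc above):
 *
 *	bfa_init_trc(bfa, trcmod);
 *	bfa_init_plog(bfa, plog);
 *	bfa_attach(bfa, bfad, &cfg, &meminfo, &pcidev);
 *	bfa_init(bfa);		-- kicks off IOC enable; the result is
 *				-- reported asynchronously via bfa_cb_init()
 *	bfa_start(bfa);		-- once bfa_cb_init() reports success
 */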
* Use this function to delete a BFA IOC. IOC should be stopped (by
* calling bfa_stop()) before this function call.
* @param[in] bfa - pointer to bfa_t.
* Special Considerations:
bfa_detach(struct bfa_s *bfa)
for (i = 0; hal_mods[i]; i++)
hal_mods[i]->detach(bfa);
bfa_iocfc_detach(bfa);
bfa_init_trc(struct bfa_s *bfa, struct bfa_trc_mod_s *trcmod)
bfa->trcmod = trcmod;
bfa_init_plog(struct bfa_s *bfa, struct bfa_plog_s *plog)
* This function will return immediately; when the IOC initialization is
* completed, bfa_cb_init() will be called.
* @param[in] bfa instance
* Special Considerations:
* When this function returns, the driver should register the interrupt service
* routine(s) and enable the device interrupts. If this is not done,
* bfa_cb_init() will never get called.
bfa_init(struct bfa_s *bfa)
bfa_iocfc_init(bfa);
* Use this function to initiate the IOC configuration setup. This function
* will return immediately.
* @param[in] bfa instance
bfa_start(struct bfa_s *bfa)
bfa_iocfc_start(bfa);
* Use this function to quiesce the IOC. This function will return immediately;
* when the IOC is actually stopped, bfad->comp will be set.
* @param[in] bfa - pointer to bfa_t.
* Special Considerations:
* bfad->comp can be set before or after bfa_stop() returns.
* In case of any failure, we could handle it automatically by doing a
* reset and then succeed the bfa_stop() call.
bfa_stop(struct bfa_s *bfa)
bfa_iocfc_stop(bfa);
bfa_comp_deq(struct bfa_s *bfa, struct list_head *comp_q)
INIT_LIST_HEAD(comp_q);
list_splice_tail_init(&bfa->comp_q, comp_q);
bfa_comp_process(struct bfa_s *bfa, struct list_head *comp_q)
struct list_head *qe;
struct list_head *qen;
struct bfa_cb_qe_s *hcb_qe;
list_for_each_safe(qe, qen, comp_q) {
hcb_qe = (struct bfa_cb_qe_s *) qe;
hcb_qe->cbfn(hcb_qe->cbarg, BFA_TRUE);
bfa_comp_free(struct bfa_s *bfa, struct list_head *comp_q)
struct list_head *qe;
struct bfa_cb_qe_s *hcb_qe;
while (!list_empty(comp_q)) {
bfa_q_deq(comp_q, &qe);
hcb_qe = (struct bfa_cb_qe_s *) qe;
hcb_qe->cbfn(hcb_qe->cbarg, BFA_FALSE);
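/*
 * A plausible caller-side pattern for the completion helpers above (sketch
 * only; bfad_lock is assumed to be the driver's per-instance lock): dequeue
 * under the lock, then run the callbacks outside of it.
 *
 *	struct list_head doneq;
 *	unsigned long flags;
 *
 *	spin_lock_irqsave(&bfad->bfad_lock, flags);
 *	bfa_comp_deq(bfa, &doneq);
 *	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
 *	bfa_comp_process(bfa, &doneq);	-- callbacks invoked with BFA_TRUE
 *
 * bfa_comp_free() is the teardown variant: it drains the list and invokes
 * each callback with BFA_FALSE so the entries are released without being
 * completed to the upper layers.
 */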
bfa_attach_fcs(struct bfa_s *bfa)
bfa->fcs = BFA_TRUE;
* Periodic timer heart beat from driver
bfa_timer_tick(struct bfa_s *bfa)
bfa_timer_beat(&bfa->timer_mod);
* Return the list of PCI vendor/device id lists supported by this
* BFA module.
bfa_get_pciids(struct bfa_pciid_s **pciids, int *npciids)
static struct bfa_pciid_s __pciids[] = {
{BFA_PCI_VENDOR_ID_BROCADE, BFA_PCI_DEVICE_ID_FC_8G2P},
{BFA_PCI_VENDOR_ID_BROCADE, BFA_PCI_DEVICE_ID_FC_8G1P},
{BFA_PCI_VENDOR_ID_BROCADE, BFA_PCI_DEVICE_ID_CT},
{BFA_PCI_VENDOR_ID_BROCADE, BFA_PCI_DEVICE_ID_CT_FC},
*npciids = sizeof(__pciids) / sizeof(__pciids[0]);
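/*
 * Illustrative consumer of bfa_get_pciids() (sketch; no field access shown
 * since only the table pointer and count are needed here):
 *
 *	struct bfa_pciid_s *ids;
 *	int nids;
 *
 *	bfa_get_pciids(&ids, &nids);
 *	-- ids[0..nids-1] now refers to the static vendor/device table above,
 *	-- e.g. for registering a pci_device_id table with the PCI core
 */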
* Use this function to query the default struct bfa_iocfc_cfg_s value (compiled
* into the BFA layer). The OS driver can then turn back and overwrite entries that
* have been configured by the user.
* @param[in] cfg - pointer to bfa_ioc_cfg_t
* Special Considerations:
bfa_cfg_get_default(struct bfa_iocfc_cfg_s *cfg)
cfg->fwcfg.num_fabrics = DEF_CFG_NUM_FABRICS;
cfg->fwcfg.num_lports = DEF_CFG_NUM_LPORTS;
cfg->fwcfg.num_rports = DEF_CFG_NUM_RPORTS;
cfg->fwcfg.num_ioim_reqs = DEF_CFG_NUM_IOIM_REQS;
cfg->fwcfg.num_tskim_reqs = DEF_CFG_NUM_TSKIM_REQS;
cfg->fwcfg.num_fcxp_reqs = DEF_CFG_NUM_FCXP_REQS;
cfg->fwcfg.num_uf_bufs = DEF_CFG_NUM_UF_BUFS;
cfg->fwcfg.num_cqs = DEF_CFG_NUM_CQS;
cfg->drvcfg.num_reqq_elems = DEF_CFG_NUM_REQQ_ELEMS;
cfg->drvcfg.num_rspq_elems = DEF_CFG_NUM_RSPQ_ELEMS;
cfg->drvcfg.num_sgpgs = DEF_CFG_NUM_SGPGS;
cfg->drvcfg.num_sboot_tgts = DEF_CFG_NUM_SBOOT_TGTS;
cfg->drvcfg.num_sboot_luns = DEF_CFG_NUM_SBOOT_LUNS;
cfg->drvcfg.path_tov = BFA_FCPIM_PATHTOV_DEF;
cfg->drvcfg.ioc_recover = BFA_FALSE;
cfg->drvcfg.delay_comp = BFA_FALSE;
bfa_cfg_get_min(struct bfa_iocfc_cfg_s *cfg)
bfa_cfg_get_default(cfg);
cfg->fwcfg.num_ioim_reqs = BFA_IOIM_MIN;
cfg->fwcfg.num_tskim_reqs = BFA_TSKIM_MIN;
cfg->fwcfg.num_fcxp_reqs = BFA_FCXP_MIN;
cfg->fwcfg.num_uf_bufs = BFA_UF_MIN;
cfg->fwcfg.num_rports = BFA_RPORT_MIN;
cfg->drvcfg.num_sgpgs = BFA_SGPG_MIN;
cfg->drvcfg.num_reqq_elems = BFA_REQQ_NELEMS_MIN;
cfg->drvcfg.num_rspq_elems = BFA_RSPQ_NELEMS_MIN;
cfg->drvcfg.min_cfg = BFA_TRUE;
bfa_get_attr(struct bfa_s *bfa, struct bfa_ioc_attr_s *ioc_attr)
bfa_ioc_get_attr(&bfa->ioc, ioc_attr);
* Retrieve firmware trace information on IOC failure.
bfa_debug_fwsave(struct bfa_s *bfa, void *trcdata, int *trclen)
return bfa_ioc_debug_fwsave(&bfa->ioc, trcdata, trclen);
* Clear the saved firmware trace information of an IOC.
bfa_debug_fwsave_clear(struct bfa_s *bfa)
bfa_ioc_debug_fwsave_clear(&bfa->ioc);
* Fetch firmware trace data.
* @param[in] bfa BFA instance
* @param[out] trcdata Firmware trace buffer
* @param[in,out] trclen Firmware trace buffer len
* @retval BFA_STATUS_OK Firmware trace is fetched.
* @retval BFA_STATUS_INPROGRESS Firmware trace fetch is in progress.
bfa_debug_fwtrc(struct bfa_s *bfa, void *trcdata, int *trclen)
return bfa_ioc_debug_fwtrc(&bfa->ioc, trcdata, trclen);
* Dump firmware memory.
* @param[in] bfa BFA instance
* @param[out] buf buffer for dump
* @param[in,out] offset smem offset to start read
* @param[in,out] buflen length of buffer
* @retval BFA_STATUS_OK Firmware memory is dumped.
* @retval BFA_STATUS_INPROGRESS Firmware memory dump is in progress.
bfa_debug_fwcore(struct bfa_s *bfa, void *buf, u32 *offset, int *buflen)
return bfa_ioc_debug_fwcore(&bfa->ioc, buf, offset, buflen);
* Reset hw semaphore & usage cnt regs and initialize.
bfa_chip_reset(struct bfa_s *bfa)
bfa_ioc_ownership_reset(&bfa->ioc);
bfa_ioc_pll_init(&bfa->ioc);
* Fetch firmware statistics data.
* @param[in] bfa BFA instance
* @param[out] data Firmware stats buffer
* @retval BFA_STATUS_OK Firmware stats are fetched.
bfa_fw_stats_get(struct bfa_s *bfa, void *data)
return bfa_ioc_fw_stats_get(&bfa->ioc, data);
bfa_fw_stats_clear(struct bfa_s *bfa)
return bfa_ioc_fw_stats_clear(&bfa->ioc);