2 * Linux network driver for Brocade Converged Network Adapter.
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms of the GNU General Public License (GPL) Version 2 as
6 * published by the Free Software Foundation
8 * This program is distributed in the hope that it will be useful, but
9 * WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
14 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
25 #define bna_ib_find_free_ibidx(_mask, _pos)\
28 while (((_pos) < (BFI_IBIDX_MAX_SEGSIZE)) &&\
29 ((1 << (_pos)) & (_mask)))\
33 #define bna_ib_count_ibidx(_mask, _count)\
37 while (pos < (BFI_IBIDX_MAX_SEGSIZE)) {\
38 if ((1 << pos) & (_mask))\
44 #define bna_ib_select_segpool(_count, _q_idx)\
48 for (i = 0; i < BFI_IBIDX_TOTAL_POOLS; i++) {\
49 if ((_count <= ibidx_pool[i].pool_entry_size)) {\
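/*
 * Illustrative walk-throughs for the three helpers above, not driver
 * code: with _mask = 0x0b (bits 0, 1 and 3 in use),
 * bna_ib_find_free_ibidx() leaves _pos = 2, the first free index,
 * or _pos = BFI_IBIDX_MAX_SEGSIZE when the mask is full.
 * bna_ib_count_ibidx() is a plain population count of _mask.
 * bna_ib_select_segpool() returns in _q_idx the first pool whose
 * pool_entry_size can hold _count indexes; a result of
 * BFI_IBIDX_TOTAL_POOLS means no pool is large enough.
 */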
56 struct bna_ibidx_pool {
60 init_ibidx_pool(ibidx_pool);
62 static struct bna_intr *
63 bna_intr_get(struct bna_ib_mod *ib_mod, enum bna_intr_type intr_type,
66 struct bna_intr *intr;
69 list_for_each(qe, &ib_mod->intr_active_q) {
70 intr = (struct bna_intr *)qe;
72 if ((intr->intr_type == intr_type) &&
73 (intr->vector == vector)) {
79 if (list_empty(&ib_mod->intr_free_q))
82 bfa_q_deq(&ib_mod->intr_free_q, &intr);
83 bfa_q_qe_init(&intr->qe);
86 intr->intr_type = intr_type;
87 intr->vector = vector;
89 list_add_tail(&intr->qe, &ib_mod->intr_active_q);
95 bna_intr_put(struct bna_ib_mod *ib_mod,
96 struct bna_intr *intr)
100 if (intr->ref_count == 0) {
103 bfa_q_qe_init(&intr->qe);
104 list_add_tail(&intr->qe, &ib_mod->intr_free_q);
109 bna_ib_mod_init(struct bna_ib_mod *ib_mod, struct bna *bna,
110 struct bna_res_info *res_info)
116 struct bna_doorbell_qset *qset;
121 ib_mod->ib = (struct bna_ib *)
122 res_info[BNA_RES_MEM_T_IB_ARRAY].res_u.mem_info.mdl[0].kva;
123 ib_mod->intr = (struct bna_intr *)
124 res_info[BNA_RES_MEM_T_INTR_ARRAY].res_u.mem_info.mdl[0].kva;
125 ib_mod->idx_seg = (struct bna_ibidx_seg *)
126 res_info[BNA_RES_MEM_T_IDXSEG_ARRAY].res_u.mem_info.mdl[0].kva;
128 INIT_LIST_HEAD(&ib_mod->ib_free_q);
129 INIT_LIST_HEAD(&ib_mod->intr_free_q);
130 INIT_LIST_HEAD(&ib_mod->intr_active_q);
132 for (i = 0; i < BFI_IBIDX_TOTAL_POOLS; i++)
133 INIT_LIST_HEAD(&ib_mod->ibidx_seg_pool[i]);
135 for (i = 0; i < BFI_MAX_IB; i++) {
136 ib_mod->ib[i].ib_id = i;
138 ib_mod->ib[i].ib_seg_host_addr_kva =
139 res_info[BNA_RES_MEM_T_IBIDX].res_u.mem_info.mdl[i].kva;
140 ib_mod->ib[i].ib_seg_host_addr.lsb =
141 res_info[BNA_RES_MEM_T_IBIDX].res_u.mem_info.mdl[i].dma.lsb;
142 ib_mod->ib[i].ib_seg_host_addr.msb =
143 res_info[BNA_RES_MEM_T_IBIDX].res_u.mem_info.mdl[i].dma.msb;
145 qset = (struct bna_doorbell_qset *)0;
146 off = (unsigned long)(&qset[i >> 1].ib0[(i & 0x1)
148 ib_mod->ib[i].door_bell.doorbell_addr = off +
149 BNA_GET_DOORBELL_BASE_ADDR(bna->pcidev.pci_bar_kva);
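/*
 * The (struct bna_doorbell_qset *)0 cast above is an offsetof-style
 * idiom: taking a member address against a NULL base yields the
 * member's byte offset in the qset array (IBs are paired two per
 * qset, hence i >> 1 and i & 0x1), which is then rebased onto the
 * doorbell window of the mapped PCI BAR.
 */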
151 bfa_q_qe_init(&ib_mod->ib[i].qe);
152 list_add_tail(&ib_mod->ib[i].qe, &ib_mod->ib_free_q);
154 bfa_q_qe_init(&ib_mod->intr[i].qe);
155 list_add_tail(&ib_mod->intr[i].qe, &ib_mod->intr_free_q);
160 for (i = 0; i < BFI_IBIDX_TOTAL_POOLS; i++) {
161 for (j = 0; j < ibidx_pool[i].pool_size; j++) {
162 bfa_q_qe_init(&ib_mod->idx_seg[count]);
163 ib_mod->idx_seg[count].ib_seg_size =
164 ibidx_pool[i].pool_entry_size;
165 ib_mod->idx_seg[count].ib_idx_tbl_offset = offset;
166 list_add_tail(&ib_mod->idx_seg[count].qe,
167 &ib_mod->ibidx_seg_pool[i]);
169 offset += ibidx_pool[i].pool_entry_size;
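/*
 * This carving lays the global index table out back to back: pool i
 * contributes pool_size segments of pool_entry_size indexes each,
 * with ib_idx_tbl_offset recording where each segment starts in the
 * shared table.
 */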
175 bna_ib_mod_uninit(struct bna_ib_mod *ib_mod)
179 struct list_head *qe;
182 list_for_each(qe, &ib_mod->ib_free_q)
186 list_for_each(qe, &ib_mod->intr_free_q)
189 for (i = 0; i < BFI_IBIDX_TOTAL_POOLS; i++) {
191 list_for_each(qe, &ib_mod->ibidx_seg_pool[i])
198 static struct bna_ib *
199 bna_ib_get(struct bna_ib_mod *ib_mod,
200 enum bna_intr_type intr_type,
204 struct bna_intr *intr;
206 if (intr_type == BNA_INTR_T_INTX)
207 vector = (1 << vector);
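/*
 * For INTx the caller passes a bit position; converting it to a mask
 * here lets INTx and MSI-X vectors share the same equality test in
 * bna_intr_get().
 */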
209 intr = bna_intr_get(ib_mod, intr_type, vector);
214 if (intr->ib->ref_count == BFI_IBIDX_MAX_SEGSIZE) {
215 bna_intr_put(ib_mod, intr);
218 intr->ib->ref_count++;
222 if (list_empty(&ib_mod->ib_free_q)) {
223 bna_intr_put(ib_mod, intr);
227 bfa_q_deq(&ib_mod->ib_free_q, &ib);
228 bfa_q_qe_init(&ib->qe);
238 ib->bna = ib_mod->bna;
244 bna_ib_put(struct bna_ib_mod *ib_mod, struct bna_ib *ib)
246 bna_intr_put(ib_mod, ib->intr);
250 if (ib->ref_count == 0) {
253 list_add_tail(&ib->qe, &ib_mod->ib_free_q);
257 /* Returns index offset - starting from 0 */
259 bna_ib_reserve_idx(struct bna_ib *ib)
261 struct bna_ib_mod *ib_mod = &ib->bna->ib_mod;
262 struct bna_ibidx_seg *idx_seg;
267 /* Find the first free index position */
268 bna_ib_find_free_ibidx(ib->idx_mask, idx);
269 if (idx == BFI_IBIDX_MAX_SEGSIZE)
273 * Calculate the total number of indexes held by this IB,
274 * including the index newly reserved above.
276 bna_ib_count_ibidx((ib->idx_mask | (1 << idx)), num_idx);
278 /* See if there is free space in the index segment held by this IB */
279 if (ib->idx_seg && (num_idx <= ib->idx_seg->ib_seg_size)) {
280 ib->idx_mask |= (1 << idx);
287 /* Allocate a new segment */
288 bna_ib_select_segpool(num_idx, q_idx);
290 if (q_idx == BFI_IBIDX_TOTAL_POOLS)
292 if (!list_empty(&ib_mod->ibidx_seg_pool[q_idx]))
296 bfa_q_deq(&ib_mod->ibidx_seg_pool[q_idx], &idx_seg);
297 bfa_q_qe_init(&idx_seg->qe);
299 /* Free the old segment */
301 bna_ib_select_segpool(ib->idx_seg->ib_seg_size, q_idx);
302 list_add_tail(&ib->idx_seg->qe, &ib_mod->ibidx_seg_pool[q_idx]);
305 ib->idx_seg = idx_seg;
307 ib->idx_mask |= (1 << idx);
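/*
 * Usage sketch (illustrative; the elided error paths above presumably
 * return -1 when no free index or no larger segment is available):
 *
 *	idx = bna_ib_reserve_idx(ib);
 *	if (idx < 0)
 *		return idx;
 *	...
 *	bna_ib_release_idx(ib, idx);
 */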
313 bna_ib_release_idx(struct bna_ib *ib, int idx)
315 struct bna_ib_mod *ib_mod = &ib->bna->ib_mod;
316 struct bna_ibidx_seg *idx_seg;
321 ib->idx_mask &= ~(1 << idx);
326 bna_ib_count_ibidx(ib->idx_mask, num_idx);
329 * Free the segment if there are no more indexes in the segment
333 bna_ib_select_segpool(ib->idx_seg->ib_seg_size, cur_q_idx);
334 list_add_tail(&ib->idx_seg->qe,
335 &ib_mod->ibidx_seg_pool[cur_q_idx]);
340 /* See if we can move to a smaller segment */
341 bna_ib_select_segpool(num_idx, new_q_idx);
342 bna_ib_select_segpool(ib->idx_seg->ib_seg_size, cur_q_idx);
343 while (new_q_idx < cur_q_idx) {
344 if (!list_empty(&ib_mod->ibidx_seg_pool[new_q_idx]))
348 if (new_q_idx < cur_q_idx) {
349 /* Select the new smaller segment */
350 bfa_q_deq(&ib_mod->ibidx_seg_pool[new_q_idx], &idx_seg);
351 bfa_q_qe_init(&idx_seg->qe);
352 /* Free the old segment */
353 list_add_tail(&ib->idx_seg->qe,
354 &ib_mod->ibidx_seg_pool[cur_q_idx]);
355 ib->idx_seg = idx_seg;
360 bna_ib_config(struct bna_ib *ib, struct bna_ib_config *ib_config)
365 ib->ib_config.coalescing_timeo = ib_config->coalescing_timeo;
366 ib->ib_config.interpkt_timeo = ib_config->interpkt_timeo;
367 ib->ib_config.interpkt_count = ib_config->interpkt_count;
368 ib->ib_config.ctrl_flags = ib_config->ctrl_flags;
370 ib->ib_config.ctrl_flags |= BFI_IB_CF_MASTER_ENABLE;
371 if (ib->intr->intr_type == BNA_INTR_T_MSIX)
372 ib->ib_config.ctrl_flags |= BFI_IB_CF_MSIX_MODE;
378 bna_ib_start(struct bna_ib *ib)
380 struct bna_ib_blk_mem ib_cfg;
381 struct bna_ib_blk_mem *ib_mem;
385 void __iomem *base_addr;
390 if (ib->start_count > 1)
393 ib_cfg.host_addr_lo = (u32)(ib->ib_seg_host_addr.lsb);
394 ib_cfg.host_addr_hi = (u32)(ib->ib_seg_host_addr.msb);
396 ib_cfg.clsc_n_ctrl_n_msix = (((u32)
397 ib->ib_config.coalescing_timeo << 16) |
398 ((u32)ib->ib_config.ctrl_flags << 8) |
400 ib_cfg.ipkt_n_ent_n_idxof =
402 (ib->ib_config.interpkt_timeo & 0xf) << 16) |
403 ((u32)ib->idx_seg->ib_seg_size << 8) |
404 (ib->idx_seg->ib_idx_tbl_offset);
405 ib_cfg.ipkt_cnt_cfg_n_unacked = ((u32)
406 ib->ib_config.interpkt_count << 24);
408 pg_num = BNA_GET_PAGE_NUM(HQM0_BLK_PG_NUM + ib->bna->port_num,
409 HQM_IB_RAM_BASE_OFFSET);
410 writel(pg_num, ib->bna->regs.page_addr);
412 base_addr = BNA_GET_MEM_BASE_ADDR(ib->bna->pcidev.pci_bar_kva,
413 HQM_IB_RAM_BASE_OFFSET);
415 ib_mem = (struct bna_ib_blk_mem *)0;
416 off = (unsigned long)&ib_mem[ib->ib_id].host_addr_lo;
417 writel(htonl(ib_cfg.host_addr_lo), base_addr + off);
419 off = (unsigned long)&ib_mem[ib->ib_id].host_addr_hi;
420 writel(htonl(ib_cfg.host_addr_hi), base_addr + off);
422 off = (unsigned long)&ib_mem[ib->ib_id].clsc_n_ctrl_n_msix;
423 writel(ib_cfg.clsc_n_ctrl_n_msix, base_addr + off);
425 off = (unsigned long)&ib_mem[ib->ib_id].ipkt_n_ent_n_idxof;
426 writel(ib_cfg.ipkt_n_ent_n_idxof, base_addr + off);
428 off = (unsigned long)&ib_mem[ib->ib_id].ipkt_cnt_cfg_n_unacked;
429 writel(ib_cfg.ipkt_cnt_cfg_n_unacked, base_addr + off);
431 ib->door_bell.doorbell_ack = BNA_DOORBELL_IB_INT_ACK(
432 (u32)ib->ib_config.coalescing_timeo, 0);
434 pg_num = BNA_GET_PAGE_NUM(HQM0_BLK_PG_NUM + ib->bna->port_num,
435 HQM_INDX_TBL_RAM_BASE_OFFSET);
436 writel(pg_num, ib->bna->regs.page_addr);
438 base_addr = BNA_GET_MEM_BASE_ADDR(ib->bna->pcidev.pci_bar_kva,
439 HQM_INDX_TBL_RAM_BASE_OFFSET);
440 for (i = 0; i < ib->idx_seg->ib_seg_size; i++) {
441 off = (unsigned long)
442 ((ib->idx_seg->ib_idx_tbl_offset + i) * BFI_IBIDX_SIZE);
443 writel(0, base_addr + off);
446 if (ib->intr->intr_type == BNA_INTR_T_INTX) {
447 bna_intx_disable(ib->bna, intx_mask);
448 intx_mask &= ~(ib->intr->vector);
449 bna_intx_enable(ib->bna, intx_mask);
454 bna_ib_stop(struct bna_ib *ib)
460 if (ib->start_count == 0) {
461 writel(BNA_DOORBELL_IB_INT_DISABLE,
462 ib->door_bell.doorbell_addr);
463 if (ib->intr->intr_type == BNA_INTR_T_INTX) {
464 bna_intx_disable(ib->bna, intx_mask);
465 intx_mask |= (ib->intr->vector);
466 bna_intx_enable(ib->bna, intx_mask);
472 bna_ib_fail(struct bna_ib *ib)
480 static void rxf_enable(struct bna_rxf *rxf);
481 static void rxf_disable(struct bna_rxf *rxf);
482 static void __rxf_config_set(struct bna_rxf *rxf);
483 static void __rxf_rit_set(struct bna_rxf *rxf);
484 static void __bna_rxf_stat_clr(struct bna_rxf *rxf);
485 static int rxf_process_packet_filter(struct bna_rxf *rxf);
486 static int rxf_clear_packet_filter(struct bna_rxf *rxf);
487 static void rxf_reset_packet_filter(struct bna_rxf *rxf);
488 static void rxf_cb_enabled(void *arg, int status);
489 static void rxf_cb_disabled(void *arg, int status);
490 static void bna_rxf_cb_stats_cleared(void *arg, int status);
491 static void __rxf_enable(struct bna_rxf *rxf);
492 static void __rxf_disable(struct bna_rxf *rxf);
494 bfa_fsm_state_decl(bna_rxf, stopped, struct bna_rxf,
496 bfa_fsm_state_decl(bna_rxf, start_wait, struct bna_rxf,
498 bfa_fsm_state_decl(bna_rxf, cam_fltr_mod_wait, struct bna_rxf,
500 bfa_fsm_state_decl(bna_rxf, started, struct bna_rxf,
502 bfa_fsm_state_decl(bna_rxf, cam_fltr_clr_wait, struct bna_rxf,
504 bfa_fsm_state_decl(bna_rxf, stop_wait, struct bna_rxf,
506 bfa_fsm_state_decl(bna_rxf, pause_wait, struct bna_rxf,
508 bfa_fsm_state_decl(bna_rxf, resume_wait, struct bna_rxf,
510 bfa_fsm_state_decl(bna_rxf, stat_clr_wait, struct bna_rxf,
513 static struct bfa_sm_table rxf_sm_table[] = {
514 {BFA_SM(bna_rxf_sm_stopped), BNA_RXF_STOPPED},
515 {BFA_SM(bna_rxf_sm_start_wait), BNA_RXF_START_WAIT},
516 {BFA_SM(bna_rxf_sm_cam_fltr_mod_wait), BNA_RXF_CAM_FLTR_MOD_WAIT},
517 {BFA_SM(bna_rxf_sm_started), BNA_RXF_STARTED},
518 {BFA_SM(bna_rxf_sm_cam_fltr_clr_wait), BNA_RXF_CAM_FLTR_CLR_WAIT},
519 {BFA_SM(bna_rxf_sm_stop_wait), BNA_RXF_STOP_WAIT},
520 {BFA_SM(bna_rxf_sm_pause_wait), BNA_RXF_PAUSE_WAIT},
521 {BFA_SM(bna_rxf_sm_resume_wait), BNA_RXF_RESUME_WAIT},
522 {BFA_SM(bna_rxf_sm_stat_clr_wait), BNA_RXF_STAT_CLR_WAIT}
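/*
 * bna_rxf_state_get() resolves the FSM's current function pointer to
 * a BNA_RXF_* state through this table. A minimal sketch of the
 * lookup, assuming the usual bfa_sm_to_state() helper from the bfa
 * state-machine support code:
 *
 *	while (sm_table[i].sm && sm_table[i].sm != sm)
 *		i++;
 *	return sm_table[i].state;
 */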
526 bna_rxf_sm_stopped_entry(struct bna_rxf *rxf)
528 call_rxf_stop_cbfn(rxf, BNA_CB_SUCCESS);
532 bna_rxf_sm_stopped(struct bna_rxf *rxf, enum bna_rxf_event event)
536 bfa_fsm_set_state(rxf, bna_rxf_sm_start_wait);
540 bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
547 case RXF_E_CAM_FLTR_MOD:
548 call_rxf_cam_fltr_cbfn(rxf, BNA_CB_SUCCESS);
553 case RXF_E_CAM_FLTR_RESP:
555 * These events are received due to flushing of mbox
562 rxf->rxf_oper_state = BNA_RXF_OPER_STATE_PAUSED;
563 call_rxf_pause_cbfn(rxf, BNA_CB_SUCCESS);
567 rxf->rxf_oper_state = BNA_RXF_OPER_STATE_RUNNING;
568 call_rxf_resume_cbfn(rxf, BNA_CB_SUCCESS);
572 bfa_sm_fault(rxf->rx->bna, event);
577 bna_rxf_sm_start_wait_entry(struct bna_rxf *rxf)
579 __rxf_config_set(rxf);
585 bna_rxf_sm_start_wait(struct bna_rxf *rxf, enum bna_rxf_event event)
590 * STOP originates from bnad. When this happens,
591 * it cannot be waiting for a filter update
593 call_rxf_start_cbfn(rxf, BNA_CB_INTERRUPT);
594 bfa_fsm_set_state(rxf, bna_rxf_sm_stop_wait);
598 call_rxf_cam_fltr_cbfn(rxf, BNA_CB_SUCCESS);
599 call_rxf_start_cbfn(rxf, BNA_CB_FAIL);
600 bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
603 case RXF_E_CAM_FLTR_MOD:
609 * Force rxf_process_packet_filter() to go through initial
612 if ((rxf->ucast_active_mac != NULL) &&
613 (rxf->ucast_pending_set == 0))
614 rxf->ucast_pending_set = 1;
616 if (rxf->rss_status == BNA_STATUS_T_ENABLED)
617 rxf->rxf_flags |= BNA_RXF_FL_RSS_CONFIG_PENDING;
619 rxf->rxf_flags |= BNA_RXF_FL_VLAN_CONFIG_PENDING;
621 bfa_fsm_set_state(rxf, bna_rxf_sm_cam_fltr_mod_wait);
626 rxf->rxf_flags |= BNA_RXF_FL_OPERSTATE_CHANGED;
630 bfa_sm_fault(rxf->rx->bna, event);
635 bna_rxf_sm_cam_fltr_mod_wait_entry(struct bna_rxf *rxf)
637 if (!rxf_process_packet_filter(rxf)) {
638 /* No more pending CAM entries to update */
639 bfa_fsm_set_state(rxf, bna_rxf_sm_started);
644 bna_rxf_sm_cam_fltr_mod_wait(struct bna_rxf *rxf, enum bna_rxf_event event)
649 * STOP originates from bnad. When this happens,
650 * it cannot be waiting for a filter update
652 call_rxf_start_cbfn(rxf, BNA_CB_INTERRUPT);
653 bfa_fsm_set_state(rxf, bna_rxf_sm_cam_fltr_clr_wait);
657 rxf_reset_packet_filter(rxf);
658 call_rxf_cam_fltr_cbfn(rxf, BNA_CB_SUCCESS);
659 call_rxf_start_cbfn(rxf, BNA_CB_FAIL);
660 bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
663 case RXF_E_CAM_FLTR_MOD:
667 case RXF_E_CAM_FLTR_RESP:
668 if (!rxf_process_packet_filter(rxf)) {
669 /* No more pending CAM entries to update */
670 call_rxf_cam_fltr_cbfn(rxf, BNA_CB_SUCCESS);
671 bfa_fsm_set_state(rxf, bna_rxf_sm_started);
677 rxf->rxf_flags |= BNA_RXF_FL_OPERSTATE_CHANGED;
681 bfa_sm_fault(rxf->rx->bna, event);
686 bna_rxf_sm_started_entry(struct bna_rxf *rxf)
688 call_rxf_start_cbfn(rxf, BNA_CB_SUCCESS);
690 if (rxf->rxf_flags & BNA_RXF_FL_OPERSTATE_CHANGED) {
691 if (rxf->rxf_oper_state == BNA_RXF_OPER_STATE_PAUSED)
692 bfa_fsm_send_event(rxf, RXF_E_PAUSE);
694 bfa_fsm_send_event(rxf, RXF_E_RESUME);
700 bna_rxf_sm_started(struct bna_rxf *rxf, enum bna_rxf_event event)
704 bfa_fsm_set_state(rxf, bna_rxf_sm_cam_fltr_clr_wait);
705 /* Hack to get the FSM to start clearing CAM entries */
706 bfa_fsm_send_event(rxf, RXF_E_CAM_FLTR_RESP);
710 rxf_reset_packet_filter(rxf);
711 bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
714 case RXF_E_CAM_FLTR_MOD:
715 bfa_fsm_set_state(rxf, bna_rxf_sm_cam_fltr_mod_wait);
719 bfa_fsm_set_state(rxf, bna_rxf_sm_pause_wait);
723 bfa_fsm_set_state(rxf, bna_rxf_sm_resume_wait);
727 bfa_sm_fault(rxf->rx->bna, event);
732 bna_rxf_sm_cam_fltr_clr_wait_entry(struct bna_rxf *rxf)
735 * Note: Do not add rxf_clear_packet_filter here.
736 * It would overrun the mbox with a second outstanding command on:
737 * cam_fltr_mod_wait -> cam_fltr_clr_wait on RXF_E_STOP event
742 bna_rxf_sm_cam_fltr_clr_wait(struct bna_rxf *rxf, enum bna_rxf_event event)
747 * FSM was in the process of stopping, initiated by
748 * bnad. When this happens, no one can be waiting for
749 * start or filter update
751 rxf_reset_packet_filter(rxf);
752 bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
755 case RXF_E_CAM_FLTR_RESP:
756 if (!rxf_clear_packet_filter(rxf)) {
757 /* No more pending CAM entries to clear */
758 bfa_fsm_set_state(rxf, bna_rxf_sm_stop_wait);
764 bfa_sm_fault(rxf->rx->bna, event);
769 bna_rxf_sm_stop_wait_entry(struct bna_rxf *rxf)
772 * NOTE: Do not add rxf_disable here.
773 * It would overrun the mbox with a second outstanding command on:
774 * start_wait -> stop_wait on RXF_E_STOP event
779 bna_rxf_sm_stop_wait(struct bna_rxf *rxf, enum bna_rxf_event event)
784 * FSM was in the process of stopping, initiated by
785 * bnad. When this happens, no one can be waiting for
786 * start or filter update
788 bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
793 * This event is received due to abrupt transition from
794 * bna_rxf_sm_start_wait state on receiving
802 * FSM was in the process of stopping, initiated by
803 * bnad. When this happens, no one can be waiting for
804 * start or filter update
806 bfa_fsm_set_state(rxf, bna_rxf_sm_stat_clr_wait);
810 rxf->rxf_oper_state = BNA_RXF_OPER_STATE_PAUSED;
814 rxf->rxf_oper_state = BNA_RXF_OPER_STATE_RUNNING;
818 bfa_sm_fault(rxf->rx->bna, event);
823 bna_rxf_sm_pause_wait_entry(struct bna_rxf *rxf)
826 ~(BNA_RXF_FL_OPERSTATE_CHANGED | BNA_RXF_FL_RXF_ENABLED);
831 bna_rxf_sm_pause_wait(struct bna_rxf *rxf, enum bna_rxf_event event)
836 * FSM was in the process of disabling rxf, initiated by
839 call_rxf_pause_cbfn(rxf, BNA_CB_FAIL);
840 bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
844 rxf->rxf_oper_state = BNA_RXF_OPER_STATE_PAUSED;
845 call_rxf_pause_cbfn(rxf, BNA_CB_SUCCESS);
846 bfa_fsm_set_state(rxf, bna_rxf_sm_started);
850 * Since PAUSE/RESUME can only be sent by bnad, we don't expect
851 * any other event during these states
854 bfa_sm_fault(rxf->rx->bna, event);
859 bna_rxf_sm_resume_wait_entry(struct bna_rxf *rxf)
861 rxf->rxf_flags &= ~(BNA_RXF_FL_OPERSTATE_CHANGED);
862 rxf->rxf_flags |= BNA_RXF_FL_RXF_ENABLED;
867 bna_rxf_sm_resume_wait(struct bna_rxf *rxf, enum bna_rxf_event event)
872 * FSM was in the process of disabling rxf, initiated by
875 call_rxf_resume_cbfn(rxf, BNA_CB_FAIL);
876 bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
880 rxf->rxf_oper_state = BNA_RXF_OPER_STATE_RUNNING;
881 call_rxf_resume_cbfn(rxf, BNA_CB_SUCCESS);
882 bfa_fsm_set_state(rxf, bna_rxf_sm_started);
886 * Since PAUSE/RESUME can only be sent by bnad, we don't expect
887 * any other event during these states
890 bfa_sm_fault(rxf->rx->bna, event);
895 bna_rxf_sm_stat_clr_wait_entry(struct bna_rxf *rxf)
897 __bna_rxf_stat_clr(rxf);
901 bna_rxf_sm_stat_clr_wait(struct bna_rxf *rxf, enum bna_rxf_event event)
905 case RXF_E_STAT_CLEARED:
906 bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
910 bfa_sm_fault(rxf->rx->bna, event);
915 __rxf_enable(struct bna_rxf *rxf)
917 struct bfi_ll_rxf_multi_req ll_req;
920 if (rxf->rxf_id < 32)
921 bm[0] = 1 << rxf->rxf_id;
923 bm[1] = 1 << (rxf->rxf_id - 32);
925 bfi_h2i_set(ll_req.mh, BFI_MC_LL, BFI_LL_H2I_RX_REQ, 0);
926 ll_req.rxf_id_mask[0] = htonl(bm[0]);
927 ll_req.rxf_id_mask[1] = htonl(bm[1]);
930 bna_mbox_qe_fill(&rxf->mbox_qe, &ll_req, sizeof(ll_req),
931 rxf_cb_enabled, rxf);
933 bna_mbox_send(rxf->rx->bna, &rxf->mbox_qe);
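/*
 * The 64 possible RX functions are split across two 32-bit mask
 * words: rxf_id 5 sets bit 5 of rxf_id_mask[0], while rxf_id 40 sets
 * bit 8 of rxf_id_mask[1]. htonl() converts each word to the
 * firmware's big-endian layout before the mailbox send. The same
 * encoding is used by __rxf_disable() and __bna_rxf_stat_clr() below.
 */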
937 __rxf_disable(struct bna_rxf *rxf)
939 struct bfi_ll_rxf_multi_req ll_req;
942 if (rxf->rxf_id < 32)
943 bm[0] = 1 << rxf->rxf_id;
945 bm[1] = 1 << (rxf->rxf_id - 32);
947 bfi_h2i_set(ll_req.mh, BFI_MC_LL, BFI_LL_H2I_RX_REQ, 0);
948 ll_req.rxf_id_mask[0] = htonl(bm[0]);
949 ll_req.rxf_id_mask[1] = htonl(bm[1]);
952 bna_mbox_qe_fill(&rxf->mbox_qe, &ll_req, sizeof(ll_req),
953 rxf_cb_disabled, rxf);
955 bna_mbox_send(rxf->rx->bna, &rxf->mbox_qe);
959 __rxf_config_set(struct bna_rxf *rxf)
962 struct bna_rss_mem *rss_mem;
963 struct bna_rx_fndb_ram *rx_fndb_ram;
964 struct bna *bna = rxf->rx->bna;
965 void __iomem *base_addr;
968 base_addr = BNA_GET_MEM_BASE_ADDR(bna->pcidev.pci_bar_kva,
969 RSS_TABLE_BASE_OFFSET);
971 rss_mem = (struct bna_rss_mem *)0;
973 /* Configure RSS if required */
974 if (rxf->ctrl_flags & BNA_RXF_CF_RSS_ENABLE) {
975 /* configure RSS Table */
976 writel(BNA_GET_PAGE_NUM(RAD0_MEM_BLK_BASE_PG_NUM +
977 bna->port_num, RSS_TABLE_BASE_OFFSET),
978 bna->regs.page_addr);
980 /* temporarily disable RSS while the hash key is written */
981 off = (unsigned long)&rss_mem[0].type_n_hash;
982 writel(0, base_addr + off);
984 for (i = 0; i < BFI_RSS_HASH_KEY_LEN; i++) {
985 off = (unsigned long)
986 &rss_mem[0].hash_key[(BFI_RSS_HASH_KEY_LEN - 1) - i];
987 writel(htonl(rxf->rss_cfg.toeplitz_hash_key[i]),
991 off = (unsigned long)&rss_mem[0].type_n_hash;
992 writel(rxf->rss_cfg.hash_type | rxf->rss_cfg.hash_mask,
997 writel(BNA_GET_PAGE_NUM(
998 LUT0_MEM_BLK_BASE_PG_NUM + (bna->port_num * 2),
999 RX_FNDB_RAM_BASE_OFFSET),
1000 bna->regs.page_addr);
1002 base_addr = BNA_GET_MEM_BASE_ADDR(bna->pcidev.pci_bar_kva,
1003 RX_FNDB_RAM_BASE_OFFSET);
1005 rx_fndb_ram = (struct bna_rx_fndb_ram *)0;
1007 /* We always use RSS table 0 */
1008 off = (unsigned long)&rx_fndb_ram[rxf->rxf_id].rss_prop;
1009 writel(rxf->ctrl_flags & BNA_RXF_CF_RSS_ENABLE,
1012 /* small/large buffer enable/disable */
1013 off = (unsigned long)&rx_fndb_ram[rxf->rxf_id].size_routing_props;
1014 writel((rxf->ctrl_flags & BNA_RXF_CF_SM_LG_RXQ) | 0x80,
1017 /* RIT offset, HDS forced offset, multicast RxQ Id */
1018 off = (unsigned long)&rx_fndb_ram[rxf->rxf_id].rit_hds_mcastq;
1019 writel((rxf->rit_segment->rit_offset << 16) |
1020 (rxf->forced_offset << 8) |
1021 (rxf->hds_cfg.hdr_type & BNA_HDS_FORCED) | rxf->mcast_rxq_id,
1025 * default vlan tag, default function enable, strip vlan bytes,
1026 * HDS type, header size
1029 off = (unsigned long)&rx_fndb_ram[rxf->rxf_id].control_flags;
1030 writel(((u32)rxf->default_vlan_tag << 16) |
1032 (BNA_RXF_CF_DEFAULT_VLAN |
1033 BNA_RXF_CF_DEFAULT_FUNCTION_ENABLE |
1034 BNA_RXF_CF_VLAN_STRIP)) |
1035 (rxf->hds_cfg.hdr_type & ~BNA_HDS_FORCED) |
1036 rxf->hds_cfg.header_size,
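/*
 * Summary of the per-function RAM words programmed above: rss_prop
 * selects RSS table 0, size_routing_props carries the small/large
 * buffer routing, rit_hds_mcastq packs the RIT offset (<< 16), the
 * HDS forced offset (<< 8) and the multicast RxQ id, and
 * control_flags packs the default VLAN tag (<< 16) with the
 * BNA_RXF_CF_* bits and the HDS type/header size.
 */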
1041 __rxf_vlan_filter_set(struct bna_rxf *rxf, enum bna_status status)
1043 struct bna *bna = rxf->rx->bna;
1046 writel(BNA_GET_PAGE_NUM(LUT0_MEM_BLK_BASE_PG_NUM +
1047 (bna->port_num * 2), VLAN_RAM_BASE_OFFSET),
1048 bna->regs.page_addr);
1050 if (status == BNA_STATUS_T_ENABLED) {
1051 /* enable VLAN filtering on this function */
1052 for (i = 0; i <= BFI_MAX_VLAN / 32; i++) {
1053 writel(rxf->vlan_filter_table[i],
1054 BNA_GET_VLAN_MEM_ENTRY_ADDR
1055 (bna->pcidev.pci_bar_kva, rxf->rxf_id,
1059 /* disable VLAN filtering on this function */
1060 for (i = 0; i <= BFI_MAX_VLAN / 32; i++) {
1062 BNA_GET_VLAN_MEM_ENTRY_ADDR
1063 (bna->pcidev.pci_bar_kva, rxf->rxf_id,
1070 __rxf_rit_set(struct bna_rxf *rxf)
1072 struct bna *bna = rxf->rx->bna;
1073 struct bna_rit_mem *rit_mem;
1075 void __iomem *base_addr;
1078 base_addr = BNA_GET_MEM_BASE_ADDR(bna->pcidev.pci_bar_kva,
1079 FUNCTION_TO_RXQ_TRANSLATE);
1081 rit_mem = (struct bna_rit_mem *)0;
1083 writel(BNA_GET_PAGE_NUM(RXA0_MEM_BLK_BASE_PG_NUM + bna->port_num,
1084 FUNCTION_TO_RXQ_TRANSLATE),
1085 bna->regs.page_addr);
1087 for (i = 0; i < rxf->rit_segment->rit_size; i++) {
1088 off = (unsigned long)&rit_mem[i + rxf->rit_segment->rit_offset];
1089 writel(rxf->rit_segment->rit[i].large_rxq_id << 6 |
1090 rxf->rit_segment->rit[i].small_rxq_id,
1096 __bna_rxf_stat_clr(struct bna_rxf *rxf)
1098 struct bfi_ll_stats_req ll_req;
1101 if (rxf->rxf_id < 32)
1102 bm[0] = 1 << rxf->rxf_id;
1104 bm[1] = 1 << (rxf->rxf_id - 32);
1106 bfi_h2i_set(ll_req.mh, BFI_MC_LL, BFI_LL_H2I_STATS_CLEAR_REQ, 0);
1107 ll_req.stats_mask = 0;
1108 ll_req.txf_id_mask[0] = 0;
1109 ll_req.txf_id_mask[1] = 0;
1111 ll_req.rxf_id_mask[0] = htonl(bm[0]);
1112 ll_req.rxf_id_mask[1] = htonl(bm[1]);
1114 bna_mbox_qe_fill(&rxf->mbox_qe, &ll_req, sizeof(ll_req),
1115 bna_rxf_cb_stats_cleared, rxf);
1116 bna_mbox_send(rxf->rx->bna, &rxf->mbox_qe);
1120 rxf_enable(struct bna_rxf *rxf)
1122 if (rxf->rxf_oper_state == BNA_RXF_OPER_STATE_PAUSED)
1123 bfa_fsm_send_event(rxf, RXF_E_STARTED);
1125 rxf->rxf_flags |= BNA_RXF_FL_RXF_ENABLED;
1131 rxf_cb_enabled(void *arg, int status)
1133 struct bna_rxf *rxf = (struct bna_rxf *)arg;
1135 bfa_q_qe_init(&rxf->mbox_qe.qe);
1136 bfa_fsm_send_event(rxf, RXF_E_STARTED);
1140 rxf_disable(struct bna_rxf *rxf)
1142 if (rxf->rxf_oper_state == BNA_RXF_OPER_STATE_PAUSED)
1143 bfa_fsm_send_event(rxf, RXF_E_STOPPED);
1145 rxf->rxf_flags &= ~BNA_RXF_FL_RXF_ENABLED;
1150 rxf_cb_disabled(void *arg, int status)
1152 struct bna_rxf *rxf = (struct bna_rxf *)arg;
1154 bfa_q_qe_init(&rxf->mbox_qe.qe);
1155 bfa_fsm_send_event(rxf, RXF_E_STOPPED);
1159 rxf_cb_cam_fltr_mbox_cmd(void *arg, int status)
1161 struct bna_rxf *rxf = (struct bna_rxf *)arg;
1163 bfa_q_qe_init(&rxf->mbox_qe.qe);
1165 bfa_fsm_send_event(rxf, RXF_E_CAM_FLTR_RESP);
1169 bna_rxf_cb_stats_cleared(void *arg, int status)
1171 struct bna_rxf *rxf = (struct bna_rxf *)arg;
1173 bfa_q_qe_init(&rxf->mbox_qe.qe);
1174 bfa_fsm_send_event(rxf, RXF_E_STAT_CLEARED);
1178 rxf_cam_mbox_cmd(struct bna_rxf *rxf, u8 cmd,
1179 const struct bna_mac *mac_addr)
1181 struct bfi_ll_mac_addr_req req;
1183 bfi_h2i_set(req.mh, BFI_MC_LL, cmd, 0);
1185 req.rxf_id = rxf->rxf_id;
1186 memcpy(&req.mac_addr, (void *)&mac_addr->addr, ETH_ALEN);
1188 bna_mbox_qe_fill(&rxf->mbox_qe, &req, sizeof(req),
1189 rxf_cb_cam_fltr_mbox_cmd, rxf);
1191 bna_mbox_send(rxf->rx->bna, &rxf->mbox_qe);
1195 rxf_process_packet_filter_mcast(struct bna_rxf *rxf)
1197 struct bna_mac *mac = NULL;
1198 struct list_head *qe;
1200 /* Add multicast entries */
1201 if (!list_empty(&rxf->mcast_pending_add_q)) {
1202 bfa_q_deq(&rxf->mcast_pending_add_q, &qe);
1204 mac = (struct bna_mac *)qe;
1205 rxf_cam_mbox_cmd(rxf, BFI_LL_H2I_MAC_MCAST_ADD_REQ, mac);
1206 list_add_tail(&mac->qe, &rxf->mcast_active_q);
1210 /* Delete multicast entries previously added */
1211 if (!list_empty(&rxf->mcast_pending_del_q)) {
1212 bfa_q_deq(&rxf->mcast_pending_del_q, &qe);
1214 mac = (struct bna_mac *)qe;
1215 rxf_cam_mbox_cmd(rxf, BFI_LL_H2I_MAC_MCAST_DEL_REQ, mac);
1216 bna_mcam_mod_mac_put(&rxf->rx->bna->mcam_mod, mac);
1224 rxf_process_packet_filter_vlan(struct bna_rxf *rxf)
1226 /* Apply the VLAN filter */
1227 if (rxf->rxf_flags & BNA_RXF_FL_VLAN_CONFIG_PENDING) {
1228 rxf->rxf_flags &= ~BNA_RXF_FL_VLAN_CONFIG_PENDING;
1229 if (!(rxf->rxmode_active & BNA_RXMODE_PROMISC) &&
1230 !(rxf->rxmode_active & BNA_RXMODE_DEFAULT))
1231 __rxf_vlan_filter_set(rxf, rxf->vlan_filter_status);
1234 /* Apply RSS configuration */
1235 if (rxf->rxf_flags & BNA_RXF_FL_RSS_CONFIG_PENDING) {
1236 rxf->rxf_flags &= ~BNA_RXF_FL_RSS_CONFIG_PENDING;
1237 if (rxf->rss_status == BNA_STATUS_T_DISABLED) {
1238 /* RSS is being disabled */
1239 rxf->ctrl_flags &= ~BNA_RXF_CF_RSS_ENABLE;
1241 __rxf_config_set(rxf);
1243 /* RSS is being enabled or reconfigured */
1244 rxf->ctrl_flags |= BNA_RXF_CF_RSS_ENABLE;
1246 __rxf_config_set(rxf);
1254 * Processes pending ucast, mcast entry addition/deletion and issues mailbox
1255 * command. Also processes pending filter configuration - promiscuous mode,
1256 * default mode, allmulti mode and issues mailbox command or directly applies
1260 rxf_process_packet_filter(struct bna_rxf *rxf)
1262 /* Set the default MAC first */
1263 if (rxf->ucast_pending_set > 0) {
1264 rxf_cam_mbox_cmd(rxf, BFI_LL_H2I_MAC_UCAST_SET_REQ,
1265 rxf->ucast_active_mac);
1266 rxf->ucast_pending_set--;
1270 if (rxf_process_packet_filter_ucast(rxf))
1273 if (rxf_process_packet_filter_mcast(rxf))
1276 if (rxf_process_packet_filter_promisc(rxf))
1279 if (rxf_process_packet_filter_default(rxf))
1282 if (rxf_process_packet_filter_allmulti(rxf))
1285 if (rxf_process_packet_filter_vlan(rxf))
1292 rxf_clear_packet_filter_mcast(struct bna_rxf *rxf)
1294 struct bna_mac *mac = NULL;
1295 struct list_head *qe;
1297 /* 3. delete pending mcast entries */
1298 if (!list_empty(&rxf->mcast_pending_del_q)) {
1299 bfa_q_deq(&rxf->mcast_pending_del_q, &qe);
1301 mac = (struct bna_mac *)qe;
1302 rxf_cam_mbox_cmd(rxf, BFI_LL_H2I_MAC_MCAST_DEL_REQ, mac);
1303 bna_mcam_mod_mac_put(&rxf->rx->bna->mcam_mod, mac);
1307 /* 4. clear active mcast entries; move them to pending_add_q */
1308 if (!list_empty(&rxf->mcast_active_q)) {
1309 bfa_q_deq(&rxf->mcast_active_q, &qe);
1311 mac = (struct bna_mac *)qe;
1312 rxf_cam_mbox_cmd(rxf, BFI_LL_H2I_MAC_MCAST_DEL_REQ, mac);
1313 list_add_tail(&mac->qe, &rxf->mcast_pending_add_q);
1321 * In the rxf stop path, processes the pending ucast/mcast delete queues and issues
1322 * the mailbox command. Moves the active ucast/mcast entries to pending add q,
1323 * so that they are added to CAM again in the rxf start path. Moves the current
1324 * filter settings - promiscuous, default, allmulti - to pending filter
1328 rxf_clear_packet_filter(struct bna_rxf *rxf)
1330 if (rxf_clear_packet_filter_ucast(rxf))
1333 if (rxf_clear_packet_filter_mcast(rxf))
1336 /* 5. clear active default MAC in the CAM */
1337 if (rxf->ucast_pending_set > 0)
1338 rxf->ucast_pending_set = 0;
1340 if (rxf_clear_packet_filter_promisc(rxf))
1343 if (rxf_clear_packet_filter_default(rxf))
1346 if (rxf_clear_packet_filter_allmulti(rxf))
1353 rxf_reset_packet_filter_mcast(struct bna_rxf *rxf)
1355 struct list_head *qe;
1356 struct bna_mac *mac;
1358 /* 3. Move active mcast entries to pending_add_q */
1359 while (!list_empty(&rxf->mcast_active_q)) {
1360 bfa_q_deq(&rxf->mcast_active_q, &qe);
1362 list_add_tail(qe, &rxf->mcast_pending_add_q);
1365 /* 4. Throw away mcast entries pending deletion */
1366 while (!list_empty(&rxf->mcast_pending_del_q)) {
1367 bfa_q_deq(&rxf->mcast_pending_del_q, &qe);
1369 mac = (struct bna_mac *)qe;
1370 bna_mcam_mod_mac_put(&rxf->rx->bna->mcam_mod, mac);
1375 * In the rxf fail path, throws away the ucast/mcast entries pending for
1376 * deletion, moves all active ucast/mcast entries to pending queue so that
1377 * they are added back to CAM in the rxf start path. Also moves the current
1378 * filter configuration to pending filter configuration.
1381 rxf_reset_packet_filter(struct bna_rxf *rxf)
1383 rxf_reset_packet_filter_ucast(rxf);
1385 rxf_reset_packet_filter_mcast(rxf);
1387 /* 5. Turn off ucast set flag */
1388 rxf->ucast_pending_set = 0;
1390 rxf_reset_packet_filter_promisc(rxf);
1392 rxf_reset_packet_filter_default(rxf);
1394 rxf_reset_packet_filter_allmulti(rxf);
1398 bna_rxf_init(struct bna_rxf *rxf,
1400 struct bna_rx_config *q_config)
1402 struct list_head *qe;
1403 struct bna_rxp *rxp;
1405 /* rxf_id is initialized during rx_mod init */
1408 INIT_LIST_HEAD(&rxf->ucast_pending_add_q);
1409 INIT_LIST_HEAD(&rxf->ucast_pending_del_q);
1410 rxf->ucast_pending_set = 0;
1411 INIT_LIST_HEAD(&rxf->ucast_active_q);
1412 rxf->ucast_active_mac = NULL;
1414 INIT_LIST_HEAD(&rxf->mcast_pending_add_q);
1415 INIT_LIST_HEAD(&rxf->mcast_pending_del_q);
1416 INIT_LIST_HEAD(&rxf->mcast_active_q);
1418 bfa_q_qe_init(&rxf->mbox_qe.qe);
1420 if (q_config->vlan_strip_status == BNA_STATUS_T_ENABLED)
1421 rxf->ctrl_flags |= BNA_RXF_CF_VLAN_STRIP;
1423 rxf->rxf_oper_state = (q_config->paused) ?
1424 BNA_RXF_OPER_STATE_PAUSED : BNA_RXF_OPER_STATE_RUNNING;
1426 bna_rxf_adv_init(rxf, rx, q_config);
1428 rxf->rit_segment = bna_rit_mod_seg_get(&rxf->rx->bna->rit_mod,
1429 q_config->num_paths);
1431 list_for_each(qe, &rx->rxp_q) {
1432 rxp = (struct bna_rxp *)qe;
1433 if (q_config->rxp_type == BNA_RXP_SINGLE)
1434 rxf->mcast_rxq_id = rxp->rxq.single.only->rxq_id;
1436 rxf->mcast_rxq_id = rxp->rxq.slr.large->rxq_id;
1440 rxf->vlan_filter_status = BNA_STATUS_T_DISABLED;
1441 memset(rxf->vlan_filter_table, 0,
1442 (sizeof(u32) * ((BFI_MAX_VLAN + 1) / 32)));
1444 bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
1448 bna_rxf_uninit(struct bna_rxf *rxf)
1450 struct bna_mac *mac;
1452 bna_rit_mod_seg_put(&rxf->rx->bna->rit_mod, rxf->rit_segment);
1453 rxf->rit_segment = NULL;
1455 rxf->ucast_pending_set = 0;
1457 while (!list_empty(&rxf->ucast_pending_add_q)) {
1458 bfa_q_deq(&rxf->ucast_pending_add_q, &mac);
1459 bfa_q_qe_init(&mac->qe);
1460 bna_ucam_mod_mac_put(&rxf->rx->bna->ucam_mod, mac);
1463 if (rxf->ucast_active_mac) {
1464 bfa_q_qe_init(&rxf->ucast_active_mac->qe);
1465 bna_ucam_mod_mac_put(&rxf->rx->bna->ucam_mod,
1466 rxf->ucast_active_mac);
1467 rxf->ucast_active_mac = NULL;
1470 while (!list_empty(&rxf->mcast_pending_add_q)) {
1471 bfa_q_deq(&rxf->mcast_pending_add_q, &mac);
1472 bfa_q_qe_init(&mac->qe);
1473 bna_mcam_mod_mac_put(&rxf->rx->bna->mcam_mod, mac);
1480 bna_rx_cb_rxf_started(struct bna_rx *rx, enum bna_cb_status status)
1482 bfa_fsm_send_event(rx, RX_E_RXF_STARTED);
1483 if (rx->rxf.rxf_id < 32)
1484 rx->bna->rx_mod.rxf_bmap[0] |= ((u32)1 << rx->rxf.rxf_id);
1486 rx->bna->rx_mod.rxf_bmap[1] |= ((u32)
1487 1 << (rx->rxf.rxf_id - 32));
1491 bna_rxf_start(struct bna_rxf *rxf)
1493 rxf->start_cbfn = bna_rx_cb_rxf_started;
1494 rxf->start_cbarg = rxf->rx;
1495 rxf->rxf_flags &= ~BNA_RXF_FL_FAILED;
1496 bfa_fsm_send_event(rxf, RXF_E_START);
1500 bna_rx_cb_rxf_stopped(struct bna_rx *rx, enum bna_cb_status status)
1502 bfa_fsm_send_event(rx, RX_E_RXF_STOPPED);
1503 if (rx->rxf.rxf_id < 32)
1504 rx->bna->rx_mod.rxf_bmap[0] &= ~(u32)1 << rx->rxf.rxf_id;
1506 rx->bna->rx_mod.rxf_bmap[1] &= ~(u32)
1507 1 << (rx->rxf.rxf_id - 32);
1511 bna_rxf_stop(struct bna_rxf *rxf)
1513 rxf->stop_cbfn = bna_rx_cb_rxf_stopped;
1514 rxf->stop_cbarg = rxf->rx;
1515 bfa_fsm_send_event(rxf, RXF_E_STOP);
1519 bna_rxf_fail(struct bna_rxf *rxf)
1521 rxf->rxf_flags |= BNA_RXF_FL_FAILED;
1522 bfa_fsm_send_event(rxf, RXF_E_FAIL);
1526 bna_rxf_state_get(struct bna_rxf *rxf)
1528 return bfa_sm_to_state(rxf_sm_table, rxf->fsm);
1532 bna_rx_ucast_set(struct bna_rx *rx, u8 *ucmac,
1533 void (*cbfn)(struct bnad *, struct bna_rx *,
1534 enum bna_cb_status))
1536 struct bna_rxf *rxf = &rx->rxf;
1538 if (rxf->ucast_active_mac == NULL) {
1539 rxf->ucast_active_mac =
1540 bna_ucam_mod_mac_get(&rxf->rx->bna->ucam_mod);
1541 if (rxf->ucast_active_mac == NULL)
1542 return BNA_CB_UCAST_CAM_FULL;
1543 bfa_q_qe_init(&rxf->ucast_active_mac->qe);
1546 memcpy(rxf->ucast_active_mac->addr, ucmac, ETH_ALEN);
1547 rxf->ucast_pending_set++;
1548 rxf->cam_fltr_cbfn = cbfn;
1549 rxf->cam_fltr_cbarg = rx->bna->bnad;
1551 bfa_fsm_send_event(rxf, RXF_E_CAM_FLTR_MOD);
1553 return BNA_CB_SUCCESS;
1557 bna_rx_mcast_add(struct bna_rx *rx, u8 *addr,
1558 void (*cbfn)(struct bnad *, struct bna_rx *,
1559 enum bna_cb_status))
1561 struct bna_rxf *rxf = &rx->rxf;
1562 struct list_head *qe;
1563 struct bna_mac *mac;
1565 /* Check if already added */
1566 list_for_each(qe, &rxf->mcast_active_q) {
1567 mac = (struct bna_mac *)qe;
1568 if (BNA_MAC_IS_EQUAL(mac->addr, addr)) {
1570 (*cbfn)(rx->bna->bnad, rx, BNA_CB_SUCCESS);
1571 return BNA_CB_SUCCESS;
1575 /* Check if pending addition */
1576 list_for_each(qe, &rxf->mcast_pending_add_q) {
1577 mac = (struct bna_mac *)qe;
1578 if (BNA_MAC_IS_EQUAL(mac->addr, addr)) {
1580 (*cbfn)(rx->bna->bnad, rx, BNA_CB_SUCCESS);
1581 return BNA_CB_SUCCESS;
1585 mac = bna_mcam_mod_mac_get(&rxf->rx->bna->mcam_mod);
1587 return BNA_CB_MCAST_LIST_FULL;
1588 bfa_q_qe_init(&mac->qe);
1589 memcpy(mac->addr, addr, ETH_ALEN);
1590 list_add_tail(&mac->qe, &rxf->mcast_pending_add_q);
1592 rxf->cam_fltr_cbfn = cbfn;
1593 rxf->cam_fltr_cbarg = rx->bna->bnad;
1595 bfa_fsm_send_event(rxf, RXF_E_CAM_FLTR_MOD);
1597 return BNA_CB_SUCCESS;
1601 bna_rx_mcast_listset(struct bna_rx *rx, int count, u8 *mclist,
1602 void (*cbfn)(struct bnad *, struct bna_rx *,
1603 enum bna_cb_status))
1605 struct bna_rxf *rxf = &rx->rxf;
1606 struct list_head list_head;
1607 struct list_head *qe;
1609 struct bna_mac *mac;
1610 struct bna_mac *mac1;
1613 int need_hw_config = 0;
1616 /* Allocate nodes */
1617 INIT_LIST_HEAD(&list_head);
1618 for (i = 0, mcaddr = mclist; i < count; i++) {
1619 mac = bna_mcam_mod_mac_get(&rxf->rx->bna->mcam_mod);
1622 bfa_q_qe_init(&mac->qe);
1623 memcpy(mac->addr, mcaddr, ETH_ALEN);
1624 list_add_tail(&mac->qe, &list_head);
1629 /* Schedule for addition */
1630 while (!list_empty(&list_head)) {
1631 bfa_q_deq(&list_head, &qe);
1632 mac = (struct bna_mac *)qe;
1633 bfa_q_qe_init(&mac->qe);
1637 /* Skip if already added */
1638 list_for_each(qe, &rxf->mcast_active_q) {
1639 mac1 = (struct bna_mac *)qe;
1640 if (BNA_MAC_IS_EQUAL(mac1->addr, mac->addr)) {
1641 bna_mcam_mod_mac_put(&rxf->rx->bna->mcam_mod,
1651 /* Skip if pending addition */
1652 list_for_each(qe, &rxf->mcast_pending_add_q) {
1653 mac1 = (struct bna_mac *)qe;
1654 if (BNA_MAC_IS_EQUAL(mac1->addr, mac->addr)) {
1655 bna_mcam_mod_mac_put(&rxf->rx->bna->mcam_mod,
1666 list_add_tail(&mac->qe, &rxf->mcast_pending_add_q);
1670 * Delete the entries that are in the pending_add_q but not
1673 while (!list_empty(&rxf->mcast_pending_add_q)) {
1674 bfa_q_deq(&rxf->mcast_pending_add_q, &qe);
1675 mac = (struct bna_mac *)qe;
1676 bfa_q_qe_init(&mac->qe);
1677 for (i = 0, mcaddr = mclist, delete = 1; i < count; i++) {
1678 if (BNA_MAC_IS_EQUAL(mcaddr, mac->addr)) {
1685 bna_mcam_mod_mac_put(&rxf->rx->bna->mcam_mod, mac);
1687 list_add_tail(&mac->qe, &list_head);
1689 while (!list_empty(&list_head)) {
1690 bfa_q_deq(&list_head, &qe);
1691 mac = (struct bna_mac *)qe;
1692 bfa_q_qe_init(&mac->qe);
1693 list_add_tail(&mac->qe, &rxf->mcast_pending_add_q);
1697 * Schedule entries for deletion that are in the active_q but not
1700 while (!list_empty(&rxf->mcast_active_q)) {
1701 bfa_q_deq(&rxf->mcast_active_q, &qe);
1702 mac = (struct bna_mac *)qe;
1703 bfa_q_qe_init(&mac->qe);
1704 for (i = 0, mcaddr = mclist, delete = 1; i < count; i++) {
1705 if (BNA_MAC_IS_EQUAL(mcaddr, mac->addr)) {
1712 list_add_tail(&mac->qe, &rxf->mcast_pending_del_q);
1715 list_add_tail(&mac->qe, &list_head);
1718 while (!list_empty(&list_head)) {
1719 bfa_q_deq(&list_head, &qe);
1720 mac = (struct bna_mac *)qe;
1721 bfa_q_qe_init(&mac->qe);
1722 list_add_tail(&mac->qe, &rxf->mcast_active_q);
1725 if (need_hw_config) {
1726 rxf->cam_fltr_cbfn = cbfn;
1727 rxf->cam_fltr_cbarg = rx->bna->bnad;
1728 bfa_fsm_send_event(rxf, RXF_E_CAM_FLTR_MOD);
1730 (*cbfn)(rx->bna->bnad, rx, BNA_CB_SUCCESS);
1732 return BNA_CB_SUCCESS;
1735 while (!list_empty(&list_head)) {
1736 bfa_q_deq(&list_head, &qe);
1737 mac = (struct bna_mac *)qe;
1738 bfa_q_qe_init(&mac->qe);
1739 bna_mcam_mod_mac_put(&rxf->rx->bna->mcam_mod, mac);
1742 return BNA_CB_MCAST_LIST_FULL;
1746 bna_rx_vlan_add(struct bna_rx *rx, int vlan_id)
1748 struct bna_rxf *rxf = &rx->rxf;
1749 int index = (vlan_id >> 5);
1750 int bit = (1 << (vlan_id & 0x1F));
1752 rxf->vlan_filter_table[index] |= bit;
1753 if (rxf->vlan_filter_status == BNA_STATUS_T_ENABLED) {
1754 rxf->rxf_flags |= BNA_RXF_FL_VLAN_CONFIG_PENDING;
1755 bfa_fsm_send_event(rxf, RXF_E_CAM_FLTR_MOD);
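/*
 * Worked example (illustrative): vlan_id = 100 gives index = 3 and
 * bit = 1 << 4, i.e. bit 4 of vlan_filter_table[3]. The whole VLAN id
 * space thus occupies (BFI_MAX_VLAN + 1) / 32 consecutive 32-bit
 * words, matching the memset() in bna_rxf_init().
 */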
1760 bna_rx_vlan_del(struct bna_rx *rx, int vlan_id)
1762 struct bna_rxf *rxf = &rx->rxf;
1763 int index = (vlan_id >> 5);
1764 int bit = (1 << (vlan_id & 0x1F));
1766 rxf->vlan_filter_table[index] &= ~bit;
1767 if (rxf->vlan_filter_status == BNA_STATUS_T_ENABLED) {
1768 rxf->rxf_flags |= BNA_RXF_FL_VLAN_CONFIG_PENDING;
1769 bfa_fsm_send_event(rxf, RXF_E_CAM_FLTR_MOD);
1776 #define RXQ_RCB_INIT(q, rxp, qdepth, bna, _id, unmapq_mem) do { \
1777 struct bna_doorbell_qset *_qset; \
1778 unsigned long off; \
1779 (q)->rcb->producer_index = (q)->rcb->consumer_index = 0; \
1780 (q)->rcb->q_depth = (qdepth); \
1781 (q)->rcb->unmap_q = unmapq_mem; \
1782 (q)->rcb->rxq = (q); \
1783 (q)->rcb->cq = &(rxp)->cq; \
1784 (q)->rcb->bnad = (bna)->bnad; \
1785 _qset = (struct bna_doorbell_qset *)0; \
1786 off = (unsigned long)&_qset[(q)->rxq_id].rxq[0]; \
1787 (q)->rcb->q_dbell = off + \
1788 BNA_GET_DOORBELL_BASE_ADDR((bna)->pcidev.pci_bar_kva); \
1789 (q)->rcb->id = _id; \
1792 #define BNA_GET_RXQS(qcfg) (((qcfg)->rxp_type == BNA_RXP_SINGLE) ? \
1793 (qcfg)->num_paths : ((qcfg)->num_paths * 2))
1795 #define SIZE_TO_PAGES(size) (((size) >> PAGE_SHIFT) + ((((size) &\
1796 (PAGE_SIZE - 1)) + (PAGE_SIZE - 1)) >> PAGE_SHIFT))
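/*
 * Illustrative: with 4 KiB pages, SIZE_TO_PAGES(4096) == 1 and
 * SIZE_TO_PAGES(4097) == 2, i.e. a round-up to whole pages. Likewise
 * BNA_GET_RXQS() doubles num_paths for every rxp type other than
 * BNA_RXP_SINGLE, since those paths carry two RxQs each.
 */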
1798 #define call_rx_stop_callback(rx, status) \
1799 if ((rx)->stop_cbfn) { \
1800 (*(rx)->stop_cbfn)((rx)->stop_cbarg, rx, (status)); \
1801 (rx)->stop_cbfn = NULL; \
1802 (rx)->stop_cbarg = NULL; \
1806 * Since rx_enable is a synchronous callback, there is no start_cbfn required.
1807 * Instead, we'll call bnad_rx_post(rxp) so that bnad can post the buffers
1811 #define call_rx_disable_cbfn(rx, status) \
1812 if ((rx)->disable_cbfn) { \
1813 (*(rx)->disable_cbfn)((rx)->disable_cbarg, \
1815 (rx)->disable_cbfn = NULL; \
1816 (rx)->disable_cbarg = NULL; \
1819 #define rxqs_reqd(type, num_rxqs) \
1820 (((type) == BNA_RXP_SINGLE) ? (num_rxqs) : ((num_rxqs) * 2))
1822 #define rx_ib_fail(rx) \
1824 struct bna_rxp *rxp; \
1825 struct list_head *qe; \
1826 list_for_each(qe, &(rx)->rxp_q) { \
1827 rxp = (struct bna_rxp *)qe; \
1828 bna_ib_fail(rxp->cq.ib); \
1832 static void __bna_multi_rxq_stop(struct bna_rxp *, u32 *);
1833 static void __bna_rxq_start(struct bna_rxq *rxq);
1834 static void __bna_cq_start(struct bna_cq *cq);
1835 static void bna_rit_create(struct bna_rx *rx);
1836 static void bna_rx_cb_multi_rxq_stopped(void *arg, int status);
1837 static void bna_rx_cb_rxq_stopped_all(void *arg);
1839 bfa_fsm_state_decl(bna_rx, stopped,
1840 struct bna_rx, enum bna_rx_event);
1841 bfa_fsm_state_decl(bna_rx, rxf_start_wait,
1842 struct bna_rx, enum bna_rx_event);
1843 bfa_fsm_state_decl(bna_rx, started,
1844 struct bna_rx, enum bna_rx_event);
1845 bfa_fsm_state_decl(bna_rx, rxf_stop_wait,
1846 struct bna_rx, enum bna_rx_event);
1847 bfa_fsm_state_decl(bna_rx, rxq_stop_wait,
1848 struct bna_rx, enum bna_rx_event);
1850 static const struct bfa_sm_table rx_sm_table[] = {
1851 {BFA_SM(bna_rx_sm_stopped), BNA_RX_STOPPED},
1852 {BFA_SM(bna_rx_sm_rxf_start_wait), BNA_RX_RXF_START_WAIT},
1853 {BFA_SM(bna_rx_sm_started), BNA_RX_STARTED},
1854 {BFA_SM(bna_rx_sm_rxf_stop_wait), BNA_RX_RXF_STOP_WAIT},
1855 {BFA_SM(bna_rx_sm_rxq_stop_wait), BNA_RX_RXQ_STOP_WAIT},
1858 static void bna_rx_sm_stopped_entry(struct bna_rx *rx)
1860 struct bna_rxp *rxp;
1861 struct list_head *qe_rxp;
1863 list_for_each(qe_rxp, &rx->rxp_q) {
1864 rxp = (struct bna_rxp *)qe_rxp;
1865 rx->rx_cleanup_cbfn(rx->bna->bnad, rxp->cq.ccb);
1868 call_rx_stop_callback(rx, BNA_CB_SUCCESS);
1871 static void bna_rx_sm_stopped(struct bna_rx *rx,
1872 enum bna_rx_event event)
1876 bfa_fsm_set_state(rx, bna_rx_sm_rxf_start_wait);
1879 call_rx_stop_callback(rx, BNA_CB_SUCCESS);
1885 bfa_sm_fault(rx->bna, event);
1891 static void bna_rx_sm_rxf_start_wait_entry(struct bna_rx *rx)
1893 struct bna_rxp *rxp;
1894 struct list_head *qe_rxp;
1895 struct bna_rxq *q0 = NULL, *q1 = NULL;
1900 list_for_each(qe_rxp, &rx->rxp_q) {
1901 rxp = (struct bna_rxp *)qe_rxp;
1902 bna_ib_start(rxp->cq.ib);
1903 GET_RXQS(rxp, q0, q1);
1904 q0->buffer_size = bna_port_mtu_get(&rx->bna->port);
1905 __bna_rxq_start(q0);
1906 rx->rx_post_cbfn(rx->bna->bnad, q0->rcb);
1908 __bna_rxq_start(q1);
1909 rx->rx_post_cbfn(rx->bna->bnad, q1->rcb);
1911 __bna_cq_start(&rxp->cq);
1914 bna_rxf_start(&rx->rxf);
1917 static void bna_rx_sm_rxf_start_wait(struct bna_rx *rx,
1918 enum bna_rx_event event)
1922 bfa_fsm_set_state(rx, bna_rx_sm_rxf_stop_wait);
1925 bfa_fsm_set_state(rx, bna_rx_sm_stopped);
1927 bna_rxf_fail(&rx->rxf);
1929 case RX_E_RXF_STARTED:
1930 bfa_fsm_set_state(rx, bna_rx_sm_started);
1933 bfa_sm_fault(rx->bna, event);
1939 bna_rx_sm_started_entry(struct bna_rx *rx)
1941 struct bna_rxp *rxp;
1942 struct list_head *qe_rxp;
1945 list_for_each(qe_rxp, &rx->rxp_q) {
1946 rxp = (struct bna_rxp *)qe_rxp;
1947 bna_ib_ack(&rxp->cq.ib->door_bell, 0);
1950 bna_llport_admin_up(&rx->bna->port.llport);
1954 bna_rx_sm_started(struct bna_rx *rx, enum bna_rx_event event)
1958 bna_llport_admin_down(&rx->bna->port.llport);
1959 bfa_fsm_set_state(rx, bna_rx_sm_stopped);
1961 bna_rxf_fail(&rx->rxf);
1964 bna_llport_admin_down(&rx->bna->port.llport);
1965 bfa_fsm_set_state(rx, bna_rx_sm_rxf_stop_wait);
1968 bfa_sm_fault(rx->bna, event);
1974 bna_rx_sm_rxf_stop_wait_entry(struct bna_rx *rx)
1976 bna_rxf_stop(&rx->rxf);
1980 bna_rx_sm_rxf_stop_wait(struct bna_rx *rx, enum bna_rx_event event)
1983 case RX_E_RXF_STOPPED:
1984 bfa_fsm_set_state(rx, bna_rx_sm_rxq_stop_wait);
1986 case RX_E_RXF_STARTED:
1988 * RxF was in the process of starting up when
1989 * RXF_E_STOP was issued. Ignore this event
1993 bfa_fsm_set_state(rx, bna_rx_sm_stopped);
1995 bna_rxf_fail(&rx->rxf);
1998 bfa_sm_fault(rx->bna, event);
2005 bna_rx_sm_rxq_stop_wait_entry(struct bna_rx *rx)
2007 struct bna_rxp *rxp = NULL;
2008 struct bna_rxq *q0 = NULL;
2009 struct bna_rxq *q1 = NULL;
2010 struct list_head *qe;
2011 u32 rxq_mask[2] = {0, 0};
2013 /* Only one call to multi-rxq-stop for all RXPs in this RX */
2014 bfa_wc_up(&rx->rxq_stop_wc);
2015 list_for_each(qe, &rx->rxp_q) {
2016 rxp = (struct bna_rxp *)qe;
2017 GET_RXQS(rxp, q0, q1);
2018 if (q0->rxq_id < 32)
2019 rxq_mask[0] |= ((u32)1 << q0->rxq_id);
2021 rxq_mask[1] |= ((u32)1 << (q0->rxq_id - 32));
2023 if (q1->rxq_id < 32)
2024 rxq_mask[0] |= ((u32)1 << q1->rxq_id);
2026 rxq_mask[1] |= ((u32)
2027 1 << (q1->rxq_id - 32));
2031 __bna_multi_rxq_stop(rxp, rxq_mask);
2035 bna_rx_sm_rxq_stop_wait(struct bna_rx *rx, enum bna_rx_event event)
2037 struct bna_rxp *rxp = NULL;
2038 struct list_head *qe;
2041 case RX_E_RXQ_STOPPED:
2042 list_for_each(qe, &rx->rxp_q) {
2043 rxp = (struct bna_rxp *)qe;
2044 bna_ib_stop(rxp->cq.ib);
2048 bfa_fsm_set_state(rx, bna_rx_sm_stopped);
2051 bfa_sm_fault(rx->bna, event);
2057 __bna_multi_rxq_stop(struct bna_rxp *rxp, u32 *rxq_id_mask)
2059 struct bfi_ll_q_stop_req ll_req;
2061 bfi_h2i_set(ll_req.mh, BFI_MC_LL, BFI_LL_H2I_RXQ_STOP_REQ, 0);
2062 ll_req.q_id_mask[0] = htonl(rxq_id_mask[0]);
2063 ll_req.q_id_mask[1] = htonl(rxq_id_mask[1]);
2064 bna_mbox_qe_fill(&rxp->mbox_qe, &ll_req, sizeof(ll_req),
2065 bna_rx_cb_multi_rxq_stopped, rxp);
2066 bna_mbox_send(rxp->rx->bna, &rxp->mbox_qe);
2070 __bna_rxq_start(struct bna_rxq *rxq)
2072 struct bna_rxtx_q_mem *q_mem;
2073 struct bna_rxq_mem rxq_cfg, *rxq_mem;
2074 struct bna_dma_addr cur_q_addr;
2075 /* struct bna_doorbell_qset *qset; */
2076 struct bna_qpt *qpt;
2078 struct bna *bna = rxq->rx->bna;
2079 void __iomem *base_addr;
2083 cur_q_addr = *((struct bna_dma_addr *)(qpt->kv_qpt_ptr));
2085 rxq_cfg.pg_tbl_addr_lo = qpt->hw_qpt_ptr.lsb;
2086 rxq_cfg.pg_tbl_addr_hi = qpt->hw_qpt_ptr.msb;
2087 rxq_cfg.cur_q_entry_lo = cur_q_addr.lsb;
2088 rxq_cfg.cur_q_entry_hi = cur_q_addr.msb;
2090 rxq_cfg.pg_cnt_n_prd_ptr = ((u32)qpt->page_count << 16) | 0x0;
2091 rxq_cfg.entry_n_pg_size = ((u32)(BFI_RXQ_WI_SIZE >> 2) << 16) |
2092 (qpt->page_size >> 2);
2093 rxq_cfg.sg_n_cq_n_cns_ptr =
2094 ((u32)(rxq->rxp->cq.cq_id & 0xff) << 16) | 0x0;
2095 rxq_cfg.buf_sz_n_q_state = ((u32)rxq->buffer_size << 16) |
2097 rxq_cfg.next_qid = 0x0 | (0x3 << 8);
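/*
 * Each *_n_* field above packs several hardware sub-fields into one
 * 32-bit register word; e.g. entry_n_pg_size holds the work-item size
 * in its upper half and the page size in its lower half, both
 * expressed in 32-bit words (hence the >> 2).
 */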
2099 /* Write the page number register */
2100 pg_num = BNA_GET_PAGE_NUM(HQM0_BLK_PG_NUM + bna->port_num,
2101 HQM_RXTX_Q_RAM_BASE_OFFSET);
2102 writel(pg_num, bna->regs.page_addr);
2105 base_addr = BNA_GET_MEM_BASE_ADDR(bna->pcidev.pci_bar_kva,
2106 HQM_RXTX_Q_RAM_BASE_OFFSET);
2108 q_mem = (struct bna_rxtx_q_mem *)0;
2109 rxq_mem = &q_mem[rxq->rxq_id].rxq;
2111 off = (unsigned long)&rxq_mem->pg_tbl_addr_lo;
2112 writel(htonl(rxq_cfg.pg_tbl_addr_lo), base_addr + off);
2114 off = (unsigned long)&rxq_mem->pg_tbl_addr_hi;
2115 writel(htonl(rxq_cfg.pg_tbl_addr_hi), base_addr + off);
2117 off = (unsigned long)&rxq_mem->cur_q_entry_lo;
2118 writel(htonl(rxq_cfg.cur_q_entry_lo), base_addr + off);
2120 off = (unsigned long)&rxq_mem->cur_q_entry_hi;
2121 writel(htonl(rxq_cfg.cur_q_entry_hi), base_addr + off);
2123 off = (unsigned long)&rxq_mem->pg_cnt_n_prd_ptr;
2124 writel(rxq_cfg.pg_cnt_n_prd_ptr, base_addr + off);
2126 off = (unsigned long)&rxq_mem->entry_n_pg_size;
2127 writel(rxq_cfg.entry_n_pg_size, base_addr + off);
2129 off = (unsigned long)&rxq_mem->sg_n_cq_n_cns_ptr;
2130 writel(rxq_cfg.sg_n_cq_n_cns_ptr, base_addr + off);
2132 off = (unsigned long)&rxq_mem->buf_sz_n_q_state;
2133 writel(rxq_cfg.buf_sz_n_q_state, base_addr + off);
2135 off = (unsigned long)&rxq_mem->next_qid;
2136 writel(rxq_cfg.next_qid, base_addr + off);
2138 rxq->rcb->producer_index = 0;
2139 rxq->rcb->consumer_index = 0;
2143 __bna_cq_start(struct bna_cq *cq)
2145 struct bna_cq_mem cq_cfg, *cq_mem;
2146 const struct bna_qpt *qpt;
2147 struct bna_dma_addr cur_q_addr;
2149 struct bna *bna = cq->rx->bna;
2150 void __iomem *base_addr;
2154 cur_q_addr = *((struct bna_dma_addr *)(qpt->kv_qpt_ptr));
2157 * Fill out structure, to be subsequently written to hardware
2160 cq_cfg.pg_tbl_addr_lo = qpt->hw_qpt_ptr.lsb;
2161 cq_cfg.pg_tbl_addr_hi = qpt->hw_qpt_ptr.msb;
2162 cq_cfg.cur_q_entry_lo = cur_q_addr.lsb;
2163 cq_cfg.cur_q_entry_hi = cur_q_addr.msb;
2165 cq_cfg.pg_cnt_n_prd_ptr = (qpt->page_count << 16) | 0x0;
2166 cq_cfg.entry_n_pg_size =
2167 ((u32)(BFI_CQ_WI_SIZE >> 2) << 16) | (qpt->page_size >> 2);
2168 cq_cfg.int_blk_n_cns_ptr = ((((u32)cq->ib_seg_offset) << 24) |
2169 ((u32)(cq->ib->ib_id & 0xff) << 16) | 0x0);
2170 cq_cfg.q_state = BNA_Q_IDLE_STATE;
2172 /* Write the page number register */
2173 pg_num = BNA_GET_PAGE_NUM(HQM0_BLK_PG_NUM + bna->port_num,
2174 HQM_CQ_RAM_BASE_OFFSET);
2176 writel(pg_num, bna->regs.page_addr);
2179 base_addr = BNA_GET_MEM_BASE_ADDR(bna->pcidev.pci_bar_kva,
2180 HQM_CQ_RAM_BASE_OFFSET);
2182 cq_mem = (struct bna_cq_mem *)0;
2184 off = (unsigned long)&cq_mem[cq->cq_id].pg_tbl_addr_lo;
2185 writel(htonl(cq_cfg.pg_tbl_addr_lo), base_addr + off);
2187 off = (unsigned long)&cq_mem[cq->cq_id].pg_tbl_addr_hi;
2188 writel(htonl(cq_cfg.pg_tbl_addr_hi), base_addr + off);
2190 off = (unsigned long)&cq_mem[cq->cq_id].cur_q_entry_lo;
2191 writel(htonl(cq_cfg.cur_q_entry_lo), base_addr + off);
2193 off = (unsigned long)&cq_mem[cq->cq_id].cur_q_entry_hi;
2194 writel(htonl(cq_cfg.cur_q_entry_hi), base_addr + off);
2196 off = (unsigned long)&cq_mem[cq->cq_id].pg_cnt_n_prd_ptr;
2197 writel(cq_cfg.pg_cnt_n_prd_ptr, base_addr + off);
2199 off = (unsigned long)&cq_mem[cq->cq_id].entry_n_pg_size;
2200 writel(cq_cfg.entry_n_pg_size, base_addr + off);
2202 off = (unsigned long)&cq_mem[cq->cq_id].int_blk_n_cns_ptr;
2203 writel(cq_cfg.int_blk_n_cns_ptr, base_addr + off);
2205 off = (unsigned long)&cq_mem[cq->cq_id].q_state;
2206 writel(cq_cfg.q_state, base_addr + off);
2208 cq->ccb->producer_index = 0;
2209 *(cq->ccb->hw_producer_index) = 0;
2213 bna_rit_create(struct bna_rx *rx)
2215 struct list_head *qe_rxp;
2217 struct bna_rxp *rxp;
2218 struct bna_rxq *q0 = NULL;
2219 struct bna_rxq *q1 = NULL;
2225 list_for_each(qe_rxp, &rx->rxp_q) {
2226 rxp = (struct bna_rxp *)qe_rxp;
2227 GET_RXQS(rxp, q0, q1);
2228 rx->rxf.rit_segment->rit[offset].large_rxq_id = q0->rxq_id;
2229 rx->rxf.rit_segment->rit[offset].small_rxq_id =
2230 (q1 ? q1->rxq_id : 0);
2236 _rx_can_satisfy(struct bna_rx_mod *rx_mod,
2237 struct bna_rx_config *rx_cfg)
2239 if ((rx_mod->rx_free_count == 0) ||
2240 (rx_mod->rxp_free_count == 0) ||
2241 (rx_mod->rxq_free_count == 0))
2244 if (rx_cfg->rxp_type == BNA_RXP_SINGLE) {
2245 if ((rx_mod->rxp_free_count < rx_cfg->num_paths) ||
2246 (rx_mod->rxq_free_count < rx_cfg->num_paths))
2249 if ((rx_mod->rxp_free_count < rx_cfg->num_paths) ||
2250 (rx_mod->rxq_free_count < (2 * rx_cfg->num_paths)))
2254 if (!bna_rit_mod_can_satisfy(&rx_mod->bna->rit_mod, rx_cfg->num_paths))
2260 static struct bna_rxq *
2261 _get_free_rxq(struct bna_rx_mod *rx_mod)
2263 struct bna_rxq *rxq = NULL;
2264 struct list_head *qe = NULL;
2266 bfa_q_deq(&rx_mod->rxq_free_q, &qe);
2268 rx_mod->rxq_free_count--;
2269 rxq = (struct bna_rxq *)qe;
2275 _put_free_rxq(struct bna_rx_mod *rx_mod, struct bna_rxq *rxq)
2277 bfa_q_qe_init(&rxq->qe);
2278 list_add_tail(&rxq->qe, &rx_mod->rxq_free_q);
2279 rx_mod->rxq_free_count++;
2282 static struct bna_rxp *
2283 _get_free_rxp(struct bna_rx_mod *rx_mod)
2285 struct list_head *qe = NULL;
2286 struct bna_rxp *rxp = NULL;
2288 bfa_q_deq(&rx_mod->rxp_free_q, &qe);
2290 rx_mod->rxp_free_count--;
2292 rxp = (struct bna_rxp *)qe;
2299 _put_free_rxp(struct bna_rx_mod *rx_mod, struct bna_rxp *rxp)
2301 bfa_q_qe_init(&rxp->qe);
2302 list_add_tail(&rxp->qe, &rx_mod->rxp_free_q);
2303 rx_mod->rxp_free_count++;
2306 static struct bna_rx *
2307 _get_free_rx(struct bna_rx_mod *rx_mod)
2309 struct list_head *qe = NULL;
2310 struct bna_rx *rx = NULL;
2312 bfa_q_deq(&rx_mod->rx_free_q, &qe);
2314 rx_mod->rx_free_count--;
2316 rx = (struct bna_rx *)qe;
2318 list_add_tail(&rx->qe, &rx_mod->rx_active_q);
2325 _put_free_rx(struct bna_rx_mod *rx_mod, struct bna_rx *rx)
2327 bfa_q_qe_init(&rx->qe);
2328 list_add_tail(&rx->qe, &rx_mod->rx_free_q);
2329 rx_mod->rx_free_count++;
2333 _rx_init(struct bna_rx *rx, struct bna *bna)
2338 INIT_LIST_HEAD(&rx->rxp_q);
2340 rx->rxq_stop_wc.wc_resume = bna_rx_cb_rxq_stopped_all;
2341 rx->rxq_stop_wc.wc_cbarg = rx;
2342 rx->rxq_stop_wc.wc_count = 0;
2344 rx->stop_cbfn = NULL;
2345 rx->stop_cbarg = NULL;
2349 _rxp_add_rxqs(struct bna_rxp *rxp,
2353 switch (rxp->type) {
2354 case BNA_RXP_SINGLE:
2355 rxp->rxq.single.only = q0;
2356 rxp->rxq.single.reserved = NULL;
2359 rxp->rxq.slr.large = q0;
2360 rxp->rxq.slr.small = q1;
2363 rxp->rxq.hds.data = q0;
2364 rxp->rxq.hds.hdr = q1;
2372 _rxq_qpt_init(struct bna_rxq *rxq,
2373 struct bna_rxp *rxp,
2376 struct bna_mem_descr *qpt_mem,
2377 struct bna_mem_descr *swqpt_mem,
2378 struct bna_mem_descr *page_mem)
2382 rxq->qpt.hw_qpt_ptr.lsb = qpt_mem->dma.lsb;
2383 rxq->qpt.hw_qpt_ptr.msb = qpt_mem->dma.msb;
2384 rxq->qpt.kv_qpt_ptr = qpt_mem->kva;
2385 rxq->qpt.page_count = page_count;
2386 rxq->qpt.page_size = page_size;
2388 rxq->rcb->sw_qpt = (void **) swqpt_mem->kva;
2390 for (i = 0; i < rxq->qpt.page_count; i++) {
2391 rxq->rcb->sw_qpt[i] = page_mem[i].kva;
2392 ((struct bna_dma_addr *)rxq->qpt.kv_qpt_ptr)[i].lsb =
2393 page_mem[i].dma.lsb;
2394 ((struct bna_dma_addr *)rxq->qpt.kv_qpt_ptr)[i].msb =
2395 page_mem[i].dma.msb;
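/*
 * After this loop the hardware-visible QPT (kv_qpt_ptr) lists the DMA
 * address of every queue page, while rcb->sw_qpt shadows the same
 * pages with their kernel virtual addresses so the driver can walk
 * the ring without reverse-translating DMA addresses.
 */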
2401 _rxp_cqpt_setup(struct bna_rxp *rxp,
2404 struct bna_mem_descr *qpt_mem,
2405 struct bna_mem_descr *swqpt_mem,
2406 struct bna_mem_descr *page_mem)
2410 rxp->cq.qpt.hw_qpt_ptr.lsb = qpt_mem->dma.lsb;
2411 rxp->cq.qpt.hw_qpt_ptr.msb = qpt_mem->dma.msb;
2412 rxp->cq.qpt.kv_qpt_ptr = qpt_mem->kva;
2413 rxp->cq.qpt.page_count = page_count;
2414 rxp->cq.qpt.page_size = page_size;
2416 rxp->cq.ccb->sw_qpt = (void **) swqpt_mem->kva;
2418 for (i = 0; i < rxp->cq.qpt.page_count; i++) {
2419 rxp->cq.ccb->sw_qpt[i] = page_mem[i].kva;
2421 ((struct bna_dma_addr *)rxp->cq.qpt.kv_qpt_ptr)[i].lsb =
2422 page_mem[i].dma.lsb;
2423 ((struct bna_dma_addr *)rxp->cq.qpt.kv_qpt_ptr)[i].msb =
2424 page_mem[i].dma.msb;
2430 _rx_add_rxp(struct bna_rx *rx, struct bna_rxp *rxp)
2432 list_add_tail(&rxp->qe, &rx->rxp_q);
2436 _init_rxmod_queues(struct bna_rx_mod *rx_mod)
2438 INIT_LIST_HEAD(&rx_mod->rx_free_q);
2439 INIT_LIST_HEAD(&rx_mod->rxq_free_q);
2440 INIT_LIST_HEAD(&rx_mod->rxp_free_q);
2441 INIT_LIST_HEAD(&rx_mod->rx_active_q);
2443 rx_mod->rx_free_count = 0;
2444 rx_mod->rxq_free_count = 0;
2445 rx_mod->rxp_free_count = 0;
2449 _rx_ctor(struct bna_rx *rx, int id)
2451 bfa_q_qe_init(&rx->qe);
2452 INIT_LIST_HEAD(&rx->rxp_q);
2455 rx->rxf.rxf_id = id;
2457 /* FIXME: mbox_qe ctor()?? */
2458 bfa_q_qe_init(&rx->mbox_qe.qe);
2460 rx->stop_cbfn = NULL;
2461 rx->stop_cbarg = NULL;
2465 bna_rx_cb_multi_rxq_stopped(void *arg, int status)
2467 struct bna_rxp *rxp = (struct bna_rxp *)arg;
2469 bfa_wc_down(&rxp->rx->rxq_stop_wc);
2473 bna_rx_cb_rxq_stopped_all(void *arg)
2475 struct bna_rx *rx = (struct bna_rx *)arg;
2477 bfa_fsm_send_event(rx, RX_E_RXQ_STOPPED);
2481 bna_rx_mod_cb_rx_stopped(void *arg, struct bna_rx *rx,
2482 enum bna_cb_status status)
2484 struct bna_rx_mod *rx_mod = (struct bna_rx_mod *)arg;
2486 bfa_wc_down(&rx_mod->rx_stop_wc);
2490 bna_rx_mod_cb_rx_stopped_all(void *arg)
2492 struct bna_rx_mod *rx_mod = (struct bna_rx_mod *)arg;
2494 if (rx_mod->stop_cbfn)
2495 rx_mod->stop_cbfn(&rx_mod->bna->port, BNA_CB_SUCCESS);
2496 rx_mod->stop_cbfn = NULL;
2500 bna_rx_start(struct bna_rx *rx)
2502 rx->rx_flags |= BNA_RX_F_PORT_ENABLED;
2503 if (rx->rx_flags & BNA_RX_F_ENABLE)
2504 bfa_fsm_send_event(rx, RX_E_START);
2508 bna_rx_stop(struct bna_rx *rx)
2510 rx->rx_flags &= ~BNA_RX_F_PORT_ENABLED;
2511 if (rx->fsm == (bfa_fsm_t) bna_rx_sm_stopped)
2512 bna_rx_mod_cb_rx_stopped(&rx->bna->rx_mod, rx, BNA_CB_SUCCESS);
2514 rx->stop_cbfn = bna_rx_mod_cb_rx_stopped;
2515 rx->stop_cbarg = &rx->bna->rx_mod;
2516 bfa_fsm_send_event(rx, RX_E_STOP);
2521 bna_rx_fail(struct bna_rx *rx)
2523 /* Indicate port is not enabled, and failed */
2524 rx->rx_flags &= ~BNA_RX_F_PORT_ENABLED;
2525 rx->rx_flags |= BNA_RX_F_PORT_FAILED;
2526 bfa_fsm_send_event(rx, RX_E_FAIL);
2530 bna_rx_mod_start(struct bna_rx_mod *rx_mod, enum bna_rx_type type)
2533 struct list_head *qe;
2535 rx_mod->flags |= BNA_RX_MOD_F_PORT_STARTED;
2536 if (type == BNA_RX_T_LOOPBACK)
2537 rx_mod->flags |= BNA_RX_MOD_F_PORT_LOOPBACK;
2539 list_for_each(qe, &rx_mod->rx_active_q) {
2540 rx = (struct bna_rx *)qe;
2541 if (rx->type == type)
void
bna_rx_mod_stop(struct bna_rx_mod *rx_mod, enum bna_rx_type type)
{
        struct bna_rx *rx;
        struct list_head *qe;

        rx_mod->flags &= ~BNA_RX_MOD_F_PORT_STARTED;
        rx_mod->flags &= ~BNA_RX_MOD_F_PORT_LOOPBACK;

        rx_mod->stop_cbfn = bna_port_cb_rx_stopped;

        /*
         * Before calling bna_rx_stop(), increment rx_stop_wc as many times
         * as we are going to call bna_rx_stop
         */
        list_for_each(qe, &rx_mod->rx_active_q) {
                rx = (struct bna_rx *)qe;
                if (rx->type == type)
                        bfa_wc_up(&rx_mod->rx_stop_wc);
        }
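        /* No Rx of this type is active: report the stop right away */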
        if (rx_mod->rx_stop_wc.wc_count == 0) {
                rx_mod->stop_cbfn(&rx_mod->bna->port, BNA_CB_SUCCESS);
                rx_mod->stop_cbfn = NULL;
                return;
        }

        list_for_each(qe, &rx_mod->rx_active_q) {
                rx = (struct bna_rx *)qe;
                if (rx->type == type)
                        bna_rx_stop(rx);
        }
}
void
bna_rx_mod_fail(struct bna_rx_mod *rx_mod)
{
        struct bna_rx *rx;
        struct list_head *qe;

        rx_mod->flags &= ~BNA_RX_MOD_F_PORT_STARTED;
        rx_mod->flags &= ~BNA_RX_MOD_F_PORT_LOOPBACK;

        list_for_each(qe, &rx_mod->rx_active_q) {
                rx = (struct bna_rx *)qe;
                bna_rx_fail(rx);
        }
}
void bna_rx_mod_init(struct bna_rx_mod *rx_mod, struct bna *bna,
                struct bna_res_info *res_info)
{
        int index;
        struct bna_rx *rx_ptr;
        struct bna_rxp *rxp_ptr;
        struct bna_rxq *rxq_ptr;

        rx_mod->bna = bna;
        rx_mod->flags = 0;

        rx_mod->rx = (struct bna_rx *)
                res_info[BNA_RES_MEM_T_RX_ARRAY].res_u.mem_info.mdl[0].kva;
        rx_mod->rxp = (struct bna_rxp *)
                res_info[BNA_RES_MEM_T_RXP_ARRAY].res_u.mem_info.mdl[0].kva;
        rx_mod->rxq = (struct bna_rxq *)
                res_info[BNA_RES_MEM_T_RXQ_ARRAY].res_u.mem_info.mdl[0].kva;

        /* Initialize the queues */
        _init_rxmod_queues(rx_mod);

        /* Build RX queues */
        for (index = 0; index < BFI_MAX_RXQ; index++) {
                rx_ptr = &rx_mod->rx[index];
                _rx_ctor(rx_ptr, index);
                list_add_tail(&rx_ptr->qe, &rx_mod->rx_free_q);
                rx_mod->rx_free_count++;
        }

        /* Build RX-path queue */
        for (index = 0; index < BFI_MAX_RXQ; index++) {
                rxp_ptr = &rx_mod->rxp[index];
                rxp_ptr->cq.cq_id = index;
                bfa_q_qe_init(&rxp_ptr->qe);
                list_add_tail(&rxp_ptr->qe, &rx_mod->rxp_free_q);
                rx_mod->rxp_free_count++;
        }

        /* Build RXQ queue */
        for (index = 0; index < BFI_MAX_RXQ; index++) {
                rxq_ptr = &rx_mod->rxq[index];
                rxq_ptr->rxq_id = index;

                bfa_q_qe_init(&rxq_ptr->qe);
                list_add_tail(&rxq_ptr->qe, &rx_mod->rxq_free_q);
                rx_mod->rxq_free_count++;
        }

        rx_mod->rx_stop_wc.wc_resume = bna_rx_mod_cb_rx_stopped_all;
        rx_mod->rx_stop_wc.wc_cbarg = rx_mod;
        rx_mod->rx_stop_wc.wc_count = 0;
}
void
bna_rx_mod_uninit(struct bna_rx_mod *rx_mod)
{
        struct list_head *qe;
        int i;

        /* Free-list entries live in module-owned arrays; just count them */
        i = 0;
        list_for_each(qe, &rx_mod->rx_free_q)
                i++;

        i = 0;
        list_for_each(qe, &rx_mod->rxp_free_q)
                i++;

        i = 0;
        list_for_each(qe, &rx_mod->rxq_free_q)
                i++;

        rx_mod->bna = NULL;
}

int
bna_rx_state_get(struct bna_rx *rx)
{
        return bfa_sm_to_state(rx_sm_table, rx->fsm);
}
void
bna_rx_res_req(struct bna_rx_config *q_cfg, struct bna_res_info *res_info)
{
        u32 cq_size, hq_size, dq_size;
        u32 cpage_count, hpage_count, dpage_count;
        struct bna_mem_info *mem_info;
        u32 cq_depth;
        u32 hq_depth;
        u32 dq_depth;
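        /*
         * Data queue depth comes from the configuration; a header queue
         * (and hence extra pages) is needed only for non-SINGLE rx-path
         * types, and the completion queue must be deep enough for both.
         */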
        dq_depth = q_cfg->q_depth;
        hq_depth = ((q_cfg->rxp_type == BNA_RXP_SINGLE) ? 0 : q_cfg->q_depth);
        cq_depth = dq_depth + hq_depth;

        BNA_TO_POWER_OF_2_HIGH(cq_depth);
        cq_size = cq_depth * BFI_CQ_WI_SIZE;
        cq_size = ALIGN(cq_size, PAGE_SIZE);
        cpage_count = SIZE_TO_PAGES(cq_size);

        BNA_TO_POWER_OF_2_HIGH(dq_depth);
        dq_size = dq_depth * BFI_RXQ_WI_SIZE;
        dq_size = ALIGN(dq_size, PAGE_SIZE);
        dpage_count = SIZE_TO_PAGES(dq_size);

        if (BNA_RXP_SINGLE != q_cfg->rxp_type) {
                BNA_TO_POWER_OF_2_HIGH(hq_depth);
                hq_size = hq_depth * BFI_RXQ_WI_SIZE;
                hq_size = ALIGN(hq_size, PAGE_SIZE);
                hpage_count = SIZE_TO_PAGES(hq_size);
        } else
                hpage_count = 0;

        /* CCB structures */
        res_info[BNA_RX_RES_MEM_T_CCB].res_type = BNA_RES_T_MEM;
        mem_info = &res_info[BNA_RX_RES_MEM_T_CCB].res_u.mem_info;
        mem_info->mem_type = BNA_MEM_T_KVA;
        mem_info->len = sizeof(struct bna_ccb);
        mem_info->num = q_cfg->num_paths;

        /* RCB structures */
        res_info[BNA_RX_RES_MEM_T_RCB].res_type = BNA_RES_T_MEM;
        mem_info = &res_info[BNA_RX_RES_MEM_T_RCB].res_u.mem_info;
        mem_info->mem_type = BNA_MEM_T_KVA;
        mem_info->len = sizeof(struct bna_rcb);
        mem_info->num = BNA_GET_RXQS(q_cfg);

        /* Completion QPT */
        res_info[BNA_RX_RES_MEM_T_CQPT].res_type = BNA_RES_T_MEM;
        mem_info = &res_info[BNA_RX_RES_MEM_T_CQPT].res_u.mem_info;
        mem_info->mem_type = BNA_MEM_T_DMA;
        mem_info->len = cpage_count * sizeof(struct bna_dma_addr);
        mem_info->num = q_cfg->num_paths;

        /* Completion s/w QPT */
        res_info[BNA_RX_RES_MEM_T_CSWQPT].res_type = BNA_RES_T_MEM;
        mem_info = &res_info[BNA_RX_RES_MEM_T_CSWQPT].res_u.mem_info;
        mem_info->mem_type = BNA_MEM_T_KVA;
        mem_info->len = cpage_count * sizeof(void *);
        mem_info->num = q_cfg->num_paths;

        /* Completion QPT pages */
        res_info[BNA_RX_RES_MEM_T_CQPT_PAGE].res_type = BNA_RES_T_MEM;
        mem_info = &res_info[BNA_RX_RES_MEM_T_CQPT_PAGE].res_u.mem_info;
        mem_info->mem_type = BNA_MEM_T_DMA;
        mem_info->len = PAGE_SIZE;
        mem_info->num = cpage_count * q_cfg->num_paths;

        /* Data QPT */
        res_info[BNA_RX_RES_MEM_T_DQPT].res_type = BNA_RES_T_MEM;
        mem_info = &res_info[BNA_RX_RES_MEM_T_DQPT].res_u.mem_info;
        mem_info->mem_type = BNA_MEM_T_DMA;
        mem_info->len = dpage_count * sizeof(struct bna_dma_addr);
        mem_info->num = q_cfg->num_paths;

        /* Data s/w QPT */
        res_info[BNA_RX_RES_MEM_T_DSWQPT].res_type = BNA_RES_T_MEM;
        mem_info = &res_info[BNA_RX_RES_MEM_T_DSWQPT].res_u.mem_info;
        mem_info->mem_type = BNA_MEM_T_KVA;
        mem_info->len = dpage_count * sizeof(void *);
        mem_info->num = q_cfg->num_paths;

        /* Data QPT pages */
        res_info[BNA_RX_RES_MEM_T_DPAGE].res_type = BNA_RES_T_MEM;
        mem_info = &res_info[BNA_RX_RES_MEM_T_DPAGE].res_u.mem_info;
        mem_info->mem_type = BNA_MEM_T_DMA;
        mem_info->len = PAGE_SIZE;
        mem_info->num = dpage_count * q_cfg->num_paths;

        /* Header QPT */
        res_info[BNA_RX_RES_MEM_T_HQPT].res_type = BNA_RES_T_MEM;
        mem_info = &res_info[BNA_RX_RES_MEM_T_HQPT].res_u.mem_info;
        mem_info->mem_type = BNA_MEM_T_DMA;
        mem_info->len = hpage_count * sizeof(struct bna_dma_addr);
        mem_info->num = (hpage_count ? q_cfg->num_paths : 0);

        /* Header s/w QPT */
        res_info[BNA_RX_RES_MEM_T_HSWQPT].res_type = BNA_RES_T_MEM;
        mem_info = &res_info[BNA_RX_RES_MEM_T_HSWQPT].res_u.mem_info;
        mem_info->mem_type = BNA_MEM_T_KVA;
        mem_info->len = hpage_count * sizeof(void *);
        mem_info->num = (hpage_count ? q_cfg->num_paths : 0);

        /* Header QPT pages */
        res_info[BNA_RX_RES_MEM_T_HPAGE].res_type = BNA_RES_T_MEM;
        mem_info = &res_info[BNA_RX_RES_MEM_T_HPAGE].res_u.mem_info;
        mem_info->mem_type = BNA_MEM_T_DMA;
        mem_info->len = (hpage_count ? PAGE_SIZE : 0);
        mem_info->num = (hpage_count ? (hpage_count * q_cfg->num_paths) : 0);

        /* Rx interrupts */
        res_info[BNA_RX_RES_T_INTR].res_type = BNA_RES_T_INTR;
        res_info[BNA_RX_RES_T_INTR].res_u.intr_info.intr_type = BNA_INTR_T_MSIX;
        res_info[BNA_RX_RES_T_INTR].res_u.intr_info.num = q_cfg->num_paths;
}
struct bna_rx *
bna_rx_create(struct bna *bna, struct bnad *bnad,
                struct bna_rx_config *rx_cfg,
                struct bna_rx_event_cbfn *rx_cbfn,
                struct bna_res_info *res_info,
                void *priv)
{
        struct bna_rx_mod *rx_mod = &bna->rx_mod;
        struct bna_rx *rx;
        struct bna_rxp *rxp;
        struct bna_rxq *q0;
        struct bna_rxq *q1;
        struct bna_intr_info *intr_info;
        int page_count;
        struct bna_mem_descr *ccb_mem;
        struct bna_mem_descr *rcb_mem;
        struct bna_mem_descr *unmapq_mem;
        struct bna_mem_descr *cqpt_mem;
        struct bna_mem_descr *cswqpt_mem;
        struct bna_mem_descr *cpage_mem;
        struct bna_mem_descr *hqpt_mem;         /* Header/Small Q qpt */
        struct bna_mem_descr *dqpt_mem;         /* Data/Large Q qpt */
        struct bna_mem_descr *hsqpt_mem;        /* s/w qpt for hdr */
        struct bna_mem_descr *dsqpt_mem;        /* s/w qpt for data */
        struct bna_mem_descr *hpage_mem;        /* hdr page mem */
        struct bna_mem_descr *dpage_mem;        /* data page mem */
        int i, cpage_idx = 0, dpage_idx = 0, hpage_idx = 0, ret;
        int dpage_count, hpage_count, rcb_idx;
        struct bna_ib_config ibcfg;

        /* Fail if we don't have enough RXPs, RXQs */
        if (!_rx_can_satisfy(rx_mod, rx_cfg))
                return NULL;
        /* Initialize resource pointers */
        intr_info = &res_info[BNA_RX_RES_T_INTR].res_u.intr_info;
        ccb_mem = &res_info[BNA_RX_RES_MEM_T_CCB].res_u.mem_info.mdl[0];
        rcb_mem = &res_info[BNA_RX_RES_MEM_T_RCB].res_u.mem_info.mdl[0];
        unmapq_mem = &res_info[BNA_RX_RES_MEM_T_UNMAPQ].res_u.mem_info.mdl[0];
        cqpt_mem = &res_info[BNA_RX_RES_MEM_T_CQPT].res_u.mem_info.mdl[0];
        cswqpt_mem = &res_info[BNA_RX_RES_MEM_T_CSWQPT].res_u.mem_info.mdl[0];
        cpage_mem = &res_info[BNA_RX_RES_MEM_T_CQPT_PAGE].res_u.mem_info.mdl[0];
        hqpt_mem = &res_info[BNA_RX_RES_MEM_T_HQPT].res_u.mem_info.mdl[0];
        dqpt_mem = &res_info[BNA_RX_RES_MEM_T_DQPT].res_u.mem_info.mdl[0];
        hsqpt_mem = &res_info[BNA_RX_RES_MEM_T_HSWQPT].res_u.mem_info.mdl[0];
        dsqpt_mem = &res_info[BNA_RX_RES_MEM_T_DSWQPT].res_u.mem_info.mdl[0];
        hpage_mem = &res_info[BNA_RX_RES_MEM_T_HPAGE].res_u.mem_info.mdl[0];
        dpage_mem = &res_info[BNA_RX_RES_MEM_T_DPAGE].res_u.mem_info.mdl[0];

        /* Compute q depth & page count */
        page_count = res_info[BNA_RX_RES_MEM_T_CQPT_PAGE].res_u.mem_info.num /
                        rx_cfg->num_paths;

        dpage_count = res_info[BNA_RX_RES_MEM_T_DPAGE].res_u.mem_info.num /
                        rx_cfg->num_paths;

        hpage_count = res_info[BNA_RX_RES_MEM_T_HPAGE].res_u.mem_info.num /
                        rx_cfg->num_paths;
        /* Get RX pointer */
        rx = _get_free_rx(rx_mod);
        rx->bna = bna;
        rx->priv = priv;
        rx->type = rx_cfg->rx_type;

        rx->rcb_setup_cbfn = rx_cbfn->rcb_setup_cbfn;
        rx->rcb_destroy_cbfn = rx_cbfn->rcb_destroy_cbfn;
        rx->ccb_setup_cbfn = rx_cbfn->ccb_setup_cbfn;
        rx->ccb_destroy_cbfn = rx_cbfn->ccb_destroy_cbfn;
        /* Following callbacks are mandatory */
        rx->rx_cleanup_cbfn = rx_cbfn->rx_cleanup_cbfn;
        rx->rx_post_cbfn = rx_cbfn->rx_post_cbfn;
        if (rx->bna->rx_mod.flags & BNA_RX_MOD_F_PORT_STARTED) {
                switch (rx->type) {
                case BNA_RX_T_REGULAR:
                        if (!(rx->bna->rx_mod.flags &
                                BNA_RX_MOD_F_PORT_LOOPBACK))
                                rx->rx_flags |= BNA_RX_F_PORT_ENABLED;
                        break;
                case BNA_RX_T_LOOPBACK:
                        if (rx->bna->rx_mod.flags & BNA_RX_MOD_F_PORT_LOOPBACK)
                                rx->rx_flags |= BNA_RX_F_PORT_ENABLED;
                        break;
                }
        }
        for (i = 0, rcb_idx = 0; i < rx_cfg->num_paths; i++) {
                rxp = _get_free_rxp(rx_mod);
                rxp->type = rx_cfg->rxp_type;
                rxp->rx = rx;
                rxp->cq.rx = rx;

                /* Get required RXQs, and queue them to rx-path */
                q0 = _get_free_rxq(rx_mod);
                if (BNA_RXP_SINGLE == rx_cfg->rxp_type)
                        q1 = NULL;
                else
                        q1 = _get_free_rxq(rx_mod);
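                /*
                 * A single configured vector (e.g. INTx) is shared by all
                 * rx-paths; with per-path MSI-X each path gets its own
                 * vector from the idl[] table.
                 */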
                if (1 == intr_info->num) {
                        rxp->cq.ib = bna_ib_get(&bna->ib_mod,
                                        intr_info->intr_type,
                                        intr_info->idl[0].vector);
                        rxp->vector = intr_info->idl[0].vector;
                } else {
                        rxp->cq.ib = bna_ib_get(&bna->ib_mod,
                                        intr_info->intr_type,
                                        intr_info->idl[i].vector);

                        /* Map the MSI-X vector used for this RXP */
                        rxp->vector = intr_info->idl[i].vector;
                }

                rxp->cq.ib_seg_offset = bna_ib_reserve_idx(rxp->cq.ib);
                ibcfg.coalescing_timeo = BFI_RX_COALESCING_TIMEO;
                ibcfg.interpkt_count = BFI_RX_INTERPKT_COUNT;
                ibcfg.interpkt_timeo = BFI_RX_INTERPKT_TIMEO;
                ibcfg.ctrl_flags = BFI_IB_CF_INT_ENABLE;

                ret = bna_ib_config(rxp->cq.ib, &ibcfg);

                /* Link rxqs to rxp */
                _rxp_add_rxqs(rxp, q0, q1);

                /* Link rxp to rx */
                _rx_add_rxp(rx, rxp);
                q0->rx = rx;
                q0->rxp = rxp;

                /* Initialize RCB for the large / data q */
                q0->rcb = (struct bna_rcb *) rcb_mem[rcb_idx].kva;
                RXQ_RCB_INIT(q0, rxp, rx_cfg->q_depth, bna, 0,
                        (void *)unmapq_mem[rcb_idx].kva);
                rcb_idx++;
                (q0)->rx_packets = (q0)->rx_bytes = 0;
                (q0)->rx_packets_with_error = (q0)->rxbuf_alloc_failed = 0;

                /* Initialize RXQs */
                _rxq_qpt_init(q0, rxp, dpage_count, PAGE_SIZE,
                        &dqpt_mem[i], &dsqpt_mem[i], &dpage_mem[dpage_idx]);
                q0->rcb->page_idx = dpage_idx;
                q0->rcb->page_count = dpage_count;
                dpage_idx += dpage_count;

                /* Call bnad to complete rcb setup */
                if (rx->rcb_setup_cbfn)
                        rx->rcb_setup_cbfn(bnad, q0->rcb);
                if (q1) {
                        q1->rx = rx;
                        q1->rxp = rxp;

                        q1->rcb = (struct bna_rcb *) rcb_mem[rcb_idx].kva;
                        RXQ_RCB_INIT(q1, rxp, rx_cfg->q_depth, bna, 1,
                                (void *)unmapq_mem[rcb_idx].kva);
                        rcb_idx++;
                        (q1)->buffer_size = (rx_cfg)->small_buff_size;
                        (q1)->rx_packets = (q1)->rx_bytes = 0;
                        (q1)->rx_packets_with_error =
                                (q1)->rxbuf_alloc_failed = 0;

                        _rxq_qpt_init(q1, rxp, hpage_count, PAGE_SIZE,
                                &hqpt_mem[i], &hsqpt_mem[i],
                                &hpage_mem[hpage_idx]);
                        q1->rcb->page_idx = hpage_idx;
                        q1->rcb->page_count = hpage_count;
                        hpage_idx += hpage_count;

                        /* Call bnad to complete rcb setup */
                        if (rx->rcb_setup_cbfn)
                                rx->rcb_setup_cbfn(bnad, q1->rcb);
                }
                rxp->cq.ccb = (struct bna_ccb *) ccb_mem[i].kva;
                _rxp_cqpt_setup(rxp, page_count, PAGE_SIZE,
                        &cqpt_mem[i], &cswqpt_mem[i], &cpage_mem[cpage_idx]);
                rxp->cq.ccb->page_idx = cpage_idx;
                rxp->cq.ccb->page_count = page_count;
                cpage_idx += page_count;

                rxp->cq.ccb->pkt_rate.small_pkt_cnt = 0;
                rxp->cq.ccb->pkt_rate.large_pkt_cnt = 0;

                rxp->cq.ccb->producer_index = 0;
                rxp->cq.ccb->q_depth = rx_cfg->q_depth +
                                ((rx_cfg->rxp_type == BNA_RXP_SINGLE) ?
                                0 : rx_cfg->q_depth);
                rxp->cq.ccb->i_dbell = &rxp->cq.ib->door_bell;
                rxp->cq.ccb->rcb[0] = q0->rcb;
                if (q1)
                        rxp->cq.ccb->rcb[1] = q1->rcb;
                rxp->cq.ccb->cq = &rxp->cq;
                rxp->cq.ccb->bnad = bna->bnad;
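                /*
                 * The CQ producer index lives in the IB's host-resident
                 * index segment, where the adapter DMA-writes it; hence
                 * the volatile pointer into ib_seg_host_addr_kva.
                 */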
                rxp->cq.ccb->hw_producer_index =
                        ((volatile u32 *)rxp->cq.ib->ib_seg_host_addr_kva +
                        (rxp->cq.ib_seg_offset * BFI_IBIDX_SIZE));
                *(rxp->cq.ccb->hw_producer_index) = 0;
                rxp->cq.ccb->intr_type = intr_info->intr_type;
                rxp->cq.ccb->intr_vector = (intr_info->num == 1) ?
                                intr_info->idl[0].vector :
                                intr_info->idl[i].vector;
                rxp->cq.ccb->rx_coalescing_timeo =
                                rxp->cq.ib->ib_config.coalescing_timeo;
                rxp->cq.ccb->id = i;

                /* Call bnad to complete CCB setup */
                if (rx->ccb_setup_cbfn)
                        rx->ccb_setup_cbfn(bnad, rxp->cq.ccb);

        }       /* for each rx-path */

        bna_rxf_init(&rx->rxf, rx, rx_cfg);

        bfa_fsm_set_state(rx, bna_rx_sm_stopped);

        return rx;
}
void
bna_rx_destroy(struct bna_rx *rx)
{
        struct bna_rx_mod *rx_mod = &rx->bna->rx_mod;
        struct bna_ib_mod *ib_mod = &rx->bna->ib_mod;
        struct bna_rxq *q0 = NULL;
        struct bna_rxq *q1 = NULL;
        struct bna_rxp *rxp;
        struct list_head *qe;

        bna_rxf_uninit(&rx->rxf);
        while (!list_empty(&rx->rxp_q)) {
                bfa_q_deq(&rx->rxp_q, &rxp);
                GET_RXQS(rxp, q0, q1);
                /* Callback to bnad for destroying RCB */
                if (rx->rcb_destroy_cbfn)
                        rx->rcb_destroy_cbfn(rx->bna->bnad, q0->rcb);
                q0->rcb = NULL;
                q0->rxp = NULL;
                q0->rx = NULL;
                _put_free_rxq(rx_mod, q0);
                if (q1) {
                        /* Callback to bnad for destroying RCB */
                        if (rx->rcb_destroy_cbfn)
                                rx->rcb_destroy_cbfn(rx->bna->bnad, q1->rcb);
                        q1->rcb = NULL;
                        q1->rxp = NULL;
                        q1->rx = NULL;
                        _put_free_rxq(rx_mod, q1);
                }
                rxp->rxq.slr.large = NULL;
                rxp->rxq.slr.small = NULL;
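                /*
                 * An ib_seg_offset of 0xff is taken here to mean that no
                 * IB index segment was ever reserved for this CQ.
                 */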
                if (rxp->cq.ib) {
                        if (rxp->cq.ib_seg_offset != 0xff)
                                bna_ib_release_idx(rxp->cq.ib,
                                                rxp->cq.ib_seg_offset);
                        bna_ib_put(ib_mod, rxp->cq.ib);
                        rxp->cq.ib = NULL;
                }
                /* Callback to bnad for destroying CCB */
                if (rx->ccb_destroy_cbfn)
                        rx->ccb_destroy_cbfn(rx->bna->bnad, rxp->cq.ccb);
                rxp->cq.ccb = NULL;
                rxp->rx = NULL;
                _put_free_rxp(rx_mod, rxp);
        }
        list_for_each(qe, &rx_mod->rx_active_q) {
                if (qe == &rx->qe) {
                        list_del(&rx->qe);
                        bfa_q_qe_init(&rx->qe);
                        break;
                }
        }

        rx->bna = NULL;
        rx->priv = NULL;
        _put_free_rx(rx_mod, rx);
}
void
bna_rx_enable(struct bna_rx *rx)
{
        if (rx->fsm != (bfa_sm_t)bna_rx_sm_stopped)
                return;

        rx->rx_flags |= BNA_RX_F_ENABLE;
        if (rx->rx_flags & BNA_RX_F_PORT_ENABLED)
                bfa_fsm_send_event(rx, RX_E_START);
}
void
bna_rx_disable(struct bna_rx *rx, enum bna_cleanup_type type,
                void (*cbfn)(void *, struct bna_rx *,
                                enum bna_cb_status))
{
        if (type == BNA_SOFT_CLEANUP) {
                /* H/W should not be accessed; treat the Rx as stopped */
                (*cbfn)(rx->bna->bnad, rx, BNA_CB_SUCCESS);
        } else {
                rx->stop_cbfn = cbfn;
                rx->stop_cbarg = rx->bna->bnad;

                rx->rx_flags &= ~BNA_RX_F_ENABLE;

                bfa_fsm_send_event(rx, RX_E_STOP);
        }
}
/**
 * TX
 */
#define call_tx_stop_cbfn(tx, status)\
do {\
        if ((tx)->stop_cbfn)\
                (tx)->stop_cbfn((tx)->stop_cbarg, (tx), status);\
        (tx)->stop_cbfn = NULL;\
        (tx)->stop_cbarg = NULL;\
} while (0)

#define call_tx_prio_change_cbfn(tx, status)\
do {\
        if ((tx)->prio_change_cbfn)\
                (tx)->prio_change_cbfn((tx)->bna->bnad, (tx), status);\
        (tx)->prio_change_cbfn = NULL;\
} while (0)
static void bna_tx_mod_cb_tx_stopped(void *tx_mod, struct bna_tx *tx,
                enum bna_cb_status status);
static void bna_tx_cb_txq_stopped(void *arg, int status);
static void bna_tx_cb_stats_cleared(void *arg, int status);
static void __bna_tx_stop(struct bna_tx *tx);
static void __bna_tx_start(struct bna_tx *tx);
static void __bna_txf_stat_clr(struct bna_tx *tx);
enum bna_tx_event {
        TX_E_START = 1,
        TX_E_STOP = 2,
        TX_E_FAIL = 3,
        TX_E_TXQ_STOPPED = 4,
        TX_E_PRIO_CHANGE = 5,
        TX_E_STAT_CLEARED = 6,
};

enum bna_tx_state {
        BNA_TX_STOPPED = 1,
        BNA_TX_STARTED = 2,
        BNA_TX_TXQ_STOP_WAIT = 3,
        BNA_TX_PRIO_STOP_WAIT = 4,
        BNA_TX_STAT_CLR_WAIT = 5,
};
bfa_fsm_state_decl(bna_tx, stopped, struct bna_tx,
                enum bna_tx_event);
bfa_fsm_state_decl(bna_tx, started, struct bna_tx,
                enum bna_tx_event);
bfa_fsm_state_decl(bna_tx, txq_stop_wait, struct bna_tx,
                enum bna_tx_event);
bfa_fsm_state_decl(bna_tx, prio_stop_wait, struct bna_tx,
                enum bna_tx_event);
bfa_fsm_state_decl(bna_tx, stat_clr_wait, struct bna_tx,
                enum bna_tx_event);
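/* Maps each FSM handler back to its enum state for bna_tx_state_get() */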
static struct bfa_sm_table tx_sm_table[] = {
        {BFA_SM(bna_tx_sm_stopped), BNA_TX_STOPPED},
        {BFA_SM(bna_tx_sm_started), BNA_TX_STARTED},
        {BFA_SM(bna_tx_sm_txq_stop_wait), BNA_TX_TXQ_STOP_WAIT},
        {BFA_SM(bna_tx_sm_prio_stop_wait), BNA_TX_PRIO_STOP_WAIT},
        {BFA_SM(bna_tx_sm_stat_clr_wait), BNA_TX_STAT_CLR_WAIT},
};
static void
bna_tx_sm_stopped_entry(struct bna_tx *tx)
{
        struct bna_txq *txq;
        struct list_head *qe;

        list_for_each(qe, &tx->txq_q) {
                txq = (struct bna_txq *)qe;
                (tx->tx_cleanup_cbfn)(tx->bna->bnad, txq->tcb);
        }

        call_tx_stop_cbfn(tx, BNA_CB_SUCCESS);
}
static void
bna_tx_sm_stopped(struct bna_tx *tx, enum bna_tx_event event)
{
        switch (event) {
        case TX_E_START:
                bfa_fsm_set_state(tx, bna_tx_sm_started);
                break;

        case TX_E_STOP:
                bfa_fsm_set_state(tx, bna_tx_sm_stopped);
                break;

        case TX_E_FAIL:
                /* No-op */
                break;

        case TX_E_PRIO_CHANGE:
                call_tx_prio_change_cbfn(tx, BNA_CB_SUCCESS);
                break;

        case TX_E_TXQ_STOPPED:
                /*
                 * This event is received due to flushing of mbox when
                 * the device fails; nothing to do here.
                 */
                break;

        default:
                bfa_sm_fault(tx->bna, event);
        }
}
static void
bna_tx_sm_started_entry(struct bna_tx *tx)
{
        struct bna_txq *txq;
        struct list_head *qe;

        __bna_tx_start(tx);

        /* Start IB */
        list_for_each(qe, &tx->txq_q) {
                txq = (struct bna_txq *)qe;
                bna_ib_ack(&txq->ib->door_bell, 0);
        }
}
static void
bna_tx_sm_started(struct bna_tx *tx, enum bna_tx_event event)
{
        struct bna_txq *txq;
        struct list_head *qe;

        switch (event) {
        case TX_E_STOP:
                bfa_fsm_set_state(tx, bna_tx_sm_txq_stop_wait);
                __bna_tx_stop(tx);
                break;

        case TX_E_FAIL:
                list_for_each(qe, &tx->txq_q) {
                        txq = (struct bna_txq *)qe;
                        bna_ib_fail(txq->ib);
                        (tx->tx_stall_cbfn)(tx->bna->bnad, txq->tcb);
                }
                bfa_fsm_set_state(tx, bna_tx_sm_stopped);
                break;

        case TX_E_PRIO_CHANGE:
                bfa_fsm_set_state(tx, bna_tx_sm_prio_stop_wait);
                break;

        default:
                bfa_sm_fault(tx->bna, event);
        }
}
static void
bna_tx_sm_txq_stop_wait_entry(struct bna_tx *tx)
{
}

static void
bna_tx_sm_txq_stop_wait(struct bna_tx *tx, enum bna_tx_event event)
{
        struct bna_txq *txq;
        struct list_head *qe;

        switch (event) {
        case TX_E_FAIL:
                bfa_fsm_set_state(tx, bna_tx_sm_stopped);
                break;

        case TX_E_TXQ_STOPPED:
                list_for_each(qe, &tx->txq_q) {
                        txq = (struct bna_txq *)qe;
                        bna_ib_stop(txq->ib);
                }
                bfa_fsm_set_state(tx, bna_tx_sm_stat_clr_wait);
                break;

        case TX_E_PRIO_CHANGE:
                /* No-op */
                break;

        default:
                bfa_sm_fault(tx->bna, event);
        }
}
static void
bna_tx_sm_prio_stop_wait_entry(struct bna_tx *tx)
{
        __bna_tx_stop(tx);
}

static void
bna_tx_sm_prio_stop_wait(struct bna_tx *tx, enum bna_tx_event event)
{
        struct bna_txq *txq;
        struct list_head *qe;

        switch (event) {
        case TX_E_STOP:
                bfa_fsm_set_state(tx, bna_tx_sm_txq_stop_wait);
                break;

        case TX_E_FAIL:
                call_tx_prio_change_cbfn(tx, BNA_CB_FAIL);
                bfa_fsm_set_state(tx, bna_tx_sm_stopped);
                break;

        case TX_E_TXQ_STOPPED:
                list_for_each(qe, &tx->txq_q) {
                        txq = (struct bna_txq *)qe;
                        bna_ib_stop(txq->ib);
                        (tx->tx_cleanup_cbfn)(tx->bna->bnad, txq->tcb);
                }
                call_tx_prio_change_cbfn(tx, BNA_CB_SUCCESS);
                bfa_fsm_set_state(tx, bna_tx_sm_started);
                break;

        case TX_E_PRIO_CHANGE:
                /* No-op */
                break;

        default:
                bfa_sm_fault(tx->bna, event);
        }
}
static void
bna_tx_sm_stat_clr_wait_entry(struct bna_tx *tx)
{
        __bna_txf_stat_clr(tx);
}

static void
bna_tx_sm_stat_clr_wait(struct bna_tx *tx, enum bna_tx_event event)
{
        switch (event) {
        case TX_E_FAIL:
        case TX_E_STAT_CLEARED:
                bfa_fsm_set_state(tx, bna_tx_sm_stopped);
                break;

        default:
                bfa_sm_fault(tx->bna, event);
        }
}
static void
__bna_txq_start(struct bna_tx *tx, struct bna_txq *txq)
{
        struct bna_rxtx_q_mem *q_mem;
        struct bna_txq_mem txq_cfg;
        struct bna_txq_mem *txq_mem;
        struct bna_dma_addr cur_q_addr;
        u32 pg_num;
        void __iomem *base_addr;
        unsigned long off;

        /* Fill out structure, to be subsequently written to hardware */
        txq_cfg.pg_tbl_addr_lo = txq->qpt.hw_qpt_ptr.lsb;
        txq_cfg.pg_tbl_addr_hi = txq->qpt.hw_qpt_ptr.msb;
        cur_q_addr = *((struct bna_dma_addr *)(txq->qpt.kv_qpt_ptr));
        txq_cfg.cur_q_entry_lo = cur_q_addr.lsb;
        txq_cfg.cur_q_entry_hi = cur_q_addr.msb;

        txq_cfg.pg_cnt_n_prd_ptr = (txq->qpt.page_count << 16) | 0x0;

        txq_cfg.entry_n_pg_size = ((u32)(BFI_TXQ_WI_SIZE >> 2) << 16) |
                        (txq->qpt.page_size >> 2);
        txq_cfg.int_blk_n_cns_ptr = ((((u32)txq->ib_seg_offset) << 24) |
                        ((u32)(txq->ib->ib_id & 0xff) << 16) | 0x0);

        txq_cfg.cns_ptr2_n_q_state = BNA_Q_IDLE_STATE;
        txq_cfg.nxt_qid_n_fid_n_pri = (((tx->txf.txf_id & 0x3f) << 3) |
                        (txq->priority & 0x3));
        txq_cfg.wvc_n_cquota_n_rquota =
                        ((((u32)BFI_TX_MAX_WRR_QUOTA & 0xfff) << 12) |
                        (BFI_TX_MAX_WRR_QUOTA & 0xfff));

        /* Setup the page and write to H/W */

        pg_num = BNA_GET_PAGE_NUM(HQM0_BLK_PG_NUM + tx->bna->port_num,
                        HQM_RXTX_Q_RAM_BASE_OFFSET);
        writel(pg_num, tx->bna->regs.page_addr);

        base_addr = BNA_GET_MEM_BASE_ADDR(tx->bna->pcidev.pci_bar_kva,
                        HQM_RXTX_Q_RAM_BASE_OFFSET);
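        /*
         * Indexing a NULL-based struct pointer yields the field's byte
         * offset within the RAM block (an offsetof()-style idiom); the
         * offset is then applied to the mapped base address.
         */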
        q_mem = (struct bna_rxtx_q_mem *)0;
        txq_mem = &q_mem[txq->txq_id].txq;

        /*
         * The following four writes are a hack: the H/W needs to read
         * these DMA addresses as little endian.
         */
        off = (unsigned long)&txq_mem->pg_tbl_addr_lo;
        writel(htonl(txq_cfg.pg_tbl_addr_lo), base_addr + off);

        off = (unsigned long)&txq_mem->pg_tbl_addr_hi;
        writel(htonl(txq_cfg.pg_tbl_addr_hi), base_addr + off);

        off = (unsigned long)&txq_mem->cur_q_entry_lo;
        writel(htonl(txq_cfg.cur_q_entry_lo), base_addr + off);

        off = (unsigned long)&txq_mem->cur_q_entry_hi;
        writel(htonl(txq_cfg.cur_q_entry_hi), base_addr + off);

        off = (unsigned long)&txq_mem->pg_cnt_n_prd_ptr;
        writel(txq_cfg.pg_cnt_n_prd_ptr, base_addr + off);

        off = (unsigned long)&txq_mem->entry_n_pg_size;
        writel(txq_cfg.entry_n_pg_size, base_addr + off);

        off = (unsigned long)&txq_mem->int_blk_n_cns_ptr;
        writel(txq_cfg.int_blk_n_cns_ptr, base_addr + off);

        off = (unsigned long)&txq_mem->cns_ptr2_n_q_state;
        writel(txq_cfg.cns_ptr2_n_q_state, base_addr + off);

        off = (unsigned long)&txq_mem->nxt_qid_n_fid_n_pri;
        writel(txq_cfg.nxt_qid_n_fid_n_pri, base_addr + off);

        off = (unsigned long)&txq_mem->wvc_n_cquota_n_rquota;
        writel(txq_cfg.wvc_n_cquota_n_rquota, base_addr + off);

        txq->tcb->producer_index = 0;
        txq->tcb->consumer_index = 0;
        *(txq->tcb->hw_consumer_index) = 0;
}
static void
__bna_txq_stop(struct bna_tx *tx, struct bna_txq *txq)
{
        struct bfi_ll_q_stop_req ll_req;
        u32 bit_mask[2] = {0, 0};
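        /* TxQ ids form a 64-bit mask carried as two 32-bit words */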
        if (txq->txq_id < 32)
                bit_mask[0] = (u32)1 << txq->txq_id;
        else
                bit_mask[1] = (u32)1 << (txq->txq_id - 32);

        memset(&ll_req, 0, sizeof(ll_req));
        ll_req.mh.msg_class = BFI_MC_LL;
        ll_req.mh.msg_id = BFI_LL_H2I_TXQ_STOP_REQ;
        ll_req.mh.mtag.h2i.lpu_id = 0;
        ll_req.q_id_mask[0] = htonl(bit_mask[0]);
        ll_req.q_id_mask[1] = htonl(bit_mask[1]);

        bna_mbox_qe_fill(&tx->mbox_qe, &ll_req, sizeof(ll_req),
                        bna_tx_cb_txq_stopped, tx);

        bna_mbox_send(tx->bna, &tx->mbox_qe);
}
static void
__bna_txf_start(struct bna_tx *tx)
{
        struct bna_tx_fndb_ram *tx_fndb;
        struct bna_txf *txf = &tx->txf;
        void __iomem *base_addr;
        unsigned long off;

        writel(BNA_GET_PAGE_NUM(LUT0_MEM_BLK_BASE_PG_NUM +
                        (tx->bna->port_num * 2), TX_FNDB_RAM_BASE_OFFSET),
                        tx->bna->regs.page_addr);

        base_addr = BNA_GET_MEM_BASE_ADDR(tx->bna->pcidev.pci_bar_kva,
                        TX_FNDB_RAM_BASE_OFFSET);

        tx_fndb = (struct bna_tx_fndb_ram *)0;
        off = (unsigned long)&tx_fndb[txf->txf_id].vlan_n_ctrl_flags;

        writel(((u32)txf->vlan << 16) | txf->ctrl_flags,
                        base_addr + off);

        if (tx->txf.txf_id < 32)
                tx->bna->tx_mod.txf_bmap[0] |= ((u32)1 << tx->txf.txf_id);
        else
                tx->bna->tx_mod.txf_bmap[1] |= ((u32)
                        1 << (tx->txf.txf_id - 32));
}
static void
__bna_txf_stop(struct bna_tx *tx)
{
        struct bna_tx_fndb_ram *tx_fndb;
        u32 page_num;
        u32 ctl_flags;
        struct bna_txf *txf = &tx->txf;
        void __iomem *base_addr;
        unsigned long off;

        /* Retrieve the running txf_flags & turn off enable bit */
        page_num = BNA_GET_PAGE_NUM(LUT0_MEM_BLK_BASE_PG_NUM +
                        (tx->bna->port_num * 2), TX_FNDB_RAM_BASE_OFFSET);
        writel(page_num, tx->bna->regs.page_addr);

        base_addr = BNA_GET_MEM_BASE_ADDR(tx->bna->pcidev.pci_bar_kva,
                        TX_FNDB_RAM_BASE_OFFSET);
        tx_fndb = (struct bna_tx_fndb_ram *)0;
        off = (unsigned long)&tx_fndb[txf->txf_id].vlan_n_ctrl_flags;

        ctl_flags = readl(base_addr + off);
        ctl_flags &= ~BFI_TXF_CF_ENABLE;

        writel(ctl_flags, base_addr + off);

        if (tx->txf.txf_id < 32)
                tx->bna->tx_mod.txf_bmap[0] &= ~((u32)1 << tx->txf.txf_id);
        else
                tx->bna->tx_mod.txf_bmap[1] &= ~((u32)
                        1 << (tx->txf.txf_id - 32));
}
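
/*
 * Stats are cleared via a mailbox request; the completion callback
 * (bna_tx_cb_stats_cleared) feeds TX_E_STAT_CLEARED back into the FSM.
 */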
static void
__bna_txf_stat_clr(struct bna_tx *tx)
{
        struct bfi_ll_stats_req ll_req;
        u32 txf_bmap[2] = {0, 0};

        if (tx->txf.txf_id < 32)
                txf_bmap[0] = ((u32)1 << tx->txf.txf_id);
        else
                txf_bmap[1] = ((u32)1 << (tx->txf.txf_id - 32));

        bfi_h2i_set(ll_req.mh, BFI_MC_LL, BFI_LL_H2I_STATS_CLEAR_REQ, 0);
        ll_req.stats_mask = 0;
        ll_req.rxf_id_mask[0] = 0;
        ll_req.rxf_id_mask[1] = 0;
        ll_req.txf_id_mask[0] = htonl(txf_bmap[0]);
        ll_req.txf_id_mask[1] = htonl(txf_bmap[1]);

        bna_mbox_qe_fill(&tx->mbox_qe, &ll_req, sizeof(ll_req),
                        bna_tx_cb_stats_cleared, tx);
        bna_mbox_send(tx->bna, &tx->mbox_qe);
}
static void
__bna_tx_start(struct bna_tx *tx)
{
        struct bna_txq *txq;
        struct list_head *qe;

        list_for_each(qe, &tx->txq_q) {
                txq = (struct bna_txq *)qe;
                bna_ib_start(txq->ib);
                __bna_txq_start(tx, txq);
        }

        __bna_txf_start(tx);

        list_for_each(qe, &tx->txq_q) {
                txq = (struct bna_txq *)qe;
                txq->tcb->priority = txq->priority;
                (tx->tx_resume_cbfn)(tx->bna->bnad, txq->tcb);
        }
}

static void
__bna_tx_stop(struct bna_tx *tx)
{
        struct bna_txq *txq;
        struct list_head *qe;

        list_for_each(qe, &tx->txq_q) {
                txq = (struct bna_txq *)qe;
                (tx->tx_stall_cbfn)(tx->bna->bnad, txq->tcb);
        }

        __bna_txf_stop(tx);

        list_for_each(qe, &tx->txq_q) {
                txq = (struct bna_txq *)qe;
                bfa_wc_up(&tx->txq_stop_wc);
        }

        list_for_each(qe, &tx->txq_q) {
                txq = (struct bna_txq *)qe;
                __bna_txq_stop(tx, txq);
        }
}
static void
bna_txq_qpt_setup(struct bna_txq *txq, int page_count, int page_size,
                struct bna_mem_descr *qpt_mem,
                struct bna_mem_descr *swqpt_mem,
                struct bna_mem_descr *page_mem)
{
        int i;

        txq->qpt.hw_qpt_ptr.lsb = qpt_mem->dma.lsb;
        txq->qpt.hw_qpt_ptr.msb = qpt_mem->dma.msb;
        txq->qpt.kv_qpt_ptr = qpt_mem->kva;
        txq->qpt.page_count = page_count;
        txq->qpt.page_size = page_size;

        txq->tcb->sw_qpt = (void **) swqpt_mem->kva;

        for (i = 0; i < page_count; i++) {
                txq->tcb->sw_qpt[i] = page_mem[i].kva;

                ((struct bna_dma_addr *)txq->qpt.kv_qpt_ptr)[i].lsb =
                        page_mem[i].dma.lsb;
                ((struct bna_dma_addr *)txq->qpt.kv_qpt_ptr)[i].msb =
                        page_mem[i].dma.msb;
        }
}
static void
bna_tx_free(struct bna_tx *tx)
{
        struct bna_tx_mod *tx_mod = &tx->bna->tx_mod;
        struct bna_txq *txq;
        struct bna_ib_mod *ib_mod = &tx->bna->ib_mod;
        struct list_head *qe;

        while (!list_empty(&tx->txq_q)) {
                bfa_q_deq(&tx->txq_q, &txq);
                bfa_q_qe_init(&txq->qe);
                if (txq->ib) {
                        if (txq->ib_seg_offset != -1)
                                bna_ib_release_idx(txq->ib,
                                                txq->ib_seg_offset);
                        bna_ib_put(ib_mod, txq->ib);
                        txq->ib = NULL;
                }
                txq->tcb = NULL;
                txq->tx = NULL;
                list_add_tail(&txq->qe, &tx_mod->txq_free_q);
        }

        list_for_each(qe, &tx_mod->tx_active_q) {
                if (qe == &tx->qe) {
                        list_del(&tx->qe);
                        bfa_q_qe_init(&tx->qe);
                        break;
                }
        }

        tx->bna = NULL;
        tx->priv = NULL;
        list_add_tail(&tx->qe, &tx_mod->tx_free_q);
}
static void
bna_tx_cb_txq_stopped(void *arg, int status)
{
        struct bna_tx *tx = (struct bna_tx *)arg;

        bfa_q_qe_init(&tx->mbox_qe.qe);
        bfa_wc_down(&tx->txq_stop_wc);
}

static void
bna_tx_cb_txq_stopped_all(void *arg)
{
        struct bna_tx *tx = (struct bna_tx *)arg;

        bfa_fsm_send_event(tx, TX_E_TXQ_STOPPED);
}

static void
bna_tx_cb_stats_cleared(void *arg, int status)
{
        struct bna_tx *tx = (struct bna_tx *)arg;

        bfa_q_qe_init(&tx->mbox_qe.qe);

        bfa_fsm_send_event(tx, TX_E_STAT_CLEARED);
}
static void
bna_tx_start(struct bna_tx *tx)
{
        tx->flags |= BNA_TX_F_PORT_STARTED;
        if (tx->flags & BNA_TX_F_ENABLED)
                bfa_fsm_send_event(tx, TX_E_START);
}

static void
bna_tx_stop(struct bna_tx *tx)
{
        tx->stop_cbfn = bna_tx_mod_cb_tx_stopped;
        tx->stop_cbarg = &tx->bna->tx_mod;

        tx->flags &= ~BNA_TX_F_PORT_STARTED;
        bfa_fsm_send_event(tx, TX_E_STOP);
}

static void
bna_tx_fail(struct bna_tx *tx)
{
        tx->flags &= ~BNA_TX_F_PORT_STARTED;
        bfa_fsm_send_event(tx, TX_E_FAIL);
}
static void
bna_tx_prio_changed(struct bna_tx *tx, int prio)
{
        struct bna_txq *txq;
        struct list_head *qe;

        list_for_each(qe, &tx->txq_q) {
                txq = (struct bna_txq *)qe;
                txq->priority = prio;
        }

        bfa_fsm_send_event(tx, TX_E_PRIO_CHANGE);
}

static void
bna_tx_cee_link_status(struct bna_tx *tx, int cee_link)
{
        if (cee_link)
                tx->flags |= BNA_TX_F_PRIO_LOCK;
        else
                tx->flags &= ~BNA_TX_F_PRIO_LOCK;
}
static void
bna_tx_mod_cb_tx_stopped(void *arg, struct bna_tx *tx,
                enum bna_cb_status status)
{
        struct bna_tx_mod *tx_mod = (struct bna_tx_mod *)arg;

        bfa_wc_down(&tx_mod->tx_stop_wc);
}

static void
bna_tx_mod_cb_tx_stopped_all(void *arg)
{
        struct bna_tx_mod *tx_mod = (struct bna_tx_mod *)arg;

        if (tx_mod->stop_cbfn)
                tx_mod->stop_cbfn(&tx_mod->bna->port, BNA_CB_SUCCESS);
        tx_mod->stop_cbfn = NULL;
}
void
bna_tx_res_req(int num_txq, int txq_depth, struct bna_res_info *res_info)
{
        u32 q_size;
        u32 page_count;
        struct bna_mem_info *mem_info;

        res_info[BNA_TX_RES_MEM_T_TCB].res_type = BNA_RES_T_MEM;
        mem_info = &res_info[BNA_TX_RES_MEM_T_TCB].res_u.mem_info;
        mem_info->mem_type = BNA_MEM_T_KVA;
        mem_info->len = sizeof(struct bna_tcb);
        mem_info->num = num_txq;

        q_size = txq_depth * BFI_TXQ_WI_SIZE;
        q_size = ALIGN(q_size, PAGE_SIZE);
        page_count = q_size >> PAGE_SHIFT;

        res_info[BNA_TX_RES_MEM_T_QPT].res_type = BNA_RES_T_MEM;
        mem_info = &res_info[BNA_TX_RES_MEM_T_QPT].res_u.mem_info;
        mem_info->mem_type = BNA_MEM_T_DMA;
        mem_info->len = page_count * sizeof(struct bna_dma_addr);
        mem_info->num = num_txq;

        res_info[BNA_TX_RES_MEM_T_SWQPT].res_type = BNA_RES_T_MEM;
        mem_info = &res_info[BNA_TX_RES_MEM_T_SWQPT].res_u.mem_info;
        mem_info->mem_type = BNA_MEM_T_KVA;
        mem_info->len = page_count * sizeof(void *);
        mem_info->num = num_txq;

        res_info[BNA_TX_RES_MEM_T_PAGE].res_type = BNA_RES_T_MEM;
        mem_info = &res_info[BNA_TX_RES_MEM_T_PAGE].res_u.mem_info;
        mem_info->mem_type = BNA_MEM_T_DMA;
        mem_info->len = PAGE_SIZE;
        mem_info->num = num_txq * page_count;

        res_info[BNA_TX_RES_INTR_T_TXCMPL].res_type = BNA_RES_T_INTR;
        res_info[BNA_TX_RES_INTR_T_TXCMPL].res_u.intr_info.intr_type =
                        BNA_INTR_T_MSIX;
        res_info[BNA_TX_RES_INTR_T_TXCMPL].res_u.intr_info.num = num_txq;
}
struct bna_tx *
bna_tx_create(struct bna *bna, struct bnad *bnad,
                struct bna_tx_config *tx_cfg,
                struct bna_tx_event_cbfn *tx_cbfn,
                struct bna_res_info *res_info, void *priv)
{
        struct bna_intr_info *intr_info;
        struct bna_tx_mod *tx_mod = &bna->tx_mod;
        struct bna_tx *tx;
        struct bna_txq *txq;
        struct list_head *qe;
        struct bna_ib_mod *ib_mod = &bna->ib_mod;
        struct bna_doorbell_qset *qset;
        struct bna_ib_config ib_config;
        int page_count;
        int page_size;
        int page_idx;
        int i;
        unsigned long off;

        intr_info = &res_info[BNA_TX_RES_INTR_T_TXCMPL].res_u.intr_info;
        page_count = (res_info[BNA_TX_RES_MEM_T_PAGE].res_u.mem_info.num) /
                        tx_cfg->num_txq;
        page_size = res_info[BNA_TX_RES_MEM_T_PAGE].res_u.mem_info.len;
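        /* Need either one vector shared by all TxQs or one per TxQ */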
        if ((intr_info->num != 1) && (intr_info->num != tx_cfg->num_txq))
                return NULL;

        /* Tx */
        if (list_empty(&tx_mod->tx_free_q))
                return NULL;
        bfa_q_deq(&tx_mod->tx_free_q, &tx);
        bfa_q_qe_init(&tx->qe);

        /* TxQs */
        INIT_LIST_HEAD(&tx->txq_q);
        for (i = 0; i < tx_cfg->num_txq; i++) {
                if (list_empty(&tx_mod->txq_free_q))
                        goto err_return;

                bfa_q_deq(&tx_mod->txq_free_q, &txq);
                bfa_q_qe_init(&txq->qe);
                list_add_tail(&txq->qe, &tx->txq_q);
                txq->ib = NULL;
                txq->ib_seg_offset = -1;
                txq->tx = tx;
        }
        /* IBs */
        i = 0;
        list_for_each(qe, &tx->txq_q) {
                txq = (struct bna_txq *)qe;

                if (intr_info->num == 1)
                        txq->ib = bna_ib_get(ib_mod, intr_info->intr_type,
                                        intr_info->idl[0].vector);
                else
                        txq->ib = bna_ib_get(ib_mod, intr_info->intr_type,
                                        intr_info->idl[i].vector);

                if (txq->ib == NULL)
                        goto err_return;

                txq->ib_seg_offset = bna_ib_reserve_idx(txq->ib);
                if (txq->ib_seg_offset == -1)
                        goto err_return;

                i++;
        }
        /* Initialize the Tx object */
        tx->tcb_setup_cbfn = tx_cbfn->tcb_setup_cbfn;
        tx->tcb_destroy_cbfn = tx_cbfn->tcb_destroy_cbfn;
        /* Following callbacks are mandatory */
        tx->tx_stall_cbfn = tx_cbfn->tx_stall_cbfn;
        tx->tx_resume_cbfn = tx_cbfn->tx_resume_cbfn;
        tx->tx_cleanup_cbfn = tx_cbfn->tx_cleanup_cbfn;

        list_add_tail(&tx->qe, &tx_mod->tx_active_q);
        tx->bna = bna;
        tx->priv = priv;
        tx->txq_stop_wc.wc_resume = bna_tx_cb_txq_stopped_all;
        tx->txq_stop_wc.wc_cbarg = tx;
        tx->txq_stop_wc.wc_count = 0;
        tx->type = tx_cfg->tx_type;

        tx->flags = 0;
        if (tx->bna->tx_mod.flags & BNA_TX_MOD_F_PORT_STARTED) {
                switch (tx->type) {
                case BNA_TX_T_REGULAR:
                        if (!(tx->bna->tx_mod.flags &
                                BNA_TX_MOD_F_PORT_LOOPBACK))
                                tx->flags |= BNA_TX_F_PORT_STARTED;
                        break;
                case BNA_TX_T_LOOPBACK:
                        if (tx->bna->tx_mod.flags & BNA_TX_MOD_F_PORT_LOOPBACK)
                                tx->flags |= BNA_TX_F_PORT_STARTED;
                        break;
                }
        }
        if (tx->bna->tx_mod.cee_link)
                tx->flags |= BNA_TX_F_PRIO_LOCK;
        /* TxQ */
        i = 0;
        page_idx = 0;
        list_for_each(qe, &tx->txq_q) {
                txq = (struct bna_txq *)qe;
                txq->priority = tx_mod->priority;
                txq->tcb = (struct bna_tcb *)
                        res_info[BNA_TX_RES_MEM_T_TCB].res_u.mem_info.mdl[i].kva;
                txq->tx_packets = 0;
                txq->tx_bytes = 0;

                /* IB */
                ib_config.coalescing_timeo = BFI_TX_COALESCING_TIMEO;
                ib_config.interpkt_timeo = 0; /* Not used */
                ib_config.interpkt_count = BFI_TX_INTERPKT_COUNT;
                ib_config.ctrl_flags = (BFI_IB_CF_INTER_PKT_DMA |
                                BFI_IB_CF_INT_ENABLE |
                                BFI_IB_CF_COALESCING_MODE);
                bna_ib_config(txq->ib, &ib_config);
                /* TCB */
                txq->tcb->producer_index = 0;
                txq->tcb->consumer_index = 0;
                txq->tcb->hw_consumer_index = (volatile u32 *)
                        ((volatile u8 *)txq->ib->ib_seg_host_addr_kva +
                        (txq->ib_seg_offset * BFI_IBIDX_SIZE));
                *(txq->tcb->hw_consumer_index) = 0;
                txq->tcb->q_depth = tx_cfg->txq_depth;
                txq->tcb->unmap_q = (void *)
                        res_info[BNA_TX_RES_MEM_T_UNMAPQ].res_u.mem_info.mdl[i].kva;
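                /*
                 * Doorbell address: the NULL-based qset pointer gives the
                 * per-TxQ offset within the doorbell page, which is added
                 * to the doorbell base address.
                 */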
                qset = (struct bna_doorbell_qset *)0;
                off = (unsigned long)&qset[txq->txq_id].txq[0];
                txq->tcb->q_dbell = off +
                        BNA_GET_DOORBELL_BASE_ADDR(bna->pcidev.pci_bar_kva);
                txq->tcb->i_dbell = &txq->ib->door_bell;
                txq->tcb->intr_type = intr_info->intr_type;
                txq->tcb->intr_vector = (intr_info->num == 1) ?
                                intr_info->idl[0].vector :
                                intr_info->idl[i].vector;
                txq->tcb->txq = txq;
                txq->tcb->bnad = bnad;
                txq->tcb->id = i;

                /* QPT, SWQPT, Pages */
                bna_txq_qpt_setup(txq, page_count, page_size,
                        &res_info[BNA_TX_RES_MEM_T_QPT].res_u.mem_info.mdl[i],
                        &res_info[BNA_TX_RES_MEM_T_SWQPT].res_u.mem_info.mdl[i],
                        &res_info[BNA_TX_RES_MEM_T_PAGE].
                                res_u.mem_info.mdl[page_idx]);
                txq->tcb->page_idx = page_idx;
                txq->tcb->page_count = page_count;
                page_idx += page_count;

                /* Callback to bnad for setting up TCB */
                if (tx->tcb_setup_cbfn)
                        (tx->tcb_setup_cbfn)(bna->bnad, txq->tcb);

                i++;
        }
        /* TxF */
        tx->txf.ctrl_flags = BFI_TXF_CF_ENABLE | BFI_TXF_CF_VLAN_WI_BASED;
        tx->txf.vlan = 0;

        /* Mbox element */
        bfa_q_qe_init(&tx->mbox_qe.qe);

        bfa_fsm_set_state(tx, bna_tx_sm_stopped);

        return tx;

err_return:
        bna_tx_free(tx);
        return NULL;
}
void
bna_tx_destroy(struct bna_tx *tx)
{
        /* Callback to bnad for destroying TCB */
        if (tx->tcb_destroy_cbfn) {
                struct bna_txq *txq;
                struct list_head *qe;

                list_for_each(qe, &tx->txq_q) {
                        txq = (struct bna_txq *)qe;
                        (tx->tcb_destroy_cbfn)(tx->bna->bnad, txq->tcb);
                }
        }

        bna_tx_free(tx);
}
void
bna_tx_enable(struct bna_tx *tx)
{
        if (tx->fsm != (bfa_sm_t)bna_tx_sm_stopped)
                return;

        tx->flags |= BNA_TX_F_ENABLED;

        if (tx->flags & BNA_TX_F_PORT_STARTED)
                bfa_fsm_send_event(tx, TX_E_START);
}
void
bna_tx_disable(struct bna_tx *tx, enum bna_cleanup_type type,
                void (*cbfn)(void *, struct bna_tx *, enum bna_cb_status))
{
        if (type == BNA_SOFT_CLEANUP) {
                (*cbfn)(tx->bna->bnad, tx, BNA_CB_SUCCESS);
                return;
        }

        tx->stop_cbfn = cbfn;
        tx->stop_cbarg = tx->bna->bnad;

        tx->flags &= ~BNA_TX_F_ENABLED;

        bfa_fsm_send_event(tx, TX_E_STOP);
}
int
bna_tx_state_get(struct bna_tx *tx)
{
        return bfa_sm_to_state(tx_sm_table, tx->fsm);
}
void
bna_tx_mod_init(struct bna_tx_mod *tx_mod, struct bna *bna,
                struct bna_res_info *res_info)
{
        int i;

        tx_mod->bna = bna;
        tx_mod->flags = 0;

        tx_mod->tx = (struct bna_tx *)
                res_info[BNA_RES_MEM_T_TX_ARRAY].res_u.mem_info.mdl[0].kva;
        tx_mod->txq = (struct bna_txq *)
                res_info[BNA_RES_MEM_T_TXQ_ARRAY].res_u.mem_info.mdl[0].kva;

        INIT_LIST_HEAD(&tx_mod->tx_free_q);
        INIT_LIST_HEAD(&tx_mod->tx_active_q);

        INIT_LIST_HEAD(&tx_mod->txq_free_q);

        for (i = 0; i < BFI_MAX_TXQ; i++) {
                tx_mod->tx[i].txf.txf_id = i;
                bfa_q_qe_init(&tx_mod->tx[i].qe);
                list_add_tail(&tx_mod->tx[i].qe, &tx_mod->tx_free_q);

                tx_mod->txq[i].txq_id = i;
                bfa_q_qe_init(&tx_mod->txq[i].qe);
                list_add_tail(&tx_mod->txq[i].qe, &tx_mod->txq_free_q);
        }

        tx_mod->tx_stop_wc.wc_resume = bna_tx_mod_cb_tx_stopped_all;
        tx_mod->tx_stop_wc.wc_cbarg = tx_mod;
        tx_mod->tx_stop_wc.wc_count = 0;
}
void
bna_tx_mod_uninit(struct bna_tx_mod *tx_mod)
{
        struct list_head *qe;
        int i;

        /* Free-list entries live in module-owned arrays; just count them */
        i = 0;
        list_for_each(qe, &tx_mod->tx_free_q)
                i++;

        i = 0;
        list_for_each(qe, &tx_mod->txq_free_q)
                i++;

        tx_mod->bna = NULL;
}
void
bna_tx_mod_start(struct bna_tx_mod *tx_mod, enum bna_tx_type type)
{
        struct bna_tx *tx;
        struct list_head *qe;

        tx_mod->flags |= BNA_TX_MOD_F_PORT_STARTED;
        if (type == BNA_TX_T_LOOPBACK)
                tx_mod->flags |= BNA_TX_MOD_F_PORT_LOOPBACK;

        list_for_each(qe, &tx_mod->tx_active_q) {
                tx = (struct bna_tx *)qe;
                if (tx->type == type)
                        bna_tx_start(tx);
        }
}
void
bna_tx_mod_stop(struct bna_tx_mod *tx_mod, enum bna_tx_type type)
{
        struct bna_tx *tx;
        struct list_head *qe;

        tx_mod->flags &= ~BNA_TX_MOD_F_PORT_STARTED;
        tx_mod->flags &= ~BNA_TX_MOD_F_PORT_LOOPBACK;

        tx_mod->stop_cbfn = bna_port_cb_tx_stopped;

        /*
         * Before calling bna_tx_stop(), increment tx_stop_wc as many times
         * as we are going to call bna_tx_stop
         */
        list_for_each(qe, &tx_mod->tx_active_q) {
                tx = (struct bna_tx *)qe;
                if (tx->type == type)
                        bfa_wc_up(&tx_mod->tx_stop_wc);
        }

        if (tx_mod->tx_stop_wc.wc_count == 0) {
                tx_mod->stop_cbfn(&tx_mod->bna->port, BNA_CB_SUCCESS);
                tx_mod->stop_cbfn = NULL;
                return;
        }

        list_for_each(qe, &tx_mod->tx_active_q) {
                tx = (struct bna_tx *)qe;
                if (tx->type == type)
                        bna_tx_stop(tx);
        }
}
void
bna_tx_mod_fail(struct bna_tx_mod *tx_mod)
{
        struct bna_tx *tx;
        struct list_head *qe;

        tx_mod->flags &= ~BNA_TX_MOD_F_PORT_STARTED;
        tx_mod->flags &= ~BNA_TX_MOD_F_PORT_LOOPBACK;

        list_for_each(qe, &tx_mod->tx_active_q) {
                tx = (struct bna_tx *)qe;
                bna_tx_fail(tx);
        }
}
void
bna_tx_mod_prio_changed(struct bna_tx_mod *tx_mod, int prio)
{
        struct bna_tx *tx;
        struct list_head *qe;

        if (prio != tx_mod->priority) {
                tx_mod->priority = prio;

                list_for_each(qe, &tx_mod->tx_active_q) {
                        tx = (struct bna_tx *)qe;
                        bna_tx_prio_changed(tx, prio);
                }
        }
}
void
bna_tx_mod_cee_link_status(struct bna_tx_mod *tx_mod, int cee_link)
{
        struct bna_tx *tx;
        struct list_head *qe;

        tx_mod->cee_link = cee_link;

        list_for_each(qe, &tx_mod->tx_active_q) {
                tx = (struct bna_tx *)qe;
                bna_tx_cee_link_status(tx, cee_link);
        }
}