/* bnx2x_vfpf.c: Broadcom Everest network driver.
 *
 * Copyright 2009-2013 Broadcom Corporation
 *
 * Unless you and Broadcom execute a separate written software license
 * agreement governing use of this software, this software is licensed to you
 * under the terms of the GNU General Public License version 2, available
 * at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL").
 *
 * Notwithstanding the above, under no circumstances may you combine this
 * software in any way with any other Broadcom software provided under a
 * license other than the GPL, without Broadcom's express prior written
 * consent.
 *
 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
 * Written by: Shmulik Ravid <shmulikr@broadcom.com>
 *             Ariel Elior <ariele@broadcom.com>
 */

#include "bnx2x.h"
#include "bnx2x_cmn.h"
#include <linux/crc32.h>

static int bnx2x_vfpf_teardown_queue(struct bnx2x *bp, int qidx);

/* place a given tlv on the tlv buffer at a given offset */
static void bnx2x_add_tlv(struct bnx2x *bp, void *tlvs_list,
			  u16 offset, u16 type, u16 length)
{
	struct channel_tlv *tl =
		(struct channel_tlv *)(tlvs_list + offset);

	tl->type = type;
	tl->length = length;
}

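/* A request is built as a chain of TLVs inside the mailbox: the first TLV
 * (which embeds the request-specific fields) is followed by optional
 * extension TLVs and is always terminated by a CHANNEL_TLV_LIST_END TLV.
 * Each tlv->length is the distance to the next TLV header.
 */
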
/* Clear the mailbox and init the header of the first tlv */
static void bnx2x_vfpf_prep(struct bnx2x *bp, struct vfpf_first_tlv *first_tlv,
			    u16 type, u16 length)
{
	mutex_lock(&bp->vf2pf_mutex);

	DP(BNX2X_MSG_IOV, "preparing to send %d tlv over vf pf channel\n",
	   type);

	/* Clear mailbox to avoid garbage from a previous exchange */
	memset(bp->vf2pf_mbox, 0, sizeof(struct bnx2x_vf_mbx_msg));

	/* init type and length */
	bnx2x_add_tlv(bp, &first_tlv->tl, 0, type, length);

	/* init first tlv header */
	first_tlv->resp_msg_offset = sizeof(bp->vf2pf_mbox->req);
}

/* releases the mailbox */
static void bnx2x_vfpf_finalize(struct bnx2x *bp,
				struct vfpf_first_tlv *first_tlv)
{
	DP(BNX2X_MSG_IOV, "done sending [%d] tlv over vf pf channel\n",
	   first_tlv->tl.type);

	mutex_unlock(&bp->vf2pf_mutex);
}

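/* bnx2x_vfpf_prep() takes bp->vf2pf_mutex and bnx2x_vfpf_finalize() releases
 * it, so every prep must be paired with a finalize. A typical request on the
 * VF side therefore follows this shape (sketch; CHANNEL_TLV_FOO is a
 * placeholder and error handling is elided):
 *
 *	bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_FOO, sizeof(*req));
 *	... fill request fields ...
 *	bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, CHANNEL_TLV_LIST_END,
 *		      sizeof(struct channel_list_end_tlv));
 *	bnx2x_dp_tlv_list(bp, req);
 *	rc = bnx2x_send_msg2pf(bp, &resp->hdr.status, bp->vf2pf_mbox_mapping);
 * out:
 *	bnx2x_vfpf_finalize(bp, &req->first_tlv);
 *
 * which is why the callers below funnel all error paths through an "out"
 * label.
 */
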
/* Finds a TLV by type in a TLV buffer; If found, returns pointer to the TLV */
static void *bnx2x_search_tlv_list(struct bnx2x *bp, void *tlvs_list,
				   enum channel_tlvs req_tlv)
{
	struct channel_tlv *tlv = (struct channel_tlv *)tlvs_list;

	do {
		if (tlv->type == req_tlv)
			return tlv;

		if (!tlv->length) {
			BNX2X_ERR("Found TLV with length 0\n");
			return NULL;
		}

		tlvs_list += tlv->length;
		tlv = (struct channel_tlv *)tlvs_list;
	} while (tlv->type != CHANNEL_TLV_LIST_END);

	DP(BNX2X_MSG_IOV, "TLV list does not contain %d TLV\n", req_tlv);

	return NULL;
}

/* list the types and lengths of the tlvs on the buffer */
static void bnx2x_dp_tlv_list(struct bnx2x *bp, void *tlvs_list)
{
	int i = 1;
	struct channel_tlv *tlv = (struct channel_tlv *)tlvs_list;

	while (tlv->type != CHANNEL_TLV_LIST_END) {
		/* output tlv */
		DP(BNX2X_MSG_IOV, "TLV number %d: type %d, length %d\n", i,
		   tlv->type, tlv->length);

		/* advance to next tlv */
		tlvs_list += tlv->length;

		/* cast general tlv list pointer to channel tlv header */
		tlv = (struct channel_tlv *)tlvs_list;

		i++;

		/* break condition for this loop */
		if (i > MAX_TLVS_IN_LIST) {
			WARN(true, "corrupt tlvs");
			return;
		}
	}

	/* output last tlv */
	DP(BNX2X_MSG_IOV, "TLV number %d: type %d, length %d\n", i,
	   tlv->type, tlv->length);
}

/* test whether we support a tlv type */
bool bnx2x_tlv_supported(u16 tlvtype)
{
	return CHANNEL_TLV_NONE < tlvtype && tlvtype < CHANNEL_TLV_MAX;
}

static inline int bnx2x_pfvf_status_codes(int rc)
{
	switch (rc) {
	case 0:
		return PFVF_STATUS_SUCCESS;
	case -ENOMEM:
		return PFVF_STATUS_NO_RESOURCE;
	default:
		return PFVF_STATUS_FAILURE;
	}
}

static int bnx2x_send_msg2pf(struct bnx2x *bp, u8 *done, dma_addr_t msg_mapping)
{
	struct cstorm_vf_zone_data __iomem *zone_data =
		REG_ADDR(bp, PXP_VF_ADDR_CSDM_GLOBAL_START);
	int tout = 100, interval = 100; /* wait for 10 seconds */

	if (*done) {
		BNX2X_ERR("done was non zero before message to pf was sent\n");
		WARN_ON(true);
		return -EINVAL;
	}

	/* if PF indicated channel is down avoid sending message. Return success
	 * so calling flow can continue
	 */
	bnx2x_sample_bulletin(bp);
	if (bp->old_bulletin.valid_bitmap & 1 << CHANNEL_DOWN) {
		DP(BNX2X_MSG_IOV, "detecting channel down. Aborting message\n");
		*done = PFVF_STATUS_SUCCESS;
		return -EINVAL;
	}

	/* Write message address */
	writel(U64_LO(msg_mapping),
	       &zone_data->non_trigger.vf_pf_channel.msg_addr_lo);
	writel(U64_HI(msg_mapping),
	       &zone_data->non_trigger.vf_pf_channel.msg_addr_hi);

	/* make sure the address is written before FW accesses it */
	wmb();

	/* Trigger the PF FW */
	writeb(1, &zone_data->trigger.vf_pf_channel.addr_valid);

	/* Wait for PF to complete */
	while ((tout >= 0) && (!*done)) {
		msleep(interval);
		tout -= 1;

		/* progress indicator - HV can take its own sweet time in
		 * answering VFs...
		 */
		DP_CONT(BNX2X_MSG_IOV, ".");
	}

	if (!*done) {
		BNX2X_ERR("PF response has timed out\n");
		return -EAGAIN;
	}

	DP(BNX2X_MSG_SP, "Got a response from PF\n");
	return 0;
}

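/* The channel handshake above: the VF writes the DMA address of its request
 * into the cstorm zone, raises addr_valid, then polls the "done" byte in
 * host memory, which the PF overwrites with a PFVF_STATUS_* value when the
 * response has been copied back.
 */
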
static int bnx2x_get_vf_id(struct bnx2x *bp, u32 *vf_id)
{
	u32 me_reg;
	int tout = 10, interval = 100; /* Wait for 1 sec */

	do {
		/* pxp traps vf read of doorbells and returns me reg value */
		me_reg = readl(bp->doorbells);
		if (GOOD_ME_REG(me_reg))
			break;

		msleep(interval);

		BNX2X_ERR("Invalid ME register value: 0x%08x. Is pf driver up?\n",
			  me_reg);
	} while (tout-- > 0);

	if (!GOOD_ME_REG(me_reg)) {
		BNX2X_ERR("Invalid ME register value: 0x%08x\n", me_reg);
		return -EINVAL;
	}

	DP(BNX2X_MSG_IOV, "valid ME register value: 0x%08x\n", me_reg);

	*vf_id = (me_reg & ME_REG_VF_NUM_MASK) >> ME_REG_VF_NUM_SHIFT;

	return 0;
}

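/* Trapping doorbell reads is how an otherwise anonymous VF learns its own
 * id: the PXP substitutes the ME register for the read data, and the VF
 * number is extracted from it above.
 */
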
int bnx2x_vfpf_acquire(struct bnx2x *bp, u8 tx_count, u8 rx_count)
{
	int rc = 0, attempts = 0;
	struct vfpf_acquire_tlv *req = &bp->vf2pf_mbox->req.acquire;
	struct pfvf_acquire_resp_tlv *resp = &bp->vf2pf_mbox->resp.acquire_resp;
	struct vfpf_port_phys_id_resp_tlv *phys_port_resp;
	u32 vf_id;
	bool resources_acquired = false;

	/* clear mailbox and prep first tlv */
	bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_ACQUIRE, sizeof(*req));

	if (bnx2x_get_vf_id(bp, &vf_id)) {
		rc = -EAGAIN;
		goto out;
	}

	req->vfdev_info.vf_id = vf_id;
	req->vfdev_info.vf_os = 0;

	req->resc_request.num_rxqs = rx_count;
	req->resc_request.num_txqs = tx_count;
	req->resc_request.num_sbs = bp->igu_sb_cnt;
	req->resc_request.num_mac_filters = VF_ACQUIRE_MAC_FILTERS;
	req->resc_request.num_mc_filters = VF_ACQUIRE_MC_FILTERS;

	/* pf 2 vf bulletin board address */
	req->bulletin_addr = bp->pf2vf_bulletin_mapping;

	/* Request physical port identifier */
	bnx2x_add_tlv(bp, req, req->first_tlv.tl.length,
		      CHANNEL_TLV_PHYS_PORT_ID, sizeof(struct channel_tlv));

	/* add list termination tlv */
	bnx2x_add_tlv(bp, req,
		      req->first_tlv.tl.length + sizeof(struct channel_tlv),
		      CHANNEL_TLV_LIST_END,
		      sizeof(struct channel_list_end_tlv));

	/* output tlvs list */
	bnx2x_dp_tlv_list(bp, req);

	while (!resources_acquired) {
		DP(BNX2X_MSG_SP, "attempting to acquire resources\n");

		/* send acquire request */
		rc = bnx2x_send_msg2pf(bp,
				       &resp->hdr.status,
				       bp->vf2pf_mbox_mapping);

		/* PF timeout */
		if (rc)
			goto out;

		/* copy acquire response from buffer to bp */
		memcpy(&bp->acquire_resp, resp, sizeof(bp->acquire_resp));

		attempts++;

		/* test whether the PF accepted our request. If not, humble
		 * the request and try again.
		 */
		if (bp->acquire_resp.hdr.status == PFVF_STATUS_SUCCESS) {
			DP(BNX2X_MSG_SP, "resources acquired\n");
			resources_acquired = true;
		} else if (bp->acquire_resp.hdr.status ==
			   PFVF_STATUS_NO_RESOURCE &&
			   attempts < VF_ACQUIRE_THRESH) {
			DP(BNX2X_MSG_SP,
			   "PF unwilling to fulfill resource request. Try PF recommended amount\n");

			/* humble our request */
			req->resc_request.num_txqs =
				min(req->resc_request.num_txqs,
				    bp->acquire_resp.resc.num_txqs);
			req->resc_request.num_rxqs =
				min(req->resc_request.num_rxqs,
				    bp->acquire_resp.resc.num_rxqs);
			req->resc_request.num_sbs =
				min(req->resc_request.num_sbs,
				    bp->acquire_resp.resc.num_sbs);
			req->resc_request.num_mac_filters =
				min(req->resc_request.num_mac_filters,
				    bp->acquire_resp.resc.num_mac_filters);
			req->resc_request.num_vlan_filters =
				min(req->resc_request.num_vlan_filters,
				    bp->acquire_resp.resc.num_vlan_filters);
			req->resc_request.num_mc_filters =
				min(req->resc_request.num_mc_filters,
				    bp->acquire_resp.resc.num_mc_filters);

			/* Clear response buffer */
			memset(&bp->vf2pf_mbox->resp, 0,
			       sizeof(union pfvf_tlvs));
		} else {
			/* PF reports error */
			BNX2X_ERR("Failed to get the requested amount of resources: %d. Breaking...\n",
				  bp->acquire_resp.hdr.status);
			rc = -EAGAIN;
			goto out;
		}
	}

	/* Retrieve physical port id (if possible) */
	phys_port_resp = (struct vfpf_port_phys_id_resp_tlv *)
			 bnx2x_search_tlv_list(bp, resp,
					       CHANNEL_TLV_PHYS_PORT_ID);
	if (phys_port_resp) {
		memcpy(bp->phys_port_id, phys_port_resp->id, ETH_ALEN);
		bp->flags |= HAS_PHYS_PORT_ID;
	}

	/* get HW info */
	bp->common.chip_id |= (bp->acquire_resp.pfdev_info.chip_num & 0xffff);
	bp->link_params.chip_id = bp->common.chip_id;
	bp->db_size = bp->acquire_resp.pfdev_info.db_size;
	bp->common.int_block = INT_BLOCK_IGU;
	bp->common.chip_port_mode = CHIP_2_PORT_MODE;
	bp->igu_dsb_id = -1;
	bp->mf_ov = 0;
	bp->mf_mode = 0;
	bp->common.flash_size = 0;
	bp->flags |=
		NO_WOL_FLAG | NO_ISCSI_OOO_FLAG | NO_ISCSI_FLAG | NO_FCOE_FLAG;
	bp->igu_sb_cnt = bp->acquire_resp.resc.num_sbs;
	bp->igu_base_sb = bp->acquire_resp.resc.hw_sbs[0].hw_sb_id;
	strlcpy(bp->fw_ver, bp->acquire_resp.pfdev_info.fw_ver,
		sizeof(bp->fw_ver));

	if (is_valid_ether_addr(bp->acquire_resp.resc.current_mac_addr))
		memcpy(bp->dev->dev_addr,
		       bp->acquire_resp.resc.current_mac_addr,
		       ETH_ALEN);

out:
	bnx2x_vfpf_finalize(bp, &req->first_tlv);
	return rc;
}

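/* Resource negotiation: if the PF answers NO_RESOURCE, the VF lowers each
 * request to the PF-recommended amount echoed back in acquire_resp and tries
 * again, up to VF_ACQUIRE_THRESH attempts, before giving up with -EAGAIN.
 */
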
int bnx2x_vfpf_release(struct bnx2x *bp)
{
	struct vfpf_release_tlv *req = &bp->vf2pf_mbox->req.release;
	struct pfvf_general_resp_tlv *resp = &bp->vf2pf_mbox->resp.general_resp;
	u32 rc, vf_id;

	/* clear mailbox and prep first tlv */
	bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_RELEASE, sizeof(*req));

	if (bnx2x_get_vf_id(bp, &vf_id)) {
		rc = -EAGAIN;
		goto out;
	}

	req->vf_id = vf_id;

	/* add list termination tlv */
	bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, CHANNEL_TLV_LIST_END,
		      sizeof(struct channel_list_end_tlv));

	/* output tlvs list */
	bnx2x_dp_tlv_list(bp, req);

	/* send release request */
	rc = bnx2x_send_msg2pf(bp, &resp->hdr.status, bp->vf2pf_mbox_mapping);

	if (rc)
		/* PF timeout */
		goto out;

	if (resp->hdr.status == PFVF_STATUS_SUCCESS) {
		/* PF released us */
		DP(BNX2X_MSG_SP, "vf released\n");
	} else {
		/* PF reports error */
		BNX2X_ERR("PF failed our release request - are we out of sync? Response status: %d\n",
			  resp->hdr.status);
		rc = -EAGAIN;
		goto out;
	}
out:
	bnx2x_vfpf_finalize(bp, &req->first_tlv);

	return rc;
}

/* Tell PF about SB addresses */
int bnx2x_vfpf_init(struct bnx2x *bp)
{
	struct vfpf_init_tlv *req = &bp->vf2pf_mbox->req.init;
	struct pfvf_general_resp_tlv *resp = &bp->vf2pf_mbox->resp.general_resp;
	int rc, i;

	/* clear mailbox and prep first tlv */
	bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_INIT, sizeof(*req));

	/* status blocks */
	for_each_eth_queue(bp, i)
		req->sb_addr[i] = (dma_addr_t)bnx2x_fp(bp, i,
						       status_blk_mapping);

	/* statistics - requests only supports single queue for now */
	req->stats_addr = bp->fw_stats_data_mapping +
			  offsetof(struct bnx2x_fw_stats_data, queue_stats);

	req->stats_stride = sizeof(struct per_queue_stats);

	/* add list termination tlv */
	bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, CHANNEL_TLV_LIST_END,
		      sizeof(struct channel_list_end_tlv));

	/* output tlvs list */
	bnx2x_dp_tlv_list(bp, req);

	rc = bnx2x_send_msg2pf(bp, &resp->hdr.status, bp->vf2pf_mbox_mapping);
	if (rc)
		goto out;

	if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
		BNX2X_ERR("INIT VF failed: %d. Breaking...\n",
			  resp->hdr.status);
		rc = -EAGAIN;
		goto out;
	}

	DP(BNX2X_MSG_SP, "INIT VF Succeeded\n");
out:
	bnx2x_vfpf_finalize(bp, &req->first_tlv);

	return rc;
}

/* CLOSE VF - opposite to INIT_VF */
void bnx2x_vfpf_close_vf(struct bnx2x *bp)
{
	struct vfpf_close_tlv *req = &bp->vf2pf_mbox->req.close;
	struct pfvf_general_resp_tlv *resp = &bp->vf2pf_mbox->resp.general_resp;
	int i, rc;
	u32 vf_id;

	/* If we haven't got a valid VF id, there is no sense to
	 * continue with sending messages
	 */
	if (bnx2x_get_vf_id(bp, &vf_id))
		goto free_irq;

	/* Close the queues */
	for_each_queue(bp, i)
		bnx2x_vfpf_teardown_queue(bp, i);

	/* remove mac */
	bnx2x_vfpf_config_mac(bp, bp->dev->dev_addr, bp->fp->index, false);

	/* clear mailbox and prep first tlv */
	bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_CLOSE, sizeof(*req));

	req->vf_id = vf_id;

	/* add list termination tlv */
	bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, CHANNEL_TLV_LIST_END,
		      sizeof(struct channel_list_end_tlv));

	/* output tlvs list */
	bnx2x_dp_tlv_list(bp, req);

	rc = bnx2x_send_msg2pf(bp, &resp->hdr.status, bp->vf2pf_mbox_mapping);

	if (rc)
		BNX2X_ERR("Sending CLOSE failed. rc was: %d\n", rc);

	else if (resp->hdr.status != PFVF_STATUS_SUCCESS)
		BNX2X_ERR("Sending CLOSE failed: pf response was %d\n",
			  resp->hdr.status);

	bnx2x_vfpf_finalize(bp, &req->first_tlv);

free_irq:
	/* Disable HW interrupts, NAPI */
	bnx2x_netif_stop(bp, 0);
	/* Delete all NAPI objects */
	bnx2x_del_all_napi(bp);

	/* Release IRQs */
	bnx2x_free_irq(bp);
}

static void bnx2x_leading_vfq_init(struct bnx2x *bp, struct bnx2x_virtf *vf,
				   struct bnx2x_vf_queue *q)
{
	u8 cl_id = vfq_cl_id(vf, q);
	u8 func_id = FW_VF_HANDLE(vf->abs_vfid);

	/* mac */
	bnx2x_init_mac_obj(bp, &q->mac_obj,
			   cl_id, q->cid, func_id,
			   bnx2x_vf_sp(bp, vf, mac_rdata),
			   bnx2x_vf_sp_map(bp, vf, mac_rdata),
			   BNX2X_FILTER_MAC_PENDING,
			   &vf->filter_state,
			   BNX2X_OBJ_TYPE_RX_TX,
			   &bp->macs_pool);
	/* vlan */
	bnx2x_init_vlan_obj(bp, &q->vlan_obj,
			    cl_id, q->cid, func_id,
			    bnx2x_vf_sp(bp, vf, vlan_rdata),
			    bnx2x_vf_sp_map(bp, vf, vlan_rdata),
			    BNX2X_FILTER_VLAN_PENDING,
			    &vf->filter_state,
			    BNX2X_OBJ_TYPE_RX_TX,
			    &bp->vlans_pool);

	/* mcast */
	bnx2x_init_mcast_obj(bp, &vf->mcast_obj, cl_id,
			     q->cid, func_id, func_id,
			     bnx2x_vf_sp(bp, vf, mcast_rdata),
			     bnx2x_vf_sp_map(bp, vf, mcast_rdata),
			     BNX2X_FILTER_MCAST_PENDING,
			     &vf->filter_state,
			     BNX2X_OBJ_TYPE_RX_TX);

	/* rss */
	bnx2x_init_rss_config_obj(bp, &vf->rss_conf_obj, cl_id, q->cid,
				  func_id, func_id,
				  bnx2x_vf_sp(bp, vf, rss_rdata),
				  bnx2x_vf_sp_map(bp, vf, rss_rdata),
				  BNX2X_FILTER_RSS_CONF_PENDING,
				  &vf->filter_state,
				  BNX2X_OBJ_TYPE_RX_TX);

	vf->leading_rss = cl_id;
	q->is_leading = true;
	q->sp_initialized = true;
}

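/* Only the leading queue carries the per-VF multicast and RSS configuration
 * objects (note they hang off vf, not q), which is why they are initialized
 * here rather than once per queue.
 */
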
/* ask the pf to open a queue for the vf */
int bnx2x_vfpf_setup_q(struct bnx2x *bp, struct bnx2x_fastpath *fp,
		       bool is_leading)
{
	struct vfpf_setup_q_tlv *req = &bp->vf2pf_mbox->req.setup_q;
	struct pfvf_general_resp_tlv *resp = &bp->vf2pf_mbox->resp.general_resp;
	u8 fp_idx = fp->index;
	u16 tpa_agg_size = 0, flags = 0;
	int rc;

	/* clear mailbox and prep first tlv */
	bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_SETUP_Q, sizeof(*req));

	/* select tpa mode to request */
	if (!fp->disable_tpa) {
		flags |= VFPF_QUEUE_FLG_TPA;
		flags |= VFPF_QUEUE_FLG_TPA_IPV6;
		if (fp->mode == TPA_MODE_GRO)
			flags |= VFPF_QUEUE_FLG_TPA_GRO;
		tpa_agg_size = TPA_AGG_SIZE;
	}

	if (is_leading)
		flags |= VFPF_QUEUE_FLG_LEADING_RSS;

	/* calculate queue flags */
	flags |= VFPF_QUEUE_FLG_STATS;
	flags |= VFPF_QUEUE_FLG_CACHE_ALIGN;
	flags |= VFPF_QUEUE_FLG_VLAN;
	DP(NETIF_MSG_IFUP, "vlan removal enabled\n");

	/* Common */
	req->vf_qid = fp_idx;
	req->param_valid = VFPF_RXQ_VALID | VFPF_TXQ_VALID;

	/* Rx */
	req->rxq.rcq_addr = fp->rx_comp_mapping;
	req->rxq.rcq_np_addr = fp->rx_comp_mapping + BCM_PAGE_SIZE;
	req->rxq.rxq_addr = fp->rx_desc_mapping;
	req->rxq.sge_addr = fp->rx_sge_mapping;
	req->rxq.vf_sb = fp_idx;
	req->rxq.sb_index = HC_INDEX_ETH_RX_CQ_CONS;
	req->rxq.hc_rate = bp->rx_ticks ? 1000000/bp->rx_ticks : 0;
	req->rxq.mtu = bp->dev->mtu;
	req->rxq.buf_sz = fp->rx_buf_size;
	req->rxq.sge_buf_sz = BCM_PAGE_SIZE * PAGES_PER_SGE;
	req->rxq.tpa_agg_sz = tpa_agg_size;
	req->rxq.max_sge_pkt = SGE_PAGE_ALIGN(bp->dev->mtu) >> SGE_PAGE_SHIFT;
	req->rxq.max_sge_pkt = ((req->rxq.max_sge_pkt + PAGES_PER_SGE - 1) &
			  (~(PAGES_PER_SGE-1))) >> PAGES_PER_SGE_SHIFT;
	req->rxq.flags = flags;
	req->rxq.drop_flags = 0;
	req->rxq.cache_line_log = BNX2X_RX_ALIGN_SHIFT;
	req->rxq.stat_id = -1; /* No stats at the moment */

	/* Tx */
	req->txq.txq_addr = fp->txdata_ptr[FIRST_TX_COS_INDEX]->tx_desc_mapping;
	req->txq.vf_sb = fp_idx;
	req->txq.sb_index = HC_INDEX_ETH_TX_CQ_CONS_COS0;
	req->txq.hc_rate = bp->tx_ticks ? 1000000/bp->tx_ticks : 0;
	req->txq.flags = flags;
	req->txq.traffic_type = LLFC_TRAFFIC_TYPE_NW;

	/* add list termination tlv */
	bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, CHANNEL_TLV_LIST_END,
		      sizeof(struct channel_list_end_tlv));

	/* output tlvs list */
	bnx2x_dp_tlv_list(bp, req);

	rc = bnx2x_send_msg2pf(bp, &resp->hdr.status, bp->vf2pf_mbox_mapping);
	if (rc)
		BNX2X_ERR("Sending SETUP_Q message for queue[%d] failed!\n",
			  fp_idx);

	if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
		BNX2X_ERR("Status of SETUP_Q for queue[%d] is %d\n",
			  fp_idx, resp->hdr.status);
		rc = -EINVAL;
	}

	bnx2x_vfpf_finalize(bp, &req->first_tlv);

	return rc;
}

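/* Worked example for the max_sge_pkt arithmetic above, assuming 4KiB SGE
 * pages and PAGES_PER_SGE == 2 (actual values are chip/configuration
 * dependent): an mtu of 9000 page-aligns to 3 pages, which rounds up to
 * 4 pages and yields max_sge_pkt = 4 >> 1 = 2 SGEs per packet.
 */
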
static int bnx2x_vfpf_teardown_queue(struct bnx2x *bp, int qidx)
{
	struct vfpf_q_op_tlv *req = &bp->vf2pf_mbox->req.q_op;
	struct pfvf_general_resp_tlv *resp = &bp->vf2pf_mbox->resp.general_resp;
	int rc;

	/* clear mailbox and prep first tlv */
	bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_TEARDOWN_Q,
			sizeof(*req));

	req->vf_qid = qidx;

	/* add list termination tlv */
	bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, CHANNEL_TLV_LIST_END,
		      sizeof(struct channel_list_end_tlv));

	/* output tlvs list */
	bnx2x_dp_tlv_list(bp, req);

	rc = bnx2x_send_msg2pf(bp, &resp->hdr.status, bp->vf2pf_mbox_mapping);

	if (rc) {
		BNX2X_ERR("Sending TEARDOWN for queue %d failed: %d\n", qidx,
			  rc);
		goto out;
	}

	/* PF failed the transaction */
	if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
		BNX2X_ERR("TEARDOWN for queue %d failed: %d\n", qidx,
			  resp->hdr.status);
		rc = -EINVAL;
	}

out:
	bnx2x_vfpf_finalize(bp, &req->first_tlv);

	return rc;
}

/* request pf to add a mac for the vf */
int bnx2x_vfpf_config_mac(struct bnx2x *bp, u8 *addr, u8 vf_qid, bool set)
{
	struct vfpf_set_q_filters_tlv *req = &bp->vf2pf_mbox->req.set_q_filters;
	struct pfvf_general_resp_tlv *resp = &bp->vf2pf_mbox->resp.general_resp;
	struct pf_vf_bulletin_content bulletin = bp->pf2vf_bulletin->content;
	int rc = 0;

	/* clear mailbox and prep first tlv */
	bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_SET_Q_FILTERS,
			sizeof(*req));

	req->flags = VFPF_SET_Q_FILTERS_MAC_VLAN_CHANGED;
	req->vf_qid = vf_qid;
	req->n_mac_vlan_filters = 1;

	req->filters[0].flags = VFPF_Q_FILTER_DEST_MAC_VALID;
	if (set)
		req->filters[0].flags |= VFPF_Q_FILTER_SET_MAC;

	/* sample bulletin board for new mac */
	bnx2x_sample_bulletin(bp);

	/* copy mac from device to request */
	memcpy(req->filters[0].mac, addr, ETH_ALEN);

	/* add list termination tlv */
	bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, CHANNEL_TLV_LIST_END,
		      sizeof(struct channel_list_end_tlv));

	/* output tlvs list */
	bnx2x_dp_tlv_list(bp, req);

	/* send message to pf */
	rc = bnx2x_send_msg2pf(bp, &resp->hdr.status, bp->vf2pf_mbox_mapping);
	if (rc) {
		BNX2X_ERR("failed to send message to pf. rc was %d\n", rc);
		goto out;
	}

	/* failure may mean PF was configured with a new mac for us */
	while (resp->hdr.status == PFVF_STATUS_FAILURE) {
		DP(BNX2X_MSG_IOV,
		   "vfpf SET MAC failed. Check bulletin board for new posts\n");

		/* copy mac from bulletin to device */
		memcpy(bp->dev->dev_addr, bulletin.mac, ETH_ALEN);

		/* check if bulletin board was updated */
		if (bnx2x_sample_bulletin(bp) == PFVF_BULLETIN_UPDATED) {
			/* copy mac from device to request */
			memcpy(req->filters[0].mac, bp->dev->dev_addr,
			       ETH_ALEN);

			/* send message to pf */
			rc = bnx2x_send_msg2pf(bp, &resp->hdr.status,
					       bp->vf2pf_mbox_mapping);
		} else {
			/* no new info in bulletin */
			break;
		}
	}

	if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
		BNX2X_ERR("vfpf SET MAC failed: %d\n", resp->hdr.status);
		rc = -EINVAL;
	}
out:
	bnx2x_vfpf_finalize(bp, &req->first_tlv);

	return rc;
}

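/* A FAILURE status on SET_MAC usually means the hypervisor forced a new mac
 * through the bulletin board; the loop above adopts the bulletin mac and
 * retries until either the PF accepts it or the bulletin stops changing.
 */
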
/* request pf to config rss table for vf queues */
int bnx2x_vfpf_config_rss(struct bnx2x *bp,
			  struct bnx2x_config_rss_params *params)
{
	struct pfvf_general_resp_tlv *resp = &bp->vf2pf_mbox->resp.general_resp;
	struct vfpf_rss_tlv *req = &bp->vf2pf_mbox->req.update_rss;
	int rc = 0;

	/* clear mailbox and prep first tlv */
	bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_UPDATE_RSS,
			sizeof(*req));

	/* add list termination tlv */
	bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, CHANNEL_TLV_LIST_END,
		      sizeof(struct channel_list_end_tlv));

	memcpy(req->ind_table, params->ind_table, T_ETH_INDIRECTION_TABLE_SIZE);
	memcpy(req->rss_key, params->rss_key, sizeof(params->rss_key));
	req->ind_table_size = T_ETH_INDIRECTION_TABLE_SIZE;
	req->rss_key_size = T_ETH_RSS_KEY;
	req->rss_result_mask = params->rss_result_mask;

	/* flags handled individually for backward/forward compatibility */
	if (params->rss_flags & (1 << BNX2X_RSS_MODE_DISABLED))
		req->rss_flags |= VFPF_RSS_MODE_DISABLED;
	if (params->rss_flags & (1 << BNX2X_RSS_MODE_REGULAR))
		req->rss_flags |= VFPF_RSS_MODE_REGULAR;
	if (params->rss_flags & (1 << BNX2X_RSS_SET_SRCH))
		req->rss_flags |= VFPF_RSS_SET_SRCH;
	if (params->rss_flags & (1 << BNX2X_RSS_IPV4))
		req->rss_flags |= VFPF_RSS_IPV4;
	if (params->rss_flags & (1 << BNX2X_RSS_IPV4_TCP))
		req->rss_flags |= VFPF_RSS_IPV4_TCP;
	if (params->rss_flags & (1 << BNX2X_RSS_IPV4_UDP))
		req->rss_flags |= VFPF_RSS_IPV4_UDP;
	if (params->rss_flags & (1 << BNX2X_RSS_IPV6))
		req->rss_flags |= VFPF_RSS_IPV6;
	if (params->rss_flags & (1 << BNX2X_RSS_IPV6_TCP))
		req->rss_flags |= VFPF_RSS_IPV6_TCP;
	if (params->rss_flags & (1 << BNX2X_RSS_IPV6_UDP))
		req->rss_flags |= VFPF_RSS_IPV6_UDP;

	DP(BNX2X_MSG_IOV, "rss flags %x\n", req->rss_flags);

	/* output tlvs list */
	bnx2x_dp_tlv_list(bp, req);

	/* send message to pf */
	rc = bnx2x_send_msg2pf(bp, &resp->hdr.status, bp->vf2pf_mbox_mapping);
	if (rc) {
		BNX2X_ERR("failed to send message to pf. rc was %d\n", rc);
		goto out;
	}

	if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
		/* Since older drivers don't support this feature (and VF has
		 * no way of knowing other than failing this), don't propagate
		 * an error in this case.
		 */
		DP(BNX2X_MSG_IOV,
		   "Failed to send rss message to PF over VF-PF channel [%d]\n",
		   resp->hdr.status);
	}
out:
	bnx2x_vfpf_finalize(bp, &req->first_tlv);

	return rc;
}

int bnx2x_vfpf_set_mcast(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);
	struct vfpf_set_q_filters_tlv *req = &bp->vf2pf_mbox->req.set_q_filters;
	struct pfvf_general_resp_tlv *resp = &bp->vf2pf_mbox->resp.general_resp;
	int rc = 0, i = 0;
	struct netdev_hw_addr *ha;

	if (bp->state != BNX2X_STATE_OPEN) {
		DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
		return -EINVAL;
	}

	/* clear mailbox and prep first tlv */
	bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_SET_Q_FILTERS,
			sizeof(*req));

	/* Get Rx mode requested */
	DP(NETIF_MSG_IFUP, "dev->flags = %x\n", dev->flags);

	netdev_for_each_mc_addr(ha, dev) {
		/* We support at most PFVF_MAX_MULTICAST_PER_VF mcast
		 * addresses; check before copying to avoid overrunning
		 * req->multicast[].
		 */
		if (i >= PFVF_MAX_MULTICAST_PER_VF) {
			DP(NETIF_MSG_IFUP,
			   "VF supports not more than %d multicast MAC addresses\n",
			   PFVF_MAX_MULTICAST_PER_VF);
			rc = -EINVAL;
			goto out;
		}

		DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n",
		   bnx2x_mc_addr(ha));
		memcpy(req->multicast[i], bnx2x_mc_addr(ha), ETH_ALEN);
		i++;
	}

	req->n_multicast = i;
	req->flags |= VFPF_SET_Q_FILTERS_MULTICAST_CHANGED;
	req->vf_qid = 0;

	/* add list termination tlv */
	bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, CHANNEL_TLV_LIST_END,
		      sizeof(struct channel_list_end_tlv));

	/* output tlvs list */
	bnx2x_dp_tlv_list(bp, req);
	rc = bnx2x_send_msg2pf(bp, &resp->hdr.status, bp->vf2pf_mbox_mapping);
	if (rc) {
		BNX2X_ERR("Sending a message failed: %d\n", rc);
		goto out;
	}

	if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
		BNX2X_ERR("Set Rx mode/multicast failed: %d\n",
			  resp->hdr.status);
		rc = -EINVAL;
	}
out:
	bnx2x_vfpf_finalize(bp, &req->first_tlv);

	return rc;
}

int bnx2x_vfpf_storm_rx_mode(struct bnx2x *bp)
{
	int mode = bp->rx_mode;
	struct vfpf_set_q_filters_tlv *req = &bp->vf2pf_mbox->req.set_q_filters;
	struct pfvf_general_resp_tlv *resp = &bp->vf2pf_mbox->resp.general_resp;
	int rc;

	/* clear mailbox and prep first tlv */
	bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_SET_Q_FILTERS,
			sizeof(*req));

	DP(NETIF_MSG_IFUP, "Rx mode is %d\n", mode);

	/* Ignore everything except MODE_NONE */
	if (mode == BNX2X_RX_MODE_NONE) {
		req->rx_mask = VFPF_RX_MASK_ACCEPT_NONE;
	} else {
		/* Current PF driver will not look at the specific flags,
		 * but they are required when working with older drivers on hv.
		 */
		req->rx_mask = VFPF_RX_MASK_ACCEPT_MATCHED_MULTICAST;
		req->rx_mask |= VFPF_RX_MASK_ACCEPT_MATCHED_UNICAST;
		req->rx_mask |= VFPF_RX_MASK_ACCEPT_BROADCAST;
	}

	req->flags |= VFPF_SET_Q_FILTERS_RX_MASK_CHANGED;
	req->vf_qid = 0;

	/* add list termination tlv */
	bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, CHANNEL_TLV_LIST_END,
		      sizeof(struct channel_list_end_tlv));

	/* output tlvs list */
	bnx2x_dp_tlv_list(bp, req);

	rc = bnx2x_send_msg2pf(bp, &resp->hdr.status, bp->vf2pf_mbox_mapping);
	if (rc)
		BNX2X_ERR("Sending a message failed: %d\n", rc);

	if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
		BNX2X_ERR("Set Rx mode failed: %d\n", resp->hdr.status);
		rc = -EINVAL;
	}

	bnx2x_vfpf_finalize(bp, &req->first_tlv);

	return rc;
}

/* General service functions */
static void storm_memset_vf_mbx_ack(struct bnx2x *bp, u16 abs_fid)
{
	u32 addr = BAR_CSTRORM_INTMEM +
		   CSTORM_VF_PF_CHANNEL_STATE_OFFSET(abs_fid);

	REG_WR8(bp, addr, VF_PF_CHANNEL_STATE_READY);
}

static void storm_memset_vf_mbx_valid(struct bnx2x *bp, u16 abs_fid)
{
	u32 addr = BAR_CSTRORM_INTMEM +
		   CSTORM_VF_PF_CHANNEL_VALID_OFFSET(abs_fid);

	REG_WR8(bp, addr, 1);
}

static inline void bnx2x_set_vf_mbxs_valid(struct bnx2x *bp)
{
	int i;

	for_each_vf(bp, i)
		storm_memset_vf_mbx_valid(bp, bnx2x_vf(bp, i, abs_vfid));
}

/* enable vf_pf mailbox (aka vf-pf-channel) */
void bnx2x_vf_enable_mbx(struct bnx2x *bp, u8 abs_vfid)
{
	bnx2x_vf_flr_clnup_epilog(bp, abs_vfid);

	/* enable the mailbox in the FW */
	storm_memset_vf_mbx_ack(bp, abs_vfid);
	storm_memset_vf_mbx_valid(bp, abs_vfid);

	/* enable the VF access to the mailbox */
	bnx2x_vf_enable_access(bp, abs_vfid);
}

/* this works only on !E1h */
static int bnx2x_copy32_vf_dmae(struct bnx2x *bp, u8 from_vf,
				dma_addr_t pf_addr, u8 vfid, u32 vf_addr_hi,
				u32 vf_addr_lo, u32 len32)
{
	struct dmae_command dmae;

	if (CHIP_IS_E1x(bp)) {
		BNX2X_ERR("Chip revision does not support VFs\n");
		return DMAE_NOT_RDY;
	}

	if (!bp->dmae_ready) {
		BNX2X_ERR("DMAE is not ready, can not copy\n");
		return DMAE_NOT_RDY;
	}

	/* set opcode and fixed command fields */
	bnx2x_prep_dmae_with_comp(bp, &dmae, DMAE_SRC_PCI, DMAE_DST_PCI);

	if (from_vf) {
		dmae.opcode_iov = (vfid << DMAE_COMMAND_SRC_VFID_SHIFT) |
			(DMAE_SRC_VF << DMAE_COMMAND_SRC_VFPF_SHIFT) |
			(DMAE_DST_PF << DMAE_COMMAND_DST_VFPF_SHIFT);

		dmae.opcode |= (DMAE_C_DST << DMAE_COMMAND_C_FUNC_SHIFT);

		dmae.src_addr_lo = vf_addr_lo;
		dmae.src_addr_hi = vf_addr_hi;
		dmae.dst_addr_lo = U64_LO(pf_addr);
		dmae.dst_addr_hi = U64_HI(pf_addr);
	} else {
		dmae.opcode_iov = (vfid << DMAE_COMMAND_DST_VFID_SHIFT) |
			(DMAE_DST_VF << DMAE_COMMAND_DST_VFPF_SHIFT) |
			(DMAE_SRC_PF << DMAE_COMMAND_SRC_VFPF_SHIFT);

		dmae.opcode |= (DMAE_C_SRC << DMAE_COMMAND_C_FUNC_SHIFT);

		dmae.src_addr_lo = U64_LO(pf_addr);
		dmae.src_addr_hi = U64_HI(pf_addr);
		dmae.dst_addr_lo = vf_addr_lo;
		dmae.dst_addr_hi = vf_addr_hi;
	}
	dmae.len = len32;

	/* issue the command and wait for completion */
	return bnx2x_issue_dmae_with_comp(bp, &dmae, bnx2x_sp(bp, wb_comp));
}

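/* Both directions of the channel use this helper: from_vf copies a VF
 * request into PF memory (as in bnx2x_vf_mbx()), !from_vf copies a PF
 * response or bulletin into VF memory. The VFID encoded into the DMAE
 * opcode makes the HW issue the PCI access on behalf of that VF.
 */
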
static void bnx2x_vf_mbx_resp_single_tlv(struct bnx2x *bp,
					 struct bnx2x_virtf *vf)
{
	struct bnx2x_vf_mbx *mbx = BP_VF_MBX(bp, vf->index);
	u16 length, type;

	/* prepare response */
	type = mbx->first_tlv.tl.type;
	length = type == CHANNEL_TLV_ACQUIRE ?
		sizeof(struct pfvf_acquire_resp_tlv) :
		sizeof(struct pfvf_general_resp_tlv);
	bnx2x_add_tlv(bp, &mbx->msg->resp, 0, type, length);
	bnx2x_add_tlv(bp, &mbx->msg->resp, length, CHANNEL_TLV_LIST_END,
		      sizeof(struct channel_list_end_tlv));
}

static void bnx2x_vf_mbx_resp_send_msg(struct bnx2x *bp,
				       struct bnx2x_virtf *vf,
				       int vf_rc)
{
	struct bnx2x_vf_mbx *mbx = BP_VF_MBX(bp, vf->index);
	struct pfvf_general_resp_tlv *resp = &mbx->msg->resp.general_resp;
	dma_addr_t pf_addr;
	u64 vf_addr;
	int rc;

	bnx2x_dp_tlv_list(bp, resp);
	DP(BNX2X_MSG_IOV, "mailbox vf address hi 0x%x, lo 0x%x, offset 0x%x\n",
	   mbx->vf_addr_hi, mbx->vf_addr_lo, mbx->first_tlv.resp_msg_offset);

	resp->hdr.status = bnx2x_pfvf_status_codes(vf_rc);

	/* send response */
	vf_addr = HILO_U64(mbx->vf_addr_hi, mbx->vf_addr_lo) +
		  mbx->first_tlv.resp_msg_offset;
	pf_addr = mbx->msg_mapping +
		  offsetof(struct bnx2x_vf_mbx_msg, resp);

	/* Copy the response buffer. The first u64 is written afterwards, as
	 * the vf is sensitive to the header being written
	 */
	vf_addr += sizeof(u64);
	pf_addr += sizeof(u64);
	rc = bnx2x_copy32_vf_dmae(bp, false, pf_addr, vf->abs_vfid,
				  U64_HI(vf_addr),
				  U64_LO(vf_addr),
				  (sizeof(union pfvf_tlvs) - sizeof(u64))/4);
	if (rc) {
		BNX2X_ERR("Failed to copy response body to VF %d\n",
			  vf->abs_vfid);
		goto mbx_error;
	}
	vf_addr -= sizeof(u64);
	pf_addr -= sizeof(u64);

	/* ack the FW */
	storm_memset_vf_mbx_ack(bp, vf->abs_vfid);
	mmiowb();

	/* copy the response header including status-done field,
	 * must be last dmae, must be after FW is acked
	 */
	rc = bnx2x_copy32_vf_dmae(bp, false, pf_addr, vf->abs_vfid,
				  U64_HI(vf_addr),
				  U64_LO(vf_addr),
				  sizeof(u64)/4);

	/* unlock channel mutex */
	bnx2x_unlock_vf_pf_channel(bp, vf, mbx->first_tlv.tl.type);

	if (rc) {
		BNX2X_ERR("Failed to copy response status to VF %d\n",
			  vf->abs_vfid);
		goto mbx_error;
	}
	return;

mbx_error:
	bnx2x_vf_release(bp, vf);
}

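/* Ordering above is deliberate: the response body is copied first, the FW is
 * then acked to release the mailbox, and only afterwards is the first u64 of
 * the header (which carries the status byte the VF polls on) written, so the
 * VF can never observe "done" before the rest of the response is visible.
 */
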
static void bnx2x_vf_mbx_resp(struct bnx2x *bp,
			      struct bnx2x_virtf *vf,
			      int rc)
{
	bnx2x_vf_mbx_resp_single_tlv(bp, vf);
	bnx2x_vf_mbx_resp_send_msg(bp, vf, rc);
}

static void bnx2x_vf_mbx_resp_phys_port(struct bnx2x *bp,
					struct bnx2x_virtf *vf,
					void *buffer,
					u16 *offset)
{
	struct vfpf_port_phys_id_resp_tlv *port_id;

	if (!(bp->flags & HAS_PHYS_PORT_ID))
		return;

	bnx2x_add_tlv(bp, buffer, *offset, CHANNEL_TLV_PHYS_PORT_ID,
		      sizeof(struct vfpf_port_phys_id_resp_tlv));

	port_id = (struct vfpf_port_phys_id_resp_tlv *)
		  (((u8 *)buffer) + *offset);
	memcpy(port_id->id, bp->phys_port_id, ETH_ALEN);

	/* Offset should continue representing the offset to the tail
	 * of TLV data (outside this function scope)
	 */
	*offset += sizeof(struct vfpf_port_phys_id_resp_tlv);
}

static void bnx2x_vf_mbx_acquire_resp(struct bnx2x *bp, struct bnx2x_virtf *vf,
				      struct bnx2x_vf_mbx *mbx, int vfop_status)
{
	int i;
	struct pfvf_acquire_resp_tlv *resp = &mbx->msg->resp.acquire_resp;
	struct pf_vf_resc *resc = &resp->resc;
	u8 status = bnx2x_pfvf_status_codes(vfop_status);
	u16 length;

	memset(resp, 0, sizeof(*resp));

	/* fill in pfdev info */
	resp->pfdev_info.chip_num = bp->common.chip_id;
	resp->pfdev_info.db_size = bp->db_size;
	resp->pfdev_info.indices_per_sb = HC_SB_MAX_INDICES_E2;
	resp->pfdev_info.pf_cap = (PFVF_CAP_RSS |
				   PFVF_CAP_TPA |
				   PFVF_CAP_TPA_UPDATE);
	bnx2x_fill_fw_str(bp, resp->pfdev_info.fw_ver,
			  sizeof(resp->pfdev_info.fw_ver));

	if (status == PFVF_STATUS_NO_RESOURCE ||
	    status == PFVF_STATUS_SUCCESS) {
		/* set resources numbers, if status equals NO_RESOURCE these
		 * are max possible numbers
		 */
		resc->num_rxqs = vf_rxq_count(vf) ? :
			bnx2x_vf_max_queue_cnt(bp, vf);
		resc->num_txqs = vf_txq_count(vf) ? :
			bnx2x_vf_max_queue_cnt(bp, vf);
		resc->num_sbs = vf_sb_count(vf);
		resc->num_mac_filters = vf_mac_rules_cnt(vf);
		resc->num_vlan_filters = vf_vlan_rules_cnt(vf);
		resc->num_mc_filters = 0;

		if (status == PFVF_STATUS_SUCCESS) {
			/* fill in the allocated resources */
			struct pf_vf_bulletin_content *bulletin =
				BP_VF_BULLETIN(bp, vf->index);

			for_each_vfq(vf, i)
				resc->hw_qid[i] =
					vfq_qzone_id(vf, vfq_get(vf, i));

			for_each_vf_sb(vf, i) {
				resc->hw_sbs[i].hw_sb_id = vf_igu_sb(vf, i);
				resc->hw_sbs[i].sb_qid = vf_hc_qzone(vf, i);
			}

			/* if a mac has been set for this vf, supply it */
			if (bulletin->valid_bitmap & 1 << MAC_ADDR_VALID) {
				memcpy(resc->current_mac_addr, bulletin->mac,
				       ETH_ALEN);
			}
		}
	}

	DP(BNX2X_MSG_IOV, "VF[%d] ACQUIRE_RESPONSE: pfdev_info- chip_num=0x%x, db_size=%d, idx_per_sb=%d, pf_cap=0x%x\n"
	   "resources- n_rxq-%d, n_txq-%d, n_sbs-%d, n_macs-%d, n_vlans-%d, n_mcs-%d, fw_ver: '%s'\n",
	   vf->abs_vfid,
	   resp->pfdev_info.chip_num,
	   resp->pfdev_info.db_size,
	   resp->pfdev_info.indices_per_sb,
	   resp->pfdev_info.pf_cap,
	   resc->num_rxqs,
	   resc->num_txqs,
	   resc->num_sbs,
	   resc->num_mac_filters,
	   resc->num_vlan_filters,
	   resc->num_mc_filters,
	   resp->pfdev_info.fw_ver);

	DP_CONT(BNX2X_MSG_IOV, "hw_qids- [ ");
	for (i = 0; i < vf_rxq_count(vf); i++)
		DP_CONT(BNX2X_MSG_IOV, "%d ", resc->hw_qid[i]);
	DP_CONT(BNX2X_MSG_IOV, "], sb_info- [ ");
	for (i = 0; i < vf_sb_count(vf); i++)
		DP_CONT(BNX2X_MSG_IOV, "%d:%d ",
			resc->hw_sbs[i].hw_sb_id,
			resc->hw_sbs[i].sb_qid);
	DP_CONT(BNX2X_MSG_IOV, "]\n");

	/* prepare response */
	length = sizeof(struct pfvf_acquire_resp_tlv);
	bnx2x_add_tlv(bp, &mbx->msg->resp, 0, CHANNEL_TLV_ACQUIRE, length);

	/* Handle possible VF requests for physical port identifiers.
	 * 'length' should continue to indicate the offset of the first empty
	 * place in the buffer (i.e., where next TLV should be inserted)
	 */
	if (bnx2x_search_tlv_list(bp, &mbx->msg->req,
				  CHANNEL_TLV_PHYS_PORT_ID))
		bnx2x_vf_mbx_resp_phys_port(bp, vf, &mbx->msg->resp, &length);

	bnx2x_add_tlv(bp, &mbx->msg->resp, length, CHANNEL_TLV_LIST_END,
		      sizeof(struct channel_list_end_tlv));

	/* send the response */
	bnx2x_vf_mbx_resp_send_msg(bp, vf, vfop_status);
}

static void bnx2x_vf_mbx_acquire(struct bnx2x *bp, struct bnx2x_virtf *vf,
				 struct bnx2x_vf_mbx *mbx)
{
	int rc;
	struct vfpf_acquire_tlv *acquire = &mbx->msg->req.acquire;

	/* log vfdev info */
	DP(BNX2X_MSG_IOV,
	   "VF[%d] ACQUIRE: vfdev_info- vf_id %d, vf_os %d resources- n_rxq-%d, n_txq-%d, n_sbs-%d, n_macs-%d, n_vlans-%d, n_mcs-%d\n",
	   vf->abs_vfid, acquire->vfdev_info.vf_id, acquire->vfdev_info.vf_os,
	   acquire->resc_request.num_rxqs, acquire->resc_request.num_txqs,
	   acquire->resc_request.num_sbs, acquire->resc_request.num_mac_filters,
	   acquire->resc_request.num_vlan_filters,
	   acquire->resc_request.num_mc_filters);

	/* acquire the resources */
	rc = bnx2x_vf_acquire(bp, vf, &acquire->resc_request);

	/* store address of vf's bulletin board */
	vf->bulletin_map = acquire->bulletin_addr;

	/* response */
	bnx2x_vf_mbx_acquire_resp(bp, vf, mbx, rc);
}

static void bnx2x_vf_mbx_init_vf(struct bnx2x *bp, struct bnx2x_virtf *vf,
				 struct bnx2x_vf_mbx *mbx)
{
	struct vfpf_init_tlv *init = &mbx->msg->req.init;
	int rc;

	/* record ghost addresses from vf message */
	vf->spq_map = init->spq_addr;
	vf->fw_stat_map = init->stats_addr;
	vf->stats_stride = init->stats_stride;
	rc = bnx2x_vf_init(bp, vf, (dma_addr_t *)init->sb_addr);

	/* set VF multiqueue statistics collection mode */
	if (init->flags & VFPF_INIT_FLG_STATS_COALESCE)
		vf->cfg_flags |= VF_CFG_STATS_COALESCE;

	/* response */
	bnx2x_vf_mbx_resp(bp, vf, rc);
}

/* convert MBX queue-flags to standard SP queue-flags */
static void bnx2x_vf_mbx_set_q_flags(struct bnx2x *bp, u32 mbx_q_flags,
				     unsigned long *sp_q_flags)
{
	if (mbx_q_flags & VFPF_QUEUE_FLG_TPA)
		__set_bit(BNX2X_Q_FLG_TPA, sp_q_flags);
	if (mbx_q_flags & VFPF_QUEUE_FLG_TPA_IPV6)
		__set_bit(BNX2X_Q_FLG_TPA_IPV6, sp_q_flags);
	if (mbx_q_flags & VFPF_QUEUE_FLG_TPA_GRO)
		__set_bit(BNX2X_Q_FLG_TPA_GRO, sp_q_flags);
	if (mbx_q_flags & VFPF_QUEUE_FLG_STATS)
		__set_bit(BNX2X_Q_FLG_STATS, sp_q_flags);
	if (mbx_q_flags & VFPF_QUEUE_FLG_VLAN)
		__set_bit(BNX2X_Q_FLG_VLAN, sp_q_flags);
	if (mbx_q_flags & VFPF_QUEUE_FLG_COS)
		__set_bit(BNX2X_Q_FLG_COS, sp_q_flags);
	if (mbx_q_flags & VFPF_QUEUE_FLG_HC)
		__set_bit(BNX2X_Q_FLG_HC, sp_q_flags);
	if (mbx_q_flags & VFPF_QUEUE_FLG_DHC)
		__set_bit(BNX2X_Q_FLG_DHC, sp_q_flags);
	if (mbx_q_flags & VFPF_QUEUE_FLG_LEADING_RSS)
		__set_bit(BNX2X_Q_FLG_LEADING_RSS, sp_q_flags);

	/* outer vlan removal is set according to PF's multi function mode */
	if (IS_MF_SD(bp))
		__set_bit(BNX2X_Q_FLG_OV, sp_q_flags);
}

static void bnx2x_vf_mbx_setup_q(struct bnx2x *bp, struct bnx2x_virtf *vf,
				 struct bnx2x_vf_mbx *mbx)
{
	struct vfpf_setup_q_tlv *setup_q = &mbx->msg->req.setup_q;
	struct bnx2x_vf_queue_construct_params qctor;
	int rc = 0;

	/* verify vf_qid */
	if (setup_q->vf_qid >= vf_rxq_count(vf)) {
		BNX2X_ERR("vf_qid %d invalid, max queue count is %d\n",
			  setup_q->vf_qid, vf_rxq_count(vf));
		rc = -EINVAL;
		goto response;
	}

	/* tx queues must be setup alongside rx queues thus if the rx queue
	 * is not marked as valid there's nothing to do.
	 */
	if (setup_q->param_valid & (VFPF_RXQ_VALID|VFPF_TXQ_VALID)) {
		struct bnx2x_vf_queue *q = vfq_get(vf, setup_q->vf_qid);
		unsigned long q_type = 0;

		struct bnx2x_queue_init_params *init_p;
		struct bnx2x_queue_setup_params *setup_p;

		if (bnx2x_vfq_is_leading(q))
			bnx2x_leading_vfq_init(bp, vf, q);

		/* re-init the VF operation context */
		memset(&qctor, 0,
		       sizeof(struct bnx2x_vf_queue_construct_params));
		setup_p = &qctor.prep_qsetup;
		init_p = &qctor.qstate.params.init;

		/* activate immediately */
		__set_bit(BNX2X_Q_FLG_ACTIVE, &setup_p->flags);

		if (setup_q->param_valid & VFPF_TXQ_VALID) {
			struct bnx2x_txq_setup_params *txq_params =
				&setup_p->txq_params;

			__set_bit(BNX2X_Q_TYPE_HAS_TX, &q_type);

			/* save sb resource index */
			q->sb_idx = setup_q->txq.vf_sb;

			/* tx init */
			init_p->tx.hc_rate = setup_q->txq.hc_rate;
			init_p->tx.sb_cq_index = setup_q->txq.sb_index;

			bnx2x_vf_mbx_set_q_flags(bp, setup_q->txq.flags,
						 &init_p->tx.flags);

			/* tx setup - flags */
			bnx2x_vf_mbx_set_q_flags(bp, setup_q->txq.flags,
						 &setup_p->flags);

			/* tx setup - general, nothing */

			/* tx setup - tx */
			txq_params->dscr_map = setup_q->txq.txq_addr;
			txq_params->sb_cq_index = setup_q->txq.sb_index;
			txq_params->traffic_type = setup_q->txq.traffic_type;

			bnx2x_vfop_qctor_dump_tx(bp, vf, init_p, setup_p,
						 q->index, q->sb_idx);
		}

		if (setup_q->param_valid & VFPF_RXQ_VALID) {
			struct bnx2x_rxq_setup_params *rxq_params =
				&setup_p->rxq_params;

			__set_bit(BNX2X_Q_TYPE_HAS_RX, &q_type);

			/* Note: there is no support for different SBs
			 * for TX and RX
			 */
			q->sb_idx = setup_q->rxq.vf_sb;

			/* rx init */
			init_p->rx.hc_rate = setup_q->rxq.hc_rate;
			init_p->rx.sb_cq_index = setup_q->rxq.sb_index;
			bnx2x_vf_mbx_set_q_flags(bp, setup_q->rxq.flags,
						 &init_p->rx.flags);

			/* rx setup - flags */
			bnx2x_vf_mbx_set_q_flags(bp, setup_q->rxq.flags,
						 &setup_p->flags);

			/* rx setup - general */
			setup_p->gen_params.mtu = setup_q->rxq.mtu;

			/* rx setup - rx */
			rxq_params->drop_flags = setup_q->rxq.drop_flags;
			rxq_params->dscr_map = setup_q->rxq.rxq_addr;
			rxq_params->sge_map = setup_q->rxq.sge_addr;
			rxq_params->rcq_map = setup_q->rxq.rcq_addr;
			rxq_params->rcq_np_map = setup_q->rxq.rcq_np_addr;
			rxq_params->buf_sz = setup_q->rxq.buf_sz;
			rxq_params->tpa_agg_sz = setup_q->rxq.tpa_agg_sz;
			rxq_params->max_sges_pkt = setup_q->rxq.max_sge_pkt;
			rxq_params->sge_buf_sz = setup_q->rxq.sge_buf_sz;
			rxq_params->cache_line_log =
				setup_q->rxq.cache_line_log;
			rxq_params->sb_cq_index = setup_q->rxq.sb_index;

			/* rx setup - multicast engine */
			if (bnx2x_vfq_is_leading(q)) {
				u8 mcast_id = FW_VF_HANDLE(vf->abs_vfid);

				rxq_params->mcast_engine_id = mcast_id;
				__set_bit(BNX2X_Q_FLG_MCAST, &setup_p->flags);
			}

			bnx2x_vfop_qctor_dump_rx(bp, vf, init_p, setup_p,
						 q->index, q->sb_idx);
		}
		/* complete the preparations */
		bnx2x_vfop_qctor_prep(bp, vf, q, &qctor, q_type);

		rc = bnx2x_vf_queue_setup(bp, vf, q->index, &qctor);
		if (rc)
			goto response;
	}
response:
	bnx2x_vf_mbx_resp(bp, vf, rc);
}

static int bnx2x_vf_mbx_macvlan_list(struct bnx2x *bp,
				     struct bnx2x_virtf *vf,
				     struct vfpf_set_q_filters_tlv *tlv,
				     struct bnx2x_vf_mac_vlan_filters **pfl,
				     u32 type_flag)
{
	int i, j;
	struct bnx2x_vf_mac_vlan_filters *fl = NULL;
	size_t fsz;

	fsz = tlv->n_mac_vlan_filters *
	      sizeof(struct bnx2x_vf_mac_vlan_filter) +
	      sizeof(struct bnx2x_vf_mac_vlan_filters);

	fl = kzalloc(fsz, GFP_KERNEL);
	if (!fl)
		return -ENOMEM;

	for (i = 0, j = 0; i < tlv->n_mac_vlan_filters; i++) {
		struct vfpf_q_mac_vlan_filter *msg_filter = &tlv->filters[i];

		if ((msg_filter->flags & type_flag) != type_flag)
			continue;
		if (type_flag == VFPF_Q_FILTER_DEST_MAC_VALID) {
			fl->filters[j].mac = msg_filter->mac;
			fl->filters[j].type = BNX2X_VF_FILTER_MAC;
		} else {
			fl->filters[j].vid = msg_filter->vlan_tag;
			fl->filters[j].type = BNX2X_VF_FILTER_VLAN;
		}
		fl->filters[j].add =
			(msg_filter->flags & VFPF_Q_FILTER_SET_MAC) ?
			true : false;
		fl->count++;
		j++;
	}
	if (!fl->count)
		kfree(fl);
	else
		*pfl = fl;

	return 0;
}

static void bnx2x_vf_mbx_dp_q_filter(struct bnx2x *bp, int msglvl, int idx,
				     struct vfpf_q_mac_vlan_filter *filter)
{
	DP(msglvl, "MAC-VLAN[%d] -- flags=0x%x\n", idx, filter->flags);
	if (filter->flags & VFPF_Q_FILTER_VLAN_TAG_VALID)
		DP_CONT(msglvl, ", vlan=%d", filter->vlan_tag);
	if (filter->flags & VFPF_Q_FILTER_DEST_MAC_VALID)
		DP_CONT(msglvl, ", MAC=%pM", filter->mac);
	DP_CONT(msglvl, "\n");
}

static void bnx2x_vf_mbx_dp_q_filters(struct bnx2x *bp, int msglvl,
				      struct vfpf_set_q_filters_tlv *filters)
{
	int i;

	if (filters->flags & VFPF_SET_Q_FILTERS_MAC_VLAN_CHANGED)
		for (i = 0; i < filters->n_mac_vlan_filters; i++)
			bnx2x_vf_mbx_dp_q_filter(bp, msglvl, i,
						 &filters->filters[i]);

	if (filters->flags & VFPF_SET_Q_FILTERS_RX_MASK_CHANGED)
		DP(msglvl, "RX-MASK=0x%x\n", filters->rx_mask);

	if (filters->flags & VFPF_SET_Q_FILTERS_MULTICAST_CHANGED)
		for (i = 0; i < filters->n_multicast; i++)
			DP(msglvl, "MULTICAST=%pM\n", filters->multicast[i]);
}

#define VFPF_MAC_FILTER		VFPF_Q_FILTER_DEST_MAC_VALID
#define VFPF_VLAN_FILTER	VFPF_Q_FILTER_VLAN_TAG_VALID

static int bnx2x_vf_mbx_qfilters(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	int rc = 0;

	struct vfpf_set_q_filters_tlv *msg =
		&BP_VF_MBX(bp, vf->index)->msg->req.set_q_filters;

	/* check for any mac/vlan changes */
	if (msg->flags & VFPF_SET_Q_FILTERS_MAC_VLAN_CHANGED) {
		/* build mac list */
		struct bnx2x_vf_mac_vlan_filters *fl = NULL;

		rc = bnx2x_vf_mbx_macvlan_list(bp, vf, msg, &fl,
					       VFPF_MAC_FILTER);
		if (rc)
			goto op_err;

		if (fl) {
			/* set mac list */
			rc = bnx2x_vf_mac_vlan_config_list(bp, vf, fl,
							   msg->vf_qid,
							   false);
			if (rc)
				goto op_err;
		}

		/* build vlan list */
		fl = NULL;

		rc = bnx2x_vf_mbx_macvlan_list(bp, vf, msg, &fl,
					       VFPF_VLAN_FILTER);
		if (rc)
			goto op_err;

		if (fl) {
			/* set vlan list */
			rc = bnx2x_vf_mac_vlan_config_list(bp, vf, fl,
							   msg->vf_qid,
							   false);
			if (rc)
				goto op_err;
		}
	}

	if (msg->flags & VFPF_SET_Q_FILTERS_RX_MASK_CHANGED) {
		unsigned long accept = 0;
		struct pf_vf_bulletin_content *bulletin =
			BP_VF_BULLETIN(bp, vf->index);

		/* Ignore VF requested mode; instead set a regular mode */
		if (msg->rx_mask != VFPF_RX_MASK_ACCEPT_NONE) {
			__set_bit(BNX2X_ACCEPT_UNICAST, &accept);
			__set_bit(BNX2X_ACCEPT_MULTICAST, &accept);
			__set_bit(BNX2X_ACCEPT_BROADCAST, &accept);
		}

		/* A packet arriving the vf's mac should be accepted
		 * with any vlan, unless a vlan has already been
		 * configured.
		 */
		if (!(bulletin->valid_bitmap & (1 << VLAN_VALID)))
			__set_bit(BNX2X_ACCEPT_ANY_VLAN, &accept);

		/* set rx-mode */
		rc = bnx2x_vf_rxmode(bp, vf, msg->vf_qid, accept);
		if (rc)
			goto op_err;
	}

	if (msg->flags & VFPF_SET_Q_FILTERS_MULTICAST_CHANGED) {
		/* set mcasts */
		rc = bnx2x_vf_mcast(bp, vf, msg->multicast,
				    msg->n_multicast, false);
		if (rc)
			goto op_err;
	}
op_err:
	if (rc)
		BNX2X_ERR("QFILTERS[%d:%d] error: rc %d\n",
			  vf->abs_vfid, msg->vf_qid, rc);
	return rc;
}

static int bnx2x_filters_validate_mac(struct bnx2x *bp,
				      struct bnx2x_virtf *vf,
				      struct vfpf_set_q_filters_tlv *filters)
{
	struct pf_vf_bulletin_content *bulletin = BP_VF_BULLETIN(bp, vf->index);
	int rc = 0;

	/* if a mac was already set for this VF via the set vf mac ndo, we only
	 * accept mac configurations of that mac. Why accept them at all?
	 * because PF may have been unable to configure the mac at the time
	 * since queue was not set up.
	 */
	if (bulletin->valid_bitmap & 1 << MAC_ADDR_VALID) {
		/* once a mac was set by ndo can only accept a single mac... */
		if (filters->n_mac_vlan_filters > 1) {
			BNX2X_ERR("VF[%d] requested the addition of multiple macs after set_vf_mac ndo was called\n",
				  vf->abs_vfid);
			rc = -EPERM;
			goto response;
		}

		/* ...and only the mac set by the ndo */
		if (filters->n_mac_vlan_filters == 1 &&
		    !ether_addr_equal(filters->filters->mac, bulletin->mac)) {
			BNX2X_ERR("VF[%d] requested the addition of a mac address not matching the one configured by set_vf_mac ndo\n",
				  vf->abs_vfid);
			rc = -EPERM;
			goto response;
		}
	}

response:
	return rc;
}

static int bnx2x_filters_validate_vlan(struct bnx2x *bp,
				       struct bnx2x_virtf *vf,
				       struct vfpf_set_q_filters_tlv *filters)
{
	struct pf_vf_bulletin_content *bulletin = BP_VF_BULLETIN(bp, vf->index);
	int rc = 0;

	/* if vlan was set by hypervisor we don't allow guest to config vlan */
	if (bulletin->valid_bitmap & 1 << VLAN_VALID) {
		int i;

		/* search for vlan filters */
		for (i = 0; i < filters->n_mac_vlan_filters; i++) {
			if (filters->filters[i].flags &
			    VFPF_Q_FILTER_VLAN_TAG_VALID) {
				BNX2X_ERR("VF[%d] attempted to configure vlan but one was already set by Hypervisor. Aborting request\n",
					  vf->abs_vfid);
				rc = -EPERM;
				goto response;
			}
		}
	}

	/* verify vf_qid */
	if (filters->vf_qid > vf_rxq_count(vf)) {
		rc = -EPERM;
		goto response;
	}

response:
	return rc;
}

static void bnx2x_vf_mbx_set_q_filters(struct bnx2x *bp,
				       struct bnx2x_virtf *vf,
				       struct bnx2x_vf_mbx *mbx)
{
	struct vfpf_set_q_filters_tlv *filters = &mbx->msg->req.set_q_filters;
	int rc;

	rc = bnx2x_filters_validate_mac(bp, vf, filters);
	if (rc)
		goto response;

	rc = bnx2x_filters_validate_vlan(bp, vf, filters);
	if (rc)
		goto response;

	DP(BNX2X_MSG_IOV, "VF[%d] Q_FILTERS: queue[%d]\n",
	   vf->abs_vfid,
	   filters->vf_qid);

	/* print q_filter message */
	bnx2x_vf_mbx_dp_q_filters(bp, BNX2X_MSG_IOV, filters);

	rc = bnx2x_vf_mbx_qfilters(bp, vf);
response:
	bnx2x_vf_mbx_resp(bp, vf, rc);
}

static void bnx2x_vf_mbx_teardown_q(struct bnx2x *bp, struct bnx2x_virtf *vf,
				    struct bnx2x_vf_mbx *mbx)
{
	int qid = mbx->msg->req.q_op.vf_qid;
	int rc;

	DP(BNX2X_MSG_IOV, "VF[%d] Q_TEARDOWN: vf_qid=%d\n",
	   vf->abs_vfid, qid);

	rc = bnx2x_vf_queue_teardown(bp, vf, qid);
	bnx2x_vf_mbx_resp(bp, vf, rc);
}

static void bnx2x_vf_mbx_close_vf(struct bnx2x *bp, struct bnx2x_virtf *vf,
				  struct bnx2x_vf_mbx *mbx)
{
	int rc;

	DP(BNX2X_MSG_IOV, "VF[%d] VF_CLOSE\n", vf->abs_vfid);

	rc = bnx2x_vf_close(bp, vf);
	bnx2x_vf_mbx_resp(bp, vf, rc);
}

static void bnx2x_vf_mbx_release_vf(struct bnx2x *bp, struct bnx2x_virtf *vf,
				    struct bnx2x_vf_mbx *mbx)
{
	int rc;

	DP(BNX2X_MSG_IOV, "VF[%d] VF_RELEASE\n", vf->abs_vfid);

	rc = bnx2x_vf_free(bp, vf);
	bnx2x_vf_mbx_resp(bp, vf, rc);
}

static void bnx2x_vf_mbx_update_rss(struct bnx2x *bp, struct bnx2x_virtf *vf,
				    struct bnx2x_vf_mbx *mbx)
{
	struct bnx2x_config_rss_params rss;
	struct vfpf_rss_tlv *rss_tlv = &mbx->msg->req.update_rss;
	int rc = 0;

	if (rss_tlv->ind_table_size != T_ETH_INDIRECTION_TABLE_SIZE ||
	    rss_tlv->rss_key_size != T_ETH_RSS_KEY) {
		BNX2X_ERR("failing rss configuration of vf %d due to size mismatch\n",
			  vf->index);
		rc = -EINVAL;
		goto mbx_resp;
	}

	memset(&rss, 0, sizeof(struct bnx2x_config_rss_params));

	/* set vfop params according to rss tlv */
	memcpy(rss.ind_table, rss_tlv->ind_table,
	       T_ETH_INDIRECTION_TABLE_SIZE);
	memcpy(rss.rss_key, rss_tlv->rss_key, sizeof(rss_tlv->rss_key));
	rss.rss_obj = &vf->rss_conf_obj;
	rss.rss_result_mask = rss_tlv->rss_result_mask;

	/* flags handled individually for backward/forward compatibility */
	rss.rss_flags = 0;
	rss.ramrod_flags = 0;

	if (rss_tlv->rss_flags & VFPF_RSS_MODE_DISABLED)
		__set_bit(BNX2X_RSS_MODE_DISABLED, &rss.rss_flags);
	if (rss_tlv->rss_flags & VFPF_RSS_MODE_REGULAR)
		__set_bit(BNX2X_RSS_MODE_REGULAR, &rss.rss_flags);
	if (rss_tlv->rss_flags & VFPF_RSS_SET_SRCH)
		__set_bit(BNX2X_RSS_SET_SRCH, &rss.rss_flags);
	if (rss_tlv->rss_flags & VFPF_RSS_IPV4)
		__set_bit(BNX2X_RSS_IPV4, &rss.rss_flags);
	if (rss_tlv->rss_flags & VFPF_RSS_IPV4_TCP)
		__set_bit(BNX2X_RSS_IPV4_TCP, &rss.rss_flags);
	if (rss_tlv->rss_flags & VFPF_RSS_IPV4_UDP)
		__set_bit(BNX2X_RSS_IPV4_UDP, &rss.rss_flags);
	if (rss_tlv->rss_flags & VFPF_RSS_IPV6)
		__set_bit(BNX2X_RSS_IPV6, &rss.rss_flags);
	if (rss_tlv->rss_flags & VFPF_RSS_IPV6_TCP)
		__set_bit(BNX2X_RSS_IPV6_TCP, &rss.rss_flags);
	if (rss_tlv->rss_flags & VFPF_RSS_IPV6_UDP)
		__set_bit(BNX2X_RSS_IPV6_UDP, &rss.rss_flags);

	if ((!(rss_tlv->rss_flags & VFPF_RSS_IPV4_TCP) &&
	     rss_tlv->rss_flags & VFPF_RSS_IPV4_UDP) ||
	    (!(rss_tlv->rss_flags & VFPF_RSS_IPV6_TCP) &&
	     rss_tlv->rss_flags & VFPF_RSS_IPV6_UDP)) {
		BNX2X_ERR("about to hit a FW assert. aborting...\n");
		rc = -EINVAL;
		goto mbx_resp;
	}

	rc = bnx2x_vf_rss_update(bp, vf, &rss);
mbx_resp:
	bnx2x_vf_mbx_resp(bp, vf, rc);
}

static int bnx2x_validate_tpa_params(struct bnx2x *bp,
				     struct vfpf_tpa_tlv *tpa_tlv)
{
	int rc = 0;

	if (tpa_tlv->tpa_client_info.max_sges_for_packet >
	    U_ETH_MAX_SGES_FOR_PACKET) {
		rc = -EINVAL;
		BNX2X_ERR("TPA update: max_sges received %d, max is %d\n",
			  tpa_tlv->tpa_client_info.max_sges_for_packet,
			  U_ETH_MAX_SGES_FOR_PACKET);
	}

	if (tpa_tlv->tpa_client_info.max_tpa_queues > MAX_AGG_QS(bp)) {
		rc = -EINVAL;
		BNX2X_ERR("TPA update: max_tpa_queues received %d, max is %d\n",
			  tpa_tlv->tpa_client_info.max_tpa_queues,
			  MAX_AGG_QS(bp));
	}

	return rc;
}

static void bnx2x_vf_mbx_update_tpa(struct bnx2x *bp, struct bnx2x_virtf *vf,
				    struct bnx2x_vf_mbx *mbx)
{
	struct bnx2x_queue_update_tpa_params vf_op_params;
	struct vfpf_tpa_tlv *tpa_tlv = &mbx->msg->req.update_tpa;
	int rc = 0;

	memset(&vf_op_params, 0, sizeof(vf_op_params));

	if (bnx2x_validate_tpa_params(bp, tpa_tlv)) {
		rc = -EINVAL;
		goto mbx_resp;
	}

	vf_op_params.complete_on_both_clients =
		tpa_tlv->tpa_client_info.complete_on_both_clients;
	vf_op_params.dont_verify_thr =
		tpa_tlv->tpa_client_info.dont_verify_thr;
	vf_op_params.max_agg_sz =
		tpa_tlv->tpa_client_info.max_agg_size;
	vf_op_params.max_sges_pkt =
		tpa_tlv->tpa_client_info.max_sges_for_packet;
	vf_op_params.max_tpa_queues =
		tpa_tlv->tpa_client_info.max_tpa_queues;
	vf_op_params.sge_buff_sz =
		tpa_tlv->tpa_client_info.sge_buff_size;
	vf_op_params.sge_pause_thr_high =
		tpa_tlv->tpa_client_info.sge_pause_thr_high;
	vf_op_params.sge_pause_thr_low =
		tpa_tlv->tpa_client_info.sge_pause_thr_low;
	vf_op_params.tpa_mode =
		tpa_tlv->tpa_client_info.tpa_mode;
	vf_op_params.update_ipv4 =
		tpa_tlv->tpa_client_info.update_ipv4;
	vf_op_params.update_ipv6 =
		tpa_tlv->tpa_client_info.update_ipv6;

	rc = bnx2x_vf_tpa_update(bp, vf, tpa_tlv, &vf_op_params);

mbx_resp:
	bnx2x_vf_mbx_resp(bp, vf, rc);
}

/* dispatch request */
static void bnx2x_vf_mbx_request(struct bnx2x *bp, struct bnx2x_virtf *vf,
				 struct bnx2x_vf_mbx *mbx)
{
	int i;

	/* check if tlv type is known */
	if (bnx2x_tlv_supported(mbx->first_tlv.tl.type)) {
		/* Lock the per vf op mutex and note the locker's identity.
		 * The unlock will take place in mbx response.
		 */
		bnx2x_lock_vf_pf_channel(bp, vf, mbx->first_tlv.tl.type);

		/* switch on the opcode */
		switch (mbx->first_tlv.tl.type) {
		case CHANNEL_TLV_ACQUIRE:
			bnx2x_vf_mbx_acquire(bp, vf, mbx);
			return;
		case CHANNEL_TLV_INIT:
			bnx2x_vf_mbx_init_vf(bp, vf, mbx);
			return;
		case CHANNEL_TLV_SETUP_Q:
			bnx2x_vf_mbx_setup_q(bp, vf, mbx);
			return;
		case CHANNEL_TLV_SET_Q_FILTERS:
			bnx2x_vf_mbx_set_q_filters(bp, vf, mbx);
			return;
		case CHANNEL_TLV_TEARDOWN_Q:
			bnx2x_vf_mbx_teardown_q(bp, vf, mbx);
			return;
		case CHANNEL_TLV_CLOSE:
			bnx2x_vf_mbx_close_vf(bp, vf, mbx);
			return;
		case CHANNEL_TLV_RELEASE:
			bnx2x_vf_mbx_release_vf(bp, vf, mbx);
			return;
		case CHANNEL_TLV_UPDATE_RSS:
			bnx2x_vf_mbx_update_rss(bp, vf, mbx);
			return;
		case CHANNEL_TLV_UPDATE_TPA:
			bnx2x_vf_mbx_update_tpa(bp, vf, mbx);
			return;
		}
	} else {
		/* unknown TLV - this may belong to a VF driver from the future
		 * - a version written after this PF driver was written, which
		 * supports features unknown as of yet. Too bad since we don't
		 * support them. Or this may be because someone wrote a crappy
		 * VF driver and is sending garbage over the channel.
		 */
		BNX2X_ERR("unknown TLV. type %d length %d vf->state was %d. first 20 bytes of mailbox buffer:\n",
			  mbx->first_tlv.tl.type, mbx->first_tlv.tl.length,
			  vf->state);
		for (i = 0; i < 20; i++)
			DP_CONT(BNX2X_MSG_IOV, "%x ",
				mbx->msg->req.tlv_buf_size.tlv_buffer[i]);
	}

	/* can we respond to VF (do we have an address for it?) */
	if (vf->state == VF_ACQUIRED || vf->state == VF_ENABLED) {
		/* notify the VF that we do not support this request */
		bnx2x_vf_mbx_resp(bp, vf, PFVF_STATUS_NOT_SUPPORTED);
	} else {
		/* can't send a response since this VF is unknown to us
		 * just ack the FW to release the mailbox and unlock
		 * the channel.
		 */
		storm_memset_vf_mbx_ack(bp, vf->abs_vfid);
		/* Firmware ack should be written before unlocking channel */
		mmiowb();
		bnx2x_unlock_vf_pf_channel(bp, vf, mbx->first_tlv.tl.type);
	}
}

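/* Note that every supported opcode returns from inside the switch in
 * bnx2x_vf_mbx_request(); only unknown TLVs fall through to the
 * NOT_SUPPORTED / ack-only handling at its tail.
 */
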
void bnx2x_vf_mbx_schedule(struct bnx2x *bp,
			   struct vf_pf_event_data *vfpf_event)
{
	u8 vf_idx;

	DP(BNX2X_MSG_IOV,
	   "vf pf event received: vfid %d, address_hi %x, address lo %x\n",
	   vfpf_event->vf_id, vfpf_event->msg_addr_hi, vfpf_event->msg_addr_lo);
	/* Sanity checks consider removing later */

	/* check if the vf_id is valid */
	if (vfpf_event->vf_id - BP_VFDB(bp)->sriov.first_vf_in_pf >
	    BNX2X_NR_VIRTFN(bp)) {
		BNX2X_ERR("Illegal vf_id %d max allowed: %d\n",
			  vfpf_event->vf_id, BNX2X_NR_VIRTFN(bp));
		return;
	}

	vf_idx = bnx2x_vf_idx_by_abs_fid(bp, vfpf_event->vf_id);

	/* Update VFDB with current message and schedule its handling */
	mutex_lock(&BP_VFDB(bp)->event_mutex);
	BP_VF_MBX(bp, vf_idx)->vf_addr_hi = vfpf_event->msg_addr_hi;
	BP_VF_MBX(bp, vf_idx)->vf_addr_lo = vfpf_event->msg_addr_lo;
	BP_VFDB(bp)->event_occur |= (1ULL << vf_idx);
	mutex_unlock(&BP_VFDB(bp)->event_mutex);

	bnx2x_schedule_iov_task(bp, BNX2X_IOV_HANDLE_VF_MSG);
}

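/* This function only records the event (message address plus a per-VF bit in
 * event_occur); the actual mailbox copy and dispatch happen later in
 * bnx2x_vf_mbx(), run from the IOV task that bnx2x_schedule_iov_task() kicks.
 */
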
/* handle new vf-pf messages */
void bnx2x_vf_mbx(struct bnx2x *bp)
{
	struct bnx2x_vfdb *vfdb = BP_VFDB(bp);
	u64 events;
	u8 vf_idx;

	if (!vfdb)
		return;

	mutex_lock(&vfdb->event_mutex);
	events = vfdb->event_occur;
	vfdb->event_occur = 0;
	mutex_unlock(&vfdb->event_mutex);

	for_each_vf(bp, vf_idx) {
		struct bnx2x_vf_mbx *mbx = BP_VF_MBX(bp, vf_idx);
		struct bnx2x_virtf *vf = BP_VF(bp, vf_idx);
		int rc;

		/* Handle VFs which have pending events */
		if (!(events & (1ULL << vf_idx)))
			continue;

		DP(BNX2X_MSG_IOV,
		   "Handling vf pf event vfid %d, address: [%x:%x], resp_offset 0x%x\n",
		   vf_idx, mbx->vf_addr_hi, mbx->vf_addr_lo,
		   mbx->first_tlv.resp_msg_offset);

		/* dmae to get the VF request */
		rc = bnx2x_copy32_vf_dmae(bp, true, mbx->msg_mapping,
					  vf->abs_vfid, mbx->vf_addr_hi,
					  mbx->vf_addr_lo,
					  sizeof(union vfpf_tlvs)/4);
		if (rc) {
			BNX2X_ERR("Failed to copy request VF %d\n",
				  vf->abs_vfid);
			bnx2x_vf_release(bp, vf);
			return;
		}

		/* process the VF message header */
		mbx->first_tlv = mbx->msg->req.first_tlv;

		/* Clean response buffer to refrain from falsely
		 * seeing chunks of the previous response in the next message.
		 */
		memset(&mbx->msg->resp, 0, sizeof(union pfvf_tlvs));

		/* dispatch the request (will prepare the response) */
		bnx2x_vf_mbx_request(bp, vf, mbx);
	}
}

/* propagate local bulletin board to vf */
int bnx2x_post_vf_bulletin(struct bnx2x *bp, int vf)
{
	struct pf_vf_bulletin_content *bulletin = BP_VF_BULLETIN(bp, vf);
	dma_addr_t pf_addr = BP_VF_BULLETIN_DMA(bp)->mapping +
		vf * BULLETIN_CONTENT_SIZE;
	dma_addr_t vf_addr = bnx2x_vf(bp, vf, bulletin_map);
	int rc;

	/* can only update vf after init took place */
	if (bnx2x_vf(bp, vf, state) != VF_ENABLED &&
	    bnx2x_vf(bp, vf, state) != VF_ACQUIRED)
		return 0;

	/* increment bulletin board version and compute crc */
	bulletin->version++;
	bulletin->length = BULLETIN_CONTENT_SIZE;
	bulletin->crc = bnx2x_crc_vf_bulletin(bp, bulletin);

	/* propagate bulletin board via dmae to vm memory */
	rc = bnx2x_copy32_vf_dmae(bp, false, pf_addr,
				  bnx2x_vf(bp, vf, abs_vfid), U64_HI(vf_addr),
				  U64_LO(vf_addr), bulletin->length / 4);
	return rc;
}
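
/* The VF side consumes this via bnx2x_sample_bulletin(), which validates the
 * crc over the BULLETIN_CONTENT_SIZE bytes and the version number before
 * acting on the valid_bitmap fields (mac, vlan, channel state).
 */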