/* QLogic qed NIC Driver
 * Copyright (c) 2015-2017  QLogic Corporation
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and /or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/etherdevice.h>
#include <linux/crc32.h>
#include <linux/vmalloc.h>
#include <linux/qed/qed_iov_if.h>
#include "qed_cxt.h"
#include "qed_hsi.h"
#include "qed_hw.h"
#include "qed_init_ops.h"
#include "qed_int.h"
#include "qed_mcp.h"
#include "qed_reg_addr.h"
#include "qed_sp.h"
#include "qed_sriov.h"
#include "qed_vf.h"
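
/* Send a VF_START ramrod: acquire a slow-path queue entry, fill in the VF's
 * ids and personality, negotiate the fastpath HSI minor version and post the
 * request to firmware.
 */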
static int qed_sp_vf_start(struct qed_hwfn *p_hwfn, struct qed_vf_info *p_vf)
{
	struct vf_start_ramrod_data *p_ramrod = NULL;
	struct qed_spq_entry *p_ent = NULL;
	struct qed_sp_init_data init_data;
	int rc = -EINVAL;
	u8 fp_minor;

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = qed_spq_get_cid(p_hwfn);
	init_data.opaque_fid = p_vf->opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 COMMON_RAMROD_VF_START,
				 PROTOCOLID_COMMON, &init_data);
	if (rc)
		return rc;

	p_ramrod = &p_ent->ramrod.vf_start;

	p_ramrod->vf_id = GET_FIELD(p_vf->concrete_fid, PXP_CONCRETE_FID_VFID);
	p_ramrod->opaque_fid = cpu_to_le16(p_vf->opaque_fid);

	switch (p_hwfn->hw_info.personality) {
	case QED_PCI_ETH:
		p_ramrod->personality = PERSONALITY_ETH;
		break;
	case QED_PCI_ETH_ROCE:
		p_ramrod->personality = PERSONALITY_RDMA_AND_ETH;
		break;
	default:
		DP_NOTICE(p_hwfn, "Unknown VF personality %d\n",
			  p_hwfn->hw_info.personality);
		return -EINVAL;
	}

	fp_minor = p_vf->acquire.vfdev_info.eth_fp_hsi_minor;
	if (fp_minor > ETH_HSI_VER_MINOR &&
	    fp_minor != ETH_HSI_VER_NO_PKT_LEN_TUNN) {
		DP_VERBOSE(p_hwfn,
			   QED_MSG_IOV,
			   "VF [%d] - Requested fp hsi %02x.%02x which is slightly newer than PF's %02x.%02x; Configuring PF's version\n",
			   p_vf->abs_vf_id,
			   ETH_HSI_VER_MAJOR,
			   fp_minor, ETH_HSI_VER_MAJOR, ETH_HSI_VER_MINOR);
		fp_minor = ETH_HSI_VER_MINOR;
	}

	p_ramrod->hsi_fp_ver.major_ver_arr[ETH_VER_KEY] = ETH_HSI_VER_MAJOR;
	p_ramrod->hsi_fp_ver.minor_ver_arr[ETH_VER_KEY] = fp_minor;

	DP_VERBOSE(p_hwfn, QED_MSG_IOV,
		   "VF[%d] - Starting using HSI %02x.%02x\n",
		   p_vf->abs_vf_id, ETH_HSI_VER_MAJOR, fp_minor);

	return qed_spq_post(p_hwfn, p_ent, NULL);
}
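
/* Counterpart of qed_sp_vf_start(): post a VF_STOP ramrod to remove the VF
 * from firmware.
 */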
static int qed_sp_vf_stop(struct qed_hwfn *p_hwfn,
			  u32 concrete_vfid, u16 opaque_vfid)
{
	struct vf_stop_ramrod_data *p_ramrod = NULL;
	struct qed_spq_entry *p_ent = NULL;
	struct qed_sp_init_data init_data;
	int rc = -EINVAL;

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = qed_spq_get_cid(p_hwfn);
	init_data.opaque_fid = opaque_vfid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 COMMON_RAMROD_VF_STOP,
				 PROTOCOLID_COMMON, &init_data);
	if (rc)
		return rc;

	p_ramrod = &p_ent->ramrod.vf_stop;

	p_ramrod->vf_id = GET_FIELD(concrete_vfid, PXP_CONCRETE_FID_VFID);

	return qed_spq_post(p_hwfn, p_ent, NULL);
}

static bool qed_iov_is_valid_vfid(struct qed_hwfn *p_hwfn,
				  int rel_vf_id,
				  bool b_enabled_only, bool b_non_malicious)
{
	if (!p_hwfn->pf_iov_info) {
		DP_NOTICE(p_hwfn->cdev, "No iov info\n");
		return false;
	}

	if ((rel_vf_id >= p_hwfn->cdev->p_iov_info->total_vfs) ||
	    (rel_vf_id < 0))
		return false;

	if ((!p_hwfn->pf_iov_info->vfs_array[rel_vf_id].b_init) &&
	    b_enabled_only)
		return false;

	if ((p_hwfn->pf_iov_info->vfs_array[rel_vf_id].b_malicious) &&
	    b_non_malicious)
		return false;

	return true;
}

static struct qed_vf_info *qed_iov_get_vf_info(struct qed_hwfn *p_hwfn,
					       u16 relative_vf_id,
					       bool b_enabled_only)
{
	struct qed_vf_info *vf = NULL;

	if (!p_hwfn->pf_iov_info) {
		DP_NOTICE(p_hwfn->cdev, "No iov info\n");
		return NULL;
	}

	if (qed_iov_is_valid_vfid(p_hwfn, relative_vf_id,
				  b_enabled_only, false))
		vf = &p_hwfn->pf_iov_info->vfs_array[relative_vf_id];
	else
		DP_ERR(p_hwfn, "qed_iov_get_vf_info: VF[%d] is not enabled\n",
		       relative_vf_id);

	return vf;
}

enum qed_iov_validate_q_mode {
	QED_IOV_VALIDATE_Q_NA,
	QED_IOV_VALIDATE_Q_ENABLE,
	QED_IOV_VALIDATE_Q_DISABLE,
};

static bool qed_iov_validate_queue_mode(struct qed_hwfn *p_hwfn,
					struct qed_vf_info *p_vf,
					u16 qid,
					enum qed_iov_validate_q_mode mode,
					bool b_is_tx)
{
	if (mode == QED_IOV_VALIDATE_Q_NA)
		return true;

	if ((b_is_tx && p_vf->vf_queues[qid].p_tx_cid) ||
	    (!b_is_tx && p_vf->vf_queues[qid].p_rx_cid))
		return mode == QED_IOV_VALIDATE_Q_ENABLE;

	/* In case we haven't found any valid cid, then it's disabled */
	return mode == QED_IOV_VALIDATE_Q_DISABLE;
}

static bool qed_iov_validate_rxq(struct qed_hwfn *p_hwfn,
				 struct qed_vf_info *p_vf,
				 u16 rx_qid,
				 enum qed_iov_validate_q_mode mode)
{
	if (rx_qid >= p_vf->num_rxqs) {
		DP_VERBOSE(p_hwfn,
			   QED_MSG_IOV,
			   "VF[0x%02x] - can't touch Rx queue[%04x]; Only 0x%04x are allocated\n",
			   p_vf->abs_vf_id, rx_qid, p_vf->num_rxqs);
		return false;
	}

	return qed_iov_validate_queue_mode(p_hwfn, p_vf, rx_qid, mode, false);
}

static bool qed_iov_validate_txq(struct qed_hwfn *p_hwfn,
				 struct qed_vf_info *p_vf,
				 u16 tx_qid,
				 enum qed_iov_validate_q_mode mode)
{
	if (tx_qid >= p_vf->num_txqs) {
		DP_VERBOSE(p_hwfn,
			   QED_MSG_IOV,
			   "VF[0x%02x] - can't touch Tx queue[%04x]; Only 0x%04x are allocated\n",
			   p_vf->abs_vf_id, tx_qid, p_vf->num_txqs);
		return false;
	}

	return qed_iov_validate_queue_mode(p_hwfn, p_vf, tx_qid, mode, true);
}

static bool qed_iov_validate_sb(struct qed_hwfn *p_hwfn,
				struct qed_vf_info *p_vf, u16 sb_idx)
{
	int i;

	for (i = 0; i < p_vf->num_sbs; i++)
		if (p_vf->igu_sbs[i] == sb_idx)
			return true;

	DP_VERBOSE(p_hwfn,
		   QED_MSG_IOV,
		   "VF[0x%02x] - tried using sb_idx %04x which doesn't exist as one of its 0x%02x SBs\n",
		   p_vf->abs_vf_id, sb_idx, p_vf->num_sbs);

	return false;
}
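
/* The two helpers below report whether the VF currently has at least one
 * Rx/Tx queue with a valid cid, i.e. whether it still has active queues.
 */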
static bool qed_iov_validate_active_rxq(struct qed_hwfn *p_hwfn,
					struct qed_vf_info *p_vf)
{
	u8 i;

	for (i = 0; i < p_vf->num_rxqs; i++)
		if (qed_iov_validate_queue_mode(p_hwfn, p_vf, i,
						QED_IOV_VALIDATE_Q_ENABLE,
						false))
			return true;

	return false;
}

static bool qed_iov_validate_active_txq(struct qed_hwfn *p_hwfn,
					struct qed_vf_info *p_vf)
{
	u8 i;

	for (i = 0; i < p_vf->num_txqs; i++)
		if (qed_iov_validate_queue_mode(p_hwfn, p_vf, i,
						QED_IOV_VALIDATE_Q_ENABLE,
						true))
			return true;

	return false;
}

static int qed_iov_post_vf_bulletin(struct qed_hwfn *p_hwfn,
				    int vfid, struct qed_ptt *p_ptt)
{
	struct qed_bulletin_content *p_bulletin;
	int crc_size = sizeof(p_bulletin->crc);
	struct qed_dmae_params params;
	struct qed_vf_info *p_vf;

	p_vf = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true);
	if (!p_vf)
		return -EINVAL;

	if (!p_vf->vf_bulletin)
		return -EINVAL;

	p_bulletin = p_vf->bulletin.p_virt;

	/* Increment bulletin board version and compute crc */
	p_bulletin->version++;
	p_bulletin->crc = crc32(0, (u8 *)p_bulletin + crc_size,
				p_vf->bulletin.size - crc_size);

	DP_VERBOSE(p_hwfn, QED_MSG_IOV,
		   "Posting Bulletin 0x%08x to VF[%d] (CRC 0x%08x)\n",
		   p_bulletin->version, p_vf->relative_vf_id, p_bulletin->crc);

	/* propagate bulletin board via dmae to vm memory */
	memset(&params, 0, sizeof(params));
	params.flags = QED_DMAE_FLAG_VF_DST;
	params.dst_vfid = p_vf->abs_vf_id;
	return qed_dmae_host2host(p_hwfn, p_ptt, p_vf->bulletin.phys,
				  p_vf->vf_bulletin, p_vf->bulletin.size / 4,
				  &params);
}
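
/* Cache the SR-IOV capability fields of the PF's PCI configuration space in
 * cdev->p_iov_info and sanity-check them.
 */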
static int qed_iov_pci_cfg_info(struct qed_dev *cdev)
{
	struct qed_hw_sriov_info *iov = cdev->p_iov_info;
	int pos = iov->pos;

	DP_VERBOSE(cdev, QED_MSG_IOV, "sriov ext pos %d\n", pos);
	pci_read_config_word(cdev->pdev, pos + PCI_SRIOV_CTRL, &iov->ctrl);

	pci_read_config_word(cdev->pdev,
			     pos + PCI_SRIOV_TOTAL_VF, &iov->total_vfs);
	pci_read_config_word(cdev->pdev,
			     pos + PCI_SRIOV_INITIAL_VF, &iov->initial_vfs);

	pci_read_config_word(cdev->pdev, pos + PCI_SRIOV_NUM_VF, &iov->num_vfs);
	if (iov->num_vfs) {
		DP_VERBOSE(cdev,
			   QED_MSG_IOV,
			   "Number of VFs are already set to non-zero value. Ignoring PCI configuration value\n");
		iov->num_vfs = 0;
	}

	pci_read_config_word(cdev->pdev,
			     pos + PCI_SRIOV_VF_OFFSET, &iov->offset);

	pci_read_config_word(cdev->pdev,
			     pos + PCI_SRIOV_VF_STRIDE, &iov->stride);

	pci_read_config_word(cdev->pdev,
			     pos + PCI_SRIOV_VF_DID, &iov->vf_device_id);

	pci_read_config_dword(cdev->pdev,
			      pos + PCI_SRIOV_SUP_PGSIZE, &iov->pgsz);

	pci_read_config_dword(cdev->pdev, pos + PCI_SRIOV_CAP, &iov->cap);

	pci_read_config_byte(cdev->pdev, pos + PCI_SRIOV_FUNC_LINK, &iov->link);

	DP_VERBOSE(cdev,
		   QED_MSG_IOV,
		   "IOV info: nres %d, cap 0x%x, ctrl 0x%x, total %d, initial %d, num vfs %d, offset %d, stride %d, page size 0x%x\n",
		   iov->nres,
		   iov->cap,
		   iov->ctrl,
		   iov->total_vfs,
		   iov->initial_vfs,
		   iov->nr_virtfn, iov->offset, iov->stride, iov->pgsz);

	/* Some sanity checks */
	if (iov->num_vfs > NUM_OF_VFS(cdev) ||
	    iov->total_vfs > NUM_OF_VFS(cdev)) {
		/* This can happen only due to a bug. In this case we set
		 * num_vfs to zero to avoid memory corruption in the code that
		 * assumes max number of vfs
		 */
		DP_NOTICE(cdev,
			  "IOV: Unexpected number of vfs set: %d setting num_vf to zero\n",
			  iov->num_vfs);

		iov->num_vfs = 0;
		iov->total_vfs = 0;
	}

	return 0;
}

static void qed_iov_clear_vf_igu_blocks(struct qed_hwfn *p_hwfn,
					struct qed_ptt *p_ptt)
{
	struct qed_igu_block *p_sb;
	u16 sb_id;
	u32 val;

	if (!p_hwfn->hw_info.p_igu_info) {
		DP_ERR(p_hwfn,
		       "qed_iov_clear_vf_igu_blocks IGU Info not initialized\n");
		return;
	}

	for (sb_id = 0; sb_id < QED_MAPPING_MEMORY_SIZE(p_hwfn->cdev);
	     sb_id++) {
		p_sb = &p_hwfn->hw_info.p_igu_info->igu_map.igu_blocks[sb_id];
		if ((p_sb->status & QED_IGU_STATUS_FREE) &&
		    !(p_sb->status & QED_IGU_STATUS_PF)) {
			val = qed_rd(p_hwfn, p_ptt,
				     IGU_REG_MAPPING_MEMORY + sb_id * 4);
			SET_FIELD(val, IGU_MAPPING_LINE_VALID, 0);
			qed_wr(p_hwfn, p_ptt,
			       IGU_REG_MAPPING_MEMORY + 4 * sb_id, val);
		}
	}
}

static void qed_iov_setup_vfdb(struct qed_hwfn *p_hwfn)
{
	struct qed_hw_sriov_info *p_iov = p_hwfn->cdev->p_iov_info;
	struct qed_pf_iov *p_iov_info = p_hwfn->pf_iov_info;
	struct qed_bulletin_content *p_bulletin_virt;
	dma_addr_t req_p, rply_p, bulletin_p;
	union pfvf_tlvs *p_reply_virt_addr;
	union vfpf_tlvs *p_req_virt_addr;
	u8 idx = 0;

	memset(p_iov_info->vfs_array, 0, sizeof(p_iov_info->vfs_array));

	p_req_virt_addr = p_iov_info->mbx_msg_virt_addr;
	req_p = p_iov_info->mbx_msg_phys_addr;
	p_reply_virt_addr = p_iov_info->mbx_reply_virt_addr;
	rply_p = p_iov_info->mbx_reply_phys_addr;
	p_bulletin_virt = p_iov_info->p_bulletins;
	bulletin_p = p_iov_info->bulletins_phys;
	if (!p_req_virt_addr || !p_reply_virt_addr || !p_bulletin_virt) {
		DP_ERR(p_hwfn,
		       "qed_iov_setup_vfdb called without allocating mem first\n");
		return;
	}

	for (idx = 0; idx < p_iov->total_vfs; idx++) {
		struct qed_vf_info *vf = &p_iov_info->vfs_array[idx];
		u32 concrete;

		vf->vf_mbx.req_virt = p_req_virt_addr + idx;
		vf->vf_mbx.req_phys = req_p + idx * sizeof(union vfpf_tlvs);
		vf->vf_mbx.reply_virt = p_reply_virt_addr + idx;
		vf->vf_mbx.reply_phys = rply_p + idx * sizeof(union pfvf_tlvs);

		vf->state = VF_STOPPED;
		vf->b_init = false;

		vf->bulletin.phys = idx *
				    sizeof(struct qed_bulletin_content) +
				    bulletin_p;
		vf->bulletin.p_virt = p_bulletin_virt + idx;
		vf->bulletin.size = sizeof(struct qed_bulletin_content);

		vf->relative_vf_id = idx;
		vf->abs_vf_id = idx + p_iov->first_vf_in_pf;
		concrete = qed_vfid_to_concrete(p_hwfn, vf->abs_vf_id);
		vf->concrete_fid = concrete;
		vf->opaque_fid = (p_hwfn->hw_info.opaque_fid & 0xff) |
				 (vf->abs_vf_id << 8);
		vf->vport_id = idx + 1;

		vf->num_mac_filters = QED_ETH_VF_NUM_MAC_FILTERS;
		vf->num_vlan_filters = QED_ETH_VF_NUM_VLAN_FILTERS;
	}
}
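
/* Allocate the per-VF mailbox request/reply buffers and the bulletin boards
 * as three contiguous DMA-coherent arrays; qed_iov_setup_vfdb() above slices
 * them into per-VF entries.
 */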
static int qed_iov_allocate_vfdb(struct qed_hwfn *p_hwfn)
{
	struct qed_pf_iov *p_iov_info = p_hwfn->pf_iov_info;
	void **p_v_addr;
	u16 num_vfs = 0;

	num_vfs = p_hwfn->cdev->p_iov_info->total_vfs;

	DP_VERBOSE(p_hwfn, QED_MSG_IOV,
		   "qed_iov_allocate_vfdb for %d VFs\n", num_vfs);

	/* Allocate PF Mailbox buffer (per-VF) */
	p_iov_info->mbx_msg_size = sizeof(union vfpf_tlvs) * num_vfs;
	p_v_addr = &p_iov_info->mbx_msg_virt_addr;
	*p_v_addr = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
				       p_iov_info->mbx_msg_size,
				       &p_iov_info->mbx_msg_phys_addr,
				       GFP_KERNEL);
	if (!*p_v_addr)
		return -ENOMEM;

	/* Allocate PF Mailbox Reply buffer (per-VF) */
	p_iov_info->mbx_reply_size = sizeof(union pfvf_tlvs) * num_vfs;
	p_v_addr = &p_iov_info->mbx_reply_virt_addr;
	*p_v_addr = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
				       p_iov_info->mbx_reply_size,
				       &p_iov_info->mbx_reply_phys_addr,
				       GFP_KERNEL);
	if (!*p_v_addr)
		return -ENOMEM;

	p_iov_info->bulletins_size = sizeof(struct qed_bulletin_content) *
				     num_vfs;
	p_v_addr = &p_iov_info->p_bulletins;
	*p_v_addr = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
				       p_iov_info->bulletins_size,
				       &p_iov_info->bulletins_phys,
				       GFP_KERNEL);
	if (!*p_v_addr)
		return -ENOMEM;

	DP_VERBOSE(p_hwfn,
		   QED_MSG_IOV,
		   "PF's Requests mailbox [%p virt 0x%llx phys], Response mailbox [%p virt 0x%llx phys] Bulletins [%p virt 0x%llx phys]\n",
		   p_iov_info->mbx_msg_virt_addr,
		   (u64) p_iov_info->mbx_msg_phys_addr,
		   p_iov_info->mbx_reply_virt_addr,
		   (u64) p_iov_info->mbx_reply_phys_addr,
		   p_iov_info->p_bulletins, (u64) p_iov_info->bulletins_phys);

	return 0;
}

static void qed_iov_free_vfdb(struct qed_hwfn *p_hwfn)
{
	struct qed_pf_iov *p_iov_info = p_hwfn->pf_iov_info;

	if (p_hwfn->pf_iov_info->mbx_msg_virt_addr)
		dma_free_coherent(&p_hwfn->cdev->pdev->dev,
				  p_iov_info->mbx_msg_size,
				  p_iov_info->mbx_msg_virt_addr,
				  p_iov_info->mbx_msg_phys_addr);

	if (p_hwfn->pf_iov_info->mbx_reply_virt_addr)
		dma_free_coherent(&p_hwfn->cdev->pdev->dev,
				  p_iov_info->mbx_reply_size,
				  p_iov_info->mbx_reply_virt_addr,
				  p_iov_info->mbx_reply_phys_addr);

	if (p_iov_info->p_bulletins)
		dma_free_coherent(&p_hwfn->cdev->pdev->dev,
				  p_iov_info->bulletins_size,
				  p_iov_info->p_bulletins,
				  p_iov_info->bulletins_phys);
}

int qed_iov_alloc(struct qed_hwfn *p_hwfn)
{
	struct qed_pf_iov *p_sriov;

	if (!IS_PF_SRIOV(p_hwfn)) {
		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
			   "No SR-IOV - no need for IOV db\n");
		return 0;
	}

	p_sriov = kzalloc(sizeof(*p_sriov), GFP_KERNEL);
	if (!p_sriov)
		return -ENOMEM;

	p_hwfn->pf_iov_info = p_sriov;

	return qed_iov_allocate_vfdb(p_hwfn);
}
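
/* Late-phase IOV setup: populate the VF database and invalidate any IGU CAM
 * line that is free and not owned by the PF.
 */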
void qed_iov_setup(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	if (!IS_PF_SRIOV(p_hwfn) || !IS_PF_SRIOV_ALLOC(p_hwfn))
		return;

	qed_iov_setup_vfdb(p_hwfn);
	qed_iov_clear_vf_igu_blocks(p_hwfn, p_ptt);
}

void qed_iov_free(struct qed_hwfn *p_hwfn)
{
	if (IS_PF_SRIOV_ALLOC(p_hwfn)) {
		qed_iov_free_vfdb(p_hwfn);
		kfree(p_hwfn->pf_iov_info);
	}
}

void qed_iov_free_hw_info(struct qed_dev *cdev)
{
	kfree(cdev->p_iov_info);
	cdev->p_iov_info = NULL;
}

int qed_iov_hw_info(struct qed_hwfn *p_hwfn)
{
	struct qed_dev *cdev = p_hwfn->cdev;
	int pos;
	int rc;

	if (IS_VF(p_hwfn->cdev))
		return 0;

	/* Learn the PCI configuration */
	pos = pci_find_ext_capability(p_hwfn->cdev->pdev,
				      PCI_EXT_CAP_ID_SRIOV);
	if (!pos) {
		DP_VERBOSE(p_hwfn, QED_MSG_IOV, "No PCIe IOV support\n");
		return 0;
	}

	/* Allocate a new struct for IOV information */
	cdev->p_iov_info = kzalloc(sizeof(*cdev->p_iov_info), GFP_KERNEL);
	if (!cdev->p_iov_info)
		return -ENOMEM;

	cdev->p_iov_info->pos = pos;

	rc = qed_iov_pci_cfg_info(cdev);
	if (rc)
		return rc;

	/* We want PF IOV to be synonymous with the existence of p_iov_info;
	 * In case the capability is published but there are no VFs, simply
	 * de-allocate the struct.
	 */
	if (!cdev->p_iov_info->total_vfs) {
		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
			   "IOV capabilities, but no VFs are published\n");
		kfree(cdev->p_iov_info);
		cdev->p_iov_info = NULL;
		return 0;
	}

	/* First VF index based on offset is tricky:
	 * - If ARI is supported [likely], offset - (16 - pf_id) would
	 *   provide the number for eng0. 2nd engine VFs would begin
	 *   after the first engine's VFs.
	 * - If !ARI, VFs would start on the next device,
	 *   so offset - (256 - pf_id) would provide the number.
	 * Utilize the fact that (256 - pf_id) is achieved only by the latter
	 * to differentiate between the two.
	 */
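
	/* Illustrative example of the calculation above (values made up):
	 * with ARI, abs_pf_id 2 and offset 16 give first_vf_in_pf =
	 * 16 + 2 - 16 = 2; without ARI, abs_pf_id 2 and offset 254 give
	 * first_vf_in_pf = 254 + 2 - 256 = 0.
	 */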

	if (p_hwfn->cdev->p_iov_info->offset < (256 - p_hwfn->abs_pf_id)) {
		u32 first = p_hwfn->cdev->p_iov_info->offset +
			    p_hwfn->abs_pf_id - 16;

		cdev->p_iov_info->first_vf_in_pf = first;

		if (QED_PATH_ID(p_hwfn))
			cdev->p_iov_info->first_vf_in_pf -= MAX_NUM_VFS_BB;
	} else {
		u32 first = p_hwfn->cdev->p_iov_info->offset +
			    p_hwfn->abs_pf_id - 256;

		cdev->p_iov_info->first_vf_in_pf = first;
	}

	DP_VERBOSE(p_hwfn, QED_MSG_IOV,
		   "First VF in hwfn 0x%08x\n",
		   cdev->p_iov_info->first_vf_in_pf);

	return 0;
}

bool _qed_iov_pf_sanity_check(struct qed_hwfn *p_hwfn,
			      int vfid, bool b_fail_malicious)
{
	/* Check PF supports sriov */
	if (IS_VF(p_hwfn->cdev) || !IS_QED_SRIOV(p_hwfn->cdev) ||
	    !IS_PF_SRIOV_ALLOC(p_hwfn))
		return false;

	/* Check VF validity */
	if (!qed_iov_is_valid_vfid(p_hwfn, vfid, true, b_fail_malicious))
		return false;

	return true;
}

bool qed_iov_pf_sanity_check(struct qed_hwfn *p_hwfn, int vfid)
{
	return _qed_iov_pf_sanity_check(p_hwfn, vfid, true);
}

static void qed_iov_set_vf_to_disable(struct qed_dev *cdev,
				      u16 rel_vf_id, u8 to_disable)
{
	struct qed_vf_info *vf;
	int i;

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

		vf = qed_iov_get_vf_info(p_hwfn, rel_vf_id, false);
		if (!vf)
			continue;

		vf->to_disable = to_disable;
	}
}

static void qed_iov_set_vfs_to_disable(struct qed_dev *cdev, u8 to_disable)
{
	u16 i;

	if (!IS_QED_SRIOV(cdev))
		return;

	for (i = 0; i < cdev->p_iov_info->total_vfs; i++)
		qed_iov_set_vf_to_disable(cdev, i, to_disable);
}

static void qed_iov_vf_pglue_clear_err(struct qed_hwfn *p_hwfn,
				       struct qed_ptt *p_ptt, u8 abs_vfid)
{
	qed_wr(p_hwfn, p_ptt,
	       PGLUE_B_REG_WAS_ERROR_VF_31_0_CLR + (abs_vfid >> 5) * 4,
	       1 << (abs_vfid & 0x1f));
}

static void qed_iov_vf_igu_reset(struct qed_hwfn *p_hwfn,
				 struct qed_ptt *p_ptt, struct qed_vf_info *vf)
{
	int i;

	/* Set VF masks and configuration - pretend */
	qed_fid_pretend(p_hwfn, p_ptt, (u16) vf->concrete_fid);

	qed_wr(p_hwfn, p_ptt, IGU_REG_STATISTIC_NUM_VF_MSG_SENT, 0);

	/* unpretend */
	qed_fid_pretend(p_hwfn, p_ptt, (u16) p_hwfn->hw_info.concrete_fid);

	/* iterate over all queues, clear sb consumer */
	for (i = 0; i < vf->num_sbs; i++)
		qed_int_igu_init_pure_rt_single(p_hwfn, p_ptt,
						vf->igu_sbs[i],
						vf->opaque_fid, true);
}

static void qed_iov_vf_igu_set_int(struct qed_hwfn *p_hwfn,
				   struct qed_ptt *p_ptt,
				   struct qed_vf_info *vf, bool enable)
{
	u32 igu_vf_conf;

	qed_fid_pretend(p_hwfn, p_ptt, (u16) vf->concrete_fid);

	igu_vf_conf = qed_rd(p_hwfn, p_ptt, IGU_REG_VF_CONFIGURATION);

	if (enable)
		igu_vf_conf |= IGU_VF_CONF_MSI_MSIX_EN;
	else
		igu_vf_conf &= ~IGU_VF_CONF_MSI_MSIX_EN;

	qed_wr(p_hwfn, p_ptt, IGU_REG_VF_CONFIGURATION, igu_vf_conf);

	/* unpretend */
	qed_fid_pretend(p_hwfn, p_ptt, (u16) p_hwfn->hw_info.concrete_fid);
}

static int qed_iov_enable_vf_access(struct qed_hwfn *p_hwfn,
				    struct qed_ptt *p_ptt,
				    struct qed_vf_info *vf)
{
	u32 igu_vf_conf = IGU_VF_CONF_FUNC_EN;
	int rc;

	/* It's possible VF was previously considered malicious -
	 * clear the indication even if we're only going to disable VF.
	 */
	vf->b_malicious = false;

	if (vf->to_disable)
		return 0;

	DP_VERBOSE(p_hwfn,
		   QED_MSG_IOV,
		   "Enable internal access for vf %x [abs %x]\n",
		   vf->abs_vf_id, QED_VF_ABS_ID(p_hwfn, vf));

	qed_iov_vf_pglue_clear_err(p_hwfn, p_ptt, QED_VF_ABS_ID(p_hwfn, vf));

	qed_iov_vf_igu_reset(p_hwfn, p_ptt, vf);

	rc = qed_mcp_config_vf_msix(p_hwfn, p_ptt, vf->abs_vf_id, vf->num_sbs);
	if (rc)
		return rc;

	qed_fid_pretend(p_hwfn, p_ptt, (u16) vf->concrete_fid);

	SET_FIELD(igu_vf_conf, IGU_VF_CONF_PARENT, p_hwfn->rel_pf_id);
	STORE_RT_REG(p_hwfn, IGU_REG_VF_CONFIGURATION_RT_OFFSET, igu_vf_conf);

	qed_init_run(p_hwfn, p_ptt, PHASE_VF, vf->abs_vf_id,
		     p_hwfn->hw_info.hw_mode);

	/* unpretend */
	qed_fid_pretend(p_hwfn, p_ptt, (u16) p_hwfn->hw_info.concrete_fid);

	vf->state = VF_FREE;

	return rc;
}

/**
 * @brief qed_iov_config_perm_table - configure the permission
 *      zone table.
 *      In E4, queue zone permission table size is 320x9. There
 *      are 320 VF queues for single engine device (256 for dual
 *      engine device), and each entry has the following format:
 *      {Valid, VF[7:0]}
 * @param p_hwfn
 * @param p_ptt
 * @param vf
 * @param enable
 */
static void qed_iov_config_perm_table(struct qed_hwfn *p_hwfn,
				      struct qed_ptt *p_ptt,
				      struct qed_vf_info *vf, u8 enable)
{
	u32 reg_addr, val;
	u16 qzone_id = 0;
	int qid;

	for (qid = 0; qid < vf->num_rxqs; qid++) {
		qed_fw_l2_queue(p_hwfn, vf->vf_queues[qid].fw_rx_qid,
				&qzone_id);

		reg_addr = PSWHST_REG_ZONE_PERMISSION_TABLE + qzone_id * 4;
		val = enable ? (vf->abs_vf_id | BIT(8)) : 0;
		qed_wr(p_hwfn, p_ptt, reg_addr, val);
	}
}

static void qed_iov_enable_vf_traffic(struct qed_hwfn *p_hwfn,
				      struct qed_ptt *p_ptt,
				      struct qed_vf_info *vf)
{
	/* Reset vf in IGU - interrupts are still disabled */
	qed_iov_vf_igu_reset(p_hwfn, p_ptt, vf);

	qed_iov_vf_igu_set_int(p_hwfn, p_ptt, vf, 1);

	/* Permission Table */
	qed_iov_config_perm_table(p_hwfn, p_ptt, vf, true);
}

static u8 qed_iov_alloc_vf_igu_sbs(struct qed_hwfn *p_hwfn,
				   struct qed_ptt *p_ptt,
				   struct qed_vf_info *vf, u16 num_rx_queues)
{
	struct qed_igu_block *igu_blocks;
	int qid = 0, igu_id = 0;
	u32 val = 0;

	igu_blocks = p_hwfn->hw_info.p_igu_info->igu_map.igu_blocks;

	if (num_rx_queues > p_hwfn->hw_info.p_igu_info->free_blks)
		num_rx_queues = p_hwfn->hw_info.p_igu_info->free_blks;
	p_hwfn->hw_info.p_igu_info->free_blks -= num_rx_queues;

	SET_FIELD(val, IGU_MAPPING_LINE_FUNCTION_NUMBER, vf->abs_vf_id);
	SET_FIELD(val, IGU_MAPPING_LINE_VALID, 1);
	SET_FIELD(val, IGU_MAPPING_LINE_PF_VALID, 0);

	while ((qid < num_rx_queues) &&
	       (igu_id < QED_MAPPING_MEMORY_SIZE(p_hwfn->cdev))) {
		if (igu_blocks[igu_id].status & QED_IGU_STATUS_FREE) {
			struct cau_sb_entry sb_entry;

			vf->igu_sbs[qid] = (u16)igu_id;
			igu_blocks[igu_id].status &= ~QED_IGU_STATUS_FREE;

			SET_FIELD(val, IGU_MAPPING_LINE_VECTOR_NUMBER, qid);

			qed_wr(p_hwfn, p_ptt,
			       IGU_REG_MAPPING_MEMORY + sizeof(u32) * igu_id,
			       val);

			/* Configure igu sb in CAU which were marked valid */
			qed_init_cau_sb_entry(p_hwfn, &sb_entry,
					      p_hwfn->rel_pf_id,
					      vf->abs_vf_id, 1);
			qed_dmae_host2grc(p_hwfn, p_ptt,
					  (u64)(uintptr_t)&sb_entry,
					  CAU_REG_SB_VAR_MEMORY +
					  igu_id * sizeof(u64), 2, 0);
			qid++;
		}
		igu_id++;
	}

	vf->num_sbs = (u8) num_rx_queues;

	return vf->num_sbs;
}

static void qed_iov_free_vf_igu_sbs(struct qed_hwfn *p_hwfn,
				    struct qed_ptt *p_ptt,
				    struct qed_vf_info *vf)
{
	struct qed_igu_info *p_info = p_hwfn->hw_info.p_igu_info;
	int idx, igu_id;
	u32 addr, val;

	/* Invalidate igu CAM lines and mark them as free */
	for (idx = 0; idx < vf->num_sbs; idx++) {
		igu_id = vf->igu_sbs[idx];
		addr = IGU_REG_MAPPING_MEMORY + sizeof(u32) * igu_id;

		val = qed_rd(p_hwfn, p_ptt, addr);
		SET_FIELD(val, IGU_MAPPING_LINE_VALID, 0);
		qed_wr(p_hwfn, p_ptt, addr, val);

		p_info->igu_map.igu_blocks[igu_id].status |=
		    QED_IGU_STATUS_FREE;

		p_hwfn->hw_info.p_igu_info->free_blks++;
	}

	vf->num_sbs = 0;
}

static void qed_iov_set_link(struct qed_hwfn *p_hwfn,
			     u16 vfid,
			     struct qed_mcp_link_params *params,
			     struct qed_mcp_link_state *link,
			     struct qed_mcp_link_capabilities *p_caps)
{
	struct qed_vf_info *p_vf = qed_iov_get_vf_info(p_hwfn,
						       vfid,
						       false);
	struct qed_bulletin_content *p_bulletin;

	if (!p_vf)
		return;

	p_bulletin = p_vf->bulletin.p_virt;
	p_bulletin->req_autoneg = params->speed.autoneg;
	p_bulletin->req_adv_speed = params->speed.advertised_speeds;
	p_bulletin->req_forced_speed = params->speed.forced_speed;
	p_bulletin->req_autoneg_pause = params->pause.autoneg;
	p_bulletin->req_forced_rx = params->pause.forced_rx;
	p_bulletin->req_forced_tx = params->pause.forced_tx;
	p_bulletin->req_loopback = params->loopback_mode;

	p_bulletin->link_up = link->link_up;
	p_bulletin->speed = link->speed;
	p_bulletin->full_duplex = link->full_duplex;
	p_bulletin->autoneg = link->an;
	p_bulletin->autoneg_complete = link->an_complete;
	p_bulletin->parallel_detection = link->parallel_detection;
	p_bulletin->pfc_enabled = link->pfc_enabled;
	p_bulletin->partner_adv_speed = link->partner_adv_speed;
	p_bulletin->partner_tx_flow_ctrl_en = link->partner_tx_flow_ctrl_en;
	p_bulletin->partner_rx_flow_ctrl_en = link->partner_rx_flow_ctrl_en;
	p_bulletin->partner_adv_pause = link->partner_adv_pause;
	p_bulletin->sfp_tx_fault = link->sfp_tx_fault;

	p_bulletin->capability_speed = p_caps->speed_capabilities;
}
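
/* One-time HW initialization of a VF: validate the requested queue zones,
 * allocate IGU SBs, publish the current link state in the VF's bulletin and
 * enable the VF's access paths.
 */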
static int qed_iov_init_hw_for_vf(struct qed_hwfn *p_hwfn,
				  struct qed_ptt *p_ptt,
				  struct qed_iov_vf_init_params *p_params)
{
	struct qed_mcp_link_capabilities link_caps;
	struct qed_mcp_link_params link_params;
	struct qed_mcp_link_state link_state;
	u8 num_of_vf_avaiable_chains = 0;
	struct qed_vf_info *vf = NULL;
	u16 qid, num_irqs;
	int rc = 0;
	u32 cids;
	u8 i;

	vf = qed_iov_get_vf_info(p_hwfn, p_params->rel_vf_id, false);
	if (!vf) {
		DP_ERR(p_hwfn, "qed_iov_init_hw_for_vf : vf is NULL\n");
		return -EINVAL;
	}

	if (vf->b_init) {
		DP_NOTICE(p_hwfn, "VF[%d] is already active.\n",
			  p_params->rel_vf_id);
		return -EINVAL;
	}

	/* Perform sanity checking on the requested queue_id */
	for (i = 0; i < p_params->num_queues; i++) {
		u16 min_vf_qzone = FEAT_NUM(p_hwfn, QED_PF_L2_QUE);
		u16 max_vf_qzone = min_vf_qzone +
		    FEAT_NUM(p_hwfn, QED_VF_L2_QUE) - 1;

		qid = p_params->req_rx_queue[i];
		if (qid < min_vf_qzone || qid > max_vf_qzone) {
			DP_NOTICE(p_hwfn,
				  "Can't enable Rx qid [%04x] for VF[%d]: qids [0x%04x,...,0x%04x] available\n",
				  qid,
				  p_params->rel_vf_id,
				  min_vf_qzone, max_vf_qzone);
			return -EINVAL;
		}

		qid = p_params->req_tx_queue[i];
		if (qid > max_vf_qzone) {
			DP_NOTICE(p_hwfn,
				  "Can't enable Tx qid [%04x] for VF[%d]: max qid 0x%04x\n",
				  qid, p_params->rel_vf_id, max_vf_qzone);
			return -EINVAL;
		}

		/* If client *really* wants, Tx qid can be shared with PF */
		if (qid < min_vf_qzone)
			DP_VERBOSE(p_hwfn,
				   QED_MSG_IOV,
				   "VF[%d] is using PF qid [0x%04x] for Txq[0x%02x]\n",
				   p_params->rel_vf_id, qid, i);
	}

	/* Limit number of queues according to number of CIDs */
	qed_cxt_get_proto_cid_count(p_hwfn, PROTOCOLID_ETH, &cids);
	DP_VERBOSE(p_hwfn,
		   QED_MSG_IOV,
		   "VF[%d] - requesting to initialize for 0x%04x queues [0x%04x CIDs available]\n",
		   vf->relative_vf_id, p_params->num_queues, (u16)cids);
	num_irqs = min_t(u16, p_params->num_queues, ((u16)cids));

	num_of_vf_avaiable_chains = qed_iov_alloc_vf_igu_sbs(p_hwfn,
							     p_ptt,
							     vf, num_irqs);
	if (!num_of_vf_avaiable_chains) {
		DP_ERR(p_hwfn, "no available igu sbs\n");
		return -ENOMEM;
	}

	/* Choose queue number and index ranges */
	vf->num_rxqs = num_of_vf_avaiable_chains;
	vf->num_txqs = num_of_vf_avaiable_chains;

	for (i = 0; i < vf->num_rxqs; i++) {
		struct qed_vf_q_info *p_queue = &vf->vf_queues[i];

		p_queue->fw_rx_qid = p_params->req_rx_queue[i];
		p_queue->fw_tx_qid = p_params->req_tx_queue[i];

		/* CIDs are per-VF, so no problem having them 0-based. */
		p_queue->fw_cid = i;

		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
			   "VF[%d] - Q[%d] SB %04x, qid [Rx %04x Tx %04x] CID %04x\n",
			   vf->relative_vf_id,
			   i, vf->igu_sbs[i],
			   p_queue->fw_rx_qid,
			   p_queue->fw_tx_qid, p_queue->fw_cid);
	}

	/* Update the link configuration in bulletin */
	memcpy(&link_params, qed_mcp_get_link_params(p_hwfn),
	       sizeof(link_params));
	memcpy(&link_state, qed_mcp_get_link_state(p_hwfn), sizeof(link_state));
	memcpy(&link_caps, qed_mcp_get_link_capabilities(p_hwfn),
	       sizeof(link_caps));
	qed_iov_set_link(p_hwfn, p_params->rel_vf_id,
			 &link_params, &link_state, &link_caps);

	rc = qed_iov_enable_vf_access(p_hwfn, p_ptt, vf);
	if (!rc) {
		vf->b_init = true;

		if (IS_LEAD_HWFN(p_hwfn))
			p_hwfn->cdev->p_iov_info->num_vfs++;
	}

	return rc;
}

static int qed_iov_release_hw_for_vf(struct qed_hwfn *p_hwfn,
				     struct qed_ptt *p_ptt, u16 rel_vf_id)
{
	struct qed_mcp_link_capabilities caps;
	struct qed_mcp_link_params params;
	struct qed_mcp_link_state link;
	struct qed_vf_info *vf = NULL;

	vf = qed_iov_get_vf_info(p_hwfn, rel_vf_id, true);
	if (!vf) {
		DP_ERR(p_hwfn, "qed_iov_release_hw_for_vf : vf is NULL\n");
		return -EINVAL;
	}

	if (vf->bulletin.p_virt)
		memset(vf->bulletin.p_virt, 0, sizeof(*vf->bulletin.p_virt));

	memset(&vf->p_vf_info, 0, sizeof(vf->p_vf_info));

	/* Get the link configuration back in bulletin so
	 * that when VFs are re-enabled they get the actual
	 * link configuration.
	 */
	memcpy(&params, qed_mcp_get_link_params(p_hwfn), sizeof(params));
	memcpy(&link, qed_mcp_get_link_state(p_hwfn), sizeof(link));
	memcpy(&caps, qed_mcp_get_link_capabilities(p_hwfn), sizeof(caps));
	qed_iov_set_link(p_hwfn, rel_vf_id, &params, &link, &caps);

	/* Forget the VF's acquisition message */
	memset(&vf->acquire, 0, sizeof(vf->acquire));

	/* disabling interrupts and resetting permission table was done during
	 * vf-close, however, we could get here without going through vf_close
	 */
	/* Disable Interrupts for VF */
	qed_iov_vf_igu_set_int(p_hwfn, p_ptt, vf, 0);

	/* Reset Permission table */
	qed_iov_config_perm_table(p_hwfn, p_ptt, vf, 0);

	vf->num_rxqs = 0;
	vf->num_txqs = 0;
	qed_iov_free_vf_igu_sbs(p_hwfn, p_ptt, vf);

	if (vf->b_init) {
		vf->b_init = false;

		if (IS_LEAD_HWFN(p_hwfn))
			p_hwfn->cdev->p_iov_info->num_vfs--;
	}

	return 0;
}

static bool qed_iov_tlv_supported(u16 tlvtype)
{
	return CHANNEL_TLV_NONE < tlvtype && tlvtype < CHANNEL_TLV_MAX;
}

/* place a given tlv on the tlv buffer, continuing current tlv list */
void *qed_add_tlv(struct qed_hwfn *p_hwfn, u8 **offset, u16 type, u16 length)
{
	struct channel_tlv *tl = (struct channel_tlv *)*offset;

	tl->type = type;
	tl->length = length;

	/* Offset should keep pointing to next TLV (the end of the last) */
	*offset += length;

	/* Return a pointer to the start of the added tlv */
	return *offset - length;
}
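
/* Typical usage sketch (mirrors qed_iov_prepare_resp() below):
 *
 *	mbx->offset = (u8 *)mbx->reply_virt;
 *	qed_add_tlv(p_hwfn, &mbx->offset, type, length);
 *	qed_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_LIST_END,
 *		    sizeof(struct channel_list_end_tlv));
 */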

/* list the types and lengths of the tlvs on the buffer */
void qed_dp_tlv_list(struct qed_hwfn *p_hwfn, void *tlvs_list)
{
	u16 i = 1, total_length = 0;
	struct channel_tlv *tlv;

	do {
		tlv = (struct channel_tlv *)((u8 *)tlvs_list + total_length);

		/* output tlv */
		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
			   "TLV number %d: type %d, length %d\n",
			   i, tlv->type, tlv->length);

		if (tlv->type == CHANNEL_TLV_LIST_END)
			return;

		/* Validate entry - protect against malicious VFs */
		if (!tlv->length) {
			DP_NOTICE(p_hwfn, "TLV of length 0 found\n");
			return;
		}

		total_length += tlv->length;

		if (total_length >= sizeof(struct tlv_buffer_size)) {
			DP_NOTICE(p_hwfn, "TLV ==> Buffer overflow\n");
			return;
		}

		i++;
	} while (1);
}

static void qed_iov_send_response(struct qed_hwfn *p_hwfn,
				  struct qed_ptt *p_ptt,
				  struct qed_vf_info *p_vf,
				  u16 length, u8 status)
{
	struct qed_iov_vf_mbx *mbx = &p_vf->vf_mbx;
	struct qed_dmae_params params;
	u8 eng_vf_id;

	mbx->reply_virt->default_resp.hdr.status = status;

	qed_dp_tlv_list(p_hwfn, mbx->reply_virt);

	eng_vf_id = p_vf->abs_vf_id;

	memset(&params, 0, sizeof(struct qed_dmae_params));
	params.flags = QED_DMAE_FLAG_VF_DST;
	params.dst_vfid = eng_vf_id;

	qed_dmae_host2host(p_hwfn, p_ptt, mbx->reply_phys + sizeof(u64),
			   mbx->req_virt->first_tlv.reply_address +
			   sizeof(u64),
			   (sizeof(union pfvf_tlvs) - sizeof(u64)) / 4,
			   &params);

	/* Once PF copies the rc to the VF, the latter can continue
	 * and send an additional message. So we have to make sure the
	 * channel would be re-set to ready prior to that.
	 */
	REG_WR(p_hwfn,
	       GTT_BAR0_MAP_REG_USDM_RAM +
	       USTORM_VF_PF_CHANNEL_READY_OFFSET(eng_vf_id), 1);

	qed_dmae_host2host(p_hwfn, p_ptt, mbx->reply_phys,
			   mbx->req_virt->first_tlv.reply_address,
			   sizeof(u64) / 4, &params);
}

static u16 qed_iov_vport_to_tlv(struct qed_hwfn *p_hwfn,
				enum qed_iov_vport_update_flag flag)
{
	switch (flag) {
	case QED_IOV_VP_UPDATE_ACTIVATE:
		return CHANNEL_TLV_VPORT_UPDATE_ACTIVATE;
	case QED_IOV_VP_UPDATE_VLAN_STRIP:
		return CHANNEL_TLV_VPORT_UPDATE_VLAN_STRIP;
	case QED_IOV_VP_UPDATE_TX_SWITCH:
		return CHANNEL_TLV_VPORT_UPDATE_TX_SWITCH;
	case QED_IOV_VP_UPDATE_MCAST:
		return CHANNEL_TLV_VPORT_UPDATE_MCAST;
	case QED_IOV_VP_UPDATE_ACCEPT_PARAM:
		return CHANNEL_TLV_VPORT_UPDATE_ACCEPT_PARAM;
	case QED_IOV_VP_UPDATE_RSS:
		return CHANNEL_TLV_VPORT_UPDATE_RSS;
	case QED_IOV_VP_UPDATE_ACCEPT_ANY_VLAN:
		return CHANNEL_TLV_VPORT_UPDATE_ACCEPT_ANY_VLAN;
	case QED_IOV_VP_UPDATE_SGE_TPA:
		return CHANNEL_TLV_VPORT_UPDATE_SGE_TPA;
	default:
		return 0;
	}
}

static u16 qed_iov_prep_vp_update_resp_tlvs(struct qed_hwfn *p_hwfn,
					    struct qed_vf_info *p_vf,
					    struct qed_iov_vf_mbx *p_mbx,
					    u8 status,
					    u16 tlvs_mask, u16 tlvs_accepted)
{
	struct pfvf_def_resp_tlv *resp;
	u16 size, total_len, i;

	memset(p_mbx->reply_virt, 0, sizeof(union pfvf_tlvs));
	p_mbx->offset = (u8 *)p_mbx->reply_virt;
	size = sizeof(struct pfvf_def_resp_tlv);
	total_len = size;

	qed_add_tlv(p_hwfn, &p_mbx->offset, CHANNEL_TLV_VPORT_UPDATE, size);

	/* Prepare response for all extended tlvs if they are found by PF */
	for (i = 0; i < QED_IOV_VP_UPDATE_MAX; i++) {
		if (!(tlvs_mask & BIT(i)))
			continue;

		resp = qed_add_tlv(p_hwfn, &p_mbx->offset,
				   qed_iov_vport_to_tlv(p_hwfn, i), size);

		if (tlvs_accepted & BIT(i))
			resp->hdr.status = status;
		else
			resp->hdr.status = PFVF_STATUS_NOT_SUPPORTED;

		DP_VERBOSE(p_hwfn,
			   QED_MSG_IOV,
			   "VF[%d] - vport_update response: TLV %d, status %02x\n",
			   p_vf->relative_vf_id,
			   qed_iov_vport_to_tlv(p_hwfn, i), resp->hdr.status);

		total_len += size;
	}

	qed_add_tlv(p_hwfn, &p_mbx->offset, CHANNEL_TLV_LIST_END,
		    sizeof(struct channel_list_end_tlv));

	return total_len;
}

static void qed_iov_prepare_resp(struct qed_hwfn *p_hwfn,
				 struct qed_ptt *p_ptt,
				 struct qed_vf_info *vf_info,
				 u16 type, u16 length, u8 status)
{
	struct qed_iov_vf_mbx *mbx = &vf_info->vf_mbx;

	mbx->offset = (u8 *)mbx->reply_virt;

	qed_add_tlv(p_hwfn, &mbx->offset, type, length);
	qed_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_LIST_END,
		    sizeof(struct channel_list_end_tlv));

	qed_iov_send_response(p_hwfn, p_ptt, vf_info, length, status);
}

static struct
qed_public_vf_info *qed_iov_get_public_vf_info(struct qed_hwfn *p_hwfn,
					       u16 relative_vf_id,
					       bool b_enabled_only)
{
	struct qed_vf_info *vf = NULL;

	vf = qed_iov_get_vf_info(p_hwfn, relative_vf_id, b_enabled_only);
	if (!vf)
		return NULL;

	return &vf->p_vf_info;
}

static void qed_iov_clean_vf(struct qed_hwfn *p_hwfn, u8 vfid)
{
	struct qed_public_vf_info *vf_info;

	vf_info = qed_iov_get_public_vf_info(p_hwfn, vfid, false);
	if (!vf_info)
		return;

	/* Clear the VF mac */
	eth_zero_addr(vf_info->mac);

	vf_info->rx_accept_mode = 0;
	vf_info->tx_accept_mode = 0;
}

static void qed_iov_vf_cleanup(struct qed_hwfn *p_hwfn,
			       struct qed_vf_info *p_vf)
{
	u32 i;

	p_vf->vf_bulletin = 0;
	p_vf->vport_instance = 0;
	p_vf->configured_features = 0;

	/* If VF previously requested less resources, go back to default */
	p_vf->num_rxqs = p_vf->num_sbs;
	p_vf->num_txqs = p_vf->num_sbs;

	p_vf->num_active_rxqs = 0;

	for (i = 0; i < QED_MAX_VF_CHAINS_PER_PF; i++) {
		struct qed_vf_q_info *p_queue = &p_vf->vf_queues[i];

		if (p_queue->p_rx_cid) {
			qed_eth_queue_cid_release(p_hwfn, p_queue->p_rx_cid);
			p_queue->p_rx_cid = NULL;
		}

		if (p_queue->p_tx_cid) {
			qed_eth_queue_cid_release(p_hwfn, p_queue->p_tx_cid);
			p_queue->p_tx_cid = NULL;
		}
	}

	memset(&p_vf->shadow_config, 0, sizeof(p_vf->shadow_config));
	memset(&p_vf->acquire, 0, sizeof(p_vf->acquire));
	qed_iov_clean_vf(p_hwfn, p_vf->relative_vf_id);
}

static u8 qed_iov_vf_mbx_acquire_resc(struct qed_hwfn *p_hwfn,
				      struct qed_ptt *p_ptt,
				      struct qed_vf_info *p_vf,
				      struct vf_pf_resc_request *p_req,
				      struct pf_vf_resc *p_resp)
{
	u8 i;

	/* Queue related information */
	p_resp->num_rxqs = p_vf->num_rxqs;
	p_resp->num_txqs = p_vf->num_txqs;
	p_resp->num_sbs = p_vf->num_sbs;

	for (i = 0; i < p_resp->num_sbs; i++) {
		p_resp->hw_sbs[i].hw_sb_id = p_vf->igu_sbs[i];
		p_resp->hw_sbs[i].sb_qid = 0;
	}

	/* These fields are filled for backward compatibility.
	 * Unused by modern vfs.
	 */
	for (i = 0; i < p_resp->num_rxqs; i++) {
		qed_fw_l2_queue(p_hwfn, p_vf->vf_queues[i].fw_rx_qid,
				(u16 *)&p_resp->hw_qid[i]);
		p_resp->cid[i] = p_vf->vf_queues[i].fw_cid;
	}

	/* Filter related information */
	p_resp->num_mac_filters = min_t(u8, p_vf->num_mac_filters,
					p_req->num_mac_filters);
	p_resp->num_vlan_filters = min_t(u8, p_vf->num_vlan_filters,
					 p_req->num_vlan_filters);

	/* This isn't really needed/enforced, but some legacy VFs might depend
	 * on the correct filling of this field.
	 */
	p_resp->num_mc_filters = QED_MAX_MC_ADDRS;

	/* Validate sufficient resources for VF */
	if (p_resp->num_rxqs < p_req->num_rxqs ||
	    p_resp->num_txqs < p_req->num_txqs ||
	    p_resp->num_sbs < p_req->num_sbs ||
	    p_resp->num_mac_filters < p_req->num_mac_filters ||
	    p_resp->num_vlan_filters < p_req->num_vlan_filters ||
	    p_resp->num_mc_filters < p_req->num_mc_filters) {
		DP_VERBOSE(p_hwfn,
			   QED_MSG_IOV,
			   "VF[%d] - Insufficient resources: rxq [%02x/%02x] txq [%02x/%02x] sbs [%02x/%02x] mac [%02x/%02x] vlan [%02x/%02x] mc [%02x/%02x]\n",
			   p_vf->abs_vf_id,
			   p_req->num_rxqs,
			   p_resp->num_rxqs,
			   p_req->num_txqs,
			   p_resp->num_txqs,
			   p_req->num_sbs,
			   p_resp->num_sbs,
			   p_req->num_mac_filters,
			   p_resp->num_mac_filters,
			   p_req->num_vlan_filters,
			   p_resp->num_vlan_filters,
			   p_req->num_mc_filters, p_resp->num_mc_filters);

		/* Some legacy OSes are incapable of correctly handling this
		 * failure.
		 */
		if ((p_vf->acquire.vfdev_info.eth_fp_hsi_minor ==
		     ETH_HSI_VER_NO_PKT_LEN_TUNN) &&
		    (p_vf->acquire.vfdev_info.os_type ==
		     VFPF_ACQUIRE_OS_WINDOWS))
			return PFVF_STATUS_SUCCESS;

		return PFVF_STATUS_NO_RESOURCE;
	}

	return PFVF_STATUS_SUCCESS;
}
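
/* Fill in the per-storm addresses and sizes of the VF's queue statistics
 * within its BAR; these are returned to the VF in the ACQUIRE response.
 */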
static void qed_iov_vf_mbx_acquire_stats(struct qed_hwfn *p_hwfn,
					 struct pfvf_stats_info *p_stats)
{
	p_stats->mstats.address = PXP_VF_BAR0_START_MSDM_ZONE_B +
				  offsetof(struct mstorm_vf_zone,
					   non_trigger.eth_queue_stat);
	p_stats->mstats.len = sizeof(struct eth_mstorm_per_queue_stat);
	p_stats->ustats.address = PXP_VF_BAR0_START_USDM_ZONE_B +
				  offsetof(struct ustorm_vf_zone,
					   non_trigger.eth_queue_stat);
	p_stats->ustats.len = sizeof(struct eth_ustorm_per_queue_stat);
	p_stats->pstats.address = PXP_VF_BAR0_START_PSDM_ZONE_B +
				  offsetof(struct pstorm_vf_zone,
					   non_trigger.eth_queue_stat);
	p_stats->pstats.len = sizeof(struct eth_pstorm_per_queue_stat);
	p_stats->tstats.address = 0;
	p_stats->tstats.len = 0;
}

static void qed_iov_vf_mbx_acquire(struct qed_hwfn *p_hwfn,
				   struct qed_ptt *p_ptt,
				   struct qed_vf_info *vf)
{
	struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
	struct pfvf_acquire_resp_tlv *resp = &mbx->reply_virt->acquire_resp;
	struct pf_vf_pfdev_info *pfdev_info = &resp->pfdev_info;
	struct vfpf_acquire_tlv *req = &mbx->req_virt->acquire;
	u8 vfpf_status = PFVF_STATUS_NOT_SUPPORTED;
	struct pf_vf_resc *resc = &resp->resc;
	int rc;

	memset(resp, 0, sizeof(*resp));

	/* Write the PF version so that VF would know which version
	 * is supported - might be later overridden. This guarantees that
	 * VF could recognize legacy PF based on lack of versions in reply.
	 */
	pfdev_info->major_fp_hsi = ETH_HSI_VER_MAJOR;
	pfdev_info->minor_fp_hsi = ETH_HSI_VER_MINOR;

	if (vf->state != VF_FREE && vf->state != VF_STOPPED) {
		DP_VERBOSE(p_hwfn,
			   QED_MSG_IOV,
			   "VF[%d] sent ACQUIRE but is already in state %d - fail request\n",
			   vf->abs_vf_id, vf->state);
		goto out;
	}

	/* Validate FW compatibility */
	if (req->vfdev_info.eth_fp_hsi_major != ETH_HSI_VER_MAJOR) {
		if (req->vfdev_info.capabilities &
		    VFPF_ACQUIRE_CAP_PRE_FP_HSI) {
			struct vf_pf_vfdev_info *p_vfdev = &req->vfdev_info;

			DP_VERBOSE(p_hwfn, QED_MSG_IOV,
				   "VF[%d] is pre-fastpath HSI\n",
				   vf->abs_vf_id);
			p_vfdev->eth_fp_hsi_major = ETH_HSI_VER_MAJOR;
			p_vfdev->eth_fp_hsi_minor = ETH_HSI_VER_NO_PKT_LEN_TUNN;
		} else {
			DP_INFO(p_hwfn,
				"VF[%d] needs fastpath HSI %02x.%02x, which is incompatible with loaded FW's fastpath HSI %02x.%02x\n",
				vf->abs_vf_id,
				req->vfdev_info.eth_fp_hsi_major,
				req->vfdev_info.eth_fp_hsi_minor,
				ETH_HSI_VER_MAJOR, ETH_HSI_VER_MINOR);

			goto out;
		}
	}

	/* On 100g PFs, prevent old VFs from loading */
	if ((p_hwfn->cdev->num_hwfns > 1) &&
	    !(req->vfdev_info.capabilities & VFPF_ACQUIRE_CAP_100G)) {
		DP_INFO(p_hwfn,
			"VF[%d] is running an old driver that doesn't support 100g\n",
			vf->abs_vf_id);
		goto out;
	}

	/* Store the acquire message */
	memcpy(&vf->acquire, req, sizeof(vf->acquire));

	vf->opaque_fid = req->vfdev_info.opaque_fid;

	vf->vf_bulletin = req->bulletin_addr;
	vf->bulletin.size = (vf->bulletin.size < req->bulletin_size) ?
			    vf->bulletin.size : req->bulletin_size;

	/* fill in pfdev info */
	pfdev_info->chip_num = p_hwfn->cdev->chip_num;
	pfdev_info->db_size = 0;
	pfdev_info->indices_per_sb = PIS_PER_SB;

	pfdev_info->capabilities = PFVF_ACQUIRE_CAP_DEFAULT_UNTAGGED |
				   PFVF_ACQUIRE_CAP_POST_FW_OVERRIDE;
	if (p_hwfn->cdev->num_hwfns > 1)
		pfdev_info->capabilities |= PFVF_ACQUIRE_CAP_100G;

	qed_iov_vf_mbx_acquire_stats(p_hwfn, &pfdev_info->stats_info);

	memcpy(pfdev_info->port_mac, p_hwfn->hw_info.hw_mac_addr, ETH_ALEN);

	pfdev_info->fw_major = FW_MAJOR_VERSION;
	pfdev_info->fw_minor = FW_MINOR_VERSION;
	pfdev_info->fw_rev = FW_REVISION_VERSION;
	pfdev_info->fw_eng = FW_ENGINEERING_VERSION;

	/* Incorrect when legacy, but doesn't matter as legacy isn't reading
	 * this information.
	 */
	pfdev_info->minor_fp_hsi = min_t(u8, ETH_HSI_VER_MINOR,
					 req->vfdev_info.eth_fp_hsi_minor);
	pfdev_info->os_type = VFPF_ACQUIRE_OS_LINUX;
	qed_mcp_get_mfw_ver(p_hwfn, p_ptt, &pfdev_info->mfw_ver, NULL);

	pfdev_info->dev_type = p_hwfn->cdev->type;
	pfdev_info->chip_rev = p_hwfn->cdev->chip_rev;

	/* Fill resources available to VF; Make sure there are enough to
	 * satisfy the VF's request.
	 */
	vfpf_status = qed_iov_vf_mbx_acquire_resc(p_hwfn, p_ptt, vf,
						  &req->resc_request, resc);
	if (vfpf_status != PFVF_STATUS_SUCCESS)
		goto out;

	/* Start the VF in FW */
	rc = qed_sp_vf_start(p_hwfn, vf);
	if (rc) {
		DP_NOTICE(p_hwfn, "Failed to start VF[%02x]\n", vf->abs_vf_id);
		vfpf_status = PFVF_STATUS_FAILURE;
		goto out;
	}

	/* Fill agreed size of bulletin board in response */
	resp->bulletin_size = vf->bulletin.size;
	qed_iov_post_vf_bulletin(p_hwfn, vf->relative_vf_id, p_ptt);

	DP_VERBOSE(p_hwfn,
		   QED_MSG_IOV,
		   "VF[%d] ACQUIRE_RESPONSE: pfdev_info- chip_num=0x%x, db_size=%d, idx_per_sb=%d, pf_cap=0x%llx\n"
		   "resources- n_rxq-%d, n_txq-%d, n_sbs-%d, n_macs-%d, n_vlans-%d\n",
		   vf->abs_vf_id,
		   resp->pfdev_info.chip_num,
		   resp->pfdev_info.db_size,
		   resp->pfdev_info.indices_per_sb,
		   resp->pfdev_info.capabilities,
		   resc->num_rxqs,
		   resc->num_txqs,
		   resc->num_sbs,
		   resc->num_mac_filters,
		   resc->num_vlan_filters);
	vf->state = VF_ACQUIRED;

	/* Prepare Response */
out:
	qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_ACQUIRE,
			     sizeof(struct pfvf_acquire_resp_tlv), vfpf_status);
}

static int __qed_iov_spoofchk_set(struct qed_hwfn *p_hwfn,
				  struct qed_vf_info *p_vf, bool val)
{
	struct qed_sp_vport_update_params params;
	int rc;

	if (val == p_vf->spoof_chk) {
		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
			   "Spoofchk value[%d] is already configured\n", val);
		return 0;
	}

	memset(&params, 0, sizeof(struct qed_sp_vport_update_params));
	params.opaque_fid = p_vf->opaque_fid;
	params.vport_id = p_vf->vport_id;
	params.update_anti_spoofing_en_flg = 1;
	params.anti_spoofing_en = val;

	rc = qed_sp_vport_update(p_hwfn, &params, QED_SPQ_MODE_EBLOCK, NULL);
	if (!rc) {
		p_vf->spoof_chk = val;
		p_vf->req_spoofchk_val = p_vf->spoof_chk;
		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
			   "Spoofchk val[%d] configured\n", val);
	} else {
		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
			   "Spoofchk configuration[val:%d] failed for VF[%d]\n",
			   val, p_vf->relative_vf_id);
	}

	return rc;
}
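
/* Re-add every VLAN filter remembered in the VF's shadow configuration, so
 * the VF's own filters can be restored once a forced VLAN no longer applies.
 */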
static int qed_iov_reconfigure_unicast_vlan(struct qed_hwfn *p_hwfn,
					    struct qed_vf_info *p_vf)
{
	struct qed_filter_ucast filter;
	int rc = 0;
	int i;

	memset(&filter, 0, sizeof(filter));
	filter.is_rx_filter = 1;
	filter.is_tx_filter = 1;
	filter.vport_to_add_to = p_vf->vport_id;
	filter.opcode = QED_FILTER_ADD;

	/* Reconfigure vlans */
	for (i = 0; i < QED_ETH_VF_NUM_VLAN_FILTERS + 1; i++) {
		if (!p_vf->shadow_config.vlans[i].used)
			continue;

		filter.type = QED_FILTER_VLAN;
		filter.vlan = p_vf->shadow_config.vlans[i].vid;
		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
			   "Reconfiguring VLAN [0x%04x] for VF [%04x]\n",
			   filter.vlan, p_vf->relative_vf_id);
		rc = qed_sp_eth_filter_ucast(p_hwfn, p_vf->opaque_fid,
					     &filter, QED_SPQ_MODE_CB, NULL);
		if (rc) {
			DP_NOTICE(p_hwfn,
				  "Failed to configure VLAN [%04x] to VF [%04x]\n",
				  filter.vlan, p_vf->relative_vf_id);
			break;
		}
	}

	return rc;
}

static int
qed_iov_reconfigure_unicast_shadow(struct qed_hwfn *p_hwfn,
				   struct qed_vf_info *p_vf, u64 events)
{
	int rc = 0;

	if ((events & BIT(VLAN_ADDR_FORCED)) &&
	    !(p_vf->configured_features & (1 << VLAN_ADDR_FORCED)))
		rc = qed_iov_reconfigure_unicast_vlan(p_hwfn, p_vf);

	return rc;
}

static int qed_iov_configure_vport_forced(struct qed_hwfn *p_hwfn,
					  struct qed_vf_info *p_vf, u64 events)
{
	int rc = 0;
	struct qed_filter_ucast filter;

	if (!p_vf->vport_instance)
		return -EINVAL;

	if (events & BIT(MAC_ADDR_FORCED)) {
		/* Since there's no way [currently] of removing the MAC,
		 * we can always assume this means we need to force it.
		 */
		memset(&filter, 0, sizeof(filter));
		filter.type = QED_FILTER_MAC;
		filter.opcode = QED_FILTER_REPLACE;
		filter.is_rx_filter = 1;
		filter.is_tx_filter = 1;
		filter.vport_to_add_to = p_vf->vport_id;
		ether_addr_copy(filter.mac, p_vf->bulletin.p_virt->mac);

		rc = qed_sp_eth_filter_ucast(p_hwfn, p_vf->opaque_fid,
					     &filter, QED_SPQ_MODE_CB, NULL);
		if (rc) {
			DP_NOTICE(p_hwfn,
				  "PF failed to configure MAC for VF\n");
			return rc;
		}

		p_vf->configured_features |= 1 << MAC_ADDR_FORCED;
	}

	if (events & BIT(VLAN_ADDR_FORCED)) {
		struct qed_sp_vport_update_params vport_update;
		u8 removal;
		int i;

		memset(&filter, 0, sizeof(filter));
		filter.type = QED_FILTER_VLAN;
		filter.is_rx_filter = 1;
		filter.is_tx_filter = 1;
		filter.vport_to_add_to = p_vf->vport_id;
		filter.vlan = p_vf->bulletin.p_virt->pvid;
		filter.opcode = filter.vlan ? QED_FILTER_REPLACE :
					      QED_FILTER_FLUSH;

		/* Send the ramrod */
		rc = qed_sp_eth_filter_ucast(p_hwfn, p_vf->opaque_fid,
					     &filter, QED_SPQ_MODE_CB, NULL);
		if (rc) {
			DP_NOTICE(p_hwfn,
				  "PF failed to configure VLAN for VF\n");
			return rc;
		}

		/* Update the default-vlan & silent vlan stripping */
		memset(&vport_update, 0, sizeof(vport_update));
		vport_update.opaque_fid = p_vf->opaque_fid;
		vport_update.vport_id = p_vf->vport_id;
		vport_update.update_default_vlan_enable_flg = 1;
		vport_update.default_vlan_enable_flg = filter.vlan ? 1 : 0;
		vport_update.update_default_vlan_flg = 1;
		vport_update.default_vlan = filter.vlan;

		vport_update.update_inner_vlan_removal_flg = 1;
		removal = filter.vlan ? 1
				      : p_vf->shadow_config.inner_vlan_removal;
		vport_update.inner_vlan_removal_flg = removal;
		vport_update.silent_vlan_removal_flg = filter.vlan ? 1 : 0;
		rc = qed_sp_vport_update(p_hwfn,
					 &vport_update,
					 QED_SPQ_MODE_EBLOCK, NULL);
		if (rc) {
			DP_NOTICE(p_hwfn,
				  "PF failed to configure VF vport for vlan\n");
			return rc;
		}

		/* Update all the Rx queues */
		for (i = 0; i < QED_MAX_VF_CHAINS_PER_PF; i++) {
			struct qed_queue_cid *p_cid;

			p_cid = p_vf->vf_queues[i].p_rx_cid;
			if (!p_cid)
				continue;

			rc = qed_sp_eth_rx_queues_update(p_hwfn,
							 &p_cid,
							 1, 0, 1,
							 QED_SPQ_MODE_EBLOCK,
							 NULL);
			if (rc) {
				DP_NOTICE(p_hwfn,
					  "Failed to send Rx update for queue[0x%04x]\n",
					  p_cid->rel.queue_id);
				return rc;
			}
		}

		if (filter.vlan)
			p_vf->configured_features |= 1 << VLAN_ADDR_FORCED;
		else
			p_vf->configured_features &= ~BIT(VLAN_ADDR_FORCED);
	}

	/* If forced features are terminated, we need to configure the shadow
	 * configuration back again.
	 */
	if (events)
		qed_iov_reconfigure_unicast_shadow(p_hwfn, p_vf, events);

	return rc;
}

static void qed_iov_vf_mbx_start_vport(struct qed_hwfn *p_hwfn,
				       struct qed_ptt *p_ptt,
				       struct qed_vf_info *vf)
{
	struct qed_sp_vport_start_params params = { 0 };
	struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
	struct vfpf_vport_start_tlv *start;
	u8 status = PFVF_STATUS_SUCCESS;
	struct qed_vf_info *vf_info;
	u64 *p_bitmap;
	int sb_id;
	int rc;

	vf_info = qed_iov_get_vf_info(p_hwfn, (u16) vf->relative_vf_id, true);
	if (!vf_info) {
		DP_NOTICE(p_hwfn->cdev,
			  "Failed to get VF info, invalid vfid [%d]\n",
			  vf->relative_vf_id);
		return;
	}

	vf->state = VF_ENABLED;
	start = &mbx->req_virt->start_vport;

	qed_iov_enable_vf_traffic(p_hwfn, p_ptt, vf);

	/* Initialize Status block in CAU */
	for (sb_id = 0; sb_id < vf->num_sbs; sb_id++) {
		if (!start->sb_addr[sb_id]) {
			DP_VERBOSE(p_hwfn, QED_MSG_IOV,
				   "VF[%d] did not fill the address of SB %d\n",
				   vf->relative_vf_id, sb_id);
			break;
		}

		qed_int_cau_conf_sb(p_hwfn, p_ptt,
				    start->sb_addr[sb_id],
				    vf->igu_sbs[sb_id], vf->abs_vf_id, 1);
	}

	vf->mtu = start->mtu;
	vf->shadow_config.inner_vlan_removal = start->inner_vlan_removal;

	/* Take into consideration configuration forced by hypervisor;
	 * If none is configured, use the supplied VF values [for old
	 * vfs that would still be fine, since they passed '0' as padding].
	 */
	p_bitmap = &vf_info->bulletin.p_virt->valid_bitmap;
	if (!(*p_bitmap & BIT(VFPF_BULLETIN_UNTAGGED_DEFAULT_FORCED))) {
		u8 vf_req = start->only_untagged;

		vf_info->bulletin.p_virt->default_only_untagged = vf_req;
		*p_bitmap |= 1 << VFPF_BULLETIN_UNTAGGED_DEFAULT;
	}

	params.tpa_mode = start->tpa_mode;
	params.remove_inner_vlan = start->inner_vlan_removal;
	params.tx_switching = true;

	params.only_untagged = vf_info->bulletin.p_virt->default_only_untagged;
	params.drop_ttl0 = false;
	params.concrete_fid = vf->concrete_fid;
	params.opaque_fid = vf->opaque_fid;
	params.vport_id = vf->vport_id;
	params.max_buffers_per_cqe = start->max_buffers_per_cqe;
	params.mtu = vf->mtu;
	params.check_mac = true;

	rc = qed_sp_eth_vport_start(p_hwfn, &params);
	if (rc) {
		DP_ERR(p_hwfn,
		       "qed_iov_vf_mbx_start_vport returned error %d\n", rc);
		status = PFVF_STATUS_FAILURE;
	} else {
		vf->vport_instance++;

		/* Force configuration if needed on the newly opened vport */
		qed_iov_configure_vport_forced(p_hwfn, vf, *p_bitmap);

		__qed_iov_spoofchk_set(p_hwfn, vf, vf->req_spoofchk_val);
	}

	qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_VPORT_START,
			     sizeof(struct pfvf_def_resp_tlv), status);
}

static void qed_iov_vf_mbx_stop_vport(struct qed_hwfn *p_hwfn,
				      struct qed_ptt *p_ptt,
				      struct qed_vf_info *vf)
{
	u8 status = PFVF_STATUS_SUCCESS;
	int rc;

	vf->vport_instance--;
	vf->spoof_chk = false;

	if ((qed_iov_validate_active_rxq(p_hwfn, vf)) ||
	    (qed_iov_validate_active_txq(p_hwfn, vf))) {
		vf->b_malicious = true;
		DP_NOTICE(p_hwfn,
			  "VF [%02x] - considered malicious; Unable to stop RX/TX queues\n",
			  vf->abs_vf_id);
		status = PFVF_STATUS_MALICIOUS;
		goto out;
	}

	rc = qed_sp_vport_stop(p_hwfn, vf->opaque_fid, vf->vport_id);
	if (rc) {
		DP_ERR(p_hwfn, "qed_iov_vf_mbx_stop_vport returned error %d\n",
		       rc);
		status = PFVF_STATUS_FAILURE;
	}

	/* Forget the configuration on the vport */
	vf->configured_features = 0;
	memset(&vf->shadow_config, 0, sizeof(vf->shadow_config));

out:
	qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_VPORT_TEARDOWN,
			     sizeof(struct pfvf_def_resp_tlv), status);
}

static void qed_iov_vf_mbx_start_rxq_resp(struct qed_hwfn *p_hwfn,
					  struct qed_ptt *p_ptt,
					  struct qed_vf_info *vf,
					  u8 status, bool b_legacy)
{
	struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
	struct pfvf_start_queue_resp_tlv *p_tlv;
	struct vfpf_start_rxq_tlv *req;
	u16 length;

	mbx->offset = (u8 *)mbx->reply_virt;

	/* Taking a bigger struct instead of adding a TLV to list was a
	 * mistake, but one which we're now stuck with, as some older
	 * clients assume the size of the previous response.
	 */
	if (!b_legacy)
		length = sizeof(*p_tlv);
	else
		length = sizeof(struct pfvf_def_resp_tlv);

	p_tlv = qed_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_START_RXQ,
			    length);
	qed_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_LIST_END,
		    sizeof(struct channel_list_end_tlv));

	/* Update the TLV with the response */
	if ((status == PFVF_STATUS_SUCCESS) && !b_legacy) {
		req = &mbx->req_virt->start_rxq;
		p_tlv->offset = PXP_VF_BAR0_START_MSDM_ZONE_B +
				offsetof(struct mstorm_vf_zone,
					 non_trigger.eth_rx_queue_producers) +
				sizeof(struct eth_rx_prod_data) * req->rx_qid;
	}

	qed_iov_send_response(p_hwfn, p_ptt, vf, length, status);
}

static void qed_iov_vf_mbx_start_rxq(struct qed_hwfn *p_hwfn,
				     struct qed_ptt *p_ptt,
				     struct qed_vf_info *vf)
{
	struct qed_queue_start_common_params params;
	struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
	u8 status = PFVF_STATUS_NO_RESOURCE;
	struct qed_vf_q_info *p_queue;
	struct vfpf_start_rxq_tlv *req;
	bool b_legacy_vf = false;
	int rc;

	req = &mbx->req_virt->start_rxq;

	if (!qed_iov_validate_rxq(p_hwfn, vf, req->rx_qid,
				  QED_IOV_VALIDATE_Q_DISABLE) ||
	    !qed_iov_validate_sb(p_hwfn, vf, req->hw_sb))
		goto out;

	/* Acquire a new queue-cid */
	p_queue = &vf->vf_queues[req->rx_qid];

	memset(&params, 0, sizeof(params));
	params.queue_id = p_queue->fw_rx_qid;
	params.vport_id = vf->vport_id;
	params.stats_id = vf->abs_vf_id + 0x10;
	params.sb = req->hw_sb;
	params.sb_idx = req->sb_index;

	p_queue->p_rx_cid = _qed_eth_queue_to_cid(p_hwfn,
						  vf->opaque_fid,
						  p_queue->fw_cid,
						  req->rx_qid, &params);
	if (!p_queue->p_rx_cid)
		goto out;

	/* Legacy VFs have their Producers in a different location, which they
	 * calculate on their own and clean the producer prior to this.
	 */
	if (vf->acquire.vfdev_info.eth_fp_hsi_minor ==
	    ETH_HSI_VER_NO_PKT_LEN_TUNN) {
		b_legacy_vf = true;
	} else {
		REG_WR(p_hwfn,
		       GTT_BAR0_MAP_REG_MSDM_RAM +
		       MSTORM_ETH_VF_PRODS_OFFSET(vf->abs_vf_id, req->rx_qid),
		       0);
	}
	p_queue->p_rx_cid->b_legacy_vf = b_legacy_vf;

	rc = qed_eth_rxq_start_ramrod(p_hwfn,
				      p_queue->p_rx_cid,
				      req->bd_max_bytes,
				      req->rxq_addr,
				      req->cqe_pbl_addr, req->cqe_pbl_size);
	if (rc) {
		status = PFVF_STATUS_FAILURE;
		qed_eth_queue_cid_release(p_hwfn, p_queue->p_rx_cid);
		p_queue->p_rx_cid = NULL;
	} else {
		status = PFVF_STATUS_SUCCESS;
		vf->num_active_rxqs++;
	}

out:
	qed_iov_vf_mbx_start_rxq_resp(p_hwfn, p_ptt, vf, status, b_legacy_vf);
}
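
/* Copy the PF's current tunnel configuration and the feature mask into the
 * response TLV sent back to the VF.
 */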
static void
qed_iov_pf_update_tun_response(struct pfvf_update_tunn_param_tlv *p_resp,
			       struct qed_tunnel_info *p_tun,
			       u16 tunn_feature_mask)
{
	p_resp->tunn_feature_mask = tunn_feature_mask;
	p_resp->vxlan_mode = p_tun->vxlan.b_mode_enabled;
	p_resp->l2geneve_mode = p_tun->l2_geneve.b_mode_enabled;
	p_resp->ipgeneve_mode = p_tun->ip_geneve.b_mode_enabled;
	p_resp->l2gre_mode = p_tun->l2_gre.b_mode_enabled;
	p_resp->ipgre_mode = p_tun->ip_gre.b_mode_enabled;
	p_resp->vxlan_clss = p_tun->vxlan.tun_cls;
	p_resp->l2gre_clss = p_tun->l2_gre.tun_cls;
	p_resp->ipgre_clss = p_tun->ip_gre.tun_cls;
	p_resp->l2geneve_clss = p_tun->l2_geneve.tun_cls;
	p_resp->ipgeneve_clss = p_tun->ip_geneve.tun_cls;
	p_resp->geneve_udp_port = p_tun->geneve_port.port;
	p_resp->vxlan_udp_port = p_tun->vxlan_port.port;
}

static void
__qed_iov_pf_update_tun_param(struct vfpf_update_tunn_param_tlv *p_req,
			      struct qed_tunn_update_type *p_tun,
			      enum qed_tunn_mode mask, u8 tun_cls)
{
	if (p_req->tun_mode_update_mask & BIT(mask)) {
		p_tun->b_update_mode = true;

		if (p_req->tunn_mode & BIT(mask))
			p_tun->b_mode_enabled = true;
	}

	p_tun->tun_cls = tun_cls;
}

static void
qed_iov_pf_update_tun_param(struct vfpf_update_tunn_param_tlv *p_req,
			    struct qed_tunn_update_type *p_tun,
			    struct qed_tunn_update_udp_port *p_port,
			    enum qed_tunn_mode mask,
			    u8 tun_cls, u8 update_port, u16 port)
{
	if (update_port) {
		p_port->b_update_port = true;
		p_port->port = port;
	}

	__qed_iov_pf_update_tun_param(p_req, p_tun, mask, tun_cls);
}

static bool
qed_iov_pf_validate_tunn_param(struct vfpf_update_tunn_param_tlv *p_req)
{
	bool b_update_requested = false;

	if (p_req->tun_mode_update_mask || p_req->update_tun_cls ||
	    p_req->update_geneve_port || p_req->update_vxlan_port)
		b_update_requested = true;

	return b_update_requested;
}
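
/* A VF may not turn an enabled tunnel mode off; such a request is dropped
 * and the error code is set so the VF gets notified.
 */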
static void qed_pf_validate_tunn_mode(struct qed_tunn_update_type *tun, int *rc)
{
	if (tun->b_update_mode && !tun->b_mode_enabled) {
		tun->b_update_mode = false;
		*rc = -EINVAL;
	}
}

static int
qed_pf_validate_modify_tunn_config(struct qed_hwfn *p_hwfn,
				   u16 *tun_features, bool *update,
				   struct qed_tunnel_info *tun_src)
{
	struct qed_eth_cb_ops *ops = p_hwfn->cdev->protocol_ops.eth;
	struct qed_tunnel_info *tun = &p_hwfn->cdev->tunnel;
	u16 bultn_vxlan_port, bultn_geneve_port;
	void *cookie = p_hwfn->cdev->ops_cookie;
	int i, rc = 0;

	*tun_features = p_hwfn->cdev->tunn_feature_mask;
	bultn_vxlan_port = tun->vxlan_port.port;
	bultn_geneve_port = tun->geneve_port.port;
	qed_pf_validate_tunn_mode(&tun_src->vxlan, &rc);
	qed_pf_validate_tunn_mode(&tun_src->l2_geneve, &rc);
	qed_pf_validate_tunn_mode(&tun_src->ip_geneve, &rc);
	qed_pf_validate_tunn_mode(&tun_src->l2_gre, &rc);
	qed_pf_validate_tunn_mode(&tun_src->ip_gre, &rc);

	if ((tun_src->b_update_rx_cls || tun_src->b_update_tx_cls) &&
	    (tun_src->vxlan.tun_cls != QED_TUNN_CLSS_MAC_VLAN ||
	     tun_src->l2_geneve.tun_cls != QED_TUNN_CLSS_MAC_VLAN ||
	     tun_src->ip_geneve.tun_cls != QED_TUNN_CLSS_MAC_VLAN ||
	     tun_src->l2_gre.tun_cls != QED_TUNN_CLSS_MAC_VLAN ||
	     tun_src->ip_gre.tun_cls != QED_TUNN_CLSS_MAC_VLAN)) {
		tun_src->b_update_rx_cls = false;
		tun_src->b_update_tx_cls = false;
		rc = -EINVAL;
	}

	if (tun_src->vxlan_port.b_update_port) {
		if (tun_src->vxlan_port.port == tun->vxlan_port.port) {
			tun_src->vxlan_port.b_update_port = false;
		} else {
			*update = true;
			bultn_vxlan_port = tun_src->vxlan_port.port;
		}
	}

	if (tun_src->geneve_port.b_update_port) {
		if (tun_src->geneve_port.port == tun->geneve_port.port) {
			tun_src->geneve_port.b_update_port = false;
		} else {
			*update = true;
			bultn_geneve_port = tun_src->geneve_port.port;
		}
	}

	qed_for_each_vf(p_hwfn, i) {
		qed_iov_bulletin_set_udp_ports(p_hwfn, i, bultn_vxlan_port,
					       bultn_geneve_port);
	}

	qed_schedule_iov(p_hwfn, QED_IOV_WQ_BULLETIN_UPDATE_FLAG);
	ops->ports_update(cookie, bultn_vxlan_port, bultn_geneve_port);

	return rc;
}
2152 static void qed_iov_vf_mbx_update_tunn_param(struct qed_hwfn *p_hwfn,
2153 struct qed_ptt *p_ptt,
2154 struct qed_vf_info *p_vf)
2156 struct qed_tunnel_info *p_tun = &p_hwfn->cdev->tunnel;
2157 struct qed_iov_vf_mbx *mbx = &p_vf->vf_mbx;
2158 struct pfvf_update_tunn_param_tlv *p_resp;
2159 struct vfpf_update_tunn_param_tlv *p_req;
2160 u8 status = PFVF_STATUS_SUCCESS;
2161 bool b_update_required = false;
2162 struct qed_tunnel_info tunn;
2163 u16 tunn_feature_mask = 0;
2166 mbx->offset = (u8 *)mbx->reply_virt;
2168 memset(&tunn, 0, sizeof(tunn));
2169 p_req = &mbx->req_virt->tunn_param_update;
2171 if (!qed_iov_pf_validate_tunn_param(p_req)) {
2172 DP_VERBOSE(p_hwfn, QED_MSG_IOV,
2173 "No tunnel update requested by VF\n");
2174 status = PFVF_STATUS_FAILURE;
2178 tunn.b_update_rx_cls = p_req->update_tun_cls;
2179 tunn.b_update_tx_cls = p_req->update_tun_cls;
2181 qed_iov_pf_update_tun_param(p_req, &tunn.vxlan, &tunn.vxlan_port,
2182 QED_MODE_VXLAN_TUNN, p_req->vxlan_clss,
2183 p_req->update_vxlan_port,
2185 qed_iov_pf_update_tun_param(p_req, &tunn.l2_geneve, &tunn.geneve_port,
2186 QED_MODE_L2GENEVE_TUNN,
2187 p_req->l2geneve_clss,
2188 p_req->update_geneve_port,
2189 p_req->geneve_port);
2190 __qed_iov_pf_update_tun_param(p_req, &tunn.ip_geneve,
2191 QED_MODE_IPGENEVE_TUNN,
2192 p_req->ipgeneve_clss);
2193 __qed_iov_pf_update_tun_param(p_req, &tunn.l2_gre,
2194 QED_MODE_L2GRE_TUNN, p_req->l2gre_clss);
2195 __qed_iov_pf_update_tun_param(p_req, &tunn.ip_gre,
2196 QED_MODE_IPGRE_TUNN, p_req->ipgre_clss);
2198 /* Even if the PF modifies the VF's request, it should
2199  * still return an error in case of a partial or modified
2200  * configuration, as opposed to the requested one. */
2202 rc = qed_pf_validate_modify_tunn_config(p_hwfn, &tunn_feature_mask,
2203 &b_update_required, &tunn);
2206 status = PFVF_STATUS_FAILURE;
2208 /* Check whether the QED client is willing to update anything */
2209 if (b_update_required) {
2212 rc = qed_sp_pf_update_tunn_cfg(p_hwfn, &tunn,
2213 QED_SPQ_MODE_EBLOCK, NULL);
2215 status = PFVF_STATUS_FAILURE;
2217 geneve_port = p_tun->geneve_port.port;
2218 qed_for_each_vf(p_hwfn, i) {
2219 qed_iov_bulletin_set_udp_ports(p_hwfn, i,
2220 p_tun->vxlan_port.port,
2226 p_resp = qed_add_tlv(p_hwfn, &mbx->offset,
2227 CHANNEL_TLV_UPDATE_TUNN_PARAM, sizeof(*p_resp));
2229 qed_iov_pf_update_tun_response(p_resp, p_tun, tunn_feature_mask);
2230 qed_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_LIST_END,
2231 sizeof(struct channel_list_end_tlv));
2233 qed_iov_send_response(p_hwfn, p_ptt, p_vf, sizeof(*p_resp), status);
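/* This is the reply pattern every handler in this file follows: build
 * the response TLVs at mbx->offset, terminate the chain with
 * CHANNEL_TLV_LIST_END, then hand the buffer and a PFVF_STATUS_* code
 * to qed_iov_send_response() for DMA back to the VF.
 */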
2236 static void qed_iov_vf_mbx_start_txq_resp(struct qed_hwfn *p_hwfn,
2237 struct qed_ptt *p_ptt,
2238 struct qed_vf_info *p_vf, u8 status)
2240 struct qed_iov_vf_mbx *mbx = &p_vf->vf_mbx;
2241 struct pfvf_start_queue_resp_tlv *p_tlv;
2242 bool b_legacy = false;
2245 mbx->offset = (u8 *)mbx->reply_virt;
2247 /* Taking a bigger struct instead of adding a TLV to the list was a
2248  * mistake, but one which we're now stuck with, as some older
2249  * clients assume the size of the previous response. */
2251 if (p_vf->acquire.vfdev_info.eth_fp_hsi_minor ==
2252 ETH_HSI_VER_NO_PKT_LEN_TUNN)
2256 length = sizeof(*p_tlv);
2258 length = sizeof(struct pfvf_def_resp_tlv);
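/* E.g., a legacy VF (eth_fp_hsi_minor == ETH_HSI_VER_NO_PKT_LEN_TUNN)
 * gets only the short pfvf_def_resp_tlv, while newer VFs receive the
 * full pfvf_start_queue_resp_tlv carrying the doorbell offset as well.
 */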
2260 p_tlv = qed_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_START_TXQ,
2262 qed_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_LIST_END,
2263 sizeof(struct channel_list_end_tlv));
2265 /* Update the TLV with the response */
2266 if ((status == PFVF_STATUS_SUCCESS) && !b_legacy) {
2267 u16 qid = mbx->req_virt->start_txq.tx_qid;
2269 p_tlv->offset = qed_db_addr_vf(p_vf->vf_queues[qid].fw_cid,
2273 qed_iov_send_response(p_hwfn, p_ptt, p_vf, length, status);
2276 static void qed_iov_vf_mbx_start_txq(struct qed_hwfn *p_hwfn,
2277 struct qed_ptt *p_ptt,
2278 struct qed_vf_info *vf)
2280 struct qed_queue_start_common_params params;
2281 struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
2282 u8 status = PFVF_STATUS_NO_RESOURCE;
2283 struct vfpf_start_txq_tlv *req;
2284 struct qed_vf_q_info *p_queue;
2288 memset(&params, 0, sizeof(params));
2289 req = &mbx->req_virt->start_txq;
2291 if (!qed_iov_validate_txq(p_hwfn, vf, req->tx_qid,
2292 QED_IOV_VALIDATE_Q_DISABLE) ||
2293 !qed_iov_validate_sb(p_hwfn, vf, req->hw_sb))
2296 /* Acquire a new queue-cid */
2297 p_queue = &vf->vf_queues[req->tx_qid];
2299 params.queue_id = p_queue->fw_tx_qid;
2300 params.vport_id = vf->vport_id;
2301 params.stats_id = vf->abs_vf_id + 0x10;
2302 params.sb = req->hw_sb;
2303 params.sb_idx = req->sb_index;
2305 p_queue->p_tx_cid = _qed_eth_queue_to_cid(p_hwfn,
2308 req->tx_qid, &params);
2309 if (!p_queue->p_tx_cid)
2312 pq = qed_get_cm_pq_idx_vf(p_hwfn, vf->relative_vf_id);
2313 rc = qed_eth_txq_start_ramrod(p_hwfn, p_queue->p_tx_cid,
2314 req->pbl_addr, req->pbl_size, pq);
2316 status = PFVF_STATUS_FAILURE;
2317 qed_eth_queue_cid_release(p_hwfn, p_queue->p_tx_cid);
2318 p_queue->p_tx_cid = NULL;
2320 status = PFVF_STATUS_SUCCESS;
2324 qed_iov_vf_mbx_start_txq_resp(p_hwfn, p_ptt, vf, status);
2327 static int qed_iov_vf_stop_rxqs(struct qed_hwfn *p_hwfn,
2328 struct qed_vf_info *vf,
2329 u16 rxq_id, bool cqe_completion)
2331 struct qed_vf_q_info *p_queue;
2334 if (!qed_iov_validate_rxq(p_hwfn, vf, rxq_id,
2335 QED_IOV_VALIDATE_Q_ENABLE)) {
2338 "VF[%d] Tried Closing Rx 0x%04x which is inactive\n",
2339 vf->relative_vf_id, rxq_id);
2343 p_queue = &vf->vf_queues[rxq_id];
2345 rc = qed_eth_rx_queue_stop(p_hwfn,
2347 false, cqe_completion);
2351 p_queue->p_rx_cid = NULL;
2352 vf->num_active_rxqs--;
2357 static int qed_iov_vf_stop_txqs(struct qed_hwfn *p_hwfn,
2358 struct qed_vf_info *vf, u16 txq_id)
2360 struct qed_vf_q_info *p_queue;
2363 if (!qed_iov_validate_txq(p_hwfn, vf, txq_id,
2364 QED_IOV_VALIDATE_Q_ENABLE))
2367 p_queue = &vf->vf_queues[txq_id];
2369 rc = qed_eth_tx_queue_stop(p_hwfn, p_queue->p_tx_cid);
2373 p_queue->p_tx_cid = NULL;
2378 static void qed_iov_vf_mbx_stop_rxqs(struct qed_hwfn *p_hwfn,
2379 struct qed_ptt *p_ptt,
2380 struct qed_vf_info *vf)
2382 u16 length = sizeof(struct pfvf_def_resp_tlv);
2383 struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
2384 u8 status = PFVF_STATUS_FAILURE;
2385 struct vfpf_stop_rxqs_tlv *req;
2388 /* There has never been an official driver that used this interface
2389 * for stopping multiple queues, and it is now considered deprecated.
2390 * Validate this isn't used here.
2392 req = &mbx->req_virt->stop_rxqs;
2393 if (req->num_rxqs != 1) {
2394 DP_VERBOSE(p_hwfn, QED_MSG_IOV,
2395 "Odd; VF[%d] tried stopping multiple Rx queues\n",
2396 vf->relative_vf_id);
2397 status = PFVF_STATUS_NOT_SUPPORTED;
2401 rc = qed_iov_vf_stop_rxqs(p_hwfn, vf, req->rx_qid,
2402 req->cqe_completion);
2404 status = PFVF_STATUS_SUCCESS;
2406 qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_STOP_RXQS,
2410 static void qed_iov_vf_mbx_stop_txqs(struct qed_hwfn *p_hwfn,
2411 struct qed_ptt *p_ptt,
2412 struct qed_vf_info *vf)
2414 u16 length = sizeof(struct pfvf_def_resp_tlv);
2415 struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
2416 u8 status = PFVF_STATUS_FAILURE;
2417 struct vfpf_stop_txqs_tlv *req;
2420 /* There has never been an official driver that used this interface
2421 * for stopping multiple queues, and it is now considered deprecated.
2422 * Validate this isn't used here.
2424 req = &mbx->req_virt->stop_txqs;
2425 if (req->num_txqs != 1) {
2426 DP_VERBOSE(p_hwfn, QED_MSG_IOV,
2427 "Odd; VF[%d] tried stopping multiple Tx queues\n",
2428 vf->relative_vf_id);
2429 status = PFVF_STATUS_NOT_SUPPORTED;
2432 rc = qed_iov_vf_stop_txqs(p_hwfn, vf, req->tx_qid);
2434 status = PFVF_STATUS_SUCCESS;
2437 qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_STOP_TXQS,
2441 static void qed_iov_vf_mbx_update_rxqs(struct qed_hwfn *p_hwfn,
2442 struct qed_ptt *p_ptt,
2443 struct qed_vf_info *vf)
2445 struct qed_queue_cid *handlers[QED_MAX_VF_CHAINS_PER_PF];
2446 u16 length = sizeof(struct pfvf_def_resp_tlv);
2447 struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
2448 struct vfpf_update_rxq_tlv *req;
2449 u8 status = PFVF_STATUS_FAILURE;
2450 u8 complete_event_flg;
2451 u8 complete_cqe_flg;
2456 req = &mbx->req_virt->update_rxq;
2457 complete_cqe_flg = !!(req->flags & VFPF_RXQ_UPD_COMPLETE_CQE_FLAG);
2458 complete_event_flg = !!(req->flags & VFPF_RXQ_UPD_COMPLETE_EVENT_FLAG);
2460 /* Validate inputs */
2461 for (i = req->rx_qid; i < req->rx_qid + req->num_rxqs; i++)
2462 if (!qed_iov_validate_rxq(p_hwfn, vf, i,
2463 QED_IOV_VALIDATE_Q_ENABLE)) {
2464 DP_INFO(p_hwfn, "VF[%d]: Incorrect Rxqs [%04x, %02x]\n",
2465 vf->relative_vf_id, req->rx_qid, req->num_rxqs);
2469 /* Prepare the handlers */
2470 for (i = 0; i < req->num_rxqs; i++) {
2471 qid = req->rx_qid + i;
2472 handlers[i] = vf->vf_queues[qid].p_rx_cid;
2475 rc = qed_sp_eth_rx_queues_update(p_hwfn, (void **)&handlers,
2479 QED_SPQ_MODE_EBLOCK, NULL);
2483 status = PFVF_STATUS_SUCCESS;
2485 qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_UPDATE_RXQ,
2489 void *qed_iov_search_list_tlvs(struct qed_hwfn *p_hwfn,
2490 void *p_tlvs_list, u16 req_type)
2492 struct channel_tlv *p_tlv = (struct channel_tlv *)p_tlvs_list;
2496 if (!p_tlv->length) {
2497 DP_NOTICE(p_hwfn, "Zero length TLV found\n");
2501 if (p_tlv->type == req_type) {
2502 DP_VERBOSE(p_hwfn, QED_MSG_IOV,
2503 "Extended tlv type %d, length %d found\n",
2504 p_tlv->type, p_tlv->length);
2508 len += p_tlv->length;
2509 p_tlv = (struct channel_tlv *)((u8 *)p_tlv + p_tlv->length);
2511 if ((len + p_tlv->length) > TLV_BUFFER_SIZE) {
2512 DP_NOTICE(p_hwfn, "TLVs has overrun the buffer size\n");
2515 } while (p_tlv->type != CHANNEL_TLV_LIST_END);
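/* A minimal usage sketch, mirroring the callers below (p_hwfn and p_mbx
 * assumed from the caller's context):
 *
 *	struct vfpf_vport_update_activate_tlv *p_act;
 *
 *	p_act = qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt,
 *					 CHANNEL_TLV_VPORT_UPDATE_ACTIVATE);
 *	if (!p_act)
 *		return;
 */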
2521 qed_iov_vp_update_act_param(struct qed_hwfn *p_hwfn,
2522 struct qed_sp_vport_update_params *p_data,
2523 struct qed_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
2525 struct vfpf_vport_update_activate_tlv *p_act_tlv;
2526 u16 tlv = CHANNEL_TLV_VPORT_UPDATE_ACTIVATE;
2528 p_act_tlv = (struct vfpf_vport_update_activate_tlv *)
2529 qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
2533 p_data->update_vport_active_rx_flg = p_act_tlv->update_rx;
2534 p_data->vport_active_rx_flg = p_act_tlv->active_rx;
2535 p_data->update_vport_active_tx_flg = p_act_tlv->update_tx;
2536 p_data->vport_active_tx_flg = p_act_tlv->active_tx;
2537 *tlvs_mask |= 1 << QED_IOV_VP_UPDATE_ACTIVATE;
2541 qed_iov_vp_update_vlan_param(struct qed_hwfn *p_hwfn,
2542 struct qed_sp_vport_update_params *p_data,
2543 struct qed_vf_info *p_vf,
2544 struct qed_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
2546 struct vfpf_vport_update_vlan_strip_tlv *p_vlan_tlv;
2547 u16 tlv = CHANNEL_TLV_VPORT_UPDATE_VLAN_STRIP;
2549 p_vlan_tlv = (struct vfpf_vport_update_vlan_strip_tlv *)
2550 qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
2554 p_vf->shadow_config.inner_vlan_removal = p_vlan_tlv->remove_vlan;
2556 /* Ignore the VF request if we're forcing a vlan */
2557 if (!(p_vf->configured_features & BIT(VLAN_ADDR_FORCED))) {
2558 p_data->update_inner_vlan_removal_flg = 1;
2559 p_data->inner_vlan_removal_flg = p_vlan_tlv->remove_vlan;
2562 *tlvs_mask |= 1 << QED_IOV_VP_UPDATE_VLAN_STRIP;
2566 qed_iov_vp_update_tx_switch(struct qed_hwfn *p_hwfn,
2567 struct qed_sp_vport_update_params *p_data,
2568 struct qed_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
2570 struct vfpf_vport_update_tx_switch_tlv *p_tx_switch_tlv;
2571 u16 tlv = CHANNEL_TLV_VPORT_UPDATE_TX_SWITCH;
2573 p_tx_switch_tlv = (struct vfpf_vport_update_tx_switch_tlv *)
2574 qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt,
2576 if (!p_tx_switch_tlv)
2579 p_data->update_tx_switching_flg = 1;
2580 p_data->tx_switching_flg = p_tx_switch_tlv->tx_switching;
2581 *tlvs_mask |= 1 << QED_IOV_VP_UPDATE_TX_SWITCH;
2585 qed_iov_vp_update_mcast_bin_param(struct qed_hwfn *p_hwfn,
2586 struct qed_sp_vport_update_params *p_data,
2587 struct qed_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
2589 struct vfpf_vport_update_mcast_bin_tlv *p_mcast_tlv;
2590 u16 tlv = CHANNEL_TLV_VPORT_UPDATE_MCAST;
2592 p_mcast_tlv = (struct vfpf_vport_update_mcast_bin_tlv *)
2593 qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
2597 p_data->update_approx_mcast_flg = 1;
2598 memcpy(p_data->bins, p_mcast_tlv->bins,
2599 sizeof(unsigned long) * ETH_MULTICAST_MAC_BINS_IN_REGS);
2600 *tlvs_mask |= 1 << QED_IOV_VP_UPDATE_MCAST;
2604 qed_iov_vp_update_accept_flag(struct qed_hwfn *p_hwfn,
2605 struct qed_sp_vport_update_params *p_data,
2606 struct qed_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
2608 struct qed_filter_accept_flags *p_flags = &p_data->accept_flags;
2609 struct vfpf_vport_update_accept_param_tlv *p_accept_tlv;
2610 u16 tlv = CHANNEL_TLV_VPORT_UPDATE_ACCEPT_PARAM;
2612 p_accept_tlv = (struct vfpf_vport_update_accept_param_tlv *)
2613 qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
2617 p_flags->update_rx_mode_config = p_accept_tlv->update_rx_mode;
2618 p_flags->rx_accept_filter = p_accept_tlv->rx_accept_filter;
2619 p_flags->update_tx_mode_config = p_accept_tlv->update_tx_mode;
2620 p_flags->tx_accept_filter = p_accept_tlv->tx_accept_filter;
2621 *tlvs_mask |= 1 << QED_IOV_VP_UPDATE_ACCEPT_PARAM;
2625 qed_iov_vp_update_accept_any_vlan(struct qed_hwfn *p_hwfn,
2626 struct qed_sp_vport_update_params *p_data,
2627 struct qed_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
2629 struct vfpf_vport_update_accept_any_vlan_tlv *p_accept_any_vlan;
2630 u16 tlv = CHANNEL_TLV_VPORT_UPDATE_ACCEPT_ANY_VLAN;
2632 p_accept_any_vlan = (struct vfpf_vport_update_accept_any_vlan_tlv *)
2633 qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt,
2635 if (!p_accept_any_vlan)
2638 p_data->accept_any_vlan = p_accept_any_vlan->accept_any_vlan;
2639 p_data->update_accept_any_vlan_flg =
2640 p_accept_any_vlan->update_accept_any_vlan_flg;
2641 *tlvs_mask |= 1 << QED_IOV_VP_UPDATE_ACCEPT_ANY_VLAN;
2645 qed_iov_vp_update_rss_param(struct qed_hwfn *p_hwfn,
2646 struct qed_vf_info *vf,
2647 struct qed_sp_vport_update_params *p_data,
2648 struct qed_rss_params *p_rss,
2649 struct qed_iov_vf_mbx *p_mbx,
2650 u16 *tlvs_mask, u16 *tlvs_accepted)
2652 struct vfpf_vport_update_rss_tlv *p_rss_tlv;
2653 u16 tlv = CHANNEL_TLV_VPORT_UPDATE_RSS;
2654 bool b_reject = false;
2658 p_rss_tlv = (struct vfpf_vport_update_rss_tlv *)
2659 qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
2661 p_data->rss_params = NULL;
2665 memset(p_rss, 0, sizeof(struct qed_rss_params));
2667 p_rss->update_rss_config = !!(p_rss_tlv->update_rss_flags &
2668 VFPF_UPDATE_RSS_CONFIG_FLAG);
2669 p_rss->update_rss_capabilities = !!(p_rss_tlv->update_rss_flags &
2670 VFPF_UPDATE_RSS_CAPS_FLAG);
2671 p_rss->update_rss_ind_table = !!(p_rss_tlv->update_rss_flags &
2672 VFPF_UPDATE_RSS_IND_TABLE_FLAG);
2673 p_rss->update_rss_key = !!(p_rss_tlv->update_rss_flags &
2674 VFPF_UPDATE_RSS_KEY_FLAG);
2676 p_rss->rss_enable = p_rss_tlv->rss_enable;
2677 p_rss->rss_eng_id = vf->relative_vf_id + 1;
2678 p_rss->rss_caps = p_rss_tlv->rss_caps;
2679 p_rss->rss_table_size_log = p_rss_tlv->rss_table_size_log;
2680 memcpy(p_rss->rss_key, p_rss_tlv->rss_key, sizeof(p_rss->rss_key));
2682 table_size = min_t(u16, ARRAY_SIZE(p_rss->rss_ind_table),
2683 (1 << p_rss_tlv->rss_table_size_log));
2685 for (i = 0; i < table_size; i++) {
2686 q_idx = p_rss_tlv->rss_ind_table[i];
2687 if (!qed_iov_validate_rxq(p_hwfn, vf, q_idx,
2688 QED_IOV_VALIDATE_Q_ENABLE)) {
2691 "VF[%d]: Omitting RSS due to wrong queue %04x\n",
2692 vf->relative_vf_id, q_idx);
2697 p_rss->rss_ind_table[i] = vf->vf_queues[q_idx].p_rx_cid;
2700 p_data->rss_params = p_rss;
2702 *tlvs_mask |= 1 << QED_IOV_VP_UPDATE_RSS;
2704 *tlvs_accepted |= 1 << QED_IOV_VP_UPDATE_RSS;
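/* RSS is the one extended TLV validated before acceptance: the request
 * is always reflected in tlvs_mask, but tlvs_accepted is set only when
 * every indirection-table entry referenced an enabled Rx queue. This
 * lets the VF distinguish a rejected request from an unrecognized one.
 */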
2708 qed_iov_vp_update_sge_tpa_param(struct qed_hwfn *p_hwfn,
2709 struct qed_vf_info *vf,
2710 struct qed_sp_vport_update_params *p_data,
2711 struct qed_sge_tpa_params *p_sge_tpa,
2712 struct qed_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
2714 struct vfpf_vport_update_sge_tpa_tlv *p_sge_tpa_tlv;
2715 u16 tlv = CHANNEL_TLV_VPORT_UPDATE_SGE_TPA;
2717 p_sge_tpa_tlv = (struct vfpf_vport_update_sge_tpa_tlv *)
2718 qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
2720 if (!p_sge_tpa_tlv) {
2721 p_data->sge_tpa_params = NULL;
2725 memset(p_sge_tpa, 0, sizeof(struct qed_sge_tpa_params));
2727 p_sge_tpa->update_tpa_en_flg =
2728 !!(p_sge_tpa_tlv->update_sge_tpa_flags & VFPF_UPDATE_TPA_EN_FLAG);
2729 p_sge_tpa->update_tpa_param_flg =
2730 !!(p_sge_tpa_tlv->update_sge_tpa_flags &
2731 VFPF_UPDATE_TPA_PARAM_FLAG);
2733 p_sge_tpa->tpa_ipv4_en_flg =
2734 !!(p_sge_tpa_tlv->sge_tpa_flags & VFPF_TPA_IPV4_EN_FLAG);
2735 p_sge_tpa->tpa_ipv6_en_flg =
2736 !!(p_sge_tpa_tlv->sge_tpa_flags & VFPF_TPA_IPV6_EN_FLAG);
2737 p_sge_tpa->tpa_pkt_split_flg =
2738 !!(p_sge_tpa_tlv->sge_tpa_flags & VFPF_TPA_PKT_SPLIT_FLAG);
2739 p_sge_tpa->tpa_hdr_data_split_flg =
2740 !!(p_sge_tpa_tlv->sge_tpa_flags & VFPF_TPA_HDR_DATA_SPLIT_FLAG);
2741 p_sge_tpa->tpa_gro_consistent_flg =
2742 !!(p_sge_tpa_tlv->sge_tpa_flags & VFPF_TPA_GRO_CONSIST_FLAG);
2744 p_sge_tpa->tpa_max_aggs_num = p_sge_tpa_tlv->tpa_max_aggs_num;
2745 p_sge_tpa->tpa_max_size = p_sge_tpa_tlv->tpa_max_size;
2746 p_sge_tpa->tpa_min_size_to_start = p_sge_tpa_tlv->tpa_min_size_to_start;
2747 p_sge_tpa->tpa_min_size_to_cont = p_sge_tpa_tlv->tpa_min_size_to_cont;
2748 p_sge_tpa->max_buffers_per_cqe = p_sge_tpa_tlv->max_buffers_per_cqe;
2750 p_data->sge_tpa_params = p_sge_tpa;
2752 *tlvs_mask |= 1 << QED_IOV_VP_UPDATE_SGE_TPA;
2755 static int qed_iov_pre_update_vport(struct qed_hwfn *hwfn,
2757 struct qed_sp_vport_update_params *params,
2760 u8 mask = QED_ACCEPT_UCAST_UNMATCHED | QED_ACCEPT_MCAST_UNMATCHED;
2761 struct qed_filter_accept_flags *flags = &params->accept_flags;
2762 struct qed_public_vf_info *vf_info;
2764 /* Untrusted VFs can't even be trusted to know that fact.
2765 * Simply indicate everything is configured fine, and trace
2766 * configuration 'behind their back'.
2768 if (!(*tlvs & BIT(QED_IOV_VP_UPDATE_ACCEPT_PARAM)))
2771 vf_info = qed_iov_get_public_vf_info(hwfn, vfid, true);
2773 if (flags->update_rx_mode_config) {
2774 vf_info->rx_accept_mode = flags->rx_accept_filter;
2775 if (!vf_info->is_trusted_configured)
2776 flags->rx_accept_filter &= ~mask;
2779 if (flags->update_tx_mode_config) {
2780 vf_info->tx_accept_mode = flags->tx_accept_filter;
2781 if (!vf_info->is_trusted_configured)
2782 flags->tx_accept_filter &= ~mask;
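/* Illustrative example with hypothetical flags: an untrusted VF asking
 * for promiscuous Rx,
 *
 *	flags->rx_accept_filter = QED_ACCEPT_UCAST_MATCHED |
 *				  QED_ACCEPT_UCAST_UNMATCHED;
 *
 * is silently stripped of the unmatched bit here, while rx_accept_mode
 * keeps the original request for replay once the VF is marked trusted.
 */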
2788 static void qed_iov_vf_mbx_vport_update(struct qed_hwfn *p_hwfn,
2789 struct qed_ptt *p_ptt,
2790 struct qed_vf_info *vf)
2792 struct qed_rss_params *p_rss_params = NULL;
2793 struct qed_sp_vport_update_params params;
2794 struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
2795 struct qed_sge_tpa_params sge_tpa_params;
2796 u16 tlvs_mask = 0, tlvs_accepted = 0;
2797 u8 status = PFVF_STATUS_SUCCESS;
2801 /* Validate that the VF has an active vport before handling the request */
2802 if (!vf->vport_instance) {
2805 "No VPORT instance available for VF[%d], failing vport update\n",
2807 status = PFVF_STATUS_FAILURE;
2810 p_rss_params = vzalloc(sizeof(*p_rss_params));
2811 if (!p_rss_params) {
2812 status = PFVF_STATUS_FAILURE;
2816 memset(&params, 0, sizeof(params));
2817 params.opaque_fid = vf->opaque_fid;
2818 params.vport_id = vf->vport_id;
2819 params.rss_params = NULL;
2821 /* Search the extended TLV list and copy the values requested by
2822  * the VF into struct qed_sp_vport_update_params. */
2824 qed_iov_vp_update_act_param(p_hwfn, &params, mbx, &tlvs_mask);
2825 qed_iov_vp_update_vlan_param(p_hwfn, &params, vf, mbx, &tlvs_mask);
2826 qed_iov_vp_update_tx_switch(p_hwfn, &params, mbx, &tlvs_mask);
2827 qed_iov_vp_update_mcast_bin_param(p_hwfn, &params, mbx, &tlvs_mask);
2828 qed_iov_vp_update_accept_flag(p_hwfn, &params, mbx, &tlvs_mask);
2829 qed_iov_vp_update_accept_any_vlan(p_hwfn, &params, mbx, &tlvs_mask);
2830 qed_iov_vp_update_sge_tpa_param(p_hwfn, vf, &params,
2831 &sge_tpa_params, mbx, &tlvs_mask);
2833 tlvs_accepted = tlvs_mask;
2835 /* Some of the extended TLVs need to be validated first; in that case
2836  * they can set the mask without setting the accepted bits, so that
2837  * the PF can communicate to the VF that it has rejected its request. */
2839 qed_iov_vp_update_rss_param(p_hwfn, vf, &params, p_rss_params,
2840 mbx, &tlvs_mask, &tlvs_accepted);
2842 if (qed_iov_pre_update_vport(p_hwfn, vf->relative_vf_id,
2843 &params, &tlvs_accepted)) {
2845 status = PFVF_STATUS_NOT_SUPPORTED;
2849 if (!tlvs_accepted) {
2851 DP_VERBOSE(p_hwfn, QED_MSG_IOV,
2852 "Upper-layer prevents VF vport configuration\n");
2854 DP_VERBOSE(p_hwfn, QED_MSG_IOV,
2855 "No feature tlvs found for vport update\n");
2856 status = PFVF_STATUS_NOT_SUPPORTED;
2860 rc = qed_sp_vport_update(p_hwfn, &params, QED_SPQ_MODE_EBLOCK, NULL);
2863 status = PFVF_STATUS_FAILURE;
2866 vfree(p_rss_params);
2867 length = qed_iov_prep_vp_update_resp_tlvs(p_hwfn, vf, mbx, status,
2868 tlvs_mask, tlvs_accepted);
2869 qed_iov_send_response(p_hwfn, p_ptt, vf, length, status);
2872 static int qed_iov_vf_update_vlan_shadow(struct qed_hwfn *p_hwfn,
2873 struct qed_vf_info *p_vf,
2874 struct qed_filter_ucast *p_params)
2878 /* First remove entries and then add new ones */
2879 if (p_params->opcode == QED_FILTER_REMOVE) {
2880 for (i = 0; i < QED_ETH_VF_NUM_VLAN_FILTERS + 1; i++)
2881 if (p_vf->shadow_config.vlans[i].used &&
2882 p_vf->shadow_config.vlans[i].vid ==
2884 p_vf->shadow_config.vlans[i].used = false;
2887 if (i == QED_ETH_VF_NUM_VLAN_FILTERS + 1) {
2890 "VF [%d] - Tries to remove a non-existing vlan\n",
2891 p_vf->relative_vf_id);
2894 } else if (p_params->opcode == QED_FILTER_REPLACE ||
2895 p_params->opcode == QED_FILTER_FLUSH) {
2896 for (i = 0; i < QED_ETH_VF_NUM_VLAN_FILTERS + 1; i++)
2897 p_vf->shadow_config.vlans[i].used = false;
2900 /* In forced mode, we're willing to remove entries - but we don't add new ones */
2903 if (p_vf->bulletin.p_virt->valid_bitmap & BIT(VLAN_ADDR_FORCED))
2906 if (p_params->opcode == QED_FILTER_ADD ||
2907 p_params->opcode == QED_FILTER_REPLACE) {
2908 for (i = 0; i < QED_ETH_VF_NUM_VLAN_FILTERS + 1; i++) {
2909 if (p_vf->shadow_config.vlans[i].used)
2912 p_vf->shadow_config.vlans[i].used = true;
2913 p_vf->shadow_config.vlans[i].vid = p_params->vlan;
2917 if (i == QED_ETH_VF_NUM_VLAN_FILTERS + 1) {
2920 "VF [%d] - Tries to configure more than %d vlan filters\n",
2921 p_vf->relative_vf_id,
2922 QED_ETH_VF_NUM_VLAN_FILTERS + 1);
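/* The shadow list maintained above is what allows the PF to replay the
 * VF's VLAN configuration later, e.g. when a forced VLAN is removed and
 * the entries still marked 'used' need to be re-applied to the vport.
 */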
2930 static int qed_iov_vf_update_mac_shadow(struct qed_hwfn *p_hwfn,
2931 struct qed_vf_info *p_vf,
2932 struct qed_filter_ucast *p_params)
2936 /* If we're in forced-mode, we don't allow any change */
2937 if (p_vf->bulletin.p_virt->valid_bitmap & BIT(MAC_ADDR_FORCED))
2940 /* First remove entries and then add new ones */
2941 if (p_params->opcode == QED_FILTER_REMOVE) {
2942 for (i = 0; i < QED_ETH_VF_NUM_MAC_FILTERS; i++) {
2943 if (ether_addr_equal(p_vf->shadow_config.macs[i],
2945 eth_zero_addr(p_vf->shadow_config.macs[i]);
2950 if (i == QED_ETH_VF_NUM_MAC_FILTERS) {
2951 DP_VERBOSE(p_hwfn, QED_MSG_IOV,
2952 "MAC isn't configured\n");
2955 } else if (p_params->opcode == QED_FILTER_REPLACE ||
2956 p_params->opcode == QED_FILTER_FLUSH) {
2957 for (i = 0; i < QED_ETH_VF_NUM_MAC_FILTERS; i++)
2958 eth_zero_addr(p_vf->shadow_config.macs[i]);
2961 /* List the new MAC address */
2962 if (p_params->opcode != QED_FILTER_ADD &&
2963 p_params->opcode != QED_FILTER_REPLACE)
2966 for (i = 0; i < QED_ETH_VF_NUM_MAC_FILTERS; i++) {
2967 if (is_zero_ether_addr(p_vf->shadow_config.macs[i])) {
2968 ether_addr_copy(p_vf->shadow_config.macs[i],
2970 DP_VERBOSE(p_hwfn, QED_MSG_IOV,
2971 "Added MAC at %d entry in shadow\n", i);
2976 if (i == QED_ETH_VF_NUM_MAC_FILTERS) {
2977 DP_VERBOSE(p_hwfn, QED_MSG_IOV, "No available place for MAC\n");
2985 qed_iov_vf_update_unicast_shadow(struct qed_hwfn *p_hwfn,
2986 struct qed_vf_info *p_vf,
2987 struct qed_filter_ucast *p_params)
2991 if (p_params->type == QED_FILTER_MAC) {
2992 rc = qed_iov_vf_update_mac_shadow(p_hwfn, p_vf, p_params);
2997 if (p_params->type == QED_FILTER_VLAN)
2998 rc = qed_iov_vf_update_vlan_shadow(p_hwfn, p_vf, p_params);
3003 static int qed_iov_chk_ucast(struct qed_hwfn *hwfn,
3004 int vfid, struct qed_filter_ucast *params)
3006 struct qed_public_vf_info *vf;
3008 vf = qed_iov_get_public_vf_info(hwfn, vfid, true);
3012 /* No real decision to make; Store the configured MAC */
3013 if (params->type == QED_FILTER_MAC ||
3014 params->type == QED_FILTER_MAC_VLAN)
3015 ether_addr_copy(vf->mac, params->mac);
3020 static void qed_iov_vf_mbx_ucast_filter(struct qed_hwfn *p_hwfn,
3021 struct qed_ptt *p_ptt,
3022 struct qed_vf_info *vf)
3024 struct qed_bulletin_content *p_bulletin = vf->bulletin.p_virt;
3025 struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
3026 struct vfpf_ucast_filter_tlv *req;
3027 u8 status = PFVF_STATUS_SUCCESS;
3028 struct qed_filter_ucast params;
3031 /* Prepare the unicast filter params */
3032 memset(&params, 0, sizeof(struct qed_filter_ucast));
3033 req = &mbx->req_virt->ucast_filter;
3034 params.opcode = (enum qed_filter_opcode)req->opcode;
3035 params.type = (enum qed_filter_ucast_type)req->type;
3037 params.is_rx_filter = 1;
3038 params.is_tx_filter = 1;
3039 params.vport_to_remove_from = vf->vport_id;
3040 params.vport_to_add_to = vf->vport_id;
3041 memcpy(params.mac, req->mac, ETH_ALEN);
3042 params.vlan = req->vlan;
3046 "VF[%d]: opcode 0x%02x type 0x%02x [%s %s] [vport 0x%02x] MAC %02x:%02x:%02x:%02x:%02x:%02x, vlan 0x%04x\n",
3047 vf->abs_vf_id, params.opcode, params.type,
3048 params.is_rx_filter ? "RX" : "",
3049 params.is_tx_filter ? "TX" : "",
3050 params.vport_to_add_to,
3051 params.mac[0], params.mac[1],
3052 params.mac[2], params.mac[3],
3053 params.mac[4], params.mac[5], params.vlan);
3055 if (!vf->vport_instance) {
3058 "No VPORT instance available for VF[%d], failing ucast MAC configuration\n",
3060 status = PFVF_STATUS_FAILURE;
3064 /* Update shadow copy of the VF configuration */
3065 if (qed_iov_vf_update_unicast_shadow(p_hwfn, vf, &params)) {
3066 status = PFVF_STATUS_FAILURE;
3070 /* Determine if the unicast filtering is acceptable to the PF */
3071 if ((p_bulletin->valid_bitmap & BIT(VLAN_ADDR_FORCED)) &&
3072 (params.type == QED_FILTER_VLAN ||
3073 params.type == QED_FILTER_MAC_VLAN)) {
3074 /* Once a VLAN is forced or a PVID is set, do not allow
3075  * adding or replacing any further VLANs. */
3077 if (params.opcode == QED_FILTER_ADD ||
3078 params.opcode == QED_FILTER_REPLACE)
3079 status = PFVF_STATUS_FORCED;
3083 if ((p_bulletin->valid_bitmap & BIT(MAC_ADDR_FORCED)) &&
3084 (params.type == QED_FILTER_MAC ||
3085 params.type == QED_FILTER_MAC_VLAN)) {
3086 if (!ether_addr_equal(p_bulletin->mac, params.mac) ||
3087 (params.opcode != QED_FILTER_ADD &&
3088 params.opcode != QED_FILTER_REPLACE))
3089 status = PFVF_STATUS_FORCED;
3093 rc = qed_iov_chk_ucast(p_hwfn, vf->relative_vf_id, &params);
3095 status = PFVF_STATUS_FAILURE;
3099 rc = qed_sp_eth_filter_ucast(p_hwfn, vf->opaque_fid, &params,
3100 QED_SPQ_MODE_CB, NULL);
3102 status = PFVF_STATUS_FAILURE;
3105 qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_UCAST_FILTER,
3106 sizeof(struct pfvf_def_resp_tlv), status);
3109 static void qed_iov_vf_mbx_int_cleanup(struct qed_hwfn *p_hwfn,
3110 struct qed_ptt *p_ptt,
3111 struct qed_vf_info *vf)
3116 for (i = 0; i < vf->num_sbs; i++)
3117 qed_int_igu_init_pure_rt_single(p_hwfn, p_ptt,
3119 vf->opaque_fid, false);
3121 qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_INT_CLEANUP,
3122 sizeof(struct pfvf_def_resp_tlv),
3123 PFVF_STATUS_SUCCESS);
3126 static void qed_iov_vf_mbx_close(struct qed_hwfn *p_hwfn,
3127 struct qed_ptt *p_ptt, struct qed_vf_info *vf)
3129 u16 length = sizeof(struct pfvf_def_resp_tlv);
3130 u8 status = PFVF_STATUS_SUCCESS;
3132 /* Disable Interrupts for VF */
3133 qed_iov_vf_igu_set_int(p_hwfn, p_ptt, vf, 0);
3135 /* Reset Permission table */
3136 qed_iov_config_perm_table(p_hwfn, p_ptt, vf, 0);
3138 qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_CLOSE,
3142 static void qed_iov_vf_mbx_release(struct qed_hwfn *p_hwfn,
3143 struct qed_ptt *p_ptt,
3144 struct qed_vf_info *p_vf)
3146 u16 length = sizeof(struct pfvf_def_resp_tlv);
3147 u8 status = PFVF_STATUS_SUCCESS;
3150 qed_iov_vf_cleanup(p_hwfn, p_vf);
3152 if (p_vf->state != VF_STOPPED && p_vf->state != VF_FREE) {
3153 /* Stopping the VF */
3154 rc = qed_sp_vf_stop(p_hwfn, p_vf->concrete_fid,
3158 DP_ERR(p_hwfn, "qed_sp_vf_stop returned error %d\n",
3160 status = PFVF_STATUS_FAILURE;
3163 p_vf->state = VF_STOPPED;
3166 qed_iov_prepare_resp(p_hwfn, p_ptt, p_vf, CHANNEL_TLV_RELEASE,
3171 qed_iov_vf_flr_poll_dorq(struct qed_hwfn *p_hwfn,
3172 struct qed_vf_info *p_vf, struct qed_ptt *p_ptt)
3177 qed_fid_pretend(p_hwfn, p_ptt, (u16) p_vf->concrete_fid);
3179 for (cnt = 0; cnt < 50; cnt++) {
3180 val = qed_rd(p_hwfn, p_ptt, DORQ_REG_VF_USAGE_CNT);
3185 qed_fid_pretend(p_hwfn, p_ptt, (u16) p_hwfn->hw_info.concrete_fid);
3189 "VF[%d] - dorq failed to cleanup [usage 0x%08x]\n",
3190 p_vf->abs_vf_id, val);
3198 qed_iov_vf_flr_poll_pbf(struct qed_hwfn *p_hwfn,
3199 struct qed_vf_info *p_vf, struct qed_ptt *p_ptt)
3201 u32 cons[MAX_NUM_VOQS], distance[MAX_NUM_VOQS];
3204 /* Read initial consumers & producers */
3205 for (i = 0; i < MAX_NUM_VOQS; i++) {
3208 cons[i] = qed_rd(p_hwfn, p_ptt,
3209 PBF_REG_NUM_BLOCKS_ALLOCATED_CONS_VOQ0 +
3211 prod = qed_rd(p_hwfn, p_ptt,
3212 PBF_REG_NUM_BLOCKS_ALLOCATED_PROD_VOQ0 +
3214 distance[i] = prod - cons[i];
3217 /* Wait for consumers to pass the producers */
3219 for (cnt = 0; cnt < 50; cnt++) {
3220 for (; i < MAX_NUM_VOQS; i++) {
3223 tmp = qed_rd(p_hwfn, p_ptt,
3224 PBF_REG_NUM_BLOCKS_ALLOCATED_CONS_VOQ0 +
3226 if (distance[i] > tmp - cons[i])
3230 if (i == MAX_NUM_VOQS)
3237 DP_ERR(p_hwfn, "VF[%d] - pbf polling failed on VOQ %d\n",
3238 p_vf->abs_vf_id, i);
3245 static int qed_iov_vf_flr_poll(struct qed_hwfn *p_hwfn,
3246 struct qed_vf_info *p_vf, struct qed_ptt *p_ptt)
3250 rc = qed_iov_vf_flr_poll_dorq(p_hwfn, p_vf, p_ptt);
3254 rc = qed_iov_vf_flr_poll_pbf(p_hwfn, p_vf, p_ptt);
3262 qed_iov_execute_vf_flr_cleanup(struct qed_hwfn *p_hwfn,
3263 struct qed_ptt *p_ptt,
3264 u16 rel_vf_id, u32 *ack_vfs)
3266 struct qed_vf_info *p_vf;
3269 p_vf = qed_iov_get_vf_info(p_hwfn, rel_vf_id, false);
3273 if (p_hwfn->pf_iov_info->pending_flr[rel_vf_id / 64] &
3274 (1ULL << (rel_vf_id % 64))) {
3275 u16 vfid = p_vf->abs_vf_id;
3277 DP_VERBOSE(p_hwfn, QED_MSG_IOV,
3278 "VF[%d] - Handling FLR\n", vfid);
3280 qed_iov_vf_cleanup(p_hwfn, p_vf);
3282 /* If VF isn't active, no need for anything but SW */
3286 rc = qed_iov_vf_flr_poll(p_hwfn, p_vf, p_ptt);
3290 rc = qed_final_cleanup(p_hwfn, p_ptt, vfid, true);
3292 DP_ERR(p_hwfn, "Failed handle FLR of VF[%d]\n", vfid);
3296 /* Workaround to make VF-PF channel ready, as FW
3297 * doesn't do that as a part of FLR.
3300 GTT_BAR0_MAP_REG_USDM_RAM +
3301 USTORM_VF_PF_CHANNEL_READY_OFFSET(vfid), 1);
3303 /* VF_STOPPED has to be set only after final cleanup
3304 * but prior to re-enabling the VF.
3306 p_vf->state = VF_STOPPED;
3308 rc = qed_iov_enable_vf_access(p_hwfn, p_ptt, p_vf);
3310 DP_ERR(p_hwfn, "Failed to re-enable VF[%d] access\n",
3315 /* Mark VF for ack and clean pending state */
3316 if (p_vf->state == VF_RESET)
3317 p_vf->state = VF_STOPPED;
3318 ack_vfs[vfid / 32] |= BIT((vfid % 32));
3319 p_hwfn->pf_iov_info->pending_flr[rel_vf_id / 64] &=
3320 ~(1ULL << (rel_vf_id % 64));
3321 p_vf->vf_mbx.b_pending_msg = false;
3328 qed_iov_vf_flr_cleanup(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
3330 u32 ack_vfs[VF_MAX_STATIC / 32];
3334 memset(ack_vfs, 0, sizeof(u32) * (VF_MAX_STATIC / 32));
3336 /* Since BRB <-> PRS interface can't be tested as part of the flr
3337 * polling due to HW limitations, simply sleep a bit. And since
3338 * there's no need to wait per-vf, do it before looping.
3342 for (i = 0; i < p_hwfn->cdev->p_iov_info->total_vfs; i++)
3343 qed_iov_execute_vf_flr_cleanup(p_hwfn, p_ptt, i, ack_vfs);
3345 rc = qed_mcp_ack_vf_flr(p_hwfn, p_ptt, ack_vfs);
3349 bool qed_iov_mark_vf_flr(struct qed_hwfn *p_hwfn, u32 *p_disabled_vfs)
3354 DP_VERBOSE(p_hwfn, QED_MSG_IOV, "Marking FLR-ed VFs\n");
3355 for (i = 0; i < (VF_MAX_STATIC / 32); i++)
3356 DP_VERBOSE(p_hwfn, QED_MSG_IOV,
3357 "[%08x,...,%08x]: %08x\n",
3358 i * 32, (i + 1) * 32 - 1, p_disabled_vfs[i]);
3360 if (!p_hwfn->cdev->p_iov_info) {
3361 DP_NOTICE(p_hwfn, "VF flr but no IOV\n");
3366 for (i = 0; i < p_hwfn->cdev->p_iov_info->total_vfs; i++) {
3367 struct qed_vf_info *p_vf;
3370 p_vf = qed_iov_get_vf_info(p_hwfn, i, false);
3374 vfid = p_vf->abs_vf_id;
3375 if (BIT((vfid % 32)) & p_disabled_vfs[vfid / 32]) {
3376 u64 *p_flr = p_hwfn->pf_iov_info->pending_flr;
3377 u16 rel_vf_id = p_vf->relative_vf_id;
3379 DP_VERBOSE(p_hwfn, QED_MSG_IOV,
3380 "VF[%d] [rel %d] got FLR-ed\n",
3383 p_vf->state = VF_RESET;
3385 /* No need to lock here, since pending_flr should
3386  * only change here and before ACKing the MFW. Since
3387  * the MFW will not trigger an additional attention for
3388  * VF FLR until we ACK, we're safe. */
3390 p_flr[rel_vf_id / 64] |= 1ULL << (rel_vf_id % 64);
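/* Worked example of the bitmap layout: rel_vf_id 70 lands in p_flr[1]
 * as bit 6, since 70 / 64 == 1 and 70 % 64 == 6. The pending-events
 * bitmap in qed_iov_pf_get_pending_events() uses the same
 * u64-per-64-VFs scheme.
 */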
3398 static void qed_iov_get_link(struct qed_hwfn *p_hwfn,
3400 struct qed_mcp_link_params *p_params,
3401 struct qed_mcp_link_state *p_link,
3402 struct qed_mcp_link_capabilities *p_caps)
3404 struct qed_vf_info *p_vf = qed_iov_get_vf_info(p_hwfn,
3407 struct qed_bulletin_content *p_bulletin;
3412 p_bulletin = p_vf->bulletin.p_virt;
3415 __qed_vf_get_link_params(p_hwfn, p_params, p_bulletin);
3417 __qed_vf_get_link_state(p_hwfn, p_link, p_bulletin);
3419 __qed_vf_get_link_caps(p_hwfn, p_caps, p_bulletin);
3422 static void qed_iov_process_mbx_req(struct qed_hwfn *p_hwfn,
3423 struct qed_ptt *p_ptt, int vfid)
3425 struct qed_iov_vf_mbx *mbx;
3426 struct qed_vf_info *p_vf;
3428 p_vf = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true);
3432 mbx = &p_vf->vf_mbx;
3434 /* qed_iov_process_mbx_request */
3435 if (!mbx->b_pending_msg) {
3437 "VF[%02x]: Trying to process mailbox message when none is pending\n",
3441 mbx->b_pending_msg = false;
3443 mbx->first_tlv = mbx->req_virt->first_tlv;
3445 DP_VERBOSE(p_hwfn, QED_MSG_IOV,
3446 "VF[%02x]: Processing mailbox message [type %04x]\n",
3447 p_vf->abs_vf_id, mbx->first_tlv.tl.type);
3449 /* check if tlv type is known */
3450 if (qed_iov_tlv_supported(mbx->first_tlv.tl.type) &&
3451 !p_vf->b_malicious) {
3452 switch (mbx->first_tlv.tl.type) {
3453 case CHANNEL_TLV_ACQUIRE:
3454 qed_iov_vf_mbx_acquire(p_hwfn, p_ptt, p_vf);
3456 case CHANNEL_TLV_VPORT_START:
3457 qed_iov_vf_mbx_start_vport(p_hwfn, p_ptt, p_vf);
3459 case CHANNEL_TLV_VPORT_TEARDOWN:
3460 qed_iov_vf_mbx_stop_vport(p_hwfn, p_ptt, p_vf);
3462 case CHANNEL_TLV_START_RXQ:
3463 qed_iov_vf_mbx_start_rxq(p_hwfn, p_ptt, p_vf);
3465 case CHANNEL_TLV_START_TXQ:
3466 qed_iov_vf_mbx_start_txq(p_hwfn, p_ptt, p_vf);
3468 case CHANNEL_TLV_STOP_RXQS:
3469 qed_iov_vf_mbx_stop_rxqs(p_hwfn, p_ptt, p_vf);
3471 case CHANNEL_TLV_STOP_TXQS:
3472 qed_iov_vf_mbx_stop_txqs(p_hwfn, p_ptt, p_vf);
3474 case CHANNEL_TLV_UPDATE_RXQ:
3475 qed_iov_vf_mbx_update_rxqs(p_hwfn, p_ptt, p_vf);
3477 case CHANNEL_TLV_VPORT_UPDATE:
3478 qed_iov_vf_mbx_vport_update(p_hwfn, p_ptt, p_vf);
3480 case CHANNEL_TLV_UCAST_FILTER:
3481 qed_iov_vf_mbx_ucast_filter(p_hwfn, p_ptt, p_vf);
3483 case CHANNEL_TLV_CLOSE:
3484 qed_iov_vf_mbx_close(p_hwfn, p_ptt, p_vf);
3486 case CHANNEL_TLV_INT_CLEANUP:
3487 qed_iov_vf_mbx_int_cleanup(p_hwfn, p_ptt, p_vf);
3489 case CHANNEL_TLV_RELEASE:
3490 qed_iov_vf_mbx_release(p_hwfn, p_ptt, p_vf);
3492 case CHANNEL_TLV_UPDATE_TUNN_PARAM:
3493 qed_iov_vf_mbx_update_tunn_param(p_hwfn, p_ptt, p_vf);
3496 } else if (qed_iov_tlv_supported(mbx->first_tlv.tl.type)) {
3497 DP_VERBOSE(p_hwfn, QED_MSG_IOV,
3498 "VF [%02x] - considered malicious; Ignoring TLV [%04x]\n",
3499 p_vf->abs_vf_id, mbx->first_tlv.tl.type);
3501 qed_iov_prepare_resp(p_hwfn, p_ptt, p_vf,
3502 mbx->first_tlv.tl.type,
3503 sizeof(struct pfvf_def_resp_tlv),
3504 PFVF_STATUS_MALICIOUS);
3506 /* unknown TLV - this may belong to a VF driver from the future
3507 * - a version written after this PF driver was written, which
3508 * supports features unknown as of yet. Too bad since we don't
3509 * support them. Or this may be because someone wrote a crappy
3510 * VF driver and is sending garbage over the channel.
3513 "VF[%02x]: unknown TLV. type %04x length %04x padding %08x reply address %llu\n",
3515 mbx->first_tlv.tl.type,
3516 mbx->first_tlv.tl.length,
3517 mbx->first_tlv.padding, mbx->first_tlv.reply_address);
3519 /* Try replying in case the reply address matches the one posted during the ACQUIRE request */
3522 if (p_vf->acquire.first_tlv.reply_address &&
3523 (mbx->first_tlv.reply_address ==
3524 p_vf->acquire.first_tlv.reply_address)) {
3525 qed_iov_prepare_resp(p_hwfn, p_ptt, p_vf,
3526 mbx->first_tlv.tl.type,
3527 sizeof(struct pfvf_def_resp_tlv),
3528 PFVF_STATUS_NOT_SUPPORTED);
3532 "VF[%02x]: Can't respond to TLV - no valid reply address\n",
3538 void qed_iov_pf_get_pending_events(struct qed_hwfn *p_hwfn, u64 *events)
3542 memset(events, 0, sizeof(u64) * QED_VF_ARRAY_LENGTH);
3544 qed_for_each_vf(p_hwfn, i) {
3545 struct qed_vf_info *p_vf;
3547 p_vf = &p_hwfn->pf_iov_info->vfs_array[i];
3548 if (p_vf->vf_mbx.b_pending_msg)
3549 events[i / 64] |= 1ULL << (i % 64);
3553 static struct qed_vf_info *qed_sriov_get_vf_from_absid(struct qed_hwfn *p_hwfn,
3556 u8 min = (u8) p_hwfn->cdev->p_iov_info->first_vf_in_pf;
3558 if (!_qed_iov_pf_sanity_check(p_hwfn, (int)abs_vfid - min, false)) {
3561 "Got indication for VF [abs 0x%08x] that cannot be handled by PF\n",
3566 return &p_hwfn->pf_iov_info->vfs_array[(u8) abs_vfid - min];
3569 static int qed_sriov_vfpf_msg(struct qed_hwfn *p_hwfn,
3570 u16 abs_vfid, struct regpair *vf_msg)
3572 struct qed_vf_info *p_vf = qed_sriov_get_vf_from_absid(p_hwfn,
3578 /* Record the physical address of the request so that the handler
3579  * can later copy the message from it. */
3581 p_vf->vf_mbx.pending_req = (((u64)vf_msg->hi) << 32) | vf_msg->lo;
3583 /* Mark the event and schedule the workqueue */
3584 p_vf->vf_mbx.b_pending_msg = true;
3585 qed_schedule_iov(p_hwfn, QED_IOV_WQ_MSG_FLAG);
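/* End-to-end flow for a VF message: the EQ event only records the
 * request's DMA address here; the IOV task later copies the buffer into
 * req_virt via qed_iov_copy_vf_msg() and dispatches it from
 * qed_iov_process_mbx_req().
 */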
3590 static void qed_sriov_vfpf_malicious(struct qed_hwfn *p_hwfn,
3591 struct malicious_vf_eqe_data *p_data)
3593 struct qed_vf_info *p_vf;
3595 p_vf = qed_sriov_get_vf_from_absid(p_hwfn, p_data->vf_id);
3600 if (!p_vf->b_malicious) {
3602 "VF [%d] - Malicious behavior [%02x]\n",
3603 p_vf->abs_vf_id, p_data->err_id);
3605 p_vf->b_malicious = true;
3608 "VF [%d] - Malicious behavior [%02x]\n",
3609 p_vf->abs_vf_id, p_data->err_id);
3613 int qed_sriov_eqe_event(struct qed_hwfn *p_hwfn,
3614 u8 opcode, __le16 echo, union event_ring_data *data)
3617 case COMMON_EVENT_VF_PF_CHANNEL:
3618 return qed_sriov_vfpf_msg(p_hwfn, le16_to_cpu(echo),
3619 &data->vf_pf_channel.msg_addr);
3620 case COMMON_EVENT_MALICIOUS_VF:
3621 qed_sriov_vfpf_malicious(p_hwfn, &data->malicious_vf);
3624 DP_INFO(p_hwfn->cdev, "Unknown sriov eqe event 0x%02x\n",
3630 u16 qed_iov_get_next_active_vf(struct qed_hwfn *p_hwfn, u16 rel_vf_id)
3632 struct qed_hw_sriov_info *p_iov = p_hwfn->cdev->p_iov_info;
3638 for (i = rel_vf_id; i < p_iov->total_vfs; i++)
3639 if (qed_iov_is_valid_vfid(p_hwfn, i, true, false))
3646 static int qed_iov_copy_vf_msg(struct qed_hwfn *p_hwfn, struct qed_ptt *ptt,
3649 struct qed_dmae_params params;
3650 struct qed_vf_info *vf_info;
3652 vf_info = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true);
3656 memset(&params, 0, sizeof(struct qed_dmae_params));
3657 params.flags = QED_DMAE_FLAG_VF_SRC | QED_DMAE_FLAG_COMPLETION_DST;
3658 params.src_vfid = vf_info->abs_vf_id;
3660 if (qed_dmae_host2host(p_hwfn, ptt,
3661 vf_info->vf_mbx.pending_req,
3662 vf_info->vf_mbx.req_phys,
3663 sizeof(union vfpf_tlvs) / 4, &params)) {
3664 DP_VERBOSE(p_hwfn, QED_MSG_IOV,
3665 "Failed to copy message from VF 0x%02x\n", vfid);
3673 static void qed_iov_bulletin_set_forced_mac(struct qed_hwfn *p_hwfn,
3676 struct qed_vf_info *vf_info;
3679 vf_info = qed_iov_get_vf_info(p_hwfn, (u16)vfid, true);
3681 DP_NOTICE(p_hwfn->cdev,
3682 "Can not set forced MAC, invalid vfid [%d]\n", vfid);
3686 if (vf_info->b_malicious) {
3687 DP_NOTICE(p_hwfn->cdev,
3688 "Can't set forced MAC to malicious VF [%d]\n", vfid);
3692 feature = 1 << MAC_ADDR_FORCED;
3693 memcpy(vf_info->bulletin.p_virt->mac, mac, ETH_ALEN);
3695 vf_info->bulletin.p_virt->valid_bitmap |= feature;
3696 /* Forced MAC will disable MAC_ADDR */
3697 vf_info->bulletin.p_virt->valid_bitmap &= ~BIT(VFPF_BULLETIN_MAC_ADDR);
3699 qed_iov_configure_vport_forced(p_hwfn, vf_info, feature);
3702 static void qed_iov_bulletin_set_forced_vlan(struct qed_hwfn *p_hwfn,
3705 struct qed_vf_info *vf_info;
3708 vf_info = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true);
3710 DP_NOTICE(p_hwfn->cdev,
3711 "Can not set forced MAC, invalid vfid [%d]\n", vfid);
3715 if (vf_info->b_malicious) {
3716 DP_NOTICE(p_hwfn->cdev,
3717 "Can't set forced vlan to malicious VF [%d]\n", vfid);
3721 feature = 1 << VLAN_ADDR_FORCED;
3722 vf_info->bulletin.p_virt->pvid = pvid;
3724 vf_info->bulletin.p_virt->valid_bitmap |= feature;
3726 vf_info->bulletin.p_virt->valid_bitmap &= ~feature;
3728 qed_iov_configure_vport_forced(p_hwfn, vf_info, feature);
3731 void qed_iov_bulletin_set_udp_ports(struct qed_hwfn *p_hwfn,
3732 int vfid, u16 vxlan_port, u16 geneve_port)
3734 struct qed_vf_info *vf_info;
3736 vf_info = qed_iov_get_vf_info(p_hwfn, (u16)vfid, true);
3738 DP_NOTICE(p_hwfn->cdev,
3739 "Can not set udp ports, invalid vfid [%d]\n", vfid);
3743 if (vf_info->b_malicious) {
3744 DP_VERBOSE(p_hwfn, QED_MSG_IOV,
3745 "Can not set udp ports to malicious VF [%d]\n",
3750 vf_info->bulletin.p_virt->vxlan_udp_port = vxlan_port;
3751 vf_info->bulletin.p_virt->geneve_udp_port = geneve_port;
3754 static bool qed_iov_vf_has_vport_instance(struct qed_hwfn *p_hwfn, int vfid)
3756 struct qed_vf_info *p_vf_info;
3758 p_vf_info = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true);
3762 return !!p_vf_info->vport_instance;
3765 static bool qed_iov_is_vf_stopped(struct qed_hwfn *p_hwfn, int vfid)
3767 struct qed_vf_info *p_vf_info;
3769 p_vf_info = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true);
3773 return p_vf_info->state == VF_STOPPED;
3776 static bool qed_iov_spoofchk_get(struct qed_hwfn *p_hwfn, int vfid)
3778 struct qed_vf_info *vf_info;
3780 vf_info = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true);
3784 return vf_info->spoof_chk;
3787 static int qed_iov_spoofchk_set(struct qed_hwfn *p_hwfn, int vfid, bool val)
3789 struct qed_vf_info *vf;
3792 if (!qed_iov_pf_sanity_check(p_hwfn, vfid)) {
3794 "SR-IOV sanity check failed, can't set spoofchk\n");
3798 vf = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true);
3802 if (!qed_iov_vf_has_vport_instance(p_hwfn, vfid)) {
3803 /* After VF VPORT start PF will configure spoof check */
3804 vf->req_spoofchk_val = val;
3809 rc = __qed_iov_spoofchk_set(p_hwfn, vf, val);
3815 static u8 *qed_iov_bulletin_get_forced_mac(struct qed_hwfn *p_hwfn,
3818 struct qed_vf_info *p_vf;
3820 p_vf = qed_iov_get_vf_info(p_hwfn, rel_vf_id, true);
3821 if (!p_vf || !p_vf->bulletin.p_virt)
3824 if (!(p_vf->bulletin.p_virt->valid_bitmap & BIT(MAC_ADDR_FORCED)))
3827 return p_vf->bulletin.p_virt->mac;
3831 qed_iov_bulletin_get_forced_vlan(struct qed_hwfn *p_hwfn, u16 rel_vf_id)
3833 struct qed_vf_info *p_vf;
3835 p_vf = qed_iov_get_vf_info(p_hwfn, rel_vf_id, true);
3836 if (!p_vf || !p_vf->bulletin.p_virt)
3839 if (!(p_vf->bulletin.p_virt->valid_bitmap & BIT(VLAN_ADDR_FORCED)))
3842 return p_vf->bulletin.p_virt->pvid;
3845 static int qed_iov_configure_tx_rate(struct qed_hwfn *p_hwfn,
3846 struct qed_ptt *p_ptt, int vfid, int val)
3848 struct qed_vf_info *vf;
3852 vf = qed_iov_get_vf_info(p_hwfn, (u16)vfid, true);
3856 rc = qed_fw_vport(p_hwfn, vf->vport_id, &abs_vp_id);
3860 return qed_init_vport_rl(p_hwfn, p_ptt, abs_vp_id, (u32)val);
3864 qed_iov_configure_min_tx_rate(struct qed_dev *cdev, int vfid, u32 rate)
3866 struct qed_vf_info *vf;
3870 for_each_hwfn(cdev, i) {
3871 struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
3873 if (!qed_iov_pf_sanity_check(p_hwfn, vfid)) {
3875 "SR-IOV sanity check failed, can't set min rate\n");
3880 vf = qed_iov_get_vf_info(QED_LEADING_HWFN(cdev), (u16)vfid, true);
if (!vf)
return -EINVAL;
3881 vport_id = vf->vport_id;
3883 return qed_configure_vport_wfq(cdev, vport_id, rate);
3886 static int qed_iov_get_vf_min_rate(struct qed_hwfn *p_hwfn, int vfid)
3888 struct qed_wfq_data *vf_vp_wfq;
3889 struct qed_vf_info *vf_info;
3891 vf_info = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true);
3895 vf_vp_wfq = &p_hwfn->qm_info.wfq_data[vf_info->vport_id];
3897 if (vf_vp_wfq->configured)
3898 return vf_vp_wfq->min_speed;
3904 * qed_schedule_iov - schedules IOV task for VF and PF
3905 * @hwfn: hardware function pointer
3906 * @flag: IOV flag for VF/PF
3908 void qed_schedule_iov(struct qed_hwfn *hwfn, enum qed_iov_wq_flag flag)
3910 smp_mb__before_atomic();
3911 set_bit(flag, &hwfn->iov_task_flags);
3912 smp_mb__after_atomic();
3913 DP_VERBOSE(hwfn, QED_MSG_IOV, "Scheduling iov task [Flag: %d]\n", flag);
3914 queue_delayed_work(hwfn->iov_wq, &hwfn->iov_task, 0);
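/* Typical usage, as seen throughout this file:
 *
 *	qed_schedule_iov(hwfn, QED_IOV_WQ_MSG_FLAG);
 *
 * Several callers may set different bits before the task runs; a single
 * invocation of qed_iov_pf_task() then services all of them.
 */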
3917 void qed_vf_start_iov_wq(struct qed_dev *cdev)
3921 for_each_hwfn(cdev, i)
3922 queue_delayed_work(cdev->hwfns[i].iov_wq,
3923 &cdev->hwfns[i].iov_task, 0);
3926 int qed_sriov_disable(struct qed_dev *cdev, bool pci_enabled)
3930 for_each_hwfn(cdev, i)
3931 if (cdev->hwfns[i].iov_wq)
3932 flush_workqueue(cdev->hwfns[i].iov_wq);
3934 /* Mark VFs for disablement */
3935 qed_iov_set_vfs_to_disable(cdev, true);
3937 if (cdev->p_iov_info && cdev->p_iov_info->num_vfs && pci_enabled)
3938 pci_disable_sriov(cdev->pdev);
3940 for_each_hwfn(cdev, i) {
3941 struct qed_hwfn *hwfn = &cdev->hwfns[i];
3942 struct qed_ptt *ptt = qed_ptt_acquire(hwfn);
3944 /* Failure to acquire the ptt in 100g mode creates an odd error
3945  * where the first engine has already released IOV. */
3948 DP_ERR(hwfn, "Failed to acquire ptt\n");
3952 /* Clean WFQ db and configure equal weight for all vports */
3953 qed_clean_wfq_db(hwfn, ptt);
3955 qed_for_each_vf(hwfn, j) {
3958 if (!qed_iov_is_valid_vfid(hwfn, j, true, false))
3961 /* Wait until VF is disabled before releasing */
3962 for (k = 0; k < 100; k++) {
3963 if (!qed_iov_is_vf_stopped(hwfn, j))
3970 qed_iov_release_hw_for_vf(&cdev->hwfns[i],
3974 "Timeout waiting for VF's FLR to end\n");
3977 qed_ptt_release(hwfn, ptt);
3980 qed_iov_set_vfs_to_disable(cdev, false);
3985 static void qed_sriov_enable_qid_config(struct qed_hwfn *hwfn,
3987 struct qed_iov_vf_init_params *params)
3991 /* Since resources are distributed equally per VF, and we assume the
3992  * PF has acquired the first QED_PF_L2_QUE queues, we start assigning
3993  * sequentially from there. */
3995 base = FEAT_NUM(hwfn, QED_PF_L2_QUE) + vfid * params->num_queues;
3997 params->rel_vf_id = vfid;
3998 for (i = 0; i < params->num_queues; i++) {
3999 params->req_rx_queue[i] = base + i;
4000 params->req_tx_queue[i] = base + i;
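/* Worked example with hypothetical feature sizes: if
 * FEAT_NUM(hwfn, QED_PF_L2_QUE) == 16 and params->num_queues == 4,
 * VF 2 gets base = 16 + 2 * 4 = 24, i.e. queues 24-27 for both its
 * Rx and Tx rings.
 */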
4004 static int qed_sriov_enable(struct qed_dev *cdev, int num)
4006 struct qed_iov_vf_init_params params;
4009 if (num >= RESC_NUM(&cdev->hwfns[0], QED_VPORT)) {
4010 DP_NOTICE(cdev, "Can start at most %d VFs\n",
4011 RESC_NUM(&cdev->hwfns[0], QED_VPORT) - 1);
4015 memset(&params, 0, sizeof(params));
4017 /* Initialize HW for VF access */
4018 for_each_hwfn(cdev, j) {
4019 struct qed_hwfn *hwfn = &cdev->hwfns[j];
4020 struct qed_ptt *ptt = qed_ptt_acquire(hwfn);
4022 /* Make sure not to use more than 16 queues per VF */
4023 params.num_queues = min_t(int,
4024 FEAT_NUM(hwfn, QED_VF_L2_QUE) / num,
4028 DP_ERR(hwfn, "Failed to acquire ptt\n");
4033 for (i = 0; i < num; i++) {
4034 if (!qed_iov_is_valid_vfid(hwfn, i, false, true))
4037 qed_sriov_enable_qid_config(hwfn, i, &params);
4038 rc = qed_iov_init_hw_for_vf(hwfn, ptt, &params);
4040 DP_ERR(cdev, "Failed to enable VF[%d]\n", i);
4041 qed_ptt_release(hwfn, ptt);
4046 qed_ptt_release(hwfn, ptt);
4049 /* Enable SRIOV PCIe functions */
4050 rc = pci_enable_sriov(cdev->pdev, num);
4052 DP_ERR(cdev, "Failed to enable sriov [%d]\n", rc);
4059 qed_sriov_disable(cdev, false);
4063 static int qed_sriov_configure(struct qed_dev *cdev, int num_vfs_param)
4065 if (!IS_QED_SRIOV(cdev)) {
4066 DP_VERBOSE(cdev, QED_MSG_IOV, "SR-IOV is not supported\n");
4071 return qed_sriov_enable(cdev, num_vfs_param);
4073 return qed_sriov_disable(cdev, true);
4076 static int qed_sriov_pf_set_mac(struct qed_dev *cdev, u8 *mac, int vfid)
4080 if (!IS_QED_SRIOV(cdev) || !IS_PF_SRIOV_ALLOC(&cdev->hwfns[0])) {
4081 DP_VERBOSE(cdev, QED_MSG_IOV,
4082 "Cannot set a VF MAC; Sriov is not enabled\n");
4086 if (!qed_iov_is_valid_vfid(&cdev->hwfns[0], vfid, true, true)) {
4087 DP_VERBOSE(cdev, QED_MSG_IOV,
4088 "Cannot set VF[%d] MAC (VF is not active)\n", vfid);
4092 for_each_hwfn(cdev, i) {
4093 struct qed_hwfn *hwfn = &cdev->hwfns[i];
4094 struct qed_public_vf_info *vf_info;
4096 vf_info = qed_iov_get_public_vf_info(hwfn, vfid, true);
4100 /* Set the forced MAC, and schedule the IOV task */
4101 ether_addr_copy(vf_info->forced_mac, mac);
4102 qed_schedule_iov(hwfn, QED_IOV_WQ_SET_UNICAST_FILTER_FLAG);
4108 static int qed_sriov_pf_set_vlan(struct qed_dev *cdev, u16 vid, int vfid)
4112 if (!IS_QED_SRIOV(cdev) || !IS_PF_SRIOV_ALLOC(&cdev->hwfns[0])) {
4113 DP_VERBOSE(cdev, QED_MSG_IOV,
4114 "Cannot set a VF MAC; Sriov is not enabled\n");
4118 if (!qed_iov_is_valid_vfid(&cdev->hwfns[0], vfid, true, true)) {
4119 DP_VERBOSE(cdev, QED_MSG_IOV,
4120 "Cannot set VF[%d] MAC (VF is not active)\n", vfid);
4124 for_each_hwfn(cdev, i) {
4125 struct qed_hwfn *hwfn = &cdev->hwfns[i];
4126 struct qed_public_vf_info *vf_info;
4128 vf_info = qed_iov_get_public_vf_info(hwfn, vfid, true);
4132 /* Set the forced vlan, and schedule the IOV task */
4133 vf_info->forced_vlan = vid;
4134 qed_schedule_iov(hwfn, QED_IOV_WQ_SET_UNICAST_FILTER_FLAG);
4140 static int qed_get_vf_config(struct qed_dev *cdev,
4141 int vf_id, struct ifla_vf_info *ivi)
4143 struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
4144 struct qed_public_vf_info *vf_info;
4145 struct qed_mcp_link_state link;
4148 /* Sanitize request */
4152 if (!qed_iov_is_valid_vfid(&cdev->hwfns[0], vf_id, true, false)) {
4153 DP_VERBOSE(cdev, QED_MSG_IOV,
4154 "VF index [%d] isn't active\n", vf_id);
4158 vf_info = qed_iov_get_public_vf_info(hwfn, vf_id, true);
4160 qed_iov_get_link(hwfn, vf_id, NULL, &link, NULL);
4162 /* Fill information about VF */
4165 if (is_valid_ether_addr(vf_info->forced_mac))
4166 ether_addr_copy(ivi->mac, vf_info->forced_mac);
4168 ether_addr_copy(ivi->mac, vf_info->mac);
4170 ivi->vlan = vf_info->forced_vlan;
4171 ivi->spoofchk = qed_iov_spoofchk_get(hwfn, vf_id);
4172 ivi->linkstate = vf_info->link_state;
4173 tx_rate = vf_info->tx_rate;
4174 ivi->max_tx_rate = tx_rate ? tx_rate : link.speed;
4175 ivi->min_tx_rate = qed_iov_get_vf_min_rate(hwfn, vf_id);
4180 void qed_inform_vf_link_state(struct qed_hwfn *hwfn)
4182 struct qed_hwfn *lead_hwfn = QED_LEADING_HWFN(hwfn->cdev);
4183 struct qed_mcp_link_capabilities caps;
4184 struct qed_mcp_link_params params;
4185 struct qed_mcp_link_state link;
4188 if (!hwfn->pf_iov_info)
4191 /* Update bulletin of all future possible VFs with link configuration */
4192 for (i = 0; i < hwfn->cdev->p_iov_info->total_vfs; i++) {
4193 struct qed_public_vf_info *vf_info;
4195 vf_info = qed_iov_get_public_vf_info(hwfn, i, false);
4199 /* Only hwfn0 is actually interested in the link speed.
4200  * But since only it receives an MFW indication of link,
4201  * the configuration has to be taken from it - otherwise things
4202  * like rate limiting for hwfn1 VFs would not work. */
4204 memcpy(&params, qed_mcp_get_link_params(lead_hwfn),
4206 memcpy(&link, qed_mcp_get_link_state(lead_hwfn), sizeof(link));
4207 memcpy(&caps, qed_mcp_get_link_capabilities(lead_hwfn),
4210 /* Modify link according to the VF's configured link state */
4211 switch (vf_info->link_state) {
4212 case IFLA_VF_LINK_STATE_DISABLE:
4213 link.link_up = false;
4215 case IFLA_VF_LINK_STATE_ENABLE:
4216 link.link_up = true;
4217 /* Set speed according to the maximum supported by HW:
4218  * 40G for regular devices and 100G for CMT-mode devices. */
4221 link.speed = (hwfn->cdev->num_hwfns > 1) ?
4224 /* In auto mode pass PF link image to VF */
4228 if (link.link_up && vf_info->tx_rate) {
4229 struct qed_ptt *ptt;
4232 rate = min_t(int, vf_info->tx_rate, link.speed);
4234 ptt = qed_ptt_acquire(hwfn);
4236 DP_NOTICE(hwfn, "Failed to acquire PTT\n");
4240 if (!qed_iov_configure_tx_rate(hwfn, ptt, i, rate)) {
4241 vf_info->tx_rate = rate;
4245 qed_ptt_release(hwfn, ptt);
4248 qed_iov_set_link(hwfn, i, &params, &link, &caps);
4251 qed_schedule_iov(hwfn, QED_IOV_WQ_BULLETIN_UPDATE_FLAG);
4254 static int qed_set_vf_link_state(struct qed_dev *cdev,
4255 int vf_id, int link_state)
4259 /* Sanitize request */
4263 if (!qed_iov_is_valid_vfid(&cdev->hwfns[0], vf_id, true, true)) {
4264 DP_VERBOSE(cdev, QED_MSG_IOV,
4265 "VF index [%d] isn't active\n", vf_id);
4269 /* Handle configuration of link state */
4270 for_each_hwfn(cdev, i) {
4271 struct qed_hwfn *hwfn = &cdev->hwfns[i];
4272 struct qed_public_vf_info *vf;
4274 vf = qed_iov_get_public_vf_info(hwfn, vf_id, true);
4278 if (vf->link_state == link_state)
4281 vf->link_state = link_state;
4282 qed_inform_vf_link_state(&cdev->hwfns[i]);
4288 static int qed_spoof_configure(struct qed_dev *cdev, int vfid, bool val)
4290 int i, rc = -EINVAL;
4292 for_each_hwfn(cdev, i) {
4293 struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
4295 rc = qed_iov_spoofchk_set(p_hwfn, vfid, val);
4303 static int qed_configure_max_vf_rate(struct qed_dev *cdev, int vfid, int rate)
4307 for_each_hwfn(cdev, i) {
4308 struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
4309 struct qed_public_vf_info *vf;
4311 if (!qed_iov_pf_sanity_check(p_hwfn, vfid)) {
4313 "SR-IOV sanity check failed, can't set tx rate\n");
4317 vf = qed_iov_get_public_vf_info(p_hwfn, vfid, true);
4321 qed_inform_vf_link_state(p_hwfn);
4327 static int qed_set_vf_rate(struct qed_dev *cdev,
4328 int vfid, u32 min_rate, u32 max_rate)
4330 int rc_min = 0, rc_max = 0;
4333 rc_max = qed_configure_max_vf_rate(cdev, vfid, max_rate);
4336 rc_min = qed_iov_configure_min_tx_rate(cdev, vfid, min_rate);
4338 if (rc_max | rc_min)
4344 static int qed_set_vf_trust(struct qed_dev *cdev, int vfid, bool trust)
4348 for_each_hwfn(cdev, i) {
4349 struct qed_hwfn *hwfn = &cdev->hwfns[i];
4350 struct qed_public_vf_info *vf;
4352 if (!qed_iov_pf_sanity_check(hwfn, vfid)) {
4354 "SR-IOV sanity check failed, can't set trust\n");
4358 vf = qed_iov_get_public_vf_info(hwfn, vfid, true);
4360 if (vf->is_trusted_request == trust)
4362 vf->is_trusted_request = trust;
4364 qed_schedule_iov(hwfn, QED_IOV_WQ_TRUST_FLAG);
4370 static void qed_handle_vf_msg(struct qed_hwfn *hwfn)
4372 u64 events[QED_VF_ARRAY_LENGTH];
4373 struct qed_ptt *ptt;
4376 ptt = qed_ptt_acquire(hwfn);
4378 DP_VERBOSE(hwfn, QED_MSG_IOV,
4379 "Can't acquire PTT; re-scheduling\n");
4380 qed_schedule_iov(hwfn, QED_IOV_WQ_MSG_FLAG);
4384 qed_iov_pf_get_pending_events(hwfn, events);
4386 DP_VERBOSE(hwfn, QED_MSG_IOV,
4387 "Event mask of VF events: 0x%llx 0x%llx 0x%llx\n",
4388 events[0], events[1], events[2]);
4390 qed_for_each_vf(hwfn, i) {
4391 /* Skip VFs with no pending messages */
4392 if (!(events[i / 64] & (1ULL << (i % 64))))
4395 DP_VERBOSE(hwfn, QED_MSG_IOV,
4396 "Handling VF message from VF 0x%02x [Abs 0x%02x]\n",
4397 i, hwfn->cdev->p_iov_info->first_vf_in_pf + i);
4399 /* Copy VF's message to PF's request buffer for that VF */
4400 if (qed_iov_copy_vf_msg(hwfn, ptt, i))
4403 qed_iov_process_mbx_req(hwfn, ptt, i);
4406 qed_ptt_release(hwfn, ptt);
4409 static void qed_handle_pf_set_vf_unicast(struct qed_hwfn *hwfn)
4413 qed_for_each_vf(hwfn, i) {
4414 struct qed_public_vf_info *info;
4415 bool update = false;
4418 info = qed_iov_get_public_vf_info(hwfn, i, true);
4422 /* Update data on bulletin board */
4423 mac = qed_iov_bulletin_get_forced_mac(hwfn, i);
4424 if (is_valid_ether_addr(info->forced_mac) &&
4425 (!mac || !ether_addr_equal(mac, info->forced_mac))) {
4428 "Handling PF setting of VF MAC to VF 0x%02x [Abs 0x%02x]\n",
4430 hwfn->cdev->p_iov_info->first_vf_in_pf + i);
4432 /* Update bulletin board with forced MAC */
4433 qed_iov_bulletin_set_forced_mac(hwfn,
4434 info->forced_mac, i);
4438 if (qed_iov_bulletin_get_forced_vlan(hwfn, i) ^
4439 info->forced_vlan) {
4442 "Handling PF setting of pvid [0x%04x] to VF 0x%02x [Abs 0x%02x]\n",
4445 hwfn->cdev->p_iov_info->first_vf_in_pf + i);
4446 qed_iov_bulletin_set_forced_vlan(hwfn,
4447 info->forced_vlan, i);
4452 qed_schedule_iov(hwfn, QED_IOV_WQ_BULLETIN_UPDATE_FLAG);
4456 static void qed_handle_bulletin_post(struct qed_hwfn *hwfn)
4458 struct qed_ptt *ptt;
4461 ptt = qed_ptt_acquire(hwfn);
4463 DP_NOTICE(hwfn, "Failed allocating a ptt entry\n");
4464 qed_schedule_iov(hwfn, QED_IOV_WQ_BULLETIN_UPDATE_FLAG);
4468 qed_for_each_vf(hwfn, i)
4469 qed_iov_post_vf_bulletin(hwfn, i, ptt);
4471 qed_ptt_release(hwfn, ptt);
4474 static void qed_iov_handle_trust_change(struct qed_hwfn *hwfn)
4476 struct qed_sp_vport_update_params params;
4477 struct qed_filter_accept_flags *flags;
4478 struct qed_public_vf_info *vf_info;
4479 struct qed_vf_info *vf;
4483 mask = QED_ACCEPT_UCAST_UNMATCHED | QED_ACCEPT_MCAST_UNMATCHED;
4484 flags = &params.accept_flags;
4486 qed_for_each_vf(hwfn, i) {
4487 /* Make sure the currently requested configuration didn't flip,
4488  * so we don't end up configuring something that is no longer needed. */
4491 vf_info = qed_iov_get_public_vf_info(hwfn, i, true);
4492 if (vf_info->is_trusted_configured ==
4493 vf_info->is_trusted_request)
4495 vf_info->is_trusted_configured = vf_info->is_trusted_request;
4497 /* Validate that the VF has a configured vport */
4498 vf = qed_iov_get_vf_info(hwfn, i, true);
4499 if (!vf->vport_instance)
4502 memset(&params, 0, sizeof(params));
4503 params.opaque_fid = vf->opaque_fid;
4504 params.vport_id = vf->vport_id;
4506 if (vf_info->rx_accept_mode & mask) {
4507 flags->update_rx_mode_config = 1;
4508 flags->rx_accept_filter = vf_info->rx_accept_mode;
4511 if (vf_info->tx_accept_mode & mask) {
4512 flags->update_tx_mode_config = 1;
4513 flags->tx_accept_filter = vf_info->tx_accept_mode;
4516 /* Remove if needed; Otherwise this would set the mask */
4517 if (!vf_info->is_trusted_configured) {
4518 flags->rx_accept_filter &= ~mask;
4519 flags->tx_accept_filter &= ~mask;
4522 if (flags->update_rx_mode_config ||
4523 flags->update_tx_mode_config)
4524 qed_sp_vport_update(hwfn, &params,
4525 QED_SPQ_MODE_EBLOCK, NULL);
4529 static void qed_iov_pf_task(struct work_struct *work)
4532 struct qed_hwfn *hwfn = container_of(work, struct qed_hwfn,
4536 if (test_and_clear_bit(QED_IOV_WQ_STOP_WQ_FLAG, &hwfn->iov_task_flags))
4539 if (test_and_clear_bit(QED_IOV_WQ_FLR_FLAG, &hwfn->iov_task_flags)) {
4540 struct qed_ptt *ptt = qed_ptt_acquire(hwfn);
4543 qed_schedule_iov(hwfn, QED_IOV_WQ_FLR_FLAG);
4547 rc = qed_iov_vf_flr_cleanup(hwfn, ptt);
4549 qed_schedule_iov(hwfn, QED_IOV_WQ_FLR_FLAG);
4551 qed_ptt_release(hwfn, ptt);
4554 if (test_and_clear_bit(QED_IOV_WQ_MSG_FLAG, &hwfn->iov_task_flags))
4555 qed_handle_vf_msg(hwfn);
4557 if (test_and_clear_bit(QED_IOV_WQ_SET_UNICAST_FILTER_FLAG,
4558 &hwfn->iov_task_flags))
4559 qed_handle_pf_set_vf_unicast(hwfn);
4561 if (test_and_clear_bit(QED_IOV_WQ_BULLETIN_UPDATE_FLAG,
4562 &hwfn->iov_task_flags))
4563 qed_handle_bulletin_post(hwfn);
4565 if (test_and_clear_bit(QED_IOV_WQ_TRUST_FLAG, &hwfn->iov_task_flags))
4566 qed_iov_handle_trust_change(hwfn);
4569 void qed_iov_wq_stop(struct qed_dev *cdev, bool schedule_first)
4573 for_each_hwfn(cdev, i) {
4574 if (!cdev->hwfns[i].iov_wq)
4577 if (schedule_first) {
4578 qed_schedule_iov(&cdev->hwfns[i],
4579 QED_IOV_WQ_STOP_WQ_FLAG);
4580 cancel_delayed_work_sync(&cdev->hwfns[i].iov_task);
4583 flush_workqueue(cdev->hwfns[i].iov_wq);
4584 destroy_workqueue(cdev->hwfns[i].iov_wq);
4588 int qed_iov_wq_start(struct qed_dev *cdev)
4590 char name[NAME_SIZE];
4593 for_each_hwfn(cdev, i) {
4594 struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
4596 /* PFs need a dedicated workqueue only if they support IOV.
4597  * VFs always require one. */
4599 if (IS_PF(p_hwfn->cdev) && !IS_PF_SRIOV(p_hwfn))
4602 snprintf(name, NAME_SIZE, "iov-%02x:%02x.%02x",
4603 cdev->pdev->bus->number,
4604 PCI_SLOT(cdev->pdev->devfn), p_hwfn->abs_pf_id);
4606 p_hwfn->iov_wq = create_singlethread_workqueue(name);
4607 if (!p_hwfn->iov_wq) {
4608 DP_NOTICE(p_hwfn, "Cannot create iov workqueue\n");
4613 INIT_DELAYED_WORK(&p_hwfn->iov_task, qed_iov_pf_task);
4615 INIT_DELAYED_WORK(&p_hwfn->iov_task, qed_iov_vf_task);
4621 const struct qed_iov_hv_ops qed_iov_ops_pass = {
4622 .configure = &qed_sriov_configure,
4623 .set_mac = &qed_sriov_pf_set_mac,
4624 .set_vlan = &qed_sriov_pf_set_vlan,
4625 .get_config = &qed_get_vf_config,
4626 .set_link_state = &qed_set_vf_link_state,
4627 .set_spoof = &qed_spoof_configure,
4628 .set_rate = &qed_set_vf_rate,
4629 .set_trust = &qed_set_vf_trust,
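};

/* Sketch of how these callbacks are reached, assuming the standard qede
 * wiring of the exported qed ops:
 *
 *	edev->ops->iov->set_mac(edev->cdev, mac, vfid);
 *
 * which lands in qed_sriov_pf_set_mac() above.
 */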