/* QLogic qed NIC Driver
 * Copyright (c) 2015 QLogic Corporation
 *
 * This software is available under the terms of the GNU General Public License
 * (GPL) Version 2, available from the file COPYING in the main directory of
 * this source tree.
 */

#include <linux/stddef.h>
#include <linux/pci.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/version.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <linux/dma-mapping.h>
#include <linux/string.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/workqueue.h>
#include <linux/ethtool.h>
#include <linux/etherdevice.h>
#include <linux/vmalloc.h>
#include <linux/qed/qed_if.h>
#include <linux/qed/qed_ll2_if.h>

#include "qed.h"
#include "qed_sriov.h"
#include "qed_sp.h"
#include "qed_dev_api.h"
#include "qed_ll2.h"
#include "qed_mcp.h"
#include "qed_hw.h"
#include "qed_selftest.h"

static char version[] =
	"QLogic FastLinQ 4xxxx Core Module qed " DRV_MODULE_VERSION "\n";

MODULE_DESCRIPTION("QLogic FastLinQ 4xxxx Core Module");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

#define FW_FILE_VERSION				\
	__stringify(FW_MAJOR_VERSION) "."	\
	__stringify(FW_MINOR_VERSION) "."	\
	__stringify(FW_REVISION_VERSION) "."	\
	__stringify(FW_ENGINEERING_VERSION)

#define QED_FW_FILE_NAME	\
	"qed/qed_init_values_zipped-" FW_FILE_VERSION ".bin"

MODULE_FIRMWARE(QED_FW_FILE_NAME);

static int __init qed_init(void)
{
	pr_info("%s", version);

	return 0;
}

static void __exit qed_cleanup(void)
{
	pr_notice("qed_cleanup called\n");
}

module_init(qed_init);
module_exit(qed_cleanup);

/* Check if the DMA controller on the machine can properly handle the DMA
 * addressing required by the device.
 */
static int qed_set_coherency_mask(struct qed_dev *cdev)
{
	struct device *dev = &cdev->pdev->dev;

	if (dma_set_mask(dev, DMA_BIT_MASK(64)) == 0) {
		if (dma_set_coherent_mask(dev, DMA_BIT_MASK(64)) != 0) {
			DP_NOTICE(cdev,
				  "Can't request 64-bit consistent allocations\n");
			return -EIO;
		}
	} else if (dma_set_mask(dev, DMA_BIT_MASK(32)) != 0) {
		DP_NOTICE(cdev, "Can't request 64b/32b DMA addresses\n");
		return -EIO;
	}

	return 0;
}

static void qed_free_pci(struct qed_dev *cdev)
{
	struct pci_dev *pdev = cdev->pdev;

	if (cdev->doorbells)
		iounmap(cdev->doorbells);
	if (cdev->regview)
		iounmap(cdev->regview);
	if (atomic_read(&pdev->enable_cnt) == 1)
		pci_release_regions(pdev);

	pci_disable_device(pdev);
}

#define PCI_REVISION_ID_ERROR_VAL	0xff

/* Performs PCI initializations as well as initializing PCI-related parameters
 * in the device structure. Returns 0 in case of success.
 */
static int qed_init_pci(struct qed_dev *cdev, struct pci_dev *pdev)
{
	u8 rev_id;
	int rc;

	cdev->pdev = pdev;

	rc = pci_enable_device(pdev);
	if (rc) {
		DP_NOTICE(cdev, "Cannot enable PCI device\n");
		goto err0;
	}

	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		DP_NOTICE(cdev, "No memory region found in bar #0\n");
		rc = -EIO;
		goto err1;
	}

	if (IS_PF(cdev) && !(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
		DP_NOTICE(cdev, "No memory region found in bar #2\n");
		rc = -EIO;
		goto err1;
	}

	if (atomic_read(&pdev->enable_cnt) == 1) {
		rc = pci_request_regions(pdev, "qed");
		if (rc) {
			DP_NOTICE(cdev,
				  "Failed to request PCI memory resources\n");
			goto err1;
		}
		pci_set_master(pdev);
		pci_save_state(pdev);
	}

	pci_read_config_byte(pdev, PCI_REVISION_ID, &rev_id);
	if (rev_id == PCI_REVISION_ID_ERROR_VAL) {
		DP_NOTICE(cdev,
			  "Detected PCI device error [rev_id 0x%x]. Probably due to prior indication. Aborting.\n",
			  rev_id);
		rc = -ENODEV;
		goto err2;
	}
	if (!pci_is_pcie(pdev)) {
		DP_NOTICE(cdev, "The bus is not PCI Express\n");
		rc = -EIO;
		goto err2;
	}

	cdev->pci_params.pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
	if (IS_PF(cdev) && !cdev->pci_params.pm_cap)
		DP_NOTICE(cdev, "Cannot find power management capability\n");

	rc = qed_set_coherency_mask(cdev);
	if (rc)
		goto err2;

	cdev->pci_params.mem_start = pci_resource_start(pdev, 0);
	cdev->pci_params.mem_end = pci_resource_end(pdev, 0);
	cdev->pci_params.irq = pdev->irq;

	cdev->regview = pci_ioremap_bar(pdev, 0);
	if (!cdev->regview) {
		DP_NOTICE(cdev, "Cannot map register space, aborting\n");
		rc = -ENOMEM;
		goto err2;
	}

	if (IS_PF(cdev)) {
		cdev->db_phys_addr = pci_resource_start(cdev->pdev, 2);
		cdev->db_size = pci_resource_len(cdev->pdev, 2);
		cdev->doorbells = ioremap_wc(cdev->db_phys_addr, cdev->db_size);
		if (!cdev->doorbells) {
			DP_NOTICE(cdev, "Cannot map doorbell space\n");
			return -ENOMEM;
		}
	}

	return 0;

err2:
	pci_release_regions(pdev);
err1:
	pci_disable_device(pdev);
err0:
	return rc;
}

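/* Fill @dev_info with the parameters protocol drivers query through the
 * common ops: PCI ranges, firmware/MFW versions, MAC address and MF mode.
 */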
int qed_fill_dev_info(struct qed_dev *cdev,
		      struct qed_dev_info *dev_info)
{
	struct qed_ptt *ptt;

	memset(dev_info, 0, sizeof(struct qed_dev_info));

	dev_info->num_hwfns = cdev->num_hwfns;
	dev_info->pci_mem_start = cdev->pci_params.mem_start;
	dev_info->pci_mem_end = cdev->pci_params.mem_end;
	dev_info->pci_irq = cdev->pci_params.irq;
	dev_info->rdma_supported =
	    (cdev->hwfns[0].hw_info.personality == QED_PCI_ETH_ROCE);
	dev_info->is_mf_default = IS_MF_DEFAULT(&cdev->hwfns[0]);
	ether_addr_copy(dev_info->hw_mac, cdev->hwfns[0].hw_info.hw_mac_addr);

	if (IS_PF(cdev)) {
		dev_info->fw_major = FW_MAJOR_VERSION;
		dev_info->fw_minor = FW_MINOR_VERSION;
		dev_info->fw_rev = FW_REVISION_VERSION;
		dev_info->fw_eng = FW_ENGINEERING_VERSION;
		dev_info->mf_mode = cdev->mf_mode;
		dev_info->tx_switching = true;
	} else {
		qed_vf_get_fw_version(&cdev->hwfns[0], &dev_info->fw_major,
				      &dev_info->fw_minor, &dev_info->fw_rev,
				      &dev_info->fw_eng);
	}

	if (IS_PF(cdev)) {
		ptt = qed_ptt_acquire(QED_LEADING_HWFN(cdev));
		if (ptt) {
			qed_mcp_get_mfw_ver(QED_LEADING_HWFN(cdev), ptt,
					    &dev_info->mfw_rev, NULL);

			qed_mcp_get_flash_size(QED_LEADING_HWFN(cdev), ptt,
					       &dev_info->flash_size);

			qed_ptt_release(QED_LEADING_HWFN(cdev), ptt);
		}
	} else {
		qed_mcp_get_mfw_ver(QED_LEADING_HWFN(cdev), NULL,
				    &dev_info->mfw_rev, NULL);
	}

	return 0;
}

static void qed_free_cdev(struct qed_dev *cdev)
{
	kfree((void *)cdev);
}

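/* Allocate the common device structure and initialize its basic fields */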
static struct qed_dev *qed_alloc_cdev(struct pci_dev *pdev)
{
	struct qed_dev *cdev;

	cdev = kzalloc(sizeof(*cdev), GFP_KERNEL);
	if (!cdev)
		return cdev;

	qed_init_struct(cdev);

	return cdev;
}

/* Sets the requested power state */
static int qed_set_power_state(struct qed_dev *cdev, pci_power_t state)
{
	if (!cdev)
		return -ENODEV;

	DP_VERBOSE(cdev, NETIF_MSG_DRV, "Omitting Power state change\n");
	return 0;
}

static struct qed_dev *qed_probe(struct pci_dev *pdev,
				 struct qed_probe_params *params)
{
	struct qed_dev *cdev;
	int rc;

	cdev = qed_alloc_cdev(pdev);
	if (!cdev)
		goto err0;

	cdev->protocol = params->protocol;

	if (params->is_vf)
		cdev->b_is_vf = true;

	qed_init_dp(cdev, params->dp_module, params->dp_level);

	rc = qed_init_pci(cdev, pdev);
	if (rc) {
		DP_ERR(cdev, "init pci failed\n");
		goto err1;
	}
	DP_INFO(cdev, "PCI init completed successfully\n");

	rc = qed_hw_prepare(cdev, QED_PCI_DEFAULT);
	if (rc) {
		DP_ERR(cdev, "hw prepare failed\n");
		goto err2;
	}

	DP_INFO(cdev, "qed_probe completed successfully\n");

	return cdev;

err2:
	qed_free_pci(cdev);
err1:
	qed_free_cdev(cdev);
err0:
	return NULL;
}

static void qed_remove(struct qed_dev *cdev)
{
	if (!cdev)
		return;

	qed_hw_remove(cdev);

	qed_free_pci(cdev);

	qed_set_power_state(cdev, PCI_D3hot);

	qed_free_cdev(cdev);
}

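/* Release whichever interrupt mode was enabled and reset the output params */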
static void qed_disable_msix(struct qed_dev *cdev)
{
	if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) {
		pci_disable_msix(cdev->pdev);
		kfree(cdev->int_params.msix_table);
	} else if (cdev->int_params.out.int_mode == QED_INT_MODE_MSI) {
		pci_disable_msi(cdev->pdev);
	}

	memset(&cdev->int_params.out, 0, sizeof(struct qed_int_param));
}

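/* Try to enable the requested number of MSI-X vectors; if fewer are
 * available, retry with a vector count rounded down to a multiple of the
 * number of hw-functions.
 */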
static int qed_enable_msix(struct qed_dev *cdev,
			   struct qed_int_params *int_params)
{
	int rc, cnt, i;

	cnt = int_params->in.num_vectors;

	for (i = 0; i < cnt; i++)
		int_params->msix_table[i].entry = i;

	rc = pci_enable_msix_range(cdev->pdev, int_params->msix_table,
				   int_params->in.min_msix_cnt, cnt);
	if (rc < cnt && rc >= int_params->in.min_msix_cnt &&
	    (rc % cdev->num_hwfns)) {
		pci_disable_msix(cdev->pdev);

		/* If fastpath is initialized, we need at least one interrupt
		 * per hwfn [and the slow path interrupts]. New requested number
		 * should be a multiple of the number of hwfns.
		 */
		cnt = (rc / cdev->num_hwfns) * cdev->num_hwfns;
		DP_NOTICE(cdev,
			  "Trying to enable MSI-X with less vectors (%d out of %d)\n",
			  cnt, int_params->in.num_vectors);
		rc = pci_enable_msix_exact(cdev->pdev, int_params->msix_table,
					   cnt);
		if (!rc)
			rc = cnt;
	}

	if (rc > 0) {
		/* MSI-X configuration was achieved */
		int_params->out.int_mode = QED_INT_MODE_MSIX;
		int_params->out.num_vectors = rc;
		rc = 0;
	} else {
		DP_NOTICE(cdev,
			  "Failed to enable MSI-X [Requested %d vectors][rc %d]\n",
			  cnt, rc);
	}

	return rc;
}

/* This function outputs the int mode and the number of enabled MSI-X vectors */
static int qed_set_int_mode(struct qed_dev *cdev, bool force_mode)
{
	struct qed_int_params *int_params = &cdev->int_params;
	struct msix_entry *tbl;
	int rc = 0, cnt;

	switch (int_params->in.int_mode) {
	case QED_INT_MODE_MSIX:
		/* Allocate MSIX table */
		cnt = int_params->in.num_vectors;
		int_params->msix_table = kcalloc(cnt, sizeof(*tbl), GFP_KERNEL);
		if (!int_params->msix_table) {
			rc = -ENOMEM;
			goto out;
		}

		/* Enable MSIX */
		rc = qed_enable_msix(cdev, int_params);
		if (!rc)
			goto out;

		DP_NOTICE(cdev, "Failed to enable MSI-X\n");
		kfree(int_params->msix_table);
		if (force_mode)
			goto out;
		/* Fallthrough */

	case QED_INT_MODE_MSI:
		if (cdev->num_hwfns == 1) {
			rc = pci_enable_msi(cdev->pdev);
			if (!rc) {
				int_params->out.int_mode = QED_INT_MODE_MSI;
				goto out;
			}

			DP_NOTICE(cdev, "Failed to enable MSI\n");
			if (force_mode)
				goto out;
		}
		/* Fallthrough */

	case QED_INT_MODE_INTA:
		int_params->out.int_mode = QED_INT_MODE_INTA;
		rc = 0;
		goto out;
	default:
		DP_NOTICE(cdev, "Unknown int_mode value %d\n",
			  int_params->in.int_mode);
		rc = -EINVAL;
	}

out:
	if (!rc)
		DP_INFO(cdev, "Using %s interrupts\n",
			int_params->out.int_mode == QED_INT_MODE_INTA ?
			"INTa" : int_params->out.int_mode == QED_INT_MODE_MSI ?
			"MSI" : "MSIX");
	cdev->int_coalescing_mode = QED_COAL_MODE_ENABLE;

	return rc;
}

static void qed_simd_handler_config(struct qed_dev *cdev, void *token,
				    int index, void (*handler)(void *))
{
	struct qed_hwfn *hwfn = &cdev->hwfns[index % cdev->num_hwfns];
	int relative_idx = index / cdev->num_hwfns;

	hwfn->simd_proto_handler[relative_idx].func = handler;
	hwfn->simd_proto_handler[relative_idx].token = token;
}

static void qed_simd_handler_clean(struct qed_dev *cdev, int index)
{
	struct qed_hwfn *hwfn = &cdev->hwfns[index % cdev->num_hwfns];
	int relative_idx = index / cdev->num_hwfns;

	memset(&hwfn->simd_proto_handler[relative_idx], 0,
	       sizeof(struct qed_simd_fp_handler));
}

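/* ISR for a slowpath MSI-X vector - simply schedules the slowpath tasklet */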
static irqreturn_t qed_msix_sp_int(int irq, void *tasklet)
{
	tasklet_schedule((struct tasklet_struct *)tasklet);

	return IRQ_HANDLED;
}

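/* Shared ISR for INTa/MSI - demultiplexes the IGU status into the slowpath
 * tasklet and the per-queue fastpath handlers.
 */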
static irqreturn_t qed_single_int(int irq, void *dev_instance)
{
	struct qed_dev *cdev = (struct qed_dev *)dev_instance;
	struct qed_hwfn *hwfn;
	irqreturn_t rc = IRQ_NONE;
	u64 status;
	int i, j;

	for (i = 0; i < cdev->num_hwfns; i++) {
		status = qed_int_igu_read_sisr_reg(&cdev->hwfns[i]);

		if (!status)
			continue;

		hwfn = &cdev->hwfns[i];

		/* Slowpath interrupt */
		if (unlikely(status & 0x1)) {
			tasklet_schedule(hwfn->sp_dpc);
			status &= ~0x1;
			rc = IRQ_HANDLED;
		}

		/* Fastpath interrupts */
		for (j = 0; j < 64; j++) {
			if ((0x2ULL << j) & status) {
				hwfn->simd_proto_handler[j].func(
				    hwfn->simd_proto_handler[j].token);
				status &= ~(0x2ULL << j);
				rc = IRQ_HANDLED;
			}
		}

		if (unlikely(status))
			DP_VERBOSE(hwfn, NETIF_MSG_INTR,
				   "got an unknown interrupt status 0x%llx\n",
				   status);
	}

	return rc;
}

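/* Request the slowpath interrupt for a hw-function, either its own MSI-X
 * vector or the device's single shared IRQ.
 */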
int qed_slowpath_irq_req(struct qed_hwfn *hwfn)
{
	struct qed_dev *cdev = hwfn->cdev;
	u32 int_mode;
	int rc = 0;
	u8 id;

	int_mode = cdev->int_params.out.int_mode;
	if (int_mode == QED_INT_MODE_MSIX) {
		id = hwfn->my_id;
		snprintf(hwfn->name, NAME_SIZE, "sp-%d-%02x:%02x.%02x",
			 id, cdev->pdev->bus->number,
			 PCI_SLOT(cdev->pdev->devfn), hwfn->abs_pf_id);
		rc = request_irq(cdev->int_params.msix_table[id].vector,
				 qed_msix_sp_int, 0, hwfn->name, hwfn->sp_dpc);
	} else {
		unsigned long flags = 0;

		snprintf(cdev->name, NAME_SIZE, "%02x:%02x.%02x",
			 cdev->pdev->bus->number, PCI_SLOT(cdev->pdev->devfn),
			 PCI_FUNC(cdev->pdev->devfn));

		if (cdev->int_params.out.int_mode == QED_INT_MODE_INTA)
			flags |= IRQF_SHARED;

		rc = request_irq(cdev->pdev->irq, qed_single_int,
				 flags, cdev->name, cdev);
	}

	if (rc)
		DP_NOTICE(cdev, "request_irq failed, rc = %d\n", rc);
	else
		DP_VERBOSE(hwfn, (NETIF_MSG_INTR | QED_MSG_SP),
			   "Requested slowpath %s\n",
			   (int_mode == QED_INT_MODE_MSIX) ? "MSI-X" : "IRQ");

	return rc;
}

static void qed_slowpath_irq_free(struct qed_dev *cdev)
{
	int i;

	if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) {
		for_each_hwfn(cdev, i) {
			if (!cdev->hwfns[i].b_int_requested)
				break;
			synchronize_irq(cdev->int_params.msix_table[i].vector);
			free_irq(cdev->int_params.msix_table[i].vector,
				 cdev->hwfns[i].sp_dpc);
		}
	} else {
		if (QED_LEADING_HWFN(cdev)->b_int_requested)
			free_irq(cdev->pdev->irq, cdev);
	}
	qed_int_disable_post_isr_release(cdev);
}

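/* Stop the HW and disable the slowpath tasklets of all hw-functions */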
static int qed_nic_stop(struct qed_dev *cdev)
{
	int i, rc;

	rc = qed_hw_stop(cdev);

	for (i = 0; i < cdev->num_hwfns; i++) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

		if (p_hwfn->b_sp_dpc_enabled) {
			tasklet_disable(p_hwfn->sp_dpc);
			p_hwfn->b_sp_dpc_enabled = false;
			DP_VERBOSE(cdev, NETIF_MSG_IFDOWN,
				   "Disabled sp tasklet [hwfn %d] at %p\n",
				   i, p_hwfn->sp_dpc);
		}
	}

	qed_dbg_pf_exit(cdev);

	return rc;
}

static int qed_nic_reset(struct qed_dev *cdev)
{
	int rc;

	rc = qed_hw_reset(cdev);
	if (rc)
		return rc;

	qed_resc_free(cdev);

	return 0;
}

static int qed_nic_setup(struct qed_dev *cdev)
{
	int rc, i;

	/* Determine if interface is going to require LL2 */
	if (QED_LEADING_HWFN(cdev)->hw_info.personality != QED_PCI_ETH) {
		for (i = 0; i < cdev->num_hwfns; i++) {
			struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

			p_hwfn->using_ll2 = true;
		}
	}

	rc = qed_resc_alloc(cdev);
	if (rc)
		return rc;
	DP_INFO(cdev, "Allocated qed resources\n");

	qed_resc_setup(cdev);

	return rc;
}

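/* Returns the number of fastpath interrupts the protocol driver may use,
 * bounded by what the enabled interrupt mode can provide.
 */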
static int qed_set_int_fp(struct qed_dev *cdev, u16 cnt)
{
	int limit = 0;

	/* Mark the fastpath as free/used */
	cdev->int_params.fp_initialized = cnt ? true : false;

	if (cdev->int_params.out.int_mode != QED_INT_MODE_MSIX)
		limit = cdev->num_hwfns * 63;
	else if (cdev->int_params.fp_msix_cnt)
		limit = cdev->int_params.fp_msix_cnt;

	if (!limit)
		return -ENOMEM;

	return min_t(int, cnt, limit);
}

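/* Expose the fastpath interrupt setup to the protocol driver */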
static int qed_get_int_fp(struct qed_dev *cdev, struct qed_int_info *info)
{
	memset(info, 0, sizeof(struct qed_int_info));

	if (!cdev->int_params.fp_initialized) {
		DP_INFO(cdev,
			"Protocol driver requested interrupt information, but its support is not yet configured\n");
		return -EINVAL;
	}

	/* Need to expose only MSI-X information; Single IRQ is handled solely
	 * by qed.
	 */
	if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) {
		int msix_base = cdev->int_params.fp_msix_base;

		info->msix_cnt = cdev->int_params.fp_msix_cnt;
		info->msix = &cdev->int_params.msix_table[msix_base];
	}

	return 0;
}

static int qed_slowpath_setup_int(struct qed_dev *cdev,
				  enum qed_int_mode int_mode)
{
	struct qed_sb_cnt_info sb_cnt_info;
	int rc;
	int i;

	if ((int_mode == QED_INT_MODE_MSI) && (cdev->num_hwfns > 1)) {
		DP_NOTICE(cdev, "MSI mode is not supported for CMT devices\n");
		return -EINVAL;
	}

	memset(&cdev->int_params, 0, sizeof(struct qed_int_params));
	cdev->int_params.in.int_mode = int_mode;
	for_each_hwfn(cdev, i) {
		memset(&sb_cnt_info, 0, sizeof(sb_cnt_info));
		qed_int_get_num_sbs(&cdev->hwfns[i], &sb_cnt_info);
		cdev->int_params.in.num_vectors += sb_cnt_info.sb_cnt;
		cdev->int_params.in.num_vectors++; /* slowpath */
	}

	/* We want a minimum of one slowpath and one fastpath vector per hwfn */
	cdev->int_params.in.min_msix_cnt = cdev->num_hwfns * 2;

	rc = qed_set_int_mode(cdev, false);
	if (rc) {
		DP_ERR(cdev, "qed_slowpath_setup_int ERR\n");
		return rc;
	}

	cdev->int_params.fp_msix_base = cdev->num_hwfns;
	cdev->int_params.fp_msix_cnt = cdev->int_params.out.num_vectors -
				       cdev->num_hwfns;

	return 0;
}

static int qed_slowpath_vf_setup_int(struct qed_dev *cdev)
{
	int rc;

	memset(&cdev->int_params, 0, sizeof(struct qed_int_params));
	cdev->int_params.in.int_mode = QED_INT_MODE_MSIX;

	qed_vf_get_num_rxqs(QED_LEADING_HWFN(cdev),
			    &cdev->int_params.in.num_vectors);
	if (cdev->num_hwfns > 1) {
		u8 vectors = 0;

		qed_vf_get_num_rxqs(&cdev->hwfns[1], &vectors);
		cdev->int_params.in.num_vectors += vectors;
	}

	/* We want a minimum of one fastpath vector per vf hwfn */
	cdev->int_params.in.min_msix_cnt = cdev->num_hwfns;

	rc = qed_set_int_mode(cdev, true);
	if (rc)
		return rc;

	cdev->int_params.fp_msix_base = 0;
	cdev->int_params.fp_msix_cnt = cdev->int_params.out.num_vectors;

	return 0;
}

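/* Inflate the zlib-compressed firmware data into @unzip_buf; returns the
 * decompressed size in dwords, or 0 on failure.
 */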
u32 qed_unzip_data(struct qed_hwfn *p_hwfn, u32 input_len,
		   u8 *input_buf, u32 max_size, u8 *unzip_buf)
{
	int rc;

	p_hwfn->stream->next_in = input_buf;
	p_hwfn->stream->avail_in = input_len;
	p_hwfn->stream->next_out = unzip_buf;
	p_hwfn->stream->avail_out = max_size;

	rc = zlib_inflateInit2(p_hwfn->stream, MAX_WBITS);

	if (rc != Z_OK) {
		DP_VERBOSE(p_hwfn, NETIF_MSG_DRV, "zlib init failed, rc = %d\n",
			   rc);
		return 0;
	}

	rc = zlib_inflate(p_hwfn->stream, Z_FINISH);
	zlib_inflateEnd(p_hwfn->stream);

	if (rc != Z_OK && rc != Z_STREAM_END) {
		DP_VERBOSE(p_hwfn, NETIF_MSG_DRV, "FW unzip error: %s, rc=%d\n",
			   p_hwfn->stream->msg, rc);
		return 0;
	}

	return p_hwfn->stream->total_out / 4;
}

static int qed_alloc_stream_mem(struct qed_dev *cdev)
{
	int i;
	void *workspace;

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

		p_hwfn->stream = kzalloc(sizeof(*p_hwfn->stream), GFP_KERNEL);
		if (!p_hwfn->stream)
			return -ENOMEM;

		workspace = vzalloc(zlib_inflate_workspacesize());
		if (!workspace)
			return -ENOMEM;
		p_hwfn->stream->workspace = workspace;
	}

	return 0;
}

static void qed_free_stream_mem(struct qed_dev *cdev)
{
	int i;

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

		if (!p_hwfn->stream)
			return;

		vfree(p_hwfn->stream->workspace);
		kfree(p_hwfn->stream);
	}
}

static void qed_update_pf_params(struct qed_dev *cdev,
				 struct qed_pf_params *params)
{
	int i;

	for (i = 0; i < cdev->num_hwfns; i++) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

		p_hwfn->pf_params = *params;
	}
}

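/* Bring-up sequence: request firmware, allocate resources, configure
 * interrupts, init the HW with the tunneling configuration and report the
 * driver version to the management FW.
 */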
static int qed_slowpath_start(struct qed_dev *cdev,
			      struct qed_slowpath_params *params)
{
	struct qed_tunn_start_params tunn_info;
	struct qed_mcp_drv_version drv_version;
	const u8 *data = NULL;
	struct qed_hwfn *hwfn;
	int rc = -EINVAL;

	if (qed_iov_wq_start(cdev))
		goto err;

	if (IS_PF(cdev)) {
		rc = request_firmware(&cdev->firmware, QED_FW_FILE_NAME,
				      &cdev->pdev->dev);
		if (rc) {
			DP_NOTICE(cdev,
				  "Failed to find fw file - /lib/firmware/%s\n",
				  QED_FW_FILE_NAME);
			goto err;
		}
	}

	rc = qed_nic_setup(cdev);
	if (rc)
		goto err;

	if (IS_PF(cdev))
		rc = qed_slowpath_setup_int(cdev, params->int_mode);
	else
		rc = qed_slowpath_vf_setup_int(cdev);
	if (rc)
		goto err1;

	if (IS_PF(cdev)) {
		/* Allocate stream for unzipping */
		rc = qed_alloc_stream_mem(cdev);
		if (rc)
			goto err2;

		/* First Dword used to differentiate between various sources */
		data = cdev->firmware->data + sizeof(u32);

		qed_dbg_pf_init(cdev);
	}

	memset(&tunn_info, 0, sizeof(tunn_info));
	tunn_info.tunn_mode |=	1 << QED_MODE_VXLAN_TUNN |
				1 << QED_MODE_L2GRE_TUNN |
				1 << QED_MODE_IPGRE_TUNN |
				1 << QED_MODE_L2GENEVE_TUNN |
				1 << QED_MODE_IPGENEVE_TUNN;

	tunn_info.tunn_clss_vxlan = QED_TUNN_CLSS_MAC_VLAN;
	tunn_info.tunn_clss_l2gre = QED_TUNN_CLSS_MAC_VLAN;
	tunn_info.tunn_clss_ipgre = QED_TUNN_CLSS_MAC_VLAN;

	/* Start the slowpath */
	rc = qed_hw_init(cdev, &tunn_info, true,
			 cdev->int_params.out.int_mode,
			 true, data);
	if (rc)
		goto err2;

	DP_INFO(cdev,
		"HW initialization and function start completed successfully\n");

	/* Allocate LL2 interface if needed */
	if (QED_LEADING_HWFN(cdev)->using_ll2) {
		rc = qed_ll2_alloc_if(cdev);
		if (rc)
			goto err3;
	}
	if (IS_PF(cdev)) {
		hwfn = QED_LEADING_HWFN(cdev);
		drv_version.version = (params->drv_major << 24) |
				      (params->drv_minor << 16) |
				      (params->drv_rev << 8) |
				      (params->drv_eng);
		strlcpy(drv_version.name, params->name,
			MCP_DRV_VER_STR_SIZE - 4);
		rc = qed_mcp_send_drv_version(hwfn, hwfn->p_main_ptt,
					      &drv_version);
		if (rc) {
			DP_NOTICE(cdev, "Failed sending drv version command\n");
			return rc;
		}
	}

	qed_reset_vport_stats(cdev);

	return 0;

err3:
	qed_hw_stop(cdev);
err2:
	qed_hw_timers_stop_all(cdev);
	if (IS_PF(cdev))
		qed_slowpath_irq_free(cdev);
	qed_free_stream_mem(cdev);
	qed_disable_msix(cdev);
err1:
	qed_resc_free(cdev);
err:
	if (IS_PF(cdev))
		release_firmware(cdev->firmware);

	qed_iov_wq_stop(cdev, false);

	return rc;
}

static int qed_slowpath_stop(struct qed_dev *cdev)
{
	if (!cdev)
		return -ENODEV;

	qed_ll2_dealloc_if(cdev);

	if (IS_PF(cdev)) {
		qed_free_stream_mem(cdev);
		if (IS_QED_ETH_IF(cdev))
			qed_sriov_disable(cdev, true);

		qed_nic_stop(cdev);
		qed_slowpath_irq_free(cdev);
	}

	qed_disable_msix(cdev);
	qed_nic_reset(cdev);

	qed_iov_wq_stop(cdev, true);

	if (IS_PF(cdev))
		release_firmware(cdev->firmware);

	return 0;
}

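/* Record the protocol driver's name and version string in the device */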
static void qed_set_id(struct qed_dev *cdev, char name[NAME_SIZE],
		       char ver_str[VER_SIZE])
{
	int i;

	memcpy(cdev->name, name, NAME_SIZE);
	for_each_hwfn(cdev, i)
		snprintf(cdev->hwfns[i].name, NAME_SIZE, "%s-%d", name, i);

	memcpy(cdev->ver_str, ver_str, VER_SIZE);
	cdev->drv_type = DRV_ID_DRV_TYPE_LINUX;
}

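/* Map an absolute status block id to a hw-function and initialize the
 * status block there.
 */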
static u32 qed_sb_init(struct qed_dev *cdev,
		       struct qed_sb_info *sb_info,
		       void *sb_virt_addr,
		       dma_addr_t sb_phy_addr, u16 sb_id,
		       enum qed_sb_type type)
{
	struct qed_hwfn *p_hwfn;
	int hwfn_index;
	u16 rel_sb_id;
	u8 n_hwfns;
	u32 rc;

	/* RoCE uses single engine and CMT uses two engines. When using both
	 * we force only a single engine. Storage uses only engine 0 too.
	 */
	if (type == QED_SB_TYPE_L2_QUEUE)
		n_hwfns = cdev->num_hwfns;
	else
		n_hwfns = 1;

	hwfn_index = sb_id % n_hwfns;
	p_hwfn = &cdev->hwfns[hwfn_index];
	rel_sb_id = sb_id / n_hwfns;

	DP_VERBOSE(cdev, NETIF_MSG_INTR,
		   "hwfn [%d] <--[init]-- SB %04x [0x%04x upper]\n",
		   hwfn_index, rel_sb_id, sb_id);

	rc = qed_int_sb_init(p_hwfn, p_hwfn->p_main_ptt, sb_info,
			     sb_virt_addr, sb_phy_addr, rel_sb_id);

	return rc;
}

static u32 qed_sb_release(struct qed_dev *cdev,
			  struct qed_sb_info *sb_info, u16 sb_id)
{
	struct qed_hwfn *p_hwfn;
	int hwfn_index;
	u16 rel_sb_id;
	u32 rc;

	hwfn_index = sb_id % cdev->num_hwfns;
	p_hwfn = &cdev->hwfns[hwfn_index];
	rel_sb_id = sb_id / cdev->num_hwfns;

	DP_VERBOSE(cdev, NETIF_MSG_INTR,
		   "hwfn [%d] <--[init]-- SB %04x [0x%04x upper]\n",
		   hwfn_index, rel_sb_id, sb_id);

	rc = qed_int_sb_release(p_hwfn, sb_info, rel_sb_id);

	return rc;
}

static bool qed_can_link_change(struct qed_dev *cdev)
{
	return true;
}

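/* Translate the protocol driver's link request into MCP link parameters
 * and issue the link command to the management FW.
 */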
static int qed_set_link(struct qed_dev *cdev, struct qed_link_params *params)
{
	struct qed_hwfn *hwfn;
	struct qed_mcp_link_params *link_params;
	struct qed_ptt *ptt;
	int rc;

	if (!cdev)
		return -ENODEV;

	if (IS_VF(cdev))
		return 0;

	/* The link should be set only once per PF */
	hwfn = &cdev->hwfns[0];

	ptt = qed_ptt_acquire(hwfn);
	if (!ptt)
		return -EBUSY;

	link_params = qed_mcp_get_link_params(hwfn);
	if (params->override_flags & QED_LINK_OVERRIDE_SPEED_AUTONEG)
		link_params->speed.autoneg = params->autoneg;
	if (params->override_flags & QED_LINK_OVERRIDE_SPEED_ADV_SPEEDS) {
		link_params->speed.advertised_speeds = 0;
		if ((params->adv_speeds & QED_LM_1000baseT_Half_BIT) ||
		    (params->adv_speeds & QED_LM_1000baseT_Full_BIT))
			link_params->speed.advertised_speeds |=
			    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G;
		if (params->adv_speeds & QED_LM_10000baseKR_Full_BIT)
			link_params->speed.advertised_speeds |=
			    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G;
		if (params->adv_speeds & QED_LM_25000baseKR_Full_BIT)
			link_params->speed.advertised_speeds |=
			    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G;
		if (params->adv_speeds & QED_LM_40000baseLR4_Full_BIT)
			link_params->speed.advertised_speeds |=
			    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G;
		if (params->adv_speeds & QED_LM_50000baseKR2_Full_BIT)
			link_params->speed.advertised_speeds |=
			    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G;
		if (params->adv_speeds & QED_LM_100000baseKR4_Full_BIT)
			link_params->speed.advertised_speeds |=
			    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G;
	}
	if (params->override_flags & QED_LINK_OVERRIDE_SPEED_FORCED_SPEED)
		link_params->speed.forced_speed = params->forced_speed;
	if (params->override_flags & QED_LINK_OVERRIDE_PAUSE_CONFIG) {
		if (params->pause_config & QED_LINK_PAUSE_AUTONEG_ENABLE)
			link_params->pause.autoneg = true;
		else
			link_params->pause.autoneg = false;
		if (params->pause_config & QED_LINK_PAUSE_RX_ENABLE)
			link_params->pause.forced_rx = true;
		else
			link_params->pause.forced_rx = false;
		if (params->pause_config & QED_LINK_PAUSE_TX_ENABLE)
			link_params->pause.forced_tx = true;
		else
			link_params->pause.forced_tx = false;
	}
	if (params->override_flags & QED_LINK_OVERRIDE_LOOPBACK_MODE) {
		switch (params->loopback_mode) {
		case QED_LINK_LOOPBACK_INT_PHY:
			link_params->loopback_mode = ETH_LOOPBACK_INT_PHY;
			break;
		case QED_LINK_LOOPBACK_EXT_PHY:
			link_params->loopback_mode = ETH_LOOPBACK_EXT_PHY;
			break;
		case QED_LINK_LOOPBACK_EXT:
			link_params->loopback_mode = ETH_LOOPBACK_EXT;
			break;
		case QED_LINK_LOOPBACK_MAC:
			link_params->loopback_mode = ETH_LOOPBACK_MAC;
			break;
		default:
			link_params->loopback_mode = ETH_LOOPBACK_NONE;
			break;
		}
	}

	rc = qed_mcp_set_link(hwfn, ptt, params->link_up);

	qed_ptt_release(hwfn, ptt);

	return rc;
}

static int qed_get_port_type(u32 media_type)
{
	int port_type;

	switch (media_type) {
	case MEDIA_SFPP_10G_FIBER:
	case MEDIA_SFP_1G_FIBER:
	case MEDIA_XFP_FIBER:
	case MEDIA_MODULE_FIBER:
	case MEDIA_KR:
		port_type = PORT_FIBRE;
		break;
	case MEDIA_DA_TWINAX:
		port_type = PORT_DA;
		break;
	case MEDIA_BASE_T:
		port_type = PORT_TP;
		break;
	case MEDIA_NOT_PRESENT:
		port_type = PORT_NONE;
		break;
	case MEDIA_UNSPECIFIED:
	default:
		port_type = PORT_OTHER;
		break;
	}
	return port_type;
}

static int qed_get_link_data(struct qed_hwfn *hwfn,
			     struct qed_mcp_link_params *params,
			     struct qed_mcp_link_state *link,
			     struct qed_mcp_link_capabilities *link_caps)
{
	void *p;

	if (!IS_PF(hwfn->cdev)) {
		qed_vf_get_link_params(hwfn, params);
		qed_vf_get_link_state(hwfn, link);
		qed_vf_get_link_caps(hwfn, link_caps);

		return 0;
	}

	p = qed_mcp_get_link_params(hwfn);
	if (!p)
		return -ENXIO;
	memcpy(params, p, sizeof(*params));

	p = qed_mcp_get_link_state(hwfn);
	if (!p)
		return -ENXIO;
	memcpy(link, p, sizeof(*link));

	p = qed_mcp_get_link_capabilities(hwfn);
	if (!p)
		return -ENXIO;
	memcpy(link_caps, p, sizeof(*link_caps));

	return 0;
}

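/* Translate MCP link parameters/state/capabilities into the qed_link_output
 * representation consumed by the protocol driver.
 */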
static void qed_fill_link(struct qed_hwfn *hwfn,
			  struct qed_link_output *if_link)
{
	struct qed_mcp_link_params params;
	struct qed_mcp_link_state link;
	struct qed_mcp_link_capabilities link_caps;
	u32 media_type;

	memset(if_link, 0, sizeof(*if_link));

	/* Prepare source inputs */
	if (qed_get_link_data(hwfn, &params, &link, &link_caps)) {
		dev_warn(&hwfn->cdev->pdev->dev, "no link data available\n");
		return;
	}

	/* Set the link parameters to pass to protocol driver */
	if (link.link_up)
		if_link->link_up = true;

	/* TODO - at the moment assume supported and advertised speed equal */
	if_link->supported_caps = QED_LM_FIBRE_BIT;
	if (params.speed.autoneg)
		if_link->supported_caps |= QED_LM_Autoneg_BIT;
	if (params.pause.autoneg ||
	    (params.pause.forced_rx && params.pause.forced_tx))
		if_link->supported_caps |= QED_LM_Asym_Pause_BIT;
	if (params.pause.autoneg || params.pause.forced_rx ||
	    params.pause.forced_tx)
		if_link->supported_caps |= QED_LM_Pause_BIT;

	if_link->advertised_caps = if_link->supported_caps;
	if (params.speed.advertised_speeds &
	    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G)
		if_link->advertised_caps |= QED_LM_1000baseT_Half_BIT |
		    QED_LM_1000baseT_Full_BIT;
	if (params.speed.advertised_speeds &
	    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G)
		if_link->advertised_caps |= QED_LM_10000baseKR_Full_BIT;
	if (params.speed.advertised_speeds &
	    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G)
		if_link->advertised_caps |= QED_LM_25000baseKR_Full_BIT;
	if (params.speed.advertised_speeds &
	    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G)
		if_link->advertised_caps |= QED_LM_40000baseLR4_Full_BIT;
	if (params.speed.advertised_speeds &
	    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G)
		if_link->advertised_caps |= QED_LM_50000baseKR2_Full_BIT;
	if (params.speed.advertised_speeds &
	    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G)
		if_link->advertised_caps |= QED_LM_100000baseKR4_Full_BIT;

	if (link_caps.speed_capabilities &
	    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G)
		if_link->supported_caps |= QED_LM_1000baseT_Half_BIT |
		    QED_LM_1000baseT_Full_BIT;
	if (link_caps.speed_capabilities &
	    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G)
		if_link->supported_caps |= QED_LM_10000baseKR_Full_BIT;
	if (link_caps.speed_capabilities &
	    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G)
		if_link->supported_caps |= QED_LM_25000baseKR_Full_BIT;
	if (link_caps.speed_capabilities &
	    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G)
		if_link->supported_caps |= QED_LM_40000baseLR4_Full_BIT;
	if (link_caps.speed_capabilities &
	    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G)
		if_link->supported_caps |= QED_LM_50000baseKR2_Full_BIT;
	if (link_caps.speed_capabilities &
	    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G)
		if_link->supported_caps |= QED_LM_100000baseKR4_Full_BIT;

	if (link.link_up)
		if_link->speed = link.speed;

	/* TODO - fill duplex properly */
	if_link->duplex = DUPLEX_FULL;
	qed_mcp_get_media_type(hwfn->cdev, &media_type);
	if_link->port = qed_get_port_type(media_type);

	if_link->autoneg = params.speed.autoneg;

	if (params.pause.autoneg)
		if_link->pause_config |= QED_LINK_PAUSE_AUTONEG_ENABLE;
	if (params.pause.forced_rx)
		if_link->pause_config |= QED_LINK_PAUSE_RX_ENABLE;
	if (params.pause.forced_tx)
		if_link->pause_config |= QED_LINK_PAUSE_TX_ENABLE;

	/* Link partner capabilities */
	if (link.partner_adv_speed & QED_LINK_PARTNER_SPEED_1G_HD)
		if_link->lp_caps |= QED_LM_1000baseT_Half_BIT;
	if (link.partner_adv_speed & QED_LINK_PARTNER_SPEED_1G_FD)
		if_link->lp_caps |= QED_LM_1000baseT_Full_BIT;
	if (link.partner_adv_speed & QED_LINK_PARTNER_SPEED_10G)
		if_link->lp_caps |= QED_LM_10000baseKR_Full_BIT;
	if (link.partner_adv_speed & QED_LINK_PARTNER_SPEED_25G)
		if_link->lp_caps |= QED_LM_25000baseKR_Full_BIT;
	if (link.partner_adv_speed & QED_LINK_PARTNER_SPEED_40G)
		if_link->lp_caps |= QED_LM_40000baseLR4_Full_BIT;
	if (link.partner_adv_speed & QED_LINK_PARTNER_SPEED_50G)
		if_link->lp_caps |= QED_LM_50000baseKR2_Full_BIT;
	if (link.partner_adv_speed & QED_LINK_PARTNER_SPEED_100G)
		if_link->lp_caps |= QED_LM_100000baseKR4_Full_BIT;

	if (link.an_complete)
		if_link->lp_caps |= QED_LM_Autoneg_BIT;

	if (link.partner_adv_pause)
		if_link->lp_caps |= QED_LM_Pause_BIT;
	if (link.partner_adv_pause == QED_LINK_PARTNER_ASYMMETRIC_PAUSE ||
	    link.partner_adv_pause == QED_LINK_PARTNER_BOTH_PAUSE)
		if_link->lp_caps |= QED_LM_Asym_Pause_BIT;
}

static void qed_get_current_link(struct qed_dev *cdev,
				 struct qed_link_output *if_link)
{
	int i;

	qed_fill_link(&cdev->hwfns[0], if_link);

	for_each_hwfn(cdev, i)
		qed_inform_vf_link_state(&cdev->hwfns[i]);
}

void qed_link_update(struct qed_hwfn *hwfn)
{
	void *cookie = hwfn->cdev->ops_cookie;
	struct qed_common_cb_ops *op = hwfn->cdev->protocol_ops.common;
	struct qed_link_output if_link;

	qed_fill_link(hwfn, &if_link);
	qed_inform_vf_link_state(hwfn);

	if (IS_LEAD_HWFN(hwfn) && cookie)
		op->link_update(cookie, &if_link);
}

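/* Ask the management FW to drain the NIG on every hw-function */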
static int qed_drain(struct qed_dev *cdev)
{
	struct qed_hwfn *hwfn;
	struct qed_ptt *ptt;
	int i, rc;

	if (IS_VF(cdev))
		return 0;

	for_each_hwfn(cdev, i) {
		hwfn = &cdev->hwfns[i];
		ptt = qed_ptt_acquire(hwfn);
		if (!ptt) {
			DP_NOTICE(hwfn, "Failed to drain NIG; No PTT\n");
			return -EBUSY;
		}
		rc = qed_mcp_drain(hwfn, ptt);
		if (rc)
			return rc;
		qed_ptt_release(hwfn, ptt);
	}

	return 0;
}

static void qed_get_coalesce(struct qed_dev *cdev, u16 *rx_coal, u16 *tx_coal)
{
	*rx_coal = cdev->rx_coalesce_usecs;
	*tx_coal = cdev->tx_coalesce_usecs;
}

static int qed_set_coalesce(struct qed_dev *cdev, u16 rx_coal, u16 tx_coal,
			    u8 qid, u16 sb_id)
{
	struct qed_hwfn *hwfn;
	struct qed_ptt *ptt;
	int hwfn_index;
	int status = 0;

	hwfn_index = qid % cdev->num_hwfns;
	hwfn = &cdev->hwfns[hwfn_index];
	ptt = qed_ptt_acquire(hwfn);
	if (!ptt)
		return -EAGAIN;

	status = qed_set_rxq_coalesce(hwfn, ptt, rx_coal,
				      qid / cdev->num_hwfns, sb_id);
	if (status)
		goto out;
	status = qed_set_txq_coalesce(hwfn, ptt, tx_coal,
				      qid / cdev->num_hwfns, sb_id);
out:
	qed_ptt_release(hwfn, ptt);

	return status;
}

static int qed_set_led(struct qed_dev *cdev, enum qed_led_mode mode)
{
	struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
	struct qed_ptt *ptt;
	int status = 0;

	ptt = qed_ptt_acquire(hwfn);
	if (!ptt)
		return -EAGAIN;

	status = qed_mcp_set_led(hwfn, ptt, mode);

	qed_ptt_release(hwfn, ptt);

	return status;
}

struct qed_selftest_ops qed_selftest_ops_pass = {
	.selftest_memory = &qed_selftest_memory,
	.selftest_interrupt = &qed_selftest_interrupt,
	.selftest_register = &qed_selftest_register,
	.selftest_clock = &qed_selftest_clock,
};

const struct qed_common_ops qed_common_ops_pass = {
	.selftest = &qed_selftest_ops_pass,
	.probe = &qed_probe,
	.remove = &qed_remove,
	.set_power_state = &qed_set_power_state,
	.set_id = &qed_set_id,
	.update_pf_params = &qed_update_pf_params,
	.slowpath_start = &qed_slowpath_start,
	.slowpath_stop = &qed_slowpath_stop,
	.set_fp_int = &qed_set_int_fp,
	.get_fp_int = &qed_get_int_fp,
	.sb_init = &qed_sb_init,
	.sb_release = &qed_sb_release,
	.simd_handler_config = &qed_simd_handler_config,
	.simd_handler_clean = &qed_simd_handler_clean,
	.can_link_change = &qed_can_link_change,
	.set_link = &qed_set_link,
	.get_link = &qed_get_current_link,
	.drain = &qed_drain,
	.update_msglvl = &qed_init_dp,
	.dbg_all_data = &qed_dbg_all_data,
	.dbg_all_data_size = &qed_dbg_all_data_size,
	.chain_alloc = &qed_chain_alloc,
	.chain_free = &qed_chain_free,
	.get_coalesce = &qed_get_coalesce,
	.set_coalesce = &qed_set_coalesce,
	.set_led = &qed_set_led,
};

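/* Collect protocol statistics for the management FW */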
void qed_get_protocol_stats(struct qed_dev *cdev,
			    enum qed_mcp_protocol_type type,
			    union qed_mcp_protocol_stats *stats)
{
	struct qed_eth_stats eth_stats;

	memset(stats, 0, sizeof(*stats));

	switch (type) {
	case QED_MCP_LAN_STATS:
		qed_get_vport_stats(cdev, &eth_stats);
		stats->lan_stats.ucast_rx_pkts = eth_stats.rx_ucast_pkts;
		stats->lan_stats.ucast_tx_pkts = eth_stats.tx_ucast_pkts;
		stats->lan_stats.fcs_err = -1;
		break;
	default:
		DP_ERR(cdev, "Invalid protocol type = %d\n", type);
		return;
	}
}