/*
 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <asm-generic/kmap_types.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/io-mapping.h>
#include <linux/interrupt.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/cq.h>
#include <linux/mlx5/qp.h>
#include <linux/mlx5/srq.h>
#include <linux/debugfs.h>
#include <linux/kmod.h>
#include <linux/mlx5/mlx5_ifc.h>
#include "mlx5_core.h"

MODULE_AUTHOR("Eli Cohen <eli@mellanox.com>");
MODULE_DESCRIPTION("Mellanox Connect-IB, ConnectX-4 core driver");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRIVER_VERSION);

int mlx5_core_debug_mask;
module_param_named(debug_mask, mlx5_core_debug_mask, int, 0644);
MODULE_PARM_DESC(debug_mask, "debug mask: 1 = dump cmd data, 2 = dump cmd exec time, 3 = both. Default=0");

#define MLX5_DEFAULT_PROF       2
static int prof_sel = MLX5_DEFAULT_PROF;
module_param_named(prof_sel, prof_sel, int, 0444);
MODULE_PARM_DESC(prof_sel, "profile selector. Valid range 0 - 2");

struct workqueue_struct *mlx5_core_wq;
static LIST_HEAD(intf_list);
static LIST_HEAD(dev_list);
static DEFINE_MUTEX(intf_mutex);

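/*
 * One mlx5_device_context is allocated per (core device, interface) pair;
 * it links the context returned by the interface's add() callback into the
 * device's ctx_list.
 */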
struct mlx5_device_context {
        struct list_head        list;
        struct mlx5_interface  *intf;
        void                   *context;
};

static struct mlx5_profile profile[] = {
        [0] = {
                .mask           = 0,
        },
        [1] = {
                .mask           = MLX5_PROF_MASK_QP_SIZE,
        },
        [2] = {
                .mask           = MLX5_PROF_MASK_QP_SIZE |
                                  MLX5_PROF_MASK_MR_CACHE,
        },
};

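/*
 * Prefer 64-bit streaming and coherent DMA masks, falling back to 32-bit
 * when the platform cannot satisfy them, and cap DMA segments at 2GB.
 */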
static int set_dma_caps(struct pci_dev *pdev)
{
        int err;

        err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
        if (err) {
                dev_warn(&pdev->dev, "Warning: couldn't set 64-bit PCI DMA mask\n");
                err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
                if (err) {
                        dev_err(&pdev->dev, "Can't set PCI DMA mask, aborting\n");
                        return err;
                }
        }

        err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
        if (err) {
                dev_warn(&pdev->dev,
                         "Warning: couldn't set 64-bit consistent PCI DMA mask\n");
                err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
                if (err) {
                        dev_err(&pdev->dev,
                                "Can't set consistent PCI DMA mask, aborting\n");
                        return err;
                }
        }

        dma_set_max_seg_size(&pdev->dev, 2u * 1024 * 1024 * 1024);
        return err;
}

static int request_bar(struct pci_dev *pdev)
{
        int err = 0;

        if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
                dev_err(&pdev->dev, "Missing registers BAR, aborting\n");
                return -ENODEV;
        }

        err = pci_request_regions(pdev, DRIVER_NAME);
        if (err)
                dev_err(&pdev->dev, "Couldn't get PCI resources, aborting\n");

        return err;
}

static void release_bar(struct pci_dev *pdev)
{
        pci_release_regions(pdev);
}

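/*
 * Request one completion vector per online CPU per port on top of the
 * MLX5_EQ_VEC_COMP_BASE control vectors, bounded by the device's
 * log_max_eq capability.
 */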
static int mlx5_enable_msix(struct mlx5_core_dev *dev)
{
        struct mlx5_priv *priv = &dev->priv;
        struct mlx5_eq_table *table = &priv->eq_table;
        int num_eqs = 1 << MLX5_CAP_GEN(dev, log_max_eq);
        int nvec;
        int i;

        nvec = MLX5_CAP_GEN(dev, num_ports) * num_online_cpus() +
               MLX5_EQ_VEC_COMP_BASE;
        nvec = min_t(int, nvec, num_eqs);
        if (nvec <= MLX5_EQ_VEC_COMP_BASE)
                return -ENOMEM;

        priv->msix_arr = kcalloc(nvec, sizeof(*priv->msix_arr), GFP_KERNEL);
        priv->irq_info = kcalloc(nvec, sizeof(*priv->irq_info), GFP_KERNEL);
        if (!priv->msix_arr || !priv->irq_info)
                goto err_free_msix;

        for (i = 0; i < nvec; i++)
                priv->msix_arr[i].entry = i;

        nvec = pci_enable_msix_range(dev->pdev, priv->msix_arr,
                                     MLX5_EQ_VEC_COMP_BASE + 1, nvec);
        if (nvec < 0)
                return nvec;

        table->num_comp_vectors = nvec - MLX5_EQ_VEC_COMP_BASE;

        return 0;

err_free_msix:
        kfree(priv->irq_info);
        kfree(priv->msix_arr);
        return -ENOMEM;
}

static void mlx5_disable_msix(struct mlx5_core_dev *dev)
{
        struct mlx5_priv *priv = &dev->priv;

        pci_disable_msix(dev->pdev);
        kfree(priv->irq_info);
        kfree(priv->msix_arr);
}

struct mlx5_reg_host_endianess {
        u8      he;
        u8      rsvd[15];
};

#define CAP_MASK(pos, size) ((u64)((1 << (size)) - 1) << (pos))

enum {
        MLX5_CAP_BITS_RW_MASK = CAP_MASK(MLX5_CAP_OFF_CMDIF_CSUM, 2) |
                                MLX5_DEV_CAP_FLAG_DCT,
};

static u16 to_fw_pkey_sz(u32 size)
{
        switch (size) {
        case 128:  return 0;
        case 256:  return 1;
        case 512:  return 2;
        case 1024: return 3;
        case 2048: return 4;
        case 4096: return 5;
        default:
                pr_warn("invalid pkey table size %d\n", size);
                return 0;
        }
}

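/*
 * Query one capability group from firmware. op_mod encodes the capability
 * type in its upper bits and the mode (current vs. maximum supported) in
 * bit 0; the result is cached in hca_caps_cur[] or hca_caps_max[].
 */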
int mlx5_core_get_caps(struct mlx5_core_dev *dev, enum mlx5_cap_type cap_type,
                       enum mlx5_cap_mode cap_mode)
{
        u8 in[MLX5_ST_SZ_BYTES(query_hca_cap_in)];
        int out_sz = MLX5_ST_SZ_BYTES(query_hca_cap_out);
        void *out, *hca_caps;
        u16 opmod = (cap_type << 1) | (cap_mode & 0x01);
        int err;

        memset(in, 0, sizeof(in));
        out = kzalloc(out_sz, GFP_KERNEL);
        if (!out)
                return -ENOMEM;

        MLX5_SET(query_hca_cap_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_CAP);
        MLX5_SET(query_hca_cap_in, in, op_mod, opmod);
        err = mlx5_cmd_exec(dev, in, sizeof(in), out, out_sz);
        if (err)
                goto query_ex;

        err = mlx5_cmd_status_to_err_v2(out);
        if (err) {
                mlx5_core_warn(dev,
                               "QUERY_HCA_CAP : type(%x) opmode(%x) Failed(%d)\n",
                               cap_type, cap_mode, err);
                goto query_ex;
        }

        hca_caps = MLX5_ADDR_OF(query_hca_cap_out, out, capability);

        switch (cap_mode) {
        case HCA_CAP_OPMOD_GET_MAX:
                memcpy(dev->hca_caps_max[cap_type], hca_caps,
                       MLX5_UN_SZ_BYTES(hca_cap_union));
                break;
        case HCA_CAP_OPMOD_GET_CUR:
                memcpy(dev->hca_caps_cur[cap_type], hca_caps,
                       MLX5_UN_SZ_BYTES(hca_cap_union));
                break;
        default:
                mlx5_core_warn(dev,
                               "Tried to query dev cap type(%x) with wrong opmode(%x)\n",
                               cap_type, cap_mode);
                err = -EINVAL;
                break;
        }
query_ex:
        kfree(out);
        return err;
}

static int set_caps(struct mlx5_core_dev *dev, void *in, int in_sz)
{
        u32 out[MLX5_ST_SZ_DW(set_hca_cap_out)];
        int err;

        memset(out, 0, sizeof(out));

        MLX5_SET(set_hca_cap_in, in, opcode, MLX5_CMD_OP_SET_HCA_CAP);
        err = mlx5_cmd_exec(dev, in, in_sz, out, sizeof(out));
        if (err)
                return err;

        err = mlx5_cmd_status_to_err_v2(out);

        return err;
}

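/*
 * Read the device's maximum and current general caps, then program the
 * values the driver wants: a 128-entry pkey table, the profile's
 * log_max_qp, command-interface checksum disabled, and a UAR page size
 * matching the host PAGE_SHIFT.
 */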
static int handle_hca_cap(struct mlx5_core_dev *dev)
{
        void *set_ctx = NULL;
        struct mlx5_profile *prof = dev->profile;
        void *set_hca_cap;
        int set_sz = MLX5_ST_SZ_BYTES(set_hca_cap_in);
        int err = -ENOMEM;

        set_ctx = kzalloc(set_sz, GFP_KERNEL);
        if (!set_ctx)
                goto query_ex;

        err = mlx5_core_get_caps(dev, MLX5_CAP_GENERAL, HCA_CAP_OPMOD_GET_MAX);
        if (err)
                goto query_ex;

        err = mlx5_core_get_caps(dev, MLX5_CAP_GENERAL, HCA_CAP_OPMOD_GET_CUR);
        if (err)
                goto query_ex;

        set_hca_cap = MLX5_ADDR_OF(set_hca_cap_in, set_ctx,
                                   capability);
        memcpy(set_hca_cap, dev->hca_caps_cur[MLX5_CAP_GENERAL],
               MLX5_ST_SZ_BYTES(cmd_hca_cap));

        mlx5_core_dbg(dev, "Current Pkey table size %d Setting new size %d\n",
                      mlx5_to_sw_pkey_sz(MLX5_CAP_GEN(dev, pkey_table_size)),
                      128);
        /* we limit the size of the pkey table to 128 entries for now */
        MLX5_SET(cmd_hca_cap, set_hca_cap, pkey_table_size,
                 to_fw_pkey_sz(128));

        if (prof->mask & MLX5_PROF_MASK_QP_SIZE)
                MLX5_SET(cmd_hca_cap, set_hca_cap, log_max_qp,
                         prof->log_max_qp);

        /* disable cmdif checksum */
        MLX5_SET(cmd_hca_cap, set_hca_cap, cmdif_checksum, 0);

        MLX5_SET(cmd_hca_cap, set_hca_cap, log_uar_page_sz, PAGE_SHIFT - 12);

        err = set_caps(dev, set_ctx, set_sz);

query_ex:
        kfree(set_ctx);
        return err;
}

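/* Tell firmware the host endianness via the HOST_ENDIANNESS access register. */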
static int set_hca_ctrl(struct mlx5_core_dev *dev)
{
        struct mlx5_reg_host_endianess he_in;
        struct mlx5_reg_host_endianess he_out;
        int err;

        memset(&he_in, 0, sizeof(he_in));
        he_in.he = MLX5_SET_HOST_ENDIANNESS;
        err = mlx5_core_access_reg(dev, &he_in,  sizeof(he_in),
                                        &he_out, sizeof(he_out),
                                        MLX5_REG_HOST_ENDIANNESS, 0, 1);
        return err;
}

static int mlx5_core_enable_hca(struct mlx5_core_dev *dev)
{
        int err;
        struct mlx5_enable_hca_mbox_in in;
        struct mlx5_enable_hca_mbox_out out;

        memset(&in, 0, sizeof(in));
        memset(&out, 0, sizeof(out));
        in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_ENABLE_HCA);
        err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
        if (err)
                return err;

        if (out.hdr.status)
                return mlx5_cmd_status_to_err(&out.hdr);

        return 0;
}

static int mlx5_core_disable_hca(struct mlx5_core_dev *dev)
{
        int err;
        struct mlx5_disable_hca_mbox_in in;
        struct mlx5_disable_hca_mbox_out out;

        memset(&in, 0, sizeof(in));
        memset(&out, 0, sizeof(out));
        in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DISABLE_HCA);
        err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
        if (err)
                return err;

        if (out.hdr.status)
                return mlx5_cmd_status_to_err(&out.hdr);

        return 0;
}

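/*
 * Spread completion IRQs across CPUs local to the device's NUMA node,
 * one affinity hint per completion vector.
 */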
static int mlx5_irq_set_affinity_hint(struct mlx5_core_dev *mdev, int i)
{
        struct mlx5_priv *priv  = &mdev->priv;
        struct msix_entry *msix = priv->msix_arr;
        int irq                 = msix[i + MLX5_EQ_VEC_COMP_BASE].vector;
        int numa_node           = priv->numa_node;
        int err;

        if (!zalloc_cpumask_var(&priv->irq_info[i].mask, GFP_KERNEL)) {
                mlx5_core_warn(mdev, "zalloc_cpumask_var failed");
                return -ENOMEM;
        }

        cpumask_set_cpu(cpumask_local_spread(i, numa_node),
                        priv->irq_info[i].mask);

        err = irq_set_affinity_hint(irq, priv->irq_info[i].mask);
        if (err) {
                mlx5_core_warn(mdev, "irq_set_affinity_hint failed,irq 0x%.4x",
                               irq);
                goto err_clear_mask;
        }

        return 0;

err_clear_mask:
        free_cpumask_var(priv->irq_info[i].mask);
        return err;
}

static void mlx5_irq_clear_affinity_hint(struct mlx5_core_dev *mdev, int i)
{
        struct mlx5_priv *priv  = &mdev->priv;
        struct msix_entry *msix = priv->msix_arr;
        int irq                 = msix[i + MLX5_EQ_VEC_COMP_BASE].vector;

        irq_set_affinity_hint(irq, NULL);
        free_cpumask_var(priv->irq_info[i].mask);
}

static int mlx5_irq_set_affinity_hints(struct mlx5_core_dev *mdev)
{
        int err;
        int i;

        for (i = 0; i < mdev->priv.eq_table.num_comp_vectors; i++) {
                err = mlx5_irq_set_affinity_hint(mdev, i);
                if (err)
                        goto err_out;
        }

        return 0;

err_out:
        for (i--; i >= 0; i--)
                mlx5_irq_clear_affinity_hint(mdev, i);

        return err;
}

static void mlx5_irq_clear_affinity_hints(struct mlx5_core_dev *mdev)
{
        int i;

        for (i = 0; i < mdev->priv.eq_table.num_comp_vectors; i++)
                mlx5_irq_clear_affinity_hint(mdev, i);
}

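/* Translate a completion vector index to its EQ number and IRQ number. */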
int mlx5_vector2eqn(struct mlx5_core_dev *dev, int vector, int *eqn, int *irqn)
{
        struct mlx5_eq_table *table = &dev->priv.eq_table;
        struct mlx5_eq *eq, *n;
        int err = -ENOENT;

        spin_lock(&table->lock);
        list_for_each_entry_safe(eq, n, &table->comp_eqs_list, list) {
                if (eq->index == vector) {
                        *eqn = eq->eqn;
                        *irqn = eq->irqn;
                        err = 0;
                        break;
                }
        }
        spin_unlock(&table->lock);

        return err;
}
EXPORT_SYMBOL(mlx5_vector2eqn);

static void free_comp_eqs(struct mlx5_core_dev *dev)
{
        struct mlx5_eq_table *table = &dev->priv.eq_table;
        struct mlx5_eq *eq, *n;

        spin_lock(&table->lock);
        list_for_each_entry_safe(eq, n, &table->comp_eqs_list, list) {
                list_del(&eq->list);
                spin_unlock(&table->lock);
                if (mlx5_destroy_unmap_eq(dev, eq))
                        mlx5_core_warn(dev, "failed to destroy EQ 0x%x\n",
                                       eq->eqn);
                kfree(eq);
                spin_lock(&table->lock);
        }
        spin_unlock(&table->lock);
}

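/*
 * Create one completion EQ per enabled MSI-X completion vector, each sized
 * MLX5_COMP_EQ_SIZE and named mlx5_comp<n>.
 */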
static int alloc_comp_eqs(struct mlx5_core_dev *dev)
{
        struct mlx5_eq_table *table = &dev->priv.eq_table;
        char name[MLX5_MAX_IRQ_NAME];
        struct mlx5_eq *eq;
        int ncomp_vec;
        int nent;
        int err;
        int i;

        INIT_LIST_HEAD(&table->comp_eqs_list);
        ncomp_vec = table->num_comp_vectors;
        nent = MLX5_COMP_EQ_SIZE;
        for (i = 0; i < ncomp_vec; i++) {
                eq = kzalloc(sizeof(*eq), GFP_KERNEL);
                if (!eq) {
                        err = -ENOMEM;
                        goto clean;
                }

                snprintf(name, MLX5_MAX_IRQ_NAME, "mlx5_comp%d", i);
                err = mlx5_create_map_eq(dev, eq,
                                         i + MLX5_EQ_VEC_COMP_BASE, nent, 0,
                                         name, &dev->priv.uuari.uars[0]);
                if (err) {
                        kfree(eq);
                        goto clean;
                }
                mlx5_core_dbg(dev, "allocated completion EQN %d\n", eq->eqn);
                eq->index = i;
                spin_lock(&table->lock);
                list_add_tail(&eq->list, &table->comp_eqs_list);
                spin_unlock(&table->lock);
        }

        return 0;

clean:
        free_comp_eqs(dev);
        return err;
}

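/*
 * Negotiate the ISSI level with firmware: query the supported set and
 * switch to ISSI 1 when offered, otherwise stay on ISSI 0 (older firmware
 * reports BAD_OP for the query).
 */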
#ifdef CONFIG_MLX5_CORE_EN
static int mlx5_core_set_issi(struct mlx5_core_dev *dev)
{
        u32 query_in[MLX5_ST_SZ_DW(query_issi_in)];
        u32 query_out[MLX5_ST_SZ_DW(query_issi_out)];
        u32 set_in[MLX5_ST_SZ_DW(set_issi_in)];
        u32 set_out[MLX5_ST_SZ_DW(set_issi_out)];
        int err;
        u32 sup_issi;

        memset(query_in, 0, sizeof(query_in));
        memset(query_out, 0, sizeof(query_out));

        MLX5_SET(query_issi_in, query_in, opcode, MLX5_CMD_OP_QUERY_ISSI);

        err = mlx5_cmd_exec_check_status(dev, query_in, sizeof(query_in),
                                         query_out, sizeof(query_out));
        if (err) {
                if (((struct mlx5_outbox_hdr *)query_out)->status ==
                    MLX5_CMD_STAT_BAD_OP_ERR) {
                        pr_debug("Only ISSI 0 is supported\n");
                        return 0;
                }

                pr_err("failed to query ISSI\n");
                return err;
        }

        sup_issi = MLX5_GET(query_issi_out, query_out, supported_issi_dw0);

        if (sup_issi & (1 << 1)) {
                memset(set_in, 0, sizeof(set_in));
                memset(set_out, 0, sizeof(set_out));

                MLX5_SET(set_issi_in, set_in, opcode, MLX5_CMD_OP_SET_ISSI);
                MLX5_SET(set_issi_in, set_in, current_issi, 1);

                err = mlx5_cmd_exec_check_status(dev, set_in, sizeof(set_in),
                                                 set_out, sizeof(set_out));
                if (err) {
                        pr_err("failed to set ISSI=1\n");
                        return err;
                }

                dev->issi = 1;

                return 0;
        } else if (sup_issi & (1 << 0) || !sup_issi) {
                return 0;
        }

        return -ENOTSUPP;
}
#endif

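/* Map the BlueFlame area of BAR 0 as a write-combining io_mapping. */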
static int map_bf_area(struct mlx5_core_dev *dev)
{
        resource_size_t bf_start = pci_resource_start(dev->pdev, 0);
        resource_size_t bf_len = pci_resource_len(dev->pdev, 0);

        dev->priv.bf_mapping = io_mapping_create_wc(bf_start, bf_len);

        return dev->priv.bf_mapping ? 0 : -ENOMEM;
}

static void unmap_bf_area(struct mlx5_core_dev *dev)
{
        if (dev->priv.bf_mapping)
                io_mapping_free(dev->priv.bf_mapping);
}

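/*
 * Bring the device up: enable PCI and DMA, map the initialization segment,
 * start the command interface, negotiate caps and pages with firmware, set
 * up EQs, UARs and IRQ affinity, and initialize the resource tables.
 * Errors unwind through the goto ladder at the end of the function.
 */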
static int mlx5_dev_init(struct mlx5_core_dev *dev, struct pci_dev *pdev)
{
        struct mlx5_priv *priv = &dev->priv;
        int err;

        dev->pdev = pdev;
        pci_set_drvdata(dev->pdev, dev);
        strncpy(priv->name, dev_name(&pdev->dev), MLX5_MAX_NAME_LEN);
        priv->name[MLX5_MAX_NAME_LEN - 1] = 0;

        mutex_init(&priv->pgdir_mutex);
        INIT_LIST_HEAD(&priv->pgdir_list);
        spin_lock_init(&priv->mkey_lock);

        mutex_init(&priv->alloc_mutex);

        priv->numa_node = dev_to_node(&dev->pdev->dev);

        priv->dbg_root = debugfs_create_dir(dev_name(&pdev->dev), mlx5_debugfs_root);
        if (!priv->dbg_root)
                return -ENOMEM;

        err = pci_enable_device(pdev);
        if (err) {
                dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
                goto err_dbg;
        }

        err = request_bar(pdev);
        if (err) {
                dev_err(&pdev->dev, "error requesting BARs, aborting\n");
                goto err_disable;
        }

        pci_set_master(pdev);

        err = set_dma_caps(pdev);
        if (err) {
                dev_err(&pdev->dev, "Failed setting DMA capabilities mask, aborting\n");
                goto err_clr_master;
        }

        dev->iseg_base = pci_resource_start(dev->pdev, 0);
        dev->iseg = ioremap(dev->iseg_base, sizeof(*dev->iseg));
        if (!dev->iseg) {
                err = -ENOMEM;
                dev_err(&pdev->dev, "Failed mapping initialization segment, aborting\n");
                goto err_clr_master;
        }
        dev_info(&pdev->dev, "firmware version: %d.%d.%d\n", fw_rev_maj(dev),
                 fw_rev_min(dev), fw_rev_sub(dev));

        err = mlx5_cmd_init(dev);
        if (err) {
                dev_err(&pdev->dev, "Failed initializing command interface, aborting\n");
                goto err_unmap;
        }

        mlx5_pagealloc_init(dev);

        err = mlx5_core_enable_hca(dev);
        if (err) {
                dev_err(&pdev->dev, "enable hca failed\n");
                goto err_pagealloc_cleanup;
        }

#ifdef CONFIG_MLX5_CORE_EN
        err = mlx5_core_set_issi(dev);
        if (err) {
                dev_err(&pdev->dev, "failed to set issi\n");
                goto err_disable_hca;
        }
#endif

        err = mlx5_satisfy_startup_pages(dev, 1);
        if (err) {
                dev_err(&pdev->dev, "failed to allocate boot pages\n");
                goto err_disable_hca;
        }

        err = set_hca_ctrl(dev);
        if (err) {
                dev_err(&pdev->dev, "set_hca_ctrl failed\n");
                goto reclaim_boot_pages;
        }

        err = handle_hca_cap(dev);
        if (err) {
                dev_err(&pdev->dev, "handle_hca_cap failed\n");
                goto reclaim_boot_pages;
        }

        err = mlx5_satisfy_startup_pages(dev, 0);
        if (err) {
                dev_err(&pdev->dev, "failed to allocate init pages\n");
                goto reclaim_boot_pages;
        }

        err = mlx5_pagealloc_start(dev);
        if (err) {
                dev_err(&pdev->dev, "mlx5_pagealloc_start failed\n");
                goto reclaim_boot_pages;
        }

        err = mlx5_cmd_init_hca(dev);
        if (err) {
                dev_err(&pdev->dev, "init hca failed\n");
                goto err_pagealloc_stop;
        }

        mlx5_start_health_poll(dev);

        err = mlx5_query_hca_caps(dev);
        if (err) {
                dev_err(&pdev->dev, "query hca failed\n");
                goto err_stop_poll;
        }

        err = mlx5_query_board_id(dev);
        if (err) {
                dev_err(&pdev->dev, "query board id failed\n");
                goto err_stop_poll;
        }

        err = mlx5_enable_msix(dev);
        if (err) {
                dev_err(&pdev->dev, "enable msix failed\n");
                goto err_stop_poll;
        }

        err = mlx5_eq_init(dev);
        if (err) {
                dev_err(&pdev->dev, "failed to initialize eq\n");
                goto disable_msix;
        }

        err = mlx5_alloc_uuars(dev, &priv->uuari);
        if (err) {
                dev_err(&pdev->dev, "Failed allocating uar, aborting\n");
                goto err_eq_cleanup;
        }

        err = mlx5_start_eqs(dev);
        if (err) {
                dev_err(&pdev->dev, "Failed to start pages and async EQs\n");
                goto err_free_uar;
        }

        err = alloc_comp_eqs(dev);
        if (err) {
                dev_err(&pdev->dev, "Failed to alloc completion EQs\n");
                goto err_stop_eqs;
        }

        if (map_bf_area(dev))
                dev_err(&pdev->dev, "Failed to map blue flame area\n");

        err = mlx5_irq_set_affinity_hints(dev);
        if (err) {
                dev_err(&pdev->dev, "Failed to alloc affinity hint cpumask\n");
                goto err_unmap_bf_area;
        }

        MLX5_INIT_DOORBELL_LOCK(&priv->cq_uar_lock);

        mlx5_init_cq_table(dev);
        mlx5_init_qp_table(dev);
        mlx5_init_srq_table(dev);
        mlx5_init_mr_table(dev);

        return 0;

err_unmap_bf_area:
        unmap_bf_area(dev);
        free_comp_eqs(dev);
err_stop_eqs:
        mlx5_stop_eqs(dev);
err_free_uar:
        mlx5_free_uuars(dev, &priv->uuari);
err_eq_cleanup:
        mlx5_eq_cleanup(dev);
disable_msix:
        mlx5_disable_msix(dev);
err_stop_poll:
        mlx5_stop_health_poll(dev);
        if (mlx5_cmd_teardown_hca(dev)) {
                dev_err(&dev->pdev->dev, "tear_down_hca failed, skip cleanup\n");
                return err;
        }
err_pagealloc_stop:
        mlx5_pagealloc_stop(dev);
reclaim_boot_pages:
        mlx5_reclaim_startup_pages(dev);
err_disable_hca:
        mlx5_core_disable_hca(dev);
err_pagealloc_cleanup:
        mlx5_pagealloc_cleanup(dev);
        mlx5_cmd_cleanup(dev);
err_unmap:
        iounmap(dev->iseg);
err_clr_master:
        pci_clear_master(dev->pdev);
        release_bar(dev->pdev);
err_disable:
        pci_disable_device(dev->pdev);
err_dbg:
        debugfs_remove(priv->dbg_root);
        return err;
}

static void mlx5_dev_cleanup(struct mlx5_core_dev *dev)
{
        struct mlx5_priv *priv = &dev->priv;

        mlx5_cleanup_srq_table(dev);
        mlx5_cleanup_qp_table(dev);
        mlx5_cleanup_cq_table(dev);
        mlx5_irq_clear_affinity_hints(dev);
        unmap_bf_area(dev);
        free_comp_eqs(dev);
        mlx5_stop_eqs(dev);
        mlx5_free_uuars(dev, &priv->uuari);
        mlx5_eq_cleanup(dev);
        mlx5_disable_msix(dev);
        mlx5_stop_health_poll(dev);
        if (mlx5_cmd_teardown_hca(dev)) {
                dev_err(&dev->pdev->dev, "tear_down_hca failed, skip cleanup\n");
                return;
        }
        mlx5_pagealloc_stop(dev);
        mlx5_reclaim_startup_pages(dev);
        mlx5_core_disable_hca(dev);
        mlx5_pagealloc_cleanup(dev);
        mlx5_cmd_cleanup(dev);
        iounmap(dev->iseg);
        pci_clear_master(dev->pdev);
        release_bar(dev->pdev);
        pci_disable_device(dev->pdev);
        debugfs_remove(priv->dbg_root);
}

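/*
 * Interface plumbing: core devices and interfaces (e.g. mlx5_ib) are kept
 * on global lists under intf_mutex, and every device/interface pairing gets
 * a mlx5_device_context created by the interface's add() callback.
 */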
static void mlx5_add_device(struct mlx5_interface *intf, struct mlx5_priv *priv)
{
        struct mlx5_device_context *dev_ctx;
        struct mlx5_core_dev *dev = container_of(priv, struct mlx5_core_dev, priv);

        dev_ctx = kmalloc(sizeof(*dev_ctx), GFP_KERNEL);
        if (!dev_ctx) {
                pr_warn("mlx5_add_device: alloc context failed\n");
                return;
        }

        dev_ctx->intf    = intf;
        dev_ctx->context = intf->add(dev);

        if (dev_ctx->context) {
                spin_lock_irq(&priv->ctx_lock);
                list_add_tail(&dev_ctx->list, &priv->ctx_list);
                spin_unlock_irq(&priv->ctx_lock);
        } else {
                kfree(dev_ctx);
        }
}

static void mlx5_remove_device(struct mlx5_interface *intf, struct mlx5_priv *priv)
{
        struct mlx5_device_context *dev_ctx;
        struct mlx5_core_dev *dev = container_of(priv, struct mlx5_core_dev, priv);

        list_for_each_entry(dev_ctx, &priv->ctx_list, list)
                if (dev_ctx->intf == intf) {
                        spin_lock_irq(&priv->ctx_lock);
                        list_del(&dev_ctx->list);
                        spin_unlock_irq(&priv->ctx_lock);

                        intf->remove(dev, dev_ctx->context);
                        kfree(dev_ctx);
                        return;
                }
}

static int mlx5_register_device(struct mlx5_core_dev *dev)
{
        struct mlx5_priv *priv = &dev->priv;
        struct mlx5_interface *intf;

        mutex_lock(&intf_mutex);
        list_add_tail(&priv->dev_list, &dev_list);
        list_for_each_entry(intf, &intf_list, list)
                mlx5_add_device(intf, priv);
        mutex_unlock(&intf_mutex);

        return 0;
}

static void mlx5_unregister_device(struct mlx5_core_dev *dev)
{
        struct mlx5_priv *priv = &dev->priv;
        struct mlx5_interface *intf;

        mutex_lock(&intf_mutex);
        list_for_each_entry(intf, &intf_list, list)
                mlx5_remove_device(intf, priv);
        list_del(&priv->dev_list);
        mutex_unlock(&intf_mutex);
}

int mlx5_register_interface(struct mlx5_interface *intf)
{
        struct mlx5_priv *priv;

        if (!intf->add || !intf->remove)
                return -EINVAL;

        mutex_lock(&intf_mutex);
        list_add_tail(&intf->list, &intf_list);
        list_for_each_entry(priv, &dev_list, dev_list)
                mlx5_add_device(intf, priv);
        mutex_unlock(&intf_mutex);

        return 0;
}
EXPORT_SYMBOL(mlx5_register_interface);

void mlx5_unregister_interface(struct mlx5_interface *intf)
{
        struct mlx5_priv *priv;

        mutex_lock(&intf_mutex);
        list_for_each_entry(priv, &dev_list, dev_list)
                mlx5_remove_device(intf, priv);
        list_del(&intf->list);
        mutex_unlock(&intf_mutex);
}
EXPORT_SYMBOL(mlx5_unregister_interface);

void *mlx5_get_protocol_dev(struct mlx5_core_dev *mdev, int protocol)
{
        struct mlx5_priv *priv = &mdev->priv;
        struct mlx5_device_context *dev_ctx;
        unsigned long flags;
        void *result = NULL;

        spin_lock_irqsave(&priv->ctx_lock, flags);

        list_for_each_entry(dev_ctx, &mdev->priv.ctx_list, list)
                if ((dev_ctx->intf->protocol == protocol) &&
                    dev_ctx->intf->get_dev) {
                        result = dev_ctx->intf->get_dev(dev_ctx->context);
                        break;
                }

        spin_unlock_irqrestore(&priv->ctx_lock, flags);

        return result;
}
EXPORT_SYMBOL(mlx5_get_protocol_dev);

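/* Fan out an asynchronous device event to every registered interface. */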
static void mlx5_core_event(struct mlx5_core_dev *dev, enum mlx5_dev_event event,
                            unsigned long param)
{
        struct mlx5_priv *priv = &dev->priv;
        struct mlx5_device_context *dev_ctx;
        unsigned long flags;

        spin_lock_irqsave(&priv->ctx_lock, flags);

        list_for_each_entry(dev_ctx, &priv->ctx_list, list)
                if (dev_ctx->intf->event)
                        dev_ctx->intf->event(dev, dev_ctx->context, event, param);

        spin_unlock_irqrestore(&priv->ctx_lock, flags);
}

struct mlx5_core_event_handler {
        void (*event)(struct mlx5_core_dev *dev,
                      enum mlx5_dev_event event,
                      void *data);
};

#define MLX5_IB_MOD "mlx5_ib"

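/*
 * PCI probe: allocate the core device, pick the requested profile,
 * initialize the HCA, register it with the interface framework, then ask
 * for mlx5_ib to be loaded.
 */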
static int init_one(struct pci_dev *pdev,
                    const struct pci_device_id *id)
{
        struct mlx5_core_dev *dev;
        struct mlx5_priv *priv;
        int err;

        dev = kzalloc(sizeof(*dev), GFP_KERNEL);
        if (!dev) {
                dev_err(&pdev->dev, "kzalloc failed\n");
                return -ENOMEM;
        }
        priv = &dev->priv;

        pci_set_drvdata(pdev, dev);

        if (prof_sel < 0 || prof_sel >= ARRAY_SIZE(profile)) {
                pr_warn("selected profile out of range, selecting default (%d)\n",
                        MLX5_DEFAULT_PROF);
                prof_sel = MLX5_DEFAULT_PROF;
        }
        dev->profile = &profile[prof_sel];
        dev->event = mlx5_core_event;

        INIT_LIST_HEAD(&priv->ctx_list);
        spin_lock_init(&priv->ctx_lock);
        err = mlx5_dev_init(dev, pdev);
        if (err) {
                dev_err(&pdev->dev, "mlx5_dev_init failed %d\n", err);
                goto out;
        }

        err = mlx5_register_device(dev);
        if (err) {
                dev_err(&pdev->dev, "mlx5_register_device failed %d\n", err);
                goto out_init;
        }

        err = request_module_nowait(MLX5_IB_MOD);
        if (err)
                pr_info("failed request module on %s\n", MLX5_IB_MOD);

        return 0;

out_init:
        mlx5_dev_cleanup(dev);
out:
        kfree(dev);
        return err;
}

static void remove_one(struct pci_dev *pdev)
{
        struct mlx5_core_dev *dev = pci_get_drvdata(pdev);

        mlx5_unregister_device(dev);
        mlx5_dev_cleanup(dev);
        kfree(dev);
}

static const struct pci_device_id mlx5_core_pci_table[] = {
        { PCI_VDEVICE(MELLANOX, 0x1011) }, /* Connect-IB */
        { PCI_VDEVICE(MELLANOX, 0x1012) }, /* Connect-IB VF */
        { PCI_VDEVICE(MELLANOX, 0x1013) }, /* ConnectX-4 */
        { PCI_VDEVICE(MELLANOX, 0x1014) }, /* ConnectX-4 VF */
        { PCI_VDEVICE(MELLANOX, 0x1015) }, /* ConnectX-4LX */
        { PCI_VDEVICE(MELLANOX, 0x1016) }, /* ConnectX-4LX VF */
        { 0, }
};

MODULE_DEVICE_TABLE(pci, mlx5_core_pci_table);

static struct pci_driver mlx5_core_driver = {
        .name           = DRIVER_NAME,
        .id_table       = mlx5_core_pci_table,
        .probe          = init_one,
        .remove         = remove_one
};

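/*
 * Module init: register debugfs, create the core workqueue, set up health
 * polling and register the PCI driver (plus the ethernet part when
 * CONFIG_MLX5_CORE_EN is enabled).
 */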
static int __init init(void)
{
        int err;

        mlx5_register_debugfs();
        mlx5_core_wq = create_singlethread_workqueue("mlx5_core_wq");
        if (!mlx5_core_wq) {
                err = -ENOMEM;
                goto err_debug;
        }
        mlx5_health_init();

        err = pci_register_driver(&mlx5_core_driver);
        if (err)
                goto err_health;

#ifdef CONFIG_MLX5_CORE_EN
        mlx5e_init();
#endif

        return 0;

err_health:
        mlx5_health_cleanup();
        destroy_workqueue(mlx5_core_wq);
err_debug:
        mlx5_unregister_debugfs();
        return err;
}

static void __exit cleanup(void)
{
#ifdef CONFIG_MLX5_CORE_EN
        mlx5e_cleanup();
#endif
        pci_unregister_driver(&mlx5_core_driver);
        mlx5_health_cleanup();
        destroy_workqueue(mlx5_core_wq);
        mlx5_unregister_debugfs();
}

module_init(init);
module_exit(cleanup);