/*
 * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/io-mapping.h>
#include <linux/delay.h>
#include <linux/kmod.h>

#include <linux/mlx4/device.h>
#include <linux/mlx4/doorbell.h>

#include "mlx4.h"
#include "fw.h"
#include "icm.h"
MODULE_AUTHOR("Roland Dreier");
MODULE_DESCRIPTION("Mellanox ConnectX HCA low-level driver");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRV_VERSION);

struct workqueue_struct *mlx4_wq;

#ifdef CONFIG_MLX4_DEBUG

int mlx4_debug_level = 0;
module_param_named(debug_level, mlx4_debug_level, int, 0644);
MODULE_PARM_DESC(debug_level, "Enable debug tracing if > 0");

#endif /* CONFIG_MLX4_DEBUG */
#ifdef CONFIG_PCI_MSI

static int msi_x = 1;
module_param(msi_x, int, 0444);
MODULE_PARM_DESC(msi_x, "attempt to use MSI-X if nonzero");

#else /* CONFIG_PCI_MSI */

#define msi_x (0)

#endif /* CONFIG_PCI_MSI */
static uint8_t num_vfs[3] = {0, 0, 0};
static int num_vfs_argc;
module_param_array(num_vfs, byte, &num_vfs_argc, 0444);
MODULE_PARM_DESC(num_vfs, "enable #num_vfs functions if num_vfs > 0\n"
			  "num_vfs=port1,port2,port1+2");

static uint8_t probe_vf[3] = {0, 0, 0};
static int probe_vfs_argc;
module_param_array(probe_vf, byte, &probe_vfs_argc, 0444);
MODULE_PARM_DESC(probe_vf, "number of vfs to probe by pf driver (num_vfs > 0)\n"
			   "probe_vf=port1,port2,port1+2");
int mlx4_log_num_mgm_entry_size = MLX4_DEFAULT_MGM_LOG_ENTRY_SIZE;
module_param_named(log_num_mgm_entry_size,
		   mlx4_log_num_mgm_entry_size, int, 0444);
MODULE_PARM_DESC(log_num_mgm_entry_size, "log mgm size, which defines the number"
					 " of QPs per MCG, for example:"
					 " 10 gives 248. Range: 7 <="
					 " log_num_mgm_entry_size <= 12."
					 " To activate device managed"
					 " flow steering when available, set to -1");
static bool enable_64b_cqe_eqe = true;
module_param(enable_64b_cqe_eqe, bool, 0444);
MODULE_PARM_DESC(enable_64b_cqe_eqe,
		 "Enable 64 byte CQEs/EQEs when the FW supports this (default: True)");

#define PF_CONTEXT_BEHAVIOUR_MASK	(MLX4_FUNC_CAP_64B_EQE_CQE | \
					 MLX4_FUNC_CAP_EQE_CQE_STRIDE)

static char mlx4_version[] =
	DRV_NAME ": Mellanox ConnectX core driver v"
	DRV_VERSION " (" DRV_RELDATE ")\n";
static struct mlx4_profile default_profile = {
	.num_qp		= 1 << 18,
	.num_srq	= 1 << 16,
	.rdmarc_per_qp	= 1 << 4,
	.num_cq		= 1 << 16,
	.num_mcg	= 1 << 13,
	.num_mpt	= 1 << 19,
	.num_mtt	= 1 << 20, /* It is really num mtt segments */
};

static struct mlx4_profile low_mem_profile = {
	.num_qp		= 1 << 17,
	.num_srq	= 1 << 6,
	.rdmarc_per_qp	= 1 << 4,
	.num_cq		= 1 << 8,
	.num_mcg	= 1 << 8,
	.num_mpt	= 1 << 9,
	.num_mtt	= 1 << 7,
};

static int log_num_mac = 7;
module_param_named(log_num_mac, log_num_mac, int, 0444);
MODULE_PARM_DESC(log_num_mac, "Log2 max number of MACs per ETH port (1-7)");

static int log_num_vlan;
module_param_named(log_num_vlan, log_num_vlan, int, 0444);
MODULE_PARM_DESC(log_num_vlan, "Log2 max number of VLANs per ETH port (0-7)");
/* Log2 max number of VLANs per ETH port (0-7) */
#define MLX4_LOG_NUM_VLANS 7
#define MLX4_MIN_LOG_NUM_VLANS 0
#define MLX4_MIN_LOG_NUM_MAC 1

static bool use_prio;
module_param_named(use_prio, use_prio, bool, 0444);
MODULE_PARM_DESC(use_prio, "Enable steering by VLAN priority on ETH ports (deprecated)");

int log_mtts_per_seg = ilog2(MLX4_MTT_ENTRY_PER_SEG);
module_param_named(log_mtts_per_seg, log_mtts_per_seg, int, 0444);
MODULE_PARM_DESC(log_mtts_per_seg, "Log2 number of MTT entries per segment (1-7)");
static int port_type_array[2] = {MLX4_PORT_TYPE_NONE, MLX4_PORT_TYPE_NONE};
static int arr_argc = 2;
module_param_array(port_type_array, int, &arr_argc, 0444);
MODULE_PARM_DESC(port_type_array, "Array of port types: HW_DEFAULT (0) is default "
				  "1 for IB, 2 for Ethernet");
struct mlx4_port_config {
	struct list_head list;
	enum mlx4_port_type port_type[MLX4_MAX_PORTS + 1];
	struct pci_dev *pdev;
};

static atomic_t pf_loading = ATOMIC_INIT(0);

int mlx4_check_port_params(struct mlx4_dev *dev,
			   enum mlx4_port_type *port_type)
{
	int i;

	for (i = 0; i < dev->caps.num_ports - 1; i++) {
		if (port_type[i] != port_type[i + 1]) {
			if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_DPDP)) {
				mlx4_err(dev, "Only same port types supported on this HCA, aborting\n");
				return -EINVAL;
			}
		}
	}

	for (i = 0; i < dev->caps.num_ports; i++) {
		if (!(port_type[i] & dev->caps.supported_type[i+1])) {
			mlx4_err(dev, "Requested port type for port %d is not supported on this HCA\n",
				 i + 1);
			return -EINVAL;
		}
	}
	return 0;
}

static void mlx4_set_port_mask(struct mlx4_dev *dev)
{
	int i;

	for (i = 1; i <= dev->caps.num_ports; ++i)
		dev->caps.port_mask[i] = dev->caps.port_type[i];
}

enum {
	MLX4_QUERY_FUNC_NUM_SYS_EQS = 1 << 0,
};

static int mlx4_query_func(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
{
	int err = 0;
	struct mlx4_func func;

	if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_SYS_EQS) {
		err = mlx4_QUERY_FUNC(dev, &func, 0);
		if (err) {
			mlx4_err(dev, "QUERY_FUNC command failed, aborting.\n");
			return err;
		}
		dev_cap->max_eqs = func.max_eq;
		dev_cap->reserved_eqs = func.rsvd_eqs;
		dev_cap->reserved_uars = func.rsvd_uars;
		err |= MLX4_QUERY_FUNC_NUM_SYS_EQS;
	}

	return err;
}
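/* CQE/EQE stride is relevant on systems whose cacheline is larger than 64B
 * (e.g. 128B or 256B): the 64B CQE/EQE is padded out to a full cacheline
 * stride while the valid payload is reduced to 32B, as handled below. */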
static void mlx4_enable_cqe_eqe_stride(struct mlx4_dev *dev)
{
	struct mlx4_caps *dev_cap = &dev->caps;

	/* FW not supporting or cancelled by user */
	if (!(dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_EQE_STRIDE) ||
	    !(dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_CQE_STRIDE))
		return;

	/* Must have 64B CQE_EQE enabled by FW to use bigger stride
	 * When FW has NCSI it may decide not to report 64B CQE/EQEs
	 */
	if (!(dev_cap->flags & MLX4_DEV_CAP_FLAG_64B_EQE) ||
	    !(dev_cap->flags & MLX4_DEV_CAP_FLAG_64B_CQE)) {
		dev_cap->flags2 &= ~MLX4_DEV_CAP_FLAG2_CQE_STRIDE;
		dev_cap->flags2 &= ~MLX4_DEV_CAP_FLAG2_EQE_STRIDE;
		return;
	}

	if (cache_line_size() == 128 || cache_line_size() == 256) {
		mlx4_dbg(dev, "Enabling CQE stride, cacheline size supported\n");
		/* Changing the real data inside CQE size to 32B */
		dev_cap->flags &= ~MLX4_DEV_CAP_FLAG_64B_CQE;
		dev_cap->flags &= ~MLX4_DEV_CAP_FLAG_64B_EQE;

		if (mlx4_is_master(dev))
			dev_cap->function_caps |= MLX4_FUNC_CAP_EQE_CQE_STRIDE;
	} else {
		mlx4_dbg(dev, "Disabling CQE stride, cacheline size unsupported\n");
		dev_cap->flags2 &= ~MLX4_DEV_CAP_FLAG2_CQE_STRIDE;
		dev_cap->flags2 &= ~MLX4_DEV_CAP_FLAG2_EQE_STRIDE;
	}
}
static int _mlx4_dev_port(struct mlx4_dev *dev, int port,
			  struct mlx4_port_cap *port_cap)
{
	dev->caps.vl_cap[port] = port_cap->max_vl;
	dev->caps.ib_mtu_cap[port] = port_cap->ib_mtu;
	dev->phys_caps.gid_phys_table_len[port] = port_cap->max_gids;
	dev->phys_caps.pkey_phys_table_len[port] = port_cap->max_pkeys;
	/* set gid and pkey table operating lengths by default
	 * to non-sriov values
	 */
	dev->caps.gid_table_len[port] = port_cap->max_gids;
	dev->caps.pkey_table_len[port] = port_cap->max_pkeys;
	dev->caps.port_width_cap[port] = port_cap->max_port_width;
	dev->caps.eth_mtu_cap[port] = port_cap->eth_mtu;
	dev->caps.def_mac[port] = port_cap->def_mac;
	dev->caps.supported_type[port] = port_cap->supported_port_types;
	dev->caps.suggested_type[port] = port_cap->suggested_type;
	dev->caps.default_sense[port] = port_cap->default_sense;
	dev->caps.trans_type[port] = port_cap->trans_type;
	dev->caps.vendor_oui[port] = port_cap->vendor_oui;
	dev->caps.wavelength[port] = port_cap->wavelength;
	dev->caps.trans_code[port] = port_cap->trans_code;

	return 0;
}

static int mlx4_dev_port(struct mlx4_dev *dev, int port,
			 struct mlx4_port_cap *port_cap)
{
	int err = 0;

	err = mlx4_QUERY_PORT(dev, port, port_cap);

	if (err)
		mlx4_err(dev, "QUERY_PORT command failed.\n");

	return err;
}
#define MLX4_A0_STEERING_TABLE_SIZE	256
static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
{
	int err;
	int i;

	err = mlx4_QUERY_DEV_CAP(dev, dev_cap);
	if (err) {
		mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting\n");
		return err;
	}

	if (dev_cap->min_page_sz > PAGE_SIZE) {
		mlx4_err(dev, "HCA minimum page size of %d bigger than kernel PAGE_SIZE of %ld, aborting\n",
			 dev_cap->min_page_sz, PAGE_SIZE);
		return -ENODEV;
	}
	if (dev_cap->num_ports > MLX4_MAX_PORTS) {
		mlx4_err(dev, "HCA has %d ports, but we only support %d, aborting\n",
			 dev_cap->num_ports, MLX4_MAX_PORTS);
		return -ENODEV;
	}

	if (dev_cap->uar_size > pci_resource_len(dev->pdev, 2)) {
		mlx4_err(dev, "HCA reported UAR size of 0x%x bigger than PCI resource 2 size of 0x%llx, aborting\n",
			 dev_cap->uar_size,
			 (unsigned long long) pci_resource_len(dev->pdev, 2));
		return -ENODEV;
	}

	dev->caps.num_ports = dev_cap->num_ports;
	dev->caps.num_sys_eqs = dev_cap->num_sys_eqs;
	dev->phys_caps.num_phys_eqs = dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_SYS_EQS ?
				      dev->caps.num_sys_eqs :
				      MLX4_MAX_EQ_NUM;
	for (i = 1; i <= dev->caps.num_ports; ++i) {
		err = _mlx4_dev_port(dev, i, dev_cap->port_cap + i);
		if (err) {
			mlx4_err(dev, "QUERY_PORT command failed, aborting\n");
			return err;
		}
	}
	dev->caps.uar_page_size = PAGE_SIZE;
	dev->caps.num_uars = dev_cap->uar_size / PAGE_SIZE;
	dev->caps.local_ca_ack_delay = dev_cap->local_ca_ack_delay;
	dev->caps.bf_reg_size = dev_cap->bf_reg_size;
	dev->caps.bf_regs_per_page = dev_cap->bf_regs_per_page;
	dev->caps.max_sq_sg = dev_cap->max_sq_sg;
	dev->caps.max_rq_sg = dev_cap->max_rq_sg;
	dev->caps.max_wqes = dev_cap->max_qp_sz;
	dev->caps.max_qp_init_rdma = dev_cap->max_requester_per_qp;
	dev->caps.max_srq_wqes = dev_cap->max_srq_sz;
	dev->caps.max_srq_sge = dev_cap->max_rq_sg - 1;
	dev->caps.reserved_srqs = dev_cap->reserved_srqs;
	dev->caps.max_sq_desc_sz = dev_cap->max_sq_desc_sz;
	dev->caps.max_rq_desc_sz = dev_cap->max_rq_desc_sz;
	/*
	 * Subtract 1 from the limit because we need to allocate a
	 * spare CQE so the HCA HW can tell the difference between an
	 * empty CQ and a full CQ.
	 */
	dev->caps.max_cqes = dev_cap->max_cq_sz - 1;
	dev->caps.reserved_cqs = dev_cap->reserved_cqs;
	dev->caps.reserved_eqs = dev_cap->reserved_eqs;
	dev->caps.reserved_mtts = dev_cap->reserved_mtts;
	dev->caps.reserved_mrws = dev_cap->reserved_mrws;

	/* The first 128 UARs are used for EQ doorbells */
	dev->caps.reserved_uars = max_t(int, 128, dev_cap->reserved_uars);
	dev->caps.reserved_pds = dev_cap->reserved_pds;
	dev->caps.reserved_xrcds = (dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC) ?
				   dev_cap->reserved_xrcds : 0;
	dev->caps.max_xrcds = (dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC) ?
			      dev_cap->max_xrcds : 0;
	dev->caps.mtt_entry_sz = dev_cap->mtt_entry_sz;

	dev->caps.max_msg_sz = dev_cap->max_msg_sz;
	dev->caps.page_size_cap = ~(u32) (dev_cap->min_page_sz - 1);
	dev->caps.flags = dev_cap->flags;
	dev->caps.flags2 = dev_cap->flags2;
	dev->caps.bmme_flags = dev_cap->bmme_flags;
	dev->caps.reserved_lkey = dev_cap->reserved_lkey;
	dev->caps.stat_rate_support = dev_cap->stat_rate_support;
	dev->caps.max_gso_sz = dev_cap->max_gso_sz;
	dev->caps.max_rss_tbl_sz = dev_cap->max_rss_tbl_sz;
	/* Sense port always allowed on supported devices for ConnectX-1 and -2 */
	if (mlx4_priv(dev)->pci_dev_data & MLX4_PCI_DEV_FORCE_SENSE_PORT)
		dev->caps.flags |= MLX4_DEV_CAP_FLAG_SENSE_SUPPORT;
	/* Don't do sense port on multifunction devices (for now at least) */
	if (mlx4_is_mfunc(dev))
		dev->caps.flags &= ~MLX4_DEV_CAP_FLAG_SENSE_SUPPORT;

	if (mlx4_low_memory_profile()) {
		dev->caps.log_num_macs = MLX4_MIN_LOG_NUM_MAC;
		dev->caps.log_num_vlans = MLX4_MIN_LOG_NUM_VLANS;
	} else {
		dev->caps.log_num_macs = log_num_mac;
		dev->caps.log_num_vlans = MLX4_LOG_NUM_VLANS;
	}

	for (i = 1; i <= dev->caps.num_ports; ++i) {
		dev->caps.port_type[i] = MLX4_PORT_TYPE_NONE;
		if (dev->caps.supported_type[i]) {
			/* if only ETH is supported - assign ETH */
			if (dev->caps.supported_type[i] == MLX4_PORT_TYPE_ETH)
				dev->caps.port_type[i] = MLX4_PORT_TYPE_ETH;
			/* if only IB is supported, assign IB */
			else if (dev->caps.supported_type[i] ==
				 MLX4_PORT_TYPE_IB)
				dev->caps.port_type[i] = MLX4_PORT_TYPE_IB;
			else {
				/* if IB and ETH are supported, we set the port
				 * type according to user selection of port type;
				 * if user selected none, take the FW hint */
				if (port_type_array[i - 1] == MLX4_PORT_TYPE_NONE)
					dev->caps.port_type[i] = dev->caps.suggested_type[i] ?
						MLX4_PORT_TYPE_ETH : MLX4_PORT_TYPE_IB;
				else
					dev->caps.port_type[i] = port_type_array[i - 1];
			}
		}
		/*
		 * Link sensing is allowed on the port if 3 conditions are true:
		 * 1. Both protocols are supported on the port.
		 * 2. Different types are supported on the port
		 * 3. FW declared that it supports link sensing
		 */
		mlx4_priv(dev)->sense.sense_allowed[i] =
			((dev->caps.supported_type[i] == MLX4_PORT_TYPE_AUTO) &&
			 (dev->caps.flags & MLX4_DEV_CAP_FLAG_DPDP) &&
			 (dev->caps.flags & MLX4_DEV_CAP_FLAG_SENSE_SUPPORT));

		/*
		 * If "default_sense" bit is set, we move the port to "AUTO" mode
		 * and perform sense_port FW command to try and set the correct
		 * port type from beginning
		 */
		if (mlx4_priv(dev)->sense.sense_allowed[i] && dev->caps.default_sense[i]) {
			enum mlx4_port_type sensed_port = MLX4_PORT_TYPE_NONE;
			dev->caps.possible_type[i] = MLX4_PORT_TYPE_AUTO;
			mlx4_SENSE_PORT(dev, i, &sensed_port);
			if (sensed_port != MLX4_PORT_TYPE_NONE)
				dev->caps.port_type[i] = sensed_port;
		} else {
			dev->caps.possible_type[i] = dev->caps.port_type[i];
		}

		if (dev->caps.log_num_macs > dev_cap->port_cap[i].log_max_macs) {
			dev->caps.log_num_macs = dev_cap->port_cap[i].log_max_macs;
			mlx4_warn(dev, "Requested number of MACs is too much for port %d, reducing to %d\n",
				  i, 1 << dev->caps.log_num_macs);
		}
		if (dev->caps.log_num_vlans > dev_cap->port_cap[i].log_max_vlans) {
			dev->caps.log_num_vlans = dev_cap->port_cap[i].log_max_vlans;
			mlx4_warn(dev, "Requested number of VLANs is too much for port %d, reducing to %d\n",
				  i, 1 << dev->caps.log_num_vlans);
		}
	}
	dev->caps.max_counters = 1 << ilog2(dev_cap->max_counters);

	dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW] = dev_cap->reserved_qps;
	dev->caps.reserved_qps_cnt[MLX4_QP_REGION_ETH_ADDR] =
		dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FC_ADDR] =
		(1 << dev->caps.log_num_macs) *
		(1 << dev->caps.log_num_vlans) *
		dev->caps.num_ports;
	dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FC_EXCH] = MLX4_NUM_FEXCH;
	dev->caps.reserved_qps_cnt[MLX4_QP_REGION_RSS_RAW_ETH] =
		MLX4_A0_STEERING_TABLE_SIZE;

	dev->caps.reserved_qps = dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW] +
				 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_ETH_ADDR] +
				 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FC_ADDR] +
				 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FC_EXCH];

	dev->caps.sqp_demux = (mlx4_is_master(dev)) ? MLX4_MAX_NUM_SLAVES : 0;

	if (!enable_64b_cqe_eqe && !mlx4_is_slave(dev)) {
		if (dev_cap->flags &
		    (MLX4_DEV_CAP_FLAG_64B_CQE | MLX4_DEV_CAP_FLAG_64B_EQE)) {
			mlx4_warn(dev, "64B EQEs/CQEs supported by the device but not enabled\n");
			dev->caps.flags &= ~MLX4_DEV_CAP_FLAG_64B_CQE;
			dev->caps.flags &= ~MLX4_DEV_CAP_FLAG_64B_EQE;
		}

		if (dev_cap->flags2 &
		    (MLX4_DEV_CAP_FLAG2_CQE_STRIDE |
		     MLX4_DEV_CAP_FLAG2_EQE_STRIDE)) {
			mlx4_warn(dev, "Disabling EQE/CQE stride per user request\n");
			dev_cap->flags2 &= ~MLX4_DEV_CAP_FLAG2_CQE_STRIDE;
			dev_cap->flags2 &= ~MLX4_DEV_CAP_FLAG2_EQE_STRIDE;
		}
	}

	if ((dev->caps.flags &
	    (MLX4_DEV_CAP_FLAG_64B_CQE | MLX4_DEV_CAP_FLAG_64B_EQE)) &&
	    mlx4_is_master(dev))
		dev->caps.function_caps |= MLX4_FUNC_CAP_64B_EQE_CQE;

	if (!mlx4_is_slave(dev)) {
		mlx4_enable_cqe_eqe_stride(dev);
		dev->caps.alloc_res_qp_mask =
			(dev->caps.bf_reg_size ? MLX4_RESERVE_ETH_BF_QP : 0) |
			MLX4_RESERVE_A0_QP;
	} else {
		dev->caps.alloc_res_qp_mask = 0;
	}

	return 0;
}
static int mlx4_get_pcie_dev_link_caps(struct mlx4_dev *dev,
				       enum pci_bus_speed *speed,
				       enum pcie_link_width *width)
{
	u32 lnkcap1, lnkcap2;
	int err1, err2;

#define  PCIE_MLW_CAP_SHIFT 4	/* start of MLW mask in link capabilities */

	*speed = PCI_SPEED_UNKNOWN;
	*width = PCIE_LNK_WIDTH_UNKNOWN;

	err1 = pcie_capability_read_dword(dev->pdev, PCI_EXP_LNKCAP, &lnkcap1);
	err2 = pcie_capability_read_dword(dev->pdev, PCI_EXP_LNKCAP2, &lnkcap2);
	if (!err2 && lnkcap2) { /* PCIe r3.0-compliant */
		if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_8_0GB)
			*speed = PCIE_SPEED_8_0GT;
		else if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_5_0GB)
			*speed = PCIE_SPEED_5_0GT;
		else if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_2_5GB)
			*speed = PCIE_SPEED_2_5GT;
	}

	if (!err1) {
		*width = (lnkcap1 & PCI_EXP_LNKCAP_MLW) >> PCIE_MLW_CAP_SHIFT;
		if (!lnkcap2) { /* pre-r3.0 */
			if (lnkcap1 & PCI_EXP_LNKCAP_SLS_5_0GB)
				*speed = PCIE_SPEED_5_0GT;
			else if (lnkcap1 & PCI_EXP_LNKCAP_SLS_2_5GB)
				*speed = PCIE_SPEED_2_5GT;
		}
	}

	if (*speed == PCI_SPEED_UNKNOWN || *width == PCIE_LNK_WIDTH_UNKNOWN) {
		return err1 ? err1 :
			err2 ? err2 : -EINVAL;
	}

	return 0;
}
static void mlx4_check_pcie_caps(struct mlx4_dev *dev)
{
	enum pcie_link_width width, width_cap;
	enum pci_bus_speed speed, speed_cap;
	int err;

#define PCIE_SPEED_STR(speed) \
	(speed == PCIE_SPEED_8_0GT ? "8.0GT/s" : \
	 speed == PCIE_SPEED_5_0GT ? "5.0GT/s" : \
	 speed == PCIE_SPEED_2_5GT ? "2.5GT/s" : \
	 "Unknown")

	err = mlx4_get_pcie_dev_link_caps(dev, &speed_cap, &width_cap);
	if (err) {
		mlx4_warn(dev,
			  "Unable to determine PCIe device BW capabilities\n");
		return;
	}

	err = pcie_get_minimum_link(dev->pdev, &speed, &width);
	if (err || speed == PCI_SPEED_UNKNOWN ||
	    width == PCIE_LNK_WIDTH_UNKNOWN) {
		mlx4_warn(dev,
			  "Unable to determine PCI device chain minimum BW\n");
		return;
	}

	if (width != width_cap || speed != speed_cap)
		mlx4_warn(dev,
			  "PCIe BW is different than device's capability\n");

	mlx4_info(dev, "PCIe link speed is %s, device supports %s\n",
		  PCIE_SPEED_STR(speed), PCIE_SPEED_STR(speed_cap));
	mlx4_info(dev, "PCIe link width is x%d, device supports x%d\n",
		  width, width_cap);
}
/* The function checks if there are live VFs; returns the number of them */
static int mlx4_how_many_lives_vf(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_slave_state *s_state;
	int i;
	int ret = 0;

	for (i = 1/*the ppf is 0*/; i < dev->num_slaves; ++i) {
		s_state = &priv->mfunc.master.slave_state[i];
		if (s_state->active && s_state->last_cmd !=
		    MLX4_COMM_CMD_RESET) {
			mlx4_warn(dev, "%s: slave: %d is still active\n",
				  __func__, i);
			ret++;
		}
	}
	return ret;
}
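/* Paravirtualized special QPs (proxy and tunnel QPs) each get a well-known
 * qkey derived from MLX4_RESERVED_QKEY_BASE plus the QP's offset within the
 * reserved proxy/tunnel QPN range, as computed below. */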
int mlx4_get_parav_qkey(struct mlx4_dev *dev, u32 qpn, u32 *qkey)
{
	u32 qk = MLX4_RESERVED_QKEY_BASE;

	if (qpn >= dev->phys_caps.base_tunnel_sqpn + 8 * MLX4_MFUNC_MAX ||
	    qpn < dev->phys_caps.base_proxy_sqpn)
		return -EINVAL;

	if (qpn >= dev->phys_caps.base_tunnel_sqpn)
		/* tunnel qp */
		qk += qpn - dev->phys_caps.base_tunnel_sqpn;
	else
		qk += qpn - dev->phys_caps.base_proxy_sqpn;
	*qkey = qk;
	return 0;
}
EXPORT_SYMBOL(mlx4_get_parav_qkey);
void mlx4_sync_pkey_table(struct mlx4_dev *dev, int slave, int port, int i, int val)
{
	struct mlx4_priv *priv = container_of(dev, struct mlx4_priv, dev);

	if (!mlx4_is_master(dev))
		return;

	priv->virt2phys_pkey[slave][port - 1][i] = val;
}
EXPORT_SYMBOL(mlx4_sync_pkey_table);

void mlx4_put_slave_node_guid(struct mlx4_dev *dev, int slave, __be64 guid)
{
	struct mlx4_priv *priv = container_of(dev, struct mlx4_priv, dev);

	if (!mlx4_is_master(dev))
		return;

	priv->slave_node_guids[slave] = guid;
}
EXPORT_SYMBOL(mlx4_put_slave_node_guid);

__be64 mlx4_get_slave_node_guid(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = container_of(dev, struct mlx4_priv, dev);

	if (!mlx4_is_master(dev))
		return 0;

	return priv->slave_node_guids[slave];
}
EXPORT_SYMBOL(mlx4_get_slave_node_guid);

int mlx4_is_slave_active(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_slave_state *s_slave;

	if (!mlx4_is_master(dev))
		return 0;

	s_slave = &priv->mfunc.master.slave_state[slave];
	return !!s_slave->active;
}
EXPORT_SYMBOL(mlx4_is_slave_active);
static void slave_adjust_steering_mode(struct mlx4_dev *dev,
				       struct mlx4_dev_cap *dev_cap,
				       struct mlx4_init_hca_param *hca_param)
{
	dev->caps.steering_mode = hca_param->steering_mode;
	if (dev->caps.steering_mode == MLX4_STEERING_MODE_DEVICE_MANAGED) {
		dev->caps.num_qp_per_mgm = dev_cap->fs_max_num_qp_per_entry;
		dev->caps.fs_log_max_ucast_qp_range_size =
			dev_cap->fs_log_max_ucast_qp_range_size;
	} else
		dev->caps.num_qp_per_mgm =
			4 * ((1 << hca_param->log_mc_entry_sz)/16 - 2);

	mlx4_dbg(dev, "Steering mode is: %s\n",
		 mlx4_steering_mode_str(dev->caps.steering_mode));
}
static int mlx4_slave_cap(struct mlx4_dev *dev)
{
	int err;
	u32 page_size;
	struct mlx4_dev_cap dev_cap;
	struct mlx4_func_cap func_cap;
	struct mlx4_init_hca_param hca_param;
	u8 i;

	memset(&hca_param, 0, sizeof(hca_param));
	err = mlx4_QUERY_HCA(dev, &hca_param);
	if (err) {
		mlx4_err(dev, "QUERY_HCA command failed, aborting\n");
		return err;
	}

	/* fail if the hca has an unknown global capability
	 * at this time global_caps should be always zeroed
	 */
	if (hca_param.global_caps) {
		mlx4_err(dev, "Unknown hca global capabilities\n");
		return -ENOSYS;
	}

	mlx4_log_num_mgm_entry_size = hca_param.log_mc_entry_sz;

	dev->caps.hca_core_clock = hca_param.hca_core_clock;

	memset(&dev_cap, 0, sizeof(dev_cap));
	dev->caps.max_qp_dest_rdma = 1 << hca_param.log_rd_per_qp;
	err = mlx4_dev_cap(dev, &dev_cap);
	if (err) {
		mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting\n");
		return err;
	}

	err = mlx4_QUERY_FW(dev);
	if (err)
		mlx4_err(dev, "QUERY_FW command failed: could not get FW version\n");

	page_size = ~dev->caps.page_size_cap + 1;
	mlx4_warn(dev, "HCA minimum page size: %d\n", page_size);
	if (page_size > PAGE_SIZE) {
		mlx4_err(dev, "HCA minimum page size of %d bigger than kernel PAGE_SIZE of %ld, aborting\n",
			 page_size, PAGE_SIZE);
		return -ENODEV;
	}

	/* slave gets uar page size from QUERY_HCA fw command */
	dev->caps.uar_page_size = 1 << (hca_param.uar_page_sz + 12);

	/* TODO: relax this assumption */
	if (dev->caps.uar_page_size != PAGE_SIZE) {
		mlx4_err(dev, "UAR size:%d != kernel PAGE_SIZE of %ld\n",
			 dev->caps.uar_page_size, PAGE_SIZE);
		return -ENODEV;
	}

	memset(&func_cap, 0, sizeof(func_cap));
	err = mlx4_QUERY_FUNC_CAP(dev, 0, &func_cap);
	if (err) {
		mlx4_err(dev, "QUERY_FUNC_CAP general command failed, aborting (%d)\n",
			 err);
		return err;
	}
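	/* (x | mask) != mask is true iff x has bits set outside of mask,
	 * i.e. the PF reported a context behaviour this driver doesn't know. */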
	if ((func_cap.pf_context_behaviour | PF_CONTEXT_BEHAVIOUR_MASK) !=
	    PF_CONTEXT_BEHAVIOUR_MASK) {
		mlx4_err(dev, "Unknown pf context behaviour\n");
		return -ENOSYS;
	}

	dev->caps.num_ports = func_cap.num_ports;
	dev->quotas.qp = func_cap.qp_quota;
	dev->quotas.srq = func_cap.srq_quota;
	dev->quotas.cq = func_cap.cq_quota;
	dev->quotas.mpt = func_cap.mpt_quota;
	dev->quotas.mtt = func_cap.mtt_quota;
	dev->caps.num_qps = 1 << hca_param.log_num_qps;
	dev->caps.num_srqs = 1 << hca_param.log_num_srqs;
	dev->caps.num_cqs = 1 << hca_param.log_num_cqs;
	dev->caps.num_mpts = 1 << hca_param.log_mpt_sz;
	dev->caps.num_eqs = func_cap.max_eq;
	dev->caps.reserved_eqs = func_cap.reserved_eq;
	dev->caps.num_pds = MLX4_NUM_PDS;
	dev->caps.num_mgms = 0;
	dev->caps.num_amgms = 0;

	if (dev->caps.num_ports > MLX4_MAX_PORTS) {
		mlx4_err(dev, "HCA has %d ports, but we only support %d, aborting\n",
			 dev->caps.num_ports, MLX4_MAX_PORTS);
		return -ENODEV;
	}

	dev->caps.qp0_qkey = kcalloc(dev->caps.num_ports, sizeof(u32), GFP_KERNEL);
	dev->caps.qp0_tunnel = kcalloc(dev->caps.num_ports, sizeof(u32), GFP_KERNEL);
	dev->caps.qp0_proxy = kcalloc(dev->caps.num_ports, sizeof(u32), GFP_KERNEL);
	dev->caps.qp1_tunnel = kcalloc(dev->caps.num_ports, sizeof(u32), GFP_KERNEL);
	dev->caps.qp1_proxy = kcalloc(dev->caps.num_ports, sizeof(u32), GFP_KERNEL);

	if (!dev->caps.qp0_tunnel || !dev->caps.qp0_proxy ||
	    !dev->caps.qp1_tunnel || !dev->caps.qp1_proxy ||
	    !dev->caps.qp0_qkey) {
		err = -ENOMEM;
		goto err_mem;
	}

	for (i = 1; i <= dev->caps.num_ports; ++i) {
		err = mlx4_QUERY_FUNC_CAP(dev, i, &func_cap);
		if (err) {
			mlx4_err(dev, "QUERY_FUNC_CAP port command failed for port %d, aborting (%d)\n",
				 i, err);
			goto err_mem;
		}
		dev->caps.qp0_qkey[i - 1] = func_cap.qp0_qkey;
		dev->caps.qp0_tunnel[i - 1] = func_cap.qp0_tunnel_qpn;
		dev->caps.qp0_proxy[i - 1] = func_cap.qp0_proxy_qpn;
		dev->caps.qp1_tunnel[i - 1] = func_cap.qp1_tunnel_qpn;
		dev->caps.qp1_proxy[i - 1] = func_cap.qp1_proxy_qpn;
		dev->caps.port_mask[i] = dev->caps.port_type[i];
		dev->caps.phys_port_id[i] = func_cap.phys_port_id;
		if (mlx4_get_slave_pkey_gid_tbl_len(dev, i,
						    &dev->caps.gid_table_len[i],
						    &dev->caps.pkey_table_len[i])) {
			err = -ENODEV;
			goto err_mem;
		}
	}

	if (dev->caps.uar_page_size * (dev->caps.num_uars -
				       dev->caps.reserved_uars) >
	    pci_resource_len(dev->pdev, 2)) {
		mlx4_err(dev, "HCA reported UAR region size of 0x%x bigger than PCI resource 2 size of 0x%llx, aborting\n",
			 dev->caps.uar_page_size * dev->caps.num_uars,
			 (unsigned long long) pci_resource_len(dev->pdev, 2));
		err = -ENODEV;
		goto err_mem;
	}

	if (hca_param.dev_cap_enabled & MLX4_DEV_CAP_64B_EQE_ENABLED) {
		dev->caps.eqe_size = 64;
		dev->caps.eqe_factor = 1;
	} else {
		dev->caps.eqe_size = 32;
		dev->caps.eqe_factor = 0;
	}

	if (hca_param.dev_cap_enabled & MLX4_DEV_CAP_64B_CQE_ENABLED) {
		dev->caps.cqe_size = 64;
		dev->caps.userspace_caps |= MLX4_USER_DEV_CAP_LARGE_CQE;
	} else {
		dev->caps.cqe_size = 32;
	}

	if (hca_param.dev_cap_enabled & MLX4_DEV_CAP_EQE_STRIDE_ENABLED) {
		dev->caps.eqe_size = hca_param.eqe_size;
		dev->caps.eqe_factor = 0;
	}

	if (hca_param.dev_cap_enabled & MLX4_DEV_CAP_CQE_STRIDE_ENABLED) {
		dev->caps.cqe_size = hca_param.cqe_size;
		/* User still needs to know when CQE > 32B */
		dev->caps.userspace_caps |= MLX4_USER_DEV_CAP_LARGE_CQE;
	}

	dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_TS;
	mlx4_warn(dev, "Timestamping is not supported in slave mode\n");

	slave_adjust_steering_mode(dev, &dev_cap, &hca_param);

	if (func_cap.extra_flags & MLX4_QUERY_FUNC_FLAGS_BF_RES_QP &&
	    dev->caps.bf_reg_size)
		dev->caps.alloc_res_qp_mask |= MLX4_RESERVE_ETH_BF_QP;

	if (func_cap.extra_flags & MLX4_QUERY_FUNC_FLAGS_A0_RES_QP)
		dev->caps.alloc_res_qp_mask |= MLX4_RESERVE_A0_QP;

	return 0;
err_mem:
	kfree(dev->caps.qp0_qkey);
	kfree(dev->caps.qp0_tunnel);
	kfree(dev->caps.qp0_proxy);
	kfree(dev->caps.qp1_tunnel);
	kfree(dev->caps.qp1_proxy);
	dev->caps.qp0_qkey = NULL;
	dev->caps.qp0_tunnel = NULL;
	dev->caps.qp0_proxy = NULL;
	dev->caps.qp1_tunnel = NULL;
	dev->caps.qp1_proxy = NULL;

	return err;
}
static void mlx4_request_modules(struct mlx4_dev *dev)
{
	int port;
	int has_ib_port = false;
	int has_eth_port = false;
#define EN_DRV_NAME	"mlx4_en"
#define IB_DRV_NAME	"mlx4_ib"

	for (port = 1; port <= dev->caps.num_ports; port++) {
		if (dev->caps.port_type[port] == MLX4_PORT_TYPE_IB)
			has_ib_port = true;
		else if (dev->caps.port_type[port] == MLX4_PORT_TYPE_ETH)
			has_eth_port = true;
	}

	if (has_eth_port)
		request_module_nowait(EN_DRV_NAME);
	if (has_ib_port || (dev->caps.flags & MLX4_DEV_CAP_FLAG_IBOE))
		request_module_nowait(IB_DRV_NAME);
}

/*
 * Change the port configuration of the device.
 * Every user of this function must hold the port mutex.
 */
int mlx4_change_port_types(struct mlx4_dev *dev,
			   enum mlx4_port_type *port_types)
{
	int err = 0;
	int change = 0;
	int port;

	for (port = 0; port < dev->caps.num_ports; port++) {
		/* Change the port type only if the new type is different
		 * from the current, and not set to Auto */
		if (port_types[port] != dev->caps.port_type[port + 1])
			change = 1;
	}
	if (change) {
		mlx4_unregister_device(dev);
		for (port = 1; port <= dev->caps.num_ports; port++) {
			mlx4_CLOSE_PORT(dev, port);
			dev->caps.port_type[port] = port_types[port - 1];
			err = mlx4_SET_PORT(dev, port, -1);
			if (err) {
				mlx4_err(dev, "Failed to set port %d, aborting\n",
					 port);
				goto out;
			}
		}
		mlx4_set_port_mask(dev);
		err = mlx4_register_device(dev);
		if (err) {
			mlx4_err(dev, "Failed to register device\n");
			goto out;
		}
		mlx4_request_modules(dev);
	}

out:
	return err;
}
static ssize_t show_port_type(struct device *dev,
			      struct device_attribute *attr,
			      char *buf)
{
	struct mlx4_port_info *info = container_of(attr, struct mlx4_port_info,
						   port_attr);
	struct mlx4_dev *mdev = info->dev;
	char type[8];

	sprintf(type, "%s",
		(mdev->caps.port_type[info->port] == MLX4_PORT_TYPE_IB) ?
		"ib" : "eth");
	if (mdev->caps.possible_type[info->port] == MLX4_PORT_TYPE_AUTO)
		sprintf(buf, "auto (%s)\n", type);
	else
		sprintf(buf, "%s\n", type);

	return strlen(buf);
}

static ssize_t set_port_type(struct device *dev,
			     struct device_attribute *attr,
			     const char *buf, size_t count)
{
	struct mlx4_port_info *info = container_of(attr, struct mlx4_port_info,
						   port_attr);
	struct mlx4_dev *mdev = info->dev;
	struct mlx4_priv *priv = mlx4_priv(mdev);
	enum mlx4_port_type types[MLX4_MAX_PORTS];
	enum mlx4_port_type new_types[MLX4_MAX_PORTS];
	static DEFINE_MUTEX(set_port_type_mutex);
	int i;
	int err = 0;

	mutex_lock(&set_port_type_mutex);

	if (!strcmp(buf, "ib\n"))
		info->tmp_type = MLX4_PORT_TYPE_IB;
	else if (!strcmp(buf, "eth\n"))
		info->tmp_type = MLX4_PORT_TYPE_ETH;
	else if (!strcmp(buf, "auto\n"))
		info->tmp_type = MLX4_PORT_TYPE_AUTO;
	else {
		mlx4_err(mdev, "%s is not a supported port type\n", buf);
		err = -EINVAL;
		goto err_out;
	}

	mlx4_stop_sense(mdev);
	mutex_lock(&priv->port_mutex);
	/* Possible type is always the one that was delivered */
	mdev->caps.possible_type[info->port] = info->tmp_type;

	for (i = 0; i < mdev->caps.num_ports; i++) {
		types[i] = priv->port[i+1].tmp_type ? priv->port[i+1].tmp_type :
					mdev->caps.possible_type[i+1];
		if (types[i] == MLX4_PORT_TYPE_AUTO)
			types[i] = mdev->caps.port_type[i+1];
	}

	if (!(mdev->caps.flags & MLX4_DEV_CAP_FLAG_DPDP) &&
	    !(mdev->caps.flags & MLX4_DEV_CAP_FLAG_SENSE_SUPPORT)) {
		for (i = 1; i <= mdev->caps.num_ports; i++) {
			if (mdev->caps.possible_type[i] == MLX4_PORT_TYPE_AUTO) {
				mdev->caps.possible_type[i] = mdev->caps.port_type[i];
				err = -EINVAL;
			}
		}
	}
	if (err) {
		mlx4_err(mdev, "Auto sensing is not supported on this HCA. Set only 'eth' or 'ib' for both ports (should be the same)\n");
		goto out;
	}

	mlx4_do_sense_ports(mdev, new_types, types);

	err = mlx4_check_port_params(mdev, new_types);
	if (err)
		goto out;

	/* We are about to apply the changes after the configuration
	 * was verified, no need to remember the temporary types
	 * any more */
	for (i = 0; i < mdev->caps.num_ports; i++)
		priv->port[i + 1].tmp_type = 0;

	err = mlx4_change_port_types(mdev, new_types);

out:
	mlx4_start_sense(mdev);
	mutex_unlock(&priv->port_mutex);
err_out:
	mutex_unlock(&set_port_type_mutex);

	return err ? err : count;
}
enum ibta_mtu {
	IB_MTU_256  = 1,
	IB_MTU_512  = 2,
	IB_MTU_1024 = 3,
	IB_MTU_2048 = 4,
	IB_MTU_4096 = 5
};

static inline int int_to_ibta_mtu(int mtu)
{
	switch (mtu) {
	case 256:  return IB_MTU_256;
	case 512:  return IB_MTU_512;
	case 1024: return IB_MTU_1024;
	case 2048: return IB_MTU_2048;
	case 4096: return IB_MTU_4096;
	default: return -1;
	}
}

static inline int ibta_mtu_to_int(enum ibta_mtu mtu)
{
	switch (mtu) {
	case IB_MTU_256:  return  256;
	case IB_MTU_512:  return  512;
	case IB_MTU_1024: return 1024;
	case IB_MTU_2048: return 2048;
	case IB_MTU_4096: return 4096;
	default: return -1;
	}
}
static ssize_t show_port_ib_mtu(struct device *dev,
				struct device_attribute *attr,
				char *buf)
{
	struct mlx4_port_info *info = container_of(attr, struct mlx4_port_info,
						   port_mtu_attr);
	struct mlx4_dev *mdev = info->dev;

	if (mdev->caps.port_type[info->port] == MLX4_PORT_TYPE_ETH)
		mlx4_warn(mdev, "port level mtu is only used for IB ports\n");

	sprintf(buf, "%d\n",
		ibta_mtu_to_int(mdev->caps.port_ib_mtu[info->port]));
	return strlen(buf);
}

static ssize_t set_port_ib_mtu(struct device *dev,
			       struct device_attribute *attr,
			       const char *buf, size_t count)
{
	struct mlx4_port_info *info = container_of(attr, struct mlx4_port_info,
						   port_mtu_attr);
	struct mlx4_dev *mdev = info->dev;
	struct mlx4_priv *priv = mlx4_priv(mdev);
	int err, port, mtu, ibta_mtu = -1;

	if (mdev->caps.port_type[info->port] == MLX4_PORT_TYPE_ETH) {
		mlx4_warn(mdev, "port level mtu is only used for IB ports\n");
		return -EINVAL;
	}

	err = kstrtoint(buf, 0, &mtu);
	if (!err)
		ibta_mtu = int_to_ibta_mtu(mtu);

	if (err || ibta_mtu < 0) {
		mlx4_err(mdev, "%s is an invalid IBTA MTU\n", buf);
		return -EINVAL;
	}

	mdev->caps.port_ib_mtu[info->port] = ibta_mtu;

	mlx4_stop_sense(mdev);
	mutex_lock(&priv->port_mutex);
	mlx4_unregister_device(mdev);
	for (port = 1; port <= mdev->caps.num_ports; port++) {
		mlx4_CLOSE_PORT(mdev, port);
		err = mlx4_SET_PORT(mdev, port, -1);
		if (err) {
			mlx4_err(mdev, "Failed to set port %d, aborting\n",
				 port);
			break;
		}
	}
	err = mlx4_register_device(mdev);

	mutex_unlock(&priv->port_mutex);
	mlx4_start_sense(mdev);
	return err ? err : count;
}
static int mlx4_load_fw(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int err;

	priv->fw.fw_icm = mlx4_alloc_icm(dev, priv->fw.fw_pages,
					 GFP_HIGHUSER | __GFP_NOWARN, 0);
	if (!priv->fw.fw_icm) {
		mlx4_err(dev, "Couldn't allocate FW area, aborting\n");
		return -ENOMEM;
	}

	err = mlx4_MAP_FA(dev, priv->fw.fw_icm);
	if (err) {
		mlx4_err(dev, "MAP_FA command failed, aborting\n");
		goto err_free;
	}

	err = mlx4_RUN_FW(dev);
	if (err) {
		mlx4_err(dev, "RUN_FW command failed, aborting\n");
		goto err_unmap_fa;
	}

	return 0;

err_unmap_fa:
	mlx4_UNMAP_FA(dev);

err_free:
	mlx4_free_icm(dev, priv->fw.fw_icm, 0);
	return err;
}
static int mlx4_init_cmpt_table(struct mlx4_dev *dev, u64 cmpt_base,
				int cmpt_entry_sz)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int err;
	int num_eqs;

	err = mlx4_init_icm_table(dev, &priv->qp_table.cmpt_table,
				  cmpt_base +
				  ((u64) (MLX4_CMPT_TYPE_QP *
					  cmpt_entry_sz) << MLX4_CMPT_SHIFT),
				  cmpt_entry_sz, dev->caps.num_qps,
				  dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW],
				  0, 0);
	if (err)
		goto err;

	err = mlx4_init_icm_table(dev, &priv->srq_table.cmpt_table,
				  cmpt_base +
				  ((u64) (MLX4_CMPT_TYPE_SRQ *
					  cmpt_entry_sz) << MLX4_CMPT_SHIFT),
				  cmpt_entry_sz, dev->caps.num_srqs,
				  dev->caps.reserved_srqs, 0, 0);
	if (err)
		goto err_qp;

	err = mlx4_init_icm_table(dev, &priv->cq_table.cmpt_table,
				  cmpt_base +
				  ((u64) (MLX4_CMPT_TYPE_CQ *
					  cmpt_entry_sz) << MLX4_CMPT_SHIFT),
				  cmpt_entry_sz, dev->caps.num_cqs,
				  dev->caps.reserved_cqs, 0, 0);
	if (err)
		goto err_srq;

	num_eqs = dev->phys_caps.num_phys_eqs;
	err = mlx4_init_icm_table(dev, &priv->eq_table.cmpt_table,
				  cmpt_base +
				  ((u64) (MLX4_CMPT_TYPE_EQ *
					  cmpt_entry_sz) << MLX4_CMPT_SHIFT),
				  cmpt_entry_sz, num_eqs, num_eqs, 0, 0);
	if (err)
		goto err_cq;

	return 0;

err_cq:
	mlx4_cleanup_icm_table(dev, &priv->cq_table.cmpt_table);
err_srq:
	mlx4_cleanup_icm_table(dev, &priv->srq_table.cmpt_table);
err_qp:
	mlx4_cleanup_icm_table(dev, &priv->qp_table.cmpt_table);
err:
	return err;
}
static int mlx4_init_icm(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap,
			 struct mlx4_init_hca_param *init_hca, u64 icm_size)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	u64 aux_pages;
	int num_eqs;
	int err;

	err = mlx4_SET_ICM_SIZE(dev, icm_size, &aux_pages);
	if (err) {
		mlx4_err(dev, "SET_ICM_SIZE command failed, aborting\n");
		return err;
	}

	mlx4_dbg(dev, "%lld KB of HCA context requires %lld KB aux memory\n",
		 (unsigned long long) icm_size >> 10,
		 (unsigned long long) aux_pages << 2);

	priv->fw.aux_icm = mlx4_alloc_icm(dev, aux_pages,
					  GFP_HIGHUSER | __GFP_NOWARN, 0);
	if (!priv->fw.aux_icm) {
		mlx4_err(dev, "Couldn't allocate aux memory, aborting\n");
		return -ENOMEM;
	}

	err = mlx4_MAP_ICM_AUX(dev, priv->fw.aux_icm);
	if (err) {
		mlx4_err(dev, "MAP_ICM_AUX command failed, aborting\n");
		goto err_free_aux;
	}

	err = mlx4_init_cmpt_table(dev, init_hca->cmpt_base, dev_cap->cmpt_entry_sz);
	if (err) {
		mlx4_err(dev, "Failed to map cMPT context memory, aborting\n");
		goto err_unmap_aux;
	}

	num_eqs = dev->phys_caps.num_phys_eqs;
	err = mlx4_init_icm_table(dev, &priv->eq_table.table,
				  init_hca->eqc_base, dev_cap->eqc_entry_sz,
				  num_eqs, num_eqs, 0, 0);
	if (err) {
		mlx4_err(dev, "Failed to map EQ context memory, aborting\n");
		goto err_unmap_cmpt;
	}

	/*
	 * Reserved MTT entries must be aligned up to a cacheline
	 * boundary, since the FW will write to them, while the driver
	 * writes to all other MTT entries. (The variable
	 * dev->caps.mtt_entry_sz below is really the MTT segment
	 * size, not the raw entry size)
	 */
	dev->caps.reserved_mtts =
		ALIGN(dev->caps.reserved_mtts * dev->caps.mtt_entry_sz,
		      dma_get_cache_alignment()) / dev->caps.mtt_entry_sz;

	err = mlx4_init_icm_table(dev, &priv->mr_table.mtt_table,
				  init_hca->mtt_base,
				  dev->caps.mtt_entry_sz,
				  dev->caps.num_mtts,
				  dev->caps.reserved_mtts, 1, 0);
	if (err) {
		mlx4_err(dev, "Failed to map MTT context memory, aborting\n");
		goto err_unmap_eq;
	}

	err = mlx4_init_icm_table(dev, &priv->mr_table.dmpt_table,
				  init_hca->dmpt_base,
				  dev_cap->dmpt_entry_sz,
				  dev->caps.num_mpts,
				  dev->caps.reserved_mrws, 1, 1);
	if (err) {
		mlx4_err(dev, "Failed to map dMPT context memory, aborting\n");
		goto err_unmap_mtt;
	}

	err = mlx4_init_icm_table(dev, &priv->qp_table.qp_table,
				  init_hca->qpc_base,
				  dev_cap->qpc_entry_sz,
				  dev->caps.num_qps,
				  dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW],
				  0, 0);
	if (err) {
		mlx4_err(dev, "Failed to map QP context memory, aborting\n");
		goto err_unmap_dmpt;
	}

	err = mlx4_init_icm_table(dev, &priv->qp_table.auxc_table,
				  init_hca->auxc_base,
				  dev_cap->aux_entry_sz,
				  dev->caps.num_qps,
				  dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW],
				  0, 0);
	if (err) {
		mlx4_err(dev, "Failed to map AUXC context memory, aborting\n");
		goto err_unmap_qp;
	}

	err = mlx4_init_icm_table(dev, &priv->qp_table.altc_table,
				  init_hca->altc_base,
				  dev_cap->altc_entry_sz,
				  dev->caps.num_qps,
				  dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW],
				  0, 0);
	if (err) {
		mlx4_err(dev, "Failed to map ALTC context memory, aborting\n");
		goto err_unmap_auxc;
	}

	err = mlx4_init_icm_table(dev, &priv->qp_table.rdmarc_table,
				  init_hca->rdmarc_base,
				  dev_cap->rdmarc_entry_sz << priv->qp_table.rdmarc_shift,
				  dev->caps.num_qps,
				  dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW],
				  0, 0);
	if (err) {
		mlx4_err(dev, "Failed to map RDMARC context memory, aborting\n");
		goto err_unmap_altc;
	}

	err = mlx4_init_icm_table(dev, &priv->cq_table.table,
				  init_hca->cqc_base,
				  dev_cap->cqc_entry_sz,
				  dev->caps.num_cqs,
				  dev->caps.reserved_cqs, 0, 0);
	if (err) {
		mlx4_err(dev, "Failed to map CQ context memory, aborting\n");
		goto err_unmap_rdmarc;
	}

	err = mlx4_init_icm_table(dev, &priv->srq_table.table,
				  init_hca->srqc_base,
				  dev_cap->srq_entry_sz,
				  dev->caps.num_srqs,
				  dev->caps.reserved_srqs, 0, 0);
	if (err) {
		mlx4_err(dev, "Failed to map SRQ context memory, aborting\n");
		goto err_unmap_cq;
	}

	/*
	 * For flow steering device managed mode it is required to use
	 * mlx4_init_icm_table. For B0 steering mode it's not strictly
	 * required, but for simplicity just map the whole multicast
	 * group table now.  The table isn't very big and it's a lot
	 * easier than trying to track ref counts.
	 */
	err = mlx4_init_icm_table(dev, &priv->mcg_table.table,
				  init_hca->mc_base,
				  mlx4_get_mgm_entry_size(dev),
				  dev->caps.num_mgms + dev->caps.num_amgms,
				  dev->caps.num_mgms + dev->caps.num_amgms,
				  0, 0);
	if (err) {
		mlx4_err(dev, "Failed to map MCG context memory, aborting\n");
		goto err_unmap_srq;
	}

	return 0;

err_unmap_srq:
	mlx4_cleanup_icm_table(dev, &priv->srq_table.table);
err_unmap_cq:
	mlx4_cleanup_icm_table(dev, &priv->cq_table.table);
err_unmap_rdmarc:
	mlx4_cleanup_icm_table(dev, &priv->qp_table.rdmarc_table);
err_unmap_altc:
	mlx4_cleanup_icm_table(dev, &priv->qp_table.altc_table);
err_unmap_auxc:
	mlx4_cleanup_icm_table(dev, &priv->qp_table.auxc_table);
err_unmap_qp:
	mlx4_cleanup_icm_table(dev, &priv->qp_table.qp_table);
err_unmap_dmpt:
	mlx4_cleanup_icm_table(dev, &priv->mr_table.dmpt_table);
err_unmap_mtt:
	mlx4_cleanup_icm_table(dev, &priv->mr_table.mtt_table);
err_unmap_eq:
	mlx4_cleanup_icm_table(dev, &priv->eq_table.table);
err_unmap_cmpt:
	mlx4_cleanup_icm_table(dev, &priv->eq_table.cmpt_table);
	mlx4_cleanup_icm_table(dev, &priv->cq_table.cmpt_table);
	mlx4_cleanup_icm_table(dev, &priv->srq_table.cmpt_table);
	mlx4_cleanup_icm_table(dev, &priv->qp_table.cmpt_table);
err_unmap_aux:
	mlx4_UNMAP_ICM_AUX(dev);
err_free_aux:
	mlx4_free_icm(dev, priv->fw.aux_icm, 0);

	return err;
}
static void mlx4_free_icms(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	mlx4_cleanup_icm_table(dev, &priv->mcg_table.table);
	mlx4_cleanup_icm_table(dev, &priv->srq_table.table);
	mlx4_cleanup_icm_table(dev, &priv->cq_table.table);
	mlx4_cleanup_icm_table(dev, &priv->qp_table.rdmarc_table);
	mlx4_cleanup_icm_table(dev, &priv->qp_table.altc_table);
	mlx4_cleanup_icm_table(dev, &priv->qp_table.auxc_table);
	mlx4_cleanup_icm_table(dev, &priv->qp_table.qp_table);
	mlx4_cleanup_icm_table(dev, &priv->mr_table.dmpt_table);
	mlx4_cleanup_icm_table(dev, &priv->mr_table.mtt_table);
	mlx4_cleanup_icm_table(dev, &priv->eq_table.table);
	mlx4_cleanup_icm_table(dev, &priv->eq_table.cmpt_table);
	mlx4_cleanup_icm_table(dev, &priv->cq_table.cmpt_table);
	mlx4_cleanup_icm_table(dev, &priv->srq_table.cmpt_table);
	mlx4_cleanup_icm_table(dev, &priv->qp_table.cmpt_table);

	mlx4_UNMAP_ICM_AUX(dev);
	mlx4_free_icm(dev, priv->fw.aux_icm, 0);
}

static void mlx4_slave_exit(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	mutex_lock(&priv->cmd.slave_cmd_mutex);
	if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_RESET, 0, MLX4_COMM_TIME))
		mlx4_warn(dev, "Failed to close slave function\n");
	mutex_unlock(&priv->cmd.slave_cmd_mutex);
}
static int map_bf_area(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	resource_size_t bf_start;
	resource_size_t bf_len;
	int err = 0;

	if (!dev->caps.bf_reg_size)
		return -ENXIO;

	bf_start = pci_resource_start(dev->pdev, 2) +
			(dev->caps.num_uars << PAGE_SHIFT);
	bf_len = pci_resource_len(dev->pdev, 2) -
			(dev->caps.num_uars << PAGE_SHIFT);
	priv->bf_mapping = io_mapping_create_wc(bf_start, bf_len);
	if (!priv->bf_mapping)
		err = -ENOMEM;

	return err;
}

static void unmap_bf_area(struct mlx4_dev *dev)
{
	if (mlx4_priv(dev)->bf_mapping)
		io_mapping_free(mlx4_priv(dev)->bf_mapping);
}
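/* The internal clock is a free-running 64-bit counter read as two 32-bit
 * words. The high word is read before and after the low word; if it changed
 * in between, the low word wrapped during the read, so we retry (up to 10
 * times) to get a consistent snapshot of both halves. */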
cycle_t mlx4_read_clock(struct mlx4_dev *dev)
{
	u32 clockhi, clocklo, clockhi1;
	cycle_t cycles;
	int i;
	struct mlx4_priv *priv = mlx4_priv(dev);

	for (i = 0; i < 10; i++) {
		clockhi = swab32(readl(priv->clock_mapping));
		clocklo = swab32(readl(priv->clock_mapping + 4));
		clockhi1 = swab32(readl(priv->clock_mapping));
		if (clockhi == clockhi1)
			break;
	}

	cycles = (u64) clockhi << 32 | (u64) clocklo;

	return cycles;
}
EXPORT_SYMBOL_GPL(mlx4_read_clock);


static int map_internal_clock(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	priv->clock_mapping =
		ioremap(pci_resource_start(dev->pdev, priv->fw.clock_bar) +
			priv->fw.clock_offset, MLX4_CLOCK_SIZE);

	if (!priv->clock_mapping)
		return -ENOMEM;

	return 0;
}

static void unmap_internal_clock(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	if (priv->clock_mapping)
		iounmap(priv->clock_mapping);
}

static void mlx4_close_hca(struct mlx4_dev *dev)
{
	unmap_internal_clock(dev);
	unmap_bf_area(dev);
	if (mlx4_is_slave(dev))
		mlx4_slave_exit(dev);
	else {
		mlx4_CLOSE_HCA(dev, 0);
		mlx4_free_icms(dev);
	}
}
static void mlx4_close_fw(struct mlx4_dev *dev)
{
	if (!mlx4_is_slave(dev)) {
		mlx4_UNMAP_FA(dev);
		mlx4_free_icm(dev, mlx4_priv(dev)->fw.fw_icm, 0);
	}
}

static int mlx4_init_slave(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	u64 dma = (u64) priv->mfunc.vhcr_dma;
	int ret_from_reset = 0;
	u32 slave_read;
	u32 cmd_channel_ver;

	if (atomic_read(&pf_loading)) {
		mlx4_warn(dev, "PF is not ready - Deferring probe\n");
		return -EPROBE_DEFER;
	}

	mutex_lock(&priv->cmd.slave_cmd_mutex);
	priv->cmd.max_cmds = 1;
	mlx4_warn(dev, "Sending reset\n");
	ret_from_reset = mlx4_comm_cmd(dev, MLX4_COMM_CMD_RESET, 0,
				       MLX4_COMM_TIME);
	/* if we are in the middle of FLR the slave will try
	 * NUM_OF_RESET_RETRIES times before leaving */
	if (ret_from_reset) {
		if (MLX4_DELAY_RESET_SLAVE == ret_from_reset) {
			mlx4_warn(dev, "slave is currently in the middle of FLR - Deferring probe\n");
			mutex_unlock(&priv->cmd.slave_cmd_mutex);
			return -EPROBE_DEFER;
		} else
			goto err;
	}

	/* check the driver version - the slave I/F revision
	 * must match the master's */
	slave_read = swab32(readl(&priv->mfunc.comm->slave_read));
	cmd_channel_ver = mlx4_comm_get_version();

	if (MLX4_COMM_GET_IF_REV(cmd_channel_ver) !=
	    MLX4_COMM_GET_IF_REV(slave_read)) {
		mlx4_err(dev, "slave driver version is not supported by the master\n");
		goto err;
	}
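	/* Hand the 8-byte VHCR DMA address to the master 16 bits at a time
	 * over the comm channel (VHCR0 carries bits 63:48 via dma >> 48,
	 * VHCR1 bits 47:32, VHCR2 bits 31:16), then VHCR_EN passes the low
	 * bits and activates the channel. */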
	mlx4_warn(dev, "Sending vhcr0\n");
	if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_VHCR0, dma >> 48,
			  MLX4_COMM_TIME))
		goto err;
	if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_VHCR1, dma >> 32,
			  MLX4_COMM_TIME))
		goto err;
	if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_VHCR2, dma >> 16,
			  MLX4_COMM_TIME))
		goto err;
	if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_VHCR_EN, dma, MLX4_COMM_TIME))
		goto err;

	mutex_unlock(&priv->cmd.slave_cmd_mutex);
	return 0;

err:
	mlx4_comm_cmd(dev, MLX4_COMM_CMD_RESET, 0, 0);
	mutex_unlock(&priv->cmd.slave_cmd_mutex);
	return -EIO;
}
static void mlx4_parav_master_pf_caps(struct mlx4_dev *dev)
{
	int i;

	for (i = 1; i <= dev->caps.num_ports; i++) {
		if (dev->caps.port_type[i] == MLX4_PORT_TYPE_ETH)
			dev->caps.gid_table_len[i] =
				mlx4_get_slave_num_gids(dev, 0, i);
		else
			dev->caps.gid_table_len[i] = 1;
		dev->caps.pkey_table_len[i] =
			dev->phys_caps.pkey_phys_table_len[i] - 1;
	}
}
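/* An MGM entry of size 2^i bytes holds a fixed header plus a QP list; the
 * usable QPs per entry work out to 4 * ((1 << i) / 16 - 2), matching the
 * num_qp_per_mgm computation used elsewhere in this file (e.g. i = 10
 * gives 4 * (64 - 2) = 248). */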
static int choose_log_fs_mgm_entry_size(int qp_per_entry)
{
	int i = MLX4_MIN_MGM_LOG_ENTRY_SIZE;

	for (i = MLX4_MIN_MGM_LOG_ENTRY_SIZE; i <= MLX4_MAX_MGM_LOG_ENTRY_SIZE;
	      i++) {
		if (qp_per_entry <= 4 * ((1 << i) / 16 - 2))
			break;
	}

	return (i <= MLX4_MAX_MGM_LOG_ENTRY_SIZE) ? i : -1;
}
static void choose_steering_mode(struct mlx4_dev *dev,
				 struct mlx4_dev_cap *dev_cap)
{
	if (mlx4_log_num_mgm_entry_size == -1 &&
	    dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_FS_EN &&
	    (!mlx4_is_mfunc(dev) ||
	     (dev_cap->fs_max_num_qp_per_entry >= (dev->num_vfs + 1))) &&
	    choose_log_fs_mgm_entry_size(dev_cap->fs_max_num_qp_per_entry) >=
		MLX4_MIN_MGM_LOG_ENTRY_SIZE) {
		dev->oper_log_mgm_entry_size =
			choose_log_fs_mgm_entry_size(dev_cap->fs_max_num_qp_per_entry);
		dev->caps.steering_mode = MLX4_STEERING_MODE_DEVICE_MANAGED;
		dev->caps.num_qp_per_mgm = dev_cap->fs_max_num_qp_per_entry;
		dev->caps.fs_log_max_ucast_qp_range_size =
			dev_cap->fs_log_max_ucast_qp_range_size;
	} else {
		if (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER &&
		    dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER)
			dev->caps.steering_mode = MLX4_STEERING_MODE_B0;
		else {
			dev->caps.steering_mode = MLX4_STEERING_MODE_A0;

			if (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER ||
			    dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER)
				mlx4_warn(dev, "Must have both UC_STEER and MC_STEER flags set to use B0 steering - falling back to A0 steering mode\n");
		}
		dev->oper_log_mgm_entry_size =
			mlx4_log_num_mgm_entry_size > 0 ?
			mlx4_log_num_mgm_entry_size :
			MLX4_DEFAULT_MGM_LOG_ENTRY_SIZE;
		dev->caps.num_qp_per_mgm = mlx4_get_qp_per_mgm(dev);
	}
	mlx4_dbg(dev, "Steering mode is: %s, oper_log_mgm_entry_size = %d, modparam log_num_mgm_entry_size = %d\n",
		 mlx4_steering_mode_str(dev->caps.steering_mode),
		 dev->oper_log_mgm_entry_size,
		 mlx4_log_num_mgm_entry_size);
}

static void choose_tunnel_offload_mode(struct mlx4_dev *dev,
				       struct mlx4_dev_cap *dev_cap)
{
	if (dev->caps.steering_mode == MLX4_STEERING_MODE_DEVICE_MANAGED &&
	    dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_VXLAN_OFFLOADS)
		dev->caps.tunnel_offload_mode = MLX4_TUNNEL_OFFLOAD_MODE_VXLAN;
	else
		dev->caps.tunnel_offload_mode = MLX4_TUNNEL_OFFLOAD_MODE_NONE;

	mlx4_dbg(dev, "Tunneling offload mode is: %s\n", (dev->caps.tunnel_offload_mode
		 == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) ? "vxlan" : "none");
}
static int mlx4_init_fw(struct mlx4_dev *dev)
{
	struct mlx4_mod_stat_cfg mlx4_cfg;
	int err = 0;

	if (!mlx4_is_slave(dev)) {
		err = mlx4_QUERY_FW(dev);
		if (err) {
			if (err == -EACCES)
				mlx4_info(dev, "non-primary physical function, skipping\n");
			else
				mlx4_err(dev, "QUERY_FW command failed, aborting\n");
			return err;
		}

		err = mlx4_load_fw(dev);
		if (err) {
			mlx4_err(dev, "Failed to start FW, aborting\n");
			return err;
		}

		mlx4_cfg.log_pg_sz_m = 1;
		mlx4_cfg.log_pg_sz = 0;
		err = mlx4_MOD_STAT_CFG(dev, &mlx4_cfg);
		if (err)
			mlx4_warn(dev, "Failed to override log_pg_sz parameter\n");
	}

	return err;
}
static int mlx4_init_hca(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_adapter adapter;
	struct mlx4_dev_cap dev_cap;
	struct mlx4_profile profile;
	struct mlx4_init_hca_param init_hca;
	u64 icm_size;
	struct mlx4_config_dev_params params;
	int err;

	if (!mlx4_is_slave(dev)) {
		err = mlx4_dev_cap(dev, &dev_cap);
		if (err) {
			mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting\n");
			return err;
		}

		choose_steering_mode(dev, &dev_cap);
		choose_tunnel_offload_mode(dev, &dev_cap);

		err = mlx4_get_phys_port_id(dev);
		if (err)
			mlx4_err(dev, "Fail to get physical port id\n");

		if (mlx4_is_master(dev))
			mlx4_parav_master_pf_caps(dev);

		if (mlx4_low_memory_profile()) {
			mlx4_info(dev, "Running from within kdump kernel. Using low memory profile\n");
			profile = low_mem_profile;
		} else {
			profile = default_profile;
		}
		if (dev->caps.steering_mode ==
		    MLX4_STEERING_MODE_DEVICE_MANAGED)
			profile.num_mcg = MLX4_FS_NUM_MCG;

		icm_size = mlx4_make_profile(dev, &profile, &dev_cap,
					     &init_hca);
		if ((long long) icm_size < 0) {
			err = icm_size;
			goto err_stop_fw;
		}

		dev->caps.max_fmr_maps = (1 << (32 - ilog2(dev->caps.num_mpts))) - 1;

		init_hca.log_uar_sz = ilog2(dev->caps.num_uars);
		init_hca.uar_page_sz = PAGE_SHIFT - 12;
		init_hca.mw_enabled = 0;
		if (dev->caps.flags & MLX4_DEV_CAP_FLAG_MEM_WINDOW ||
		    dev->caps.bmme_flags & MLX4_BMME_FLAG_TYPE_2_WIN)
			init_hca.mw_enabled = INIT_HCA_TPT_MW_ENABLE;

		err = mlx4_init_icm(dev, &dev_cap, &init_hca, icm_size);
		if (err)
			goto err_stop_fw;

		err = mlx4_INIT_HCA(dev, &init_hca);
		if (err) {
			mlx4_err(dev, "INIT_HCA command failed, aborting\n");
			goto err_free_icm;
		}

		if (dev_cap.flags2 & MLX4_DEV_CAP_FLAG2_SYS_EQS) {
			err = mlx4_query_func(dev, &dev_cap);
			if (err < 0) {
				mlx4_err(dev, "QUERY_FUNC command failed, aborting.\n");
				goto err_close;
			} else if (err & MLX4_QUERY_FUNC_NUM_SYS_EQS) {
				dev->caps.num_eqs = dev_cap.max_eqs;
				dev->caps.reserved_eqs = dev_cap.reserved_eqs;
				dev->caps.reserved_uars = dev_cap.reserved_uars;
			}
		}

		/*
		 * If TS is supported by FW
		 * read HCA frequency by QUERY_HCA command
		 */
		if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_TS) {
			memset(&init_hca, 0, sizeof(init_hca));
			err = mlx4_QUERY_HCA(dev, &init_hca);
			if (err) {
				mlx4_err(dev, "QUERY_HCA command failed, disable timestamp\n");
				dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_TS;
			} else {
				dev->caps.hca_core_clock =
					init_hca.hca_core_clock;
			}

			/* In case we got HCA frequency 0 - disable timestamping
			 * to avoid dividing by zero
			 */
			if (!dev->caps.hca_core_clock) {
				dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_TS;
				mlx4_err(dev,
					 "HCA frequency is 0 - timestamping is not supported\n");
			} else if (map_internal_clock(dev)) {
				/*
				 * Map internal clock,
				 * in case of failure disable timestamping
				 */
				dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_TS;
				mlx4_err(dev, "Failed to map internal clock. Timestamping is not supported\n");
			}
		}
	} else {
		err = mlx4_init_slave(dev);
		if (err) {
			if (err != -EPROBE_DEFER)
				mlx4_err(dev, "Failed to initialize slave\n");
			return err;
		}

		err = mlx4_slave_cap(dev);
		if (err) {
			mlx4_err(dev, "Failed to obtain slave caps\n");
			goto err_close;
		}
	}

	if (map_bf_area(dev))
		mlx4_dbg(dev, "Failed to map blue flame area\n");

	/* Only the master sets the ports; all the rest get it from it. */
	if (!mlx4_is_slave(dev))
		mlx4_set_port_mask(dev);

	err = mlx4_QUERY_ADAPTER(dev, &adapter);
	if (err) {
		mlx4_err(dev, "QUERY_ADAPTER command failed, aborting\n");
		goto unmap_bf;
	}

	/* Query CONFIG_DEV parameters */
	err = mlx4_config_dev_retrieval(dev, &params);
	if (err && err != -ENOTSUPP) {
		mlx4_err(dev, "Failed to query CONFIG_DEV parameters\n");
	} else if (!err) {
		dev->caps.rx_checksum_flags_port[1] = params.rx_csum_flags_port_1;
		dev->caps.rx_checksum_flags_port[2] = params.rx_csum_flags_port_2;
	}
	priv->eq_table.inta_pin = adapter.inta_pin;
	memcpy(dev->board_id, adapter.board_id, sizeof(dev->board_id));

	return 0;

unmap_bf:
	unmap_internal_clock(dev);
	unmap_bf_area(dev);

	if (mlx4_is_slave(dev)) {
		kfree(dev->caps.qp0_qkey);
		kfree(dev->caps.qp0_tunnel);
		kfree(dev->caps.qp0_proxy);
		kfree(dev->caps.qp1_tunnel);
		kfree(dev->caps.qp1_proxy);
	}

err_close:
	if (mlx4_is_slave(dev))
		mlx4_slave_exit(dev);
	else
		mlx4_CLOSE_HCA(dev, 0);

err_free_icm:
	if (!mlx4_is_slave(dev))
		mlx4_free_icms(dev);

err_stop_fw:
	if (!mlx4_is_slave(dev)) {
		mlx4_UNMAP_FA(dev);
		mlx4_free_icm(dev, priv->fw.fw_icm, 0);
	}
	return err;
}
static int mlx4_init_counters_table(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int nent;

	if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_COUNTERS))
		return -ENOENT;

	nent = dev->caps.max_counters;
	return mlx4_bitmap_init(&priv->counters_bitmap, nent, nent - 1, 0, 0);
}

static void mlx4_cleanup_counters_table(struct mlx4_dev *dev)
{
	mlx4_bitmap_cleanup(&mlx4_priv(dev)->counters_bitmap);
}

int __mlx4_counter_alloc(struct mlx4_dev *dev, u32 *idx)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_COUNTERS))
		return -ENOENT;

	*idx = mlx4_bitmap_alloc(&priv->counters_bitmap);
	if (*idx == -1)
		return -ENOMEM;

	return 0;
}
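/* In multi-function mode the allocation is routed to the PF through the
 * wrapped ALLOC_RES firmware command (see below), so resource accounting
 * stays with the master; a non-mfunc device allocates directly from the
 * local bitmap via __mlx4_counter_alloc(). */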
int mlx4_counter_alloc(struct mlx4_dev *dev, u32 *idx)
{
	u64 out_param;
	int err;

	if (mlx4_is_mfunc(dev)) {
		err = mlx4_cmd_imm(dev, 0, &out_param, RES_COUNTER,
				   RES_OP_RESERVE, MLX4_CMD_ALLOC_RES,
				   MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
		if (!err)
			*idx = get_param_l(&out_param);

		return err;
	}
	return __mlx4_counter_alloc(dev, idx);
}
EXPORT_SYMBOL_GPL(mlx4_counter_alloc);

void __mlx4_counter_free(struct mlx4_dev *dev, u32 idx)
{
	mlx4_bitmap_free(&mlx4_priv(dev)->counters_bitmap, idx, MLX4_USE_RR);
}

void mlx4_counter_free(struct mlx4_dev *dev, u32 idx)
{
	u64 in_param = 0;

	if (mlx4_is_mfunc(dev)) {
		set_param_l(&in_param, idx);
		mlx4_cmd(dev, in_param, RES_COUNTER, RES_OP_RESERVE,
			 MLX4_CMD_FREE_RES, MLX4_CMD_TIME_CLASS_A,
			 MLX4_CMD_WRAPPED);
		return;
	}
	__mlx4_counter_free(dev, idx);
}
EXPORT_SYMBOL_GPL(mlx4_counter_free);
static int mlx4_setup_hca(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int err;
	int port;
	__be32 ib_port_default_caps;

	err = mlx4_init_uar_table(dev);
	if (err) {
		mlx4_err(dev, "Failed to initialize user access region table, aborting\n");
		return err;
	}

	err = mlx4_uar_alloc(dev, &priv->driver_uar);
	if (err) {
		mlx4_err(dev, "Failed to allocate driver access region, aborting\n");
		goto err_uar_table_free;
	}

	priv->kar = ioremap((phys_addr_t) priv->driver_uar.pfn << PAGE_SHIFT, PAGE_SIZE);
	if (!priv->kar) {
		mlx4_err(dev, "Couldn't map kernel access region, aborting\n");
		err = -ENOMEM;
		goto err_uar_free;
	}

	err = mlx4_init_pd_table(dev);
	if (err) {
		mlx4_err(dev, "Failed to initialize protection domain table, aborting\n");
		goto err_kar_unmap;
	}

	err = mlx4_init_xrcd_table(dev);
	if (err) {
		mlx4_err(dev, "Failed to initialize reliable connection domain table, aborting\n");
		goto err_pd_table_free;
	}

	err = mlx4_init_mr_table(dev);
	if (err) {
		mlx4_err(dev, "Failed to initialize memory region table, aborting\n");
		goto err_xrcd_table_free;
	}

	if (!mlx4_is_slave(dev)) {
		err = mlx4_init_mcg_table(dev);
		if (err) {
			mlx4_err(dev, "Failed to initialize multicast group table, aborting\n");
			goto err_mr_table_free;
		}
		err = mlx4_config_mad_demux(dev);
		if (err) {
			mlx4_err(dev, "Failed in config_mad_demux, aborting\n");
			goto err_mcg_table_free;
		}
	}

	err = mlx4_init_eq_table(dev);
	if (err) {
		mlx4_err(dev, "Failed to initialize event queue table, aborting\n");
		goto err_mcg_table_free;
	}

	err = mlx4_cmd_use_events(dev);
	if (err) {
		mlx4_err(dev, "Failed to switch to event-driven firmware commands, aborting\n");
		goto err_eq_table_free;
	}

	err = mlx4_NOP(dev);
	if (err) {
		if (dev->flags & MLX4_FLAG_MSI_X) {
			mlx4_warn(dev, "NOP command failed to generate MSI-X interrupt (IRQ %d)\n",
				  priv->eq_table.eq[dev->caps.num_comp_vectors].irq);
			mlx4_warn(dev, "Trying again without MSI-X\n");
		} else {
			mlx4_err(dev, "NOP command failed to generate interrupt (IRQ %d), aborting\n",
				 priv->eq_table.eq[dev->caps.num_comp_vectors].irq);
			mlx4_err(dev, "BIOS or ACPI interrupt routing problem?\n");
		}
		goto err_cmd_poll;
	}

	mlx4_dbg(dev, "NOP command IRQ test passed\n");

	err = mlx4_init_cq_table(dev);
	if (err) {
		mlx4_err(dev, "Failed to initialize completion queue table, aborting\n");
		goto err_cmd_poll;
	}

	err = mlx4_init_srq_table(dev);
	if (err) {
		mlx4_err(dev, "Failed to initialize shared receive queue table, aborting\n");
		goto err_cq_table_free;
	}

	err = mlx4_init_qp_table(dev);
	if (err) {
		mlx4_err(dev, "Failed to initialize queue pair table, aborting\n");
		goto err_srq_table_free;
	}

	err = mlx4_init_counters_table(dev);
	if (err && err != -ENOENT) {
		mlx4_err(dev, "Failed to initialize counters table, aborting\n");
		goto err_qp_table_free;
	}

	if (!mlx4_is_slave(dev)) {
		for (port = 1; port <= dev->caps.num_ports; port++) {
			ib_port_default_caps = 0;
			err = mlx4_get_port_ib_caps(dev, port,
						    &ib_port_default_caps);
			if (err)
				mlx4_warn(dev, "failed to get port %d default ib capabilities (%d). Continuing with caps = 0\n",
					  port, err);
			dev->caps.ib_port_def_cap[port] = ib_port_default_caps;

			/* initialize per-slave default ib port capabilities */
			if (mlx4_is_master(dev)) {
				int i;

				for (i = 0; i < dev->num_slaves; i++) {
					if (i == mlx4_master_func_num(dev))
						continue;
					priv->mfunc.master.slave_state[i].ib_cap_mask[port] =
						ib_port_default_caps;
				}
			}

			if (mlx4_is_mfunc(dev))
				dev->caps.port_ib_mtu[port] = IB_MTU_2048;
			else
				dev->caps.port_ib_mtu[port] = IB_MTU_4096;

			err = mlx4_SET_PORT(dev, port, mlx4_is_master(dev) ?
					    dev->caps.pkey_table_len[port] : -1);
			if (err) {
				mlx4_err(dev, "Failed to set port %d, aborting\n",
					 port);
				goto err_counters_table_free;
			}
		}
	}

	return 0;

err_counters_table_free:
	mlx4_cleanup_counters_table(dev);
err_qp_table_free:
	mlx4_cleanup_qp_table(dev);
err_srq_table_free:
	mlx4_cleanup_srq_table(dev);
err_cq_table_free:
	mlx4_cleanup_cq_table(dev);
err_cmd_poll:
	mlx4_cmd_use_polling(dev);
err_eq_table_free:
	mlx4_cleanup_eq_table(dev);
err_mcg_table_free:
	if (!mlx4_is_slave(dev))
		mlx4_cleanup_mcg_table(dev);
err_mr_table_free:
	mlx4_cleanup_mr_table(dev);
err_xrcd_table_free:
	mlx4_cleanup_xrcd_table(dev);
err_pd_table_free:
	mlx4_cleanup_pd_table(dev);
err_kar_unmap:
	iounmap(priv->kar);
err_uar_free:
	mlx4_uar_free(dev, &priv->driver_uar);
err_uar_table_free:
	mlx4_cleanup_uar_table(dev);
	return err;
}
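
/* mlx4_enable_msi_x() sizes its MSI-X request as one vector per online CPU
 * per port plus the MSIX_LEGACY_SZ async/legacy vectors, clamped to the EQs
 * the firmware actually exposes.  As a rough worked example (the numbers are
 * illustrative, not taken from this file): 2 ports on an 8-CPU host request
 * 2 * 8 + MSIX_LEGACY_SZ vectors; if the PCI core grants fewer than
 * MSIX_LEGACY_SZ + num_ports * MIN_MSIX_P_PORT, the driver falls back to
 * legacy sharing with an empty completion-vector pool.
 */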
static void mlx4_enable_msi_x(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct msix_entry *entries;
	int i;

	if (msi_x) {
		int nreq = dev->caps.num_ports * num_online_cpus() + MSIX_LEGACY_SZ;

		nreq = min_t(int, dev->caps.num_eqs - dev->caps.reserved_eqs,
			     nreq);

		entries = kcalloc(nreq, sizeof *entries, GFP_KERNEL);
		if (!entries)
			goto no_msi;

		for (i = 0; i < nreq; ++i)
			entries[i].entry = i;

		nreq = pci_enable_msix_range(dev->pdev, entries, 2, nreq);
		if (nreq < 0) {
			kfree(entries);
			goto no_msi;
		} else if (nreq < MSIX_LEGACY_SZ +
			   dev->caps.num_ports * MIN_MSIX_P_PORT) {
			/* Working in legacy mode, all EQs shared */
			dev->caps.comp_pool	   = 0;
			dev->caps.num_comp_vectors = nreq - 1;
		} else {
			dev->caps.comp_pool	   = nreq - MSIX_LEGACY_SZ;
			dev->caps.num_comp_vectors = MSIX_LEGACY_SZ - 1;
		}
		for (i = 0; i < nreq; ++i)
			priv->eq_table.eq[i].irq = entries[i].vector;

		dev->flags |= MLX4_FLAG_MSI_X;

		kfree(entries);
		return;
	}

no_msi:
	dev->caps.num_comp_vectors = 1;
	dev->caps.comp_pool	   = 0;

	for (i = 0; i < 2; ++i)
		priv->eq_table.eq[i].irq = dev->pdev->irq;
}
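
/* mlx4_init_port_info() publishes two sysfs attributes per port,
 * mlx4_port<N> (link type) and mlx4_port<N>_mtu.  In multi-function mode
 * they are read-only (S_IRUGO), since only the PF owns the physical port
 * configuration; otherwise they are also writable by root (S_IWUSR).
 */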
static int mlx4_init_port_info(struct mlx4_dev *dev, int port)
{
	struct mlx4_port_info *info = &mlx4_priv(dev)->port[port];
	int err = 0;

	info->dev = dev;
	info->port = port;
	if (!mlx4_is_slave(dev)) {
		mlx4_init_mac_table(dev, &info->mac_table);
		mlx4_init_vlan_table(dev, &info->vlan_table);
		mlx4_init_roce_gid_table(dev, &info->gid_table);
		info->base_qpn = mlx4_get_base_qpn(dev, port);
	}

	sprintf(info->dev_name, "mlx4_port%d", port);
	info->port_attr.attr.name = info->dev_name;
	if (mlx4_is_mfunc(dev))
		info->port_attr.attr.mode = S_IRUGO;
	else {
		info->port_attr.attr.mode = S_IRUGO | S_IWUSR;
		info->port_attr.store     = set_port_type;
	}
	info->port_attr.show      = show_port_type;
	sysfs_attr_init(&info->port_attr.attr);

	err = device_create_file(&dev->pdev->dev, &info->port_attr);
	if (err) {
		mlx4_err(dev, "Failed to create file for port %d\n", port);
		info->port = -1;
	}

	sprintf(info->dev_mtu_name, "mlx4_port%d_mtu", port);
	info->port_mtu_attr.attr.name = info->dev_mtu_name;
	if (mlx4_is_mfunc(dev))
		info->port_mtu_attr.attr.mode = S_IRUGO;
	else {
		info->port_mtu_attr.attr.mode = S_IRUGO | S_IWUSR;
		info->port_mtu_attr.store     = set_port_ib_mtu;
	}
	info->port_mtu_attr.show      = show_port_ib_mtu;
	sysfs_attr_init(&info->port_mtu_attr.attr);

	err = device_create_file(&dev->pdev->dev, &info->port_mtu_attr);
	if (err) {
		mlx4_err(dev, "Failed to create mtu file for port %d\n", port);
		device_remove_file(&info->dev->pdev->dev, &info->port_attr);
		info->port = -1;
	}

	return err;
}

static void mlx4_cleanup_port_info(struct mlx4_port_info *info)
{
	if (info->port < 0)
		return;

	device_remove_file(&info->dev->pdev->dev, &info->port_attr);
	device_remove_file(&info->dev->pdev->dev, &info->port_mtu_attr);
}
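
/* One mlx4_steer entry is kept per physical port; each entry holds the
 * per-steer-type lists of promiscuous QPs and steering entries that back
 * the unicast/multicast steering logic.  mlx4_clear_steering() must walk
 * and free those lists before the array itself is released.
 */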
static int mlx4_init_steering(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int num_entries = dev->caps.num_ports;
	int i, j;

	priv->steer = kzalloc(sizeof(struct mlx4_steer) * num_entries, GFP_KERNEL);
	if (!priv->steer)
		return -ENOMEM;

	for (i = 0; i < num_entries; i++)
		for (j = 0; j < MLX4_NUM_STEERS; j++) {
			INIT_LIST_HEAD(&priv->steer[i].promisc_qps[j]);
			INIT_LIST_HEAD(&priv->steer[i].steer_entries[j]);
		}
	return 0;
}

static void mlx4_clear_steering(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_steer_index *entry, *tmp_entry;
	struct mlx4_promisc_qp *pqp, *tmp_pqp;
	int num_entries = dev->caps.num_ports;
	int i, j;

	for (i = 0; i < num_entries; i++) {
		for (j = 0; j < MLX4_NUM_STEERS; j++) {
			list_for_each_entry_safe(pqp, tmp_pqp,
						 &priv->steer[i].promisc_qps[j],
						 list) {
				list_del(&pqp->list);
				kfree(pqp);
			}
			list_for_each_entry_safe(entry, tmp_entry,
						 &priv->steer[i].steer_entries[j],
						 list) {
				list_del(&entry->list);
				list_for_each_entry_safe(pqp, tmp_pqp,
							 &entry->duplicates,
							 list) {
					list_del(&pqp->list);
					kfree(pqp);
				}
				kfree(entry);
			}
		}
	}
	kfree(priv->steer);
}
static int extended_func_num(struct pci_dev *pdev)
{
	return PCI_SLOT(pdev->devfn) * 8 + PCI_FUNC(pdev->devfn);
}
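
/* A PCI devfn packs the function number in its low 3 bits, so
 * slot * 8 + function yields a single flat index: for example, slot 2,
 * function 5 maps to extended function number 21.  The VF-skip logic in
 * __mlx4_init_one() compares this index against the cumulative nvfs[]
 * counts to decide which VFs the PF should probe.
 */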
#define MLX4_OWNER_BASE	0x8069c
#define MLX4_OWNER_SIZE	4

static int mlx4_get_ownership(struct mlx4_dev *dev)
{
	void __iomem *owner;
	u32 ret;

	if (pci_channel_offline(dev->pdev))
		return -EIO;

	owner = ioremap(pci_resource_start(dev->pdev, 0) + MLX4_OWNER_BASE,
			MLX4_OWNER_SIZE);
	if (!owner) {
		mlx4_err(dev, "Failed to obtain ownership bit\n");
		return -ENOMEM;
	}

	ret = readl(owner);
	iounmap(owner);
	return (int) !!ret;
}

static void mlx4_free_ownership(struct mlx4_dev *dev)
{
	void __iomem *owner;

	if (pci_channel_offline(dev->pdev))
		return;

	owner = ioremap(pci_resource_start(dev->pdev, 0) + MLX4_OWNER_BASE,
			MLX4_OWNER_SIZE);
	if (!owner) {
		mlx4_err(dev, "Failed to obtain ownership bit\n");
		return;
	}

	writel(0, owner);
	iounmap(owner);
}
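
/* SRIOV_VALID_STATE encodes the invariant that SR-IOV may be enabled if and
 * only if this function is the master: a device flagged MLX4_FLAG_SRIOV
 * without MLX4_FLAG_MASTER (or vice versa) means the probe/takeover logic
 * went through an inconsistent transition and must abort.
 */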
#define SRIOV_VALID_STATE(flags) (!!((flags) & MLX4_FLAG_SRIOV) ==\
				  !!((flags) & MLX4_FLAG_MASTER))

static u64 mlx4_enable_sriov(struct mlx4_dev *dev, struct pci_dev *pdev,
			     u8 total_vfs, int existing_vfs)
{
	u64 dev_flags = dev->flags;

	dev->dev_vfs = kzalloc(
			total_vfs * sizeof(*dev->dev_vfs),
			GFP_KERNEL);
	if (NULL == dev->dev_vfs) {
		mlx4_err(dev, "Failed to allocate memory for VFs\n");
		goto disable_sriov;
	} else if (!(dev->flags & MLX4_FLAG_SRIOV)) {
		int err = 0;

		atomic_inc(&pf_loading);
		if (existing_vfs) {
			if (existing_vfs != total_vfs)
				mlx4_err(dev, "SR-IOV was already enabled, but with num_vfs (%d) different than requested (%d)\n",
					 existing_vfs, total_vfs);
		} else {
			mlx4_warn(dev, "Enabling SR-IOV with %d VFs\n", total_vfs);
			err = pci_enable_sriov(pdev, total_vfs);
		}
		if (err) {
			mlx4_err(dev, "Failed to enable SR-IOV, continuing without SR-IOV (err = %d)\n",
				 err);
			atomic_dec(&pf_loading);
			goto disable_sriov;
		}
	}

	mlx4_warn(dev, "Running in master mode\n");
	dev_flags |= MLX4_FLAG_SRIOV |
		     MLX4_FLAG_MASTER;
	dev_flags &= ~MLX4_FLAG_SLAVE;
	dev->num_vfs = total_vfs;
	return dev_flags;

disable_sriov:
	kfree(dev->dev_vfs);
	return dev_flags & ~MLX4_FLAG_MASTER;
}
enum {
	MLX4_DEV_CAP_CHECK_NUM_VFS_ABOVE_64 = -1,
};

static int mlx4_check_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap,
			      int *nvfs)
{
	int requested_vfs = nvfs[0] + nvfs[1] + nvfs[2];

	/* Checking for 64 VFs as a limitation of CX2 */
	if (!(dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_80_VFS) &&
	    requested_vfs >= 64) {
		mlx4_err(dev, "Requested %d VFs, but FW does not support more than 64\n",
			 requested_vfs);
		return MLX4_DEV_CAP_CHECK_NUM_VFS_ABOVE_64;
	}
	return 0;
}
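
/* mlx4_load_one() is the heart of probe: it claims ownership (PF only),
 * resets the HCA, brings up the command interface, queries capabilities,
 * enables SR-IOV when requested, and then builds the EQ/steering/HCA state.
 * Note the slave_start retry: if mlx4_init_hca() returns -EACCES this
 * function is not the primary physical function, so it downgrades itself
 * to slave mode and restarts from command-interface init.
 */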
static int mlx4_load_one(struct pci_dev *pdev, int pci_dev_data,
			 int total_vfs, int *nvfs, struct mlx4_priv *priv)
{
	struct mlx4_dev *dev;
	int err;
	int port;
	int i;
	struct mlx4_dev_cap *dev_cap = NULL;
	int existing_vfs = 0;

	dev = &priv->dev;

	INIT_LIST_HEAD(&priv->ctx_list);
	spin_lock_init(&priv->ctx_lock);

	mutex_init(&priv->port_mutex);

	INIT_LIST_HEAD(&priv->pgdir_list);
	mutex_init(&priv->pgdir_mutex);

	INIT_LIST_HEAD(&priv->bf_list);
	mutex_init(&priv->bf_mutex);

	dev->rev_id = pdev->revision;
	dev->numa_node = dev_to_node(&pdev->dev);

	/* Detect if this device is a virtual function */
	if (pci_dev_data & MLX4_PCI_DEV_IS_VF) {
		mlx4_warn(dev, "Detected virtual function - running in slave mode\n");
		dev->flags |= MLX4_FLAG_SLAVE;
	} else {
		/* We reset the device and enable SRIOV only for physical
		 * devices. Try to claim ownership on the device;
		 * if already taken, skip -- do not allow multiple PFs */
		err = mlx4_get_ownership(dev);
		if (err) {
			if (err < 0)
				return err;
			mlx4_warn(dev, "Multiple PFs not yet supported - Skipping PF\n");
			return -EINVAL;
		}

		atomic_set(&priv->opreq_count, 0);
		INIT_WORK(&priv->opreq_task, mlx4_opreq_action);

		/*
		 * Now reset the HCA before we touch the PCI capabilities or
		 * attempt a firmware command, since a boot ROM may have left
		 * the HCA in an undefined state.
		 */
		err = mlx4_reset(dev);
		if (err) {
			mlx4_err(dev, "Failed to reset HCA, aborting\n");
			goto err_sriov;
		}

		if (total_vfs) {
			existing_vfs = pci_num_vf(pdev);
			dev->flags = MLX4_FLAG_MASTER;
			dev->num_vfs = total_vfs;
		}
	}

slave_start:
	err = mlx4_cmd_init(dev);
	if (err) {
		mlx4_err(dev, "Failed to init command interface, aborting\n");
		goto err_sriov;
	}

	/* In slave functions, the communication channel must be initialized
	 * before posting commands. Also, init num_slaves before calling
	 * mlx4_init_hca */
	if (mlx4_is_mfunc(dev)) {
		if (mlx4_is_master(dev)) {
			dev->num_slaves = MLX4_MAX_NUM_SLAVES;
		} else {
			dev->num_slaves = 0;
			err = mlx4_multi_func_init(dev);
			if (err) {
				mlx4_err(dev, "Failed to init slave mfunc interface, aborting\n");
				goto err_cmd;
			}
		}
	}

	err = mlx4_init_fw(dev);
	if (err) {
		mlx4_err(dev, "Failed to init fw, aborting.\n");
		goto err_mfunc;
	}

	if (mlx4_is_master(dev)) {
		/* when we hit the goto slave_start below, dev_cap is already
		 * initialized */
		if (!dev_cap) {
			dev_cap = kzalloc(sizeof(*dev_cap), GFP_KERNEL);
			if (!dev_cap) {
				err = -ENOMEM;
				goto err_fw;
			}

			err = mlx4_QUERY_DEV_CAP(dev, dev_cap);
			if (err) {
				mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting.\n");
				goto err_fw;
			}

			if (mlx4_check_dev_cap(dev, dev_cap, nvfs))
				goto err_fw;

			if (!(dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_SYS_EQS)) {
				u64 dev_flags = mlx4_enable_sriov(dev, pdev, total_vfs,
								  existing_vfs);
				mlx4_cmd_cleanup(dev, MLX4_CMD_CLEANUP_ALL);
				dev->flags = dev_flags;
				if (!SRIOV_VALID_STATE(dev->flags)) {
					mlx4_err(dev, "Invalid SRIOV state\n");
					goto err_sriov;
				}
				err = mlx4_reset(dev);
				if (err) {
					mlx4_err(dev, "Failed to reset HCA, aborting.\n");
					goto err_sriov;
				}
				goto slave_start;
			}
		} else {
			/* Legacy mode FW requires SRIOV to be enabled before
			 * doing QUERY_DEV_CAP, since max_eq's value is different if
			 * SRIOV is enabled */
			memset(dev_cap, 0, sizeof(*dev_cap));
			err = mlx4_QUERY_DEV_CAP(dev, dev_cap);
			if (err) {
				mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting.\n");
				goto err_fw;
			}

			if (mlx4_check_dev_cap(dev, dev_cap, nvfs))
				goto err_fw;
		}
	}

	err = mlx4_init_hca(dev);
	if (err) {
		if (err == -EACCES) {
			/* Not primary Physical function
			 * Running in slave mode */
			mlx4_cmd_cleanup(dev, MLX4_CMD_CLEANUP_ALL);
			/* We're not a PF */
			if (dev->flags & MLX4_FLAG_SRIOV) {
				if (!existing_vfs)
					pci_disable_sriov(pdev);
				if (mlx4_is_master(dev))
					atomic_dec(&pf_loading);
				dev->flags &= ~MLX4_FLAG_SRIOV;
			}
			if (!mlx4_is_slave(dev))
				mlx4_free_ownership(dev);
			dev->flags |= MLX4_FLAG_SLAVE;
			dev->flags &= ~MLX4_FLAG_MASTER;
			goto slave_start;
		} else
			goto err_fw;
	}

	if (mlx4_is_master(dev) && (dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_SYS_EQS)) {
		u64 dev_flags = mlx4_enable_sriov(dev, pdev, total_vfs, existing_vfs);

		if ((dev->flags ^ dev_flags) & (MLX4_FLAG_MASTER | MLX4_FLAG_SLAVE)) {
			mlx4_cmd_cleanup(dev, MLX4_CMD_CLEANUP_VHCR);
			dev->flags = dev_flags;
			err = mlx4_cmd_init(dev);
			if (err) {
				/* Only VHCR is cleaned up, so could still
				 * send FW commands
				 */
				mlx4_err(dev, "Failed to init VHCR command interface, aborting\n");
				goto err_close;
			}
		} else {
			dev->flags = dev_flags;
		}

		if (!SRIOV_VALID_STATE(dev->flags)) {
			mlx4_err(dev, "Invalid SRIOV state\n");
			goto err_close;
		}
	}

	/* Check whether the device is functioning at its maximum possible
	 * speed. There is no return code for this call; just warn the user
	 * if the PCI express device capabilities are under-satisfied by the
	 * bus.
	 */
	if (!mlx4_is_slave(dev))
		mlx4_check_pcie_caps(dev);

	/* In master functions, the communication channel must be initialized
	 * after obtaining its address from fw */
	if (mlx4_is_master(dev)) {
		int ib_ports = 0;
		unsigned sum = 0;

		mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB)
			ib_ports++;

		if (ib_ports &&
		    (num_vfs_argc > 1 || probe_vfs_argc > 1)) {
			mlx4_err(dev,
				 "Invalid syntax of num_vfs/probe_vfs with IB port - single port VFs syntax is only supported when all ports are configured as ethernet\n");
			err = -EINVAL;
			goto err_close;
		}
		if (dev->caps.num_ports < 2 &&
		    num_vfs_argc > 1) {
			err = -EINVAL;
			mlx4_err(dev,
				 "Error: Trying to configure VFs on port 2, but HCA has only %d physical ports\n",
				 dev->caps.num_ports);
			goto err_close;
		}
		memcpy(dev->nvfs, nvfs, sizeof(dev->nvfs));

		for (i = 0; i < sizeof(dev->nvfs)/sizeof(dev->nvfs[0]); i++) {
			unsigned j;

			for (j = 0; j < dev->nvfs[i]; ++sum, ++j) {
				dev->dev_vfs[sum].min_port = i < 2 ? i + 1 : 1;
				dev->dev_vfs[sum].n_ports = i < 2 ? 1 :
					dev->caps.num_ports;
			}
		}

		/* In master functions, the communication channel
		 * must be initialized after obtaining its address from fw
		 */
		err = mlx4_multi_func_init(dev);
		if (err) {
			mlx4_err(dev, "Failed to init master mfunc interface, aborting.\n");
			goto err_close;
		}
	}

	err = mlx4_alloc_eq_table(dev);
	if (err)
		goto err_master_mfunc;

	priv->msix_ctl.pool_bm = 0;
	mutex_init(&priv->msix_ctl.pool_lock);

	mlx4_enable_msi_x(dev);
	if ((mlx4_is_mfunc(dev)) &&
	    !(dev->flags & MLX4_FLAG_MSI_X)) {
		err = -ENOSYS;
		mlx4_err(dev, "INTx is not supported in multi-function mode, aborting\n");
		goto err_free_eq;
	}

	if (!mlx4_is_slave(dev)) {
		err = mlx4_init_steering(dev);
		if (err)
			goto err_disable_msix;
	}

	err = mlx4_setup_hca(dev);
	if (err == -EBUSY && (dev->flags & MLX4_FLAG_MSI_X) &&
	    !mlx4_is_mfunc(dev)) {
		dev->flags &= ~MLX4_FLAG_MSI_X;
		dev->caps.num_comp_vectors = 1;
		dev->caps.comp_pool	   = 0;
		pci_disable_msix(pdev);
		err = mlx4_setup_hca(dev);
	}

	if (err)
		goto err_steer;

	mlx4_init_quotas(dev);

	for (port = 1; port <= dev->caps.num_ports; port++) {
		err = mlx4_init_port_info(dev, port);
		if (err)
			goto err_port;
	}

	err = mlx4_register_device(dev);
	if (err)
		goto err_port;

	mlx4_request_modules(dev);

	mlx4_sense_init(dev);
	mlx4_start_sense(dev);

	priv->removed = 0;

	if (mlx4_is_master(dev) && dev->num_vfs)
		atomic_dec(&pf_loading);

	kfree(dev_cap);
	return 0;

err_port:
	for (--port; port >= 1; --port)
		mlx4_cleanup_port_info(&priv->port[port]);

	mlx4_cleanup_counters_table(dev);
	mlx4_cleanup_qp_table(dev);
	mlx4_cleanup_srq_table(dev);
	mlx4_cleanup_cq_table(dev);
	mlx4_cmd_use_polling(dev);
	mlx4_cleanup_eq_table(dev);
	mlx4_cleanup_mcg_table(dev);
	mlx4_cleanup_mr_table(dev);
	mlx4_cleanup_xrcd_table(dev);
	mlx4_cleanup_pd_table(dev);
	mlx4_cleanup_uar_table(dev);

err_steer:
	if (!mlx4_is_slave(dev))
		mlx4_clear_steering(dev);

err_disable_msix:
	if (dev->flags & MLX4_FLAG_MSI_X)
		pci_disable_msix(pdev);

err_free_eq:
	mlx4_free_eq_table(dev);

err_master_mfunc:
	if (mlx4_is_master(dev))
		mlx4_multi_func_cleanup(dev);

	if (mlx4_is_slave(dev)) {
		kfree(dev->caps.qp0_qkey);
		kfree(dev->caps.qp0_tunnel);
		kfree(dev->caps.qp0_proxy);
		kfree(dev->caps.qp1_tunnel);
		kfree(dev->caps.qp1_proxy);
	}

err_close:
	mlx4_close_hca(dev);

err_fw:
	mlx4_close_fw(dev);

err_mfunc:
	if (mlx4_is_slave(dev))
		mlx4_multi_func_cleanup(dev);

err_cmd:
	mlx4_cmd_cleanup(dev, MLX4_CMD_CLEANUP_ALL);

err_sriov:
	if (dev->flags & MLX4_FLAG_SRIOV && !existing_vfs)
		pci_disable_sriov(pdev);

	if (mlx4_is_master(dev) && dev->num_vfs)
		atomic_dec(&pf_loading);

	kfree(priv->dev.dev_vfs);

	if (!mlx4_is_slave(dev))
		mlx4_free_ownership(dev);

	kfree(dev_cap);
	return err;
}
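
/* The probe path is split in two: __mlx4_init_one() below does the generic
 * PCI work (enable, BAR sanity checks, DMA masks) and validates the
 * num_vfs/probe_vf module parameters, while mlx4_load_one() above does the
 * HCA-specific bring-up.  mlx4_restart_one() reuses mlx4_load_one() so a
 * device can be re-initialized without re-running the PCI-level setup.
 */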
static int __mlx4_init_one(struct pci_dev *pdev, int pci_dev_data,
			   struct mlx4_priv *priv)
{
	int err;
	int nvfs[MLX4_MAX_PORTS + 1] = {0, 0, 0};
	int prb_vf[MLX4_MAX_PORTS + 1] = {0, 0, 0};
	const int param_map[MLX4_MAX_PORTS + 1][MLX4_MAX_PORTS + 1] = {
		{2, 0, 0}, {0, 1, 2}, {0, 1, 2} };
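	/* param_map translates the position of each num_vfs/probe_vf argument
	 * into an nvfs[]/prb_vf[] slot (0 = port 1 only, 1 = port 2 only,
	 * 2 = dual port), indexed by how many arguments were given.  For
	 * example, num_vfs=4 alone lands in slot 2 (4 dual-port VFs), while
	 * num_vfs=1,2,3 fills slots 0, 1 and 2 respectively.
	 */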
	unsigned total_vfs = 0;
	int i;

	pr_info(DRV_NAME ": Initializing %s\n", pci_name(pdev));

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
		return err;
	}

	/* Due to requirement that all VFs and the PF are *guaranteed* 2 MACS
	 * per port, we must limit the number of VFs to 63 (since there are
	 * 128 MACs)
	 */
	for (i = 0; i < sizeof(nvfs)/sizeof(nvfs[0]) && i < num_vfs_argc;
	     total_vfs += nvfs[param_map[num_vfs_argc - 1][i]], i++) {
		nvfs[param_map[num_vfs_argc - 1][i]] = num_vfs[i];
		if (nvfs[param_map[num_vfs_argc - 1][i]] < 0) {
			dev_err(&pdev->dev, "num_vfs module parameter cannot be negative\n");
			err = -EINVAL;
			goto err_disable_pdev;
		}
	}

	for (i = 0; i < sizeof(prb_vf)/sizeof(prb_vf[0]) && i < probe_vfs_argc;
	     i++) {
		prb_vf[param_map[probe_vfs_argc - 1][i]] = probe_vf[i];
		if (prb_vf[i] < 0 || prb_vf[i] > nvfs[i]) {
			dev_err(&pdev->dev, "probe_vf module parameter cannot be negative or greater than num_vfs\n");
			err = -EINVAL;
			goto err_disable_pdev;
		}
	}

	if (total_vfs >= MLX4_MAX_NUM_VF) {
		dev_err(&pdev->dev,
			"Requested more VF's (%d) than allowed (%d)\n",
			total_vfs, MLX4_MAX_NUM_VF - 1);
		err = -EINVAL;
		goto err_disable_pdev;
	}

	for (i = 0; i < MLX4_MAX_PORTS; i++) {
		if (nvfs[i] + nvfs[2] >= MLX4_MAX_NUM_VF_P_PORT) {
			dev_err(&pdev->dev,
				"Requested more VF's (%d) for port (%d) than allowed (%d)\n",
				nvfs[i] + nvfs[2], i + 1,
				MLX4_MAX_NUM_VF_P_PORT - 1);
			err = -EINVAL;
			goto err_disable_pdev;
		}
	}

	/* Check for BARs. */
	if (!(pci_dev_data & MLX4_PCI_DEV_IS_VF) &&
	    !(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		dev_err(&pdev->dev, "Missing DCS, aborting (driver_data: 0x%x, pci_resource_flags(pdev, 0):0x%lx)\n",
			pci_dev_data, pci_resource_flags(pdev, 0));
		err = -ENODEV;
		goto err_disable_pdev;
	}
	if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
		dev_err(&pdev->dev, "Missing UAR, aborting\n");
		err = -ENODEV;
		goto err_disable_pdev;
	}

	err = pci_request_regions(pdev, DRV_NAME);
	if (err) {
		dev_err(&pdev->dev, "Couldn't get PCI resources, aborting\n");
		goto err_disable_pdev;
	}

	pci_set_master(pdev);

	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
	if (err) {
		dev_warn(&pdev->dev, "Warning: couldn't set 64-bit PCI DMA mask\n");
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(&pdev->dev, "Can't set PCI DMA mask, aborting\n");
			goto err_release_regions;
		}
	}
	err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
	if (err) {
		dev_warn(&pdev->dev, "Warning: couldn't set 64-bit consistent PCI DMA mask\n");
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(&pdev->dev, "Can't set consistent PCI DMA mask, aborting\n");
			goto err_release_regions;
		}
	}

	/* Allow large DMA segments, up to the firmware limit of 1 GB */
	dma_set_max_seg_size(&pdev->dev, 1024 * 1024 * 1024);

	/* Detect if this device is a virtual function */
	if (pci_dev_data & MLX4_PCI_DEV_IS_VF) {
		/* When acting as pf, we normally skip vfs unless explicitly
		 * requested to probe them.
		 */
		if (total_vfs) {
			unsigned vfs_offset = 0;

			for (i = 0; i < sizeof(nvfs)/sizeof(nvfs[0]) &&
			     vfs_offset + nvfs[i] < extended_func_num(pdev);
			     vfs_offset += nvfs[i], i++)
				;
			if (i == sizeof(nvfs)/sizeof(nvfs[0])) {
				err = -ENODEV;
				goto err_release_regions;
			}
			if ((extended_func_num(pdev) - vfs_offset)
			    > prb_vf[i]) {
				dev_warn(&pdev->dev, "Skipping virtual function:%d\n",
					 extended_func_num(pdev));
				err = -ENODEV;
				goto err_release_regions;
			}
		}
	}

	err = mlx4_load_one(pdev, pci_dev_data, total_vfs, nvfs, priv);
	if (err)
		goto err_release_regions;
	return 0;

err_release_regions:
	pci_release_regions(pdev);

err_disable_pdev:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
	return err;
}
static int mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct mlx4_priv *priv;
	struct mlx4_dev *dev;
	int ret;

	printk_once(KERN_INFO "%s", mlx4_version);

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	dev = &priv->dev;
	pci_set_drvdata(pdev, dev);
	priv->pci_dev_data = id->driver_data;

	ret = __mlx4_init_one(pdev, id->driver_data, priv);
	if (ret)
		kfree(priv);

	return ret;
}
static void mlx4_unload_one(struct pci_dev *pdev)
{
	struct mlx4_dev *dev = pci_get_drvdata(pdev);
	struct mlx4_priv *priv = mlx4_priv(dev);
	int pci_dev_data;
	int p;
	int active_vfs = 0;

	if (priv->removed)
		return;

	pci_dev_data = priv->pci_dev_data;

	/* Disabling SR-IOV is not allowed while there are active vf's */
	if (mlx4_is_master(dev)) {
		active_vfs = mlx4_how_many_lives_vf(dev);
		if (active_vfs) {
			pr_warn("Removing PF when there are active VF's !!\n");
			pr_warn("Will not disable SR-IOV.\n");
		}
	}
	mlx4_stop_sense(dev);
	mlx4_unregister_device(dev);

	for (p = 1; p <= dev->caps.num_ports; p++) {
		mlx4_cleanup_port_info(&priv->port[p]);
		mlx4_CLOSE_PORT(dev, p);
	}

	if (mlx4_is_master(dev))
		mlx4_free_resource_tracker(dev,
					   RES_TR_FREE_SLAVES_ONLY);

	mlx4_cleanup_counters_table(dev);
	mlx4_cleanup_qp_table(dev);
	mlx4_cleanup_srq_table(dev);
	mlx4_cleanup_cq_table(dev);
	mlx4_cmd_use_polling(dev);
	mlx4_cleanup_eq_table(dev);
	mlx4_cleanup_mcg_table(dev);
	mlx4_cleanup_mr_table(dev);
	mlx4_cleanup_xrcd_table(dev);
	mlx4_cleanup_pd_table(dev);

	if (mlx4_is_master(dev))
		mlx4_free_resource_tracker(dev,
					   RES_TR_FREE_STRUCTS_ONLY);

	iounmap(priv->kar);
	mlx4_uar_free(dev, &priv->driver_uar);
	mlx4_cleanup_uar_table(dev);
	if (!mlx4_is_slave(dev))
		mlx4_clear_steering(dev);
	mlx4_free_eq_table(dev);
	if (mlx4_is_master(dev))
		mlx4_multi_func_cleanup(dev);
	mlx4_close_hca(dev);
	mlx4_close_fw(dev);
	if (mlx4_is_slave(dev))
		mlx4_multi_func_cleanup(dev);
	mlx4_cmd_cleanup(dev, MLX4_CMD_CLEANUP_ALL);

	if (dev->flags & MLX4_FLAG_MSI_X)
		pci_disable_msix(pdev);
	if (dev->flags & MLX4_FLAG_SRIOV && !active_vfs) {
		mlx4_warn(dev, "Disabling SR-IOV\n");
		pci_disable_sriov(pdev);
		dev->flags &= ~MLX4_FLAG_SRIOV;
		dev->num_vfs = 0;
	}

	if (!mlx4_is_slave(dev))
		mlx4_free_ownership(dev);

	kfree(dev->caps.qp0_qkey);
	kfree(dev->caps.qp0_tunnel);
	kfree(dev->caps.qp0_proxy);
	kfree(dev->caps.qp1_tunnel);
	kfree(dev->caps.qp1_proxy);
	kfree(dev->dev_vfs);

	memset(priv, 0, sizeof(*priv));
	priv->pci_dev_data = pci_dev_data;
	priv->removed = 1;
}
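
/* mlx4_unload_one() tears down HCA state but deliberately leaves the PCI
 * regions and device enabled, and keeps priv around (with priv->removed
 * set) so mlx4_restart_one() can rebuild the device in place.  Full
 * release of the PCI resources happens only in mlx4_remove_one().
 */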
static void mlx4_remove_one(struct pci_dev *pdev)
{
	struct mlx4_dev *dev = pci_get_drvdata(pdev);
	struct mlx4_priv *priv = mlx4_priv(dev);

	mlx4_unload_one(pdev);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	kfree(priv);
	pci_set_drvdata(pdev, NULL);
}
int mlx4_restart_one(struct pci_dev *pdev)
{
	struct mlx4_dev *dev = pci_get_drvdata(pdev);
	struct mlx4_priv *priv = mlx4_priv(dev);
	int nvfs[MLX4_MAX_PORTS + 1] = {0, 0, 0};
	int pci_dev_data, err, total_vfs;

	pci_dev_data = priv->pci_dev_data;
	total_vfs = dev->num_vfs;
	memcpy(nvfs, dev->nvfs, sizeof(dev->nvfs));

	mlx4_unload_one(pdev);
	err = mlx4_load_one(pdev, pci_dev_data, total_vfs, nvfs, priv);
	if (err)
		mlx4_err(dev, "%s: ERROR: mlx4_load_one failed, pci_name=%s, err=%d\n",
			 __func__, pci_name(pdev), err);

	return err;
}
static const struct pci_device_id mlx4_pci_table[] = {
	/* MT25408 "Hermon" SDR */
	{ PCI_VDEVICE(MELLANOX, 0x6340), MLX4_PCI_DEV_FORCE_SENSE_PORT },
	/* MT25408 "Hermon" DDR */
	{ PCI_VDEVICE(MELLANOX, 0x634a), MLX4_PCI_DEV_FORCE_SENSE_PORT },
	/* MT25408 "Hermon" QDR */
	{ PCI_VDEVICE(MELLANOX, 0x6354), MLX4_PCI_DEV_FORCE_SENSE_PORT },
	/* MT25408 "Hermon" DDR PCIe gen2 */
	{ PCI_VDEVICE(MELLANOX, 0x6732), MLX4_PCI_DEV_FORCE_SENSE_PORT },
	/* MT25408 "Hermon" QDR PCIe gen2 */
	{ PCI_VDEVICE(MELLANOX, 0x673c), MLX4_PCI_DEV_FORCE_SENSE_PORT },
	/* MT25408 "Hermon" EN 10GigE */
	{ PCI_VDEVICE(MELLANOX, 0x6368), MLX4_PCI_DEV_FORCE_SENSE_PORT },
	/* MT25408 "Hermon" EN 10GigE PCIe gen2 */
	{ PCI_VDEVICE(MELLANOX, 0x6750), MLX4_PCI_DEV_FORCE_SENSE_PORT },
	/* MT25458 ConnectX EN 10GBASE-T 10GigE */
	{ PCI_VDEVICE(MELLANOX, 0x6372), MLX4_PCI_DEV_FORCE_SENSE_PORT },
	/* MT25458 ConnectX EN 10GBASE-T+Gen2 10GigE */
	{ PCI_VDEVICE(MELLANOX, 0x675a), MLX4_PCI_DEV_FORCE_SENSE_PORT },
	/* MT26468 ConnectX EN 10GigE PCIe gen2 */
	{ PCI_VDEVICE(MELLANOX, 0x6764), MLX4_PCI_DEV_FORCE_SENSE_PORT },
	/* MT26438 ConnectX EN 40GigE PCIe gen2 5GT/s */
	{ PCI_VDEVICE(MELLANOX, 0x6746), MLX4_PCI_DEV_FORCE_SENSE_PORT },
	/* MT26478 ConnectX2 40GigE PCIe gen2 */
	{ PCI_VDEVICE(MELLANOX, 0x676e), MLX4_PCI_DEV_FORCE_SENSE_PORT },
	/* MT25400 Family [ConnectX-2 Virtual Function] */
	{ PCI_VDEVICE(MELLANOX, 0x1002), MLX4_PCI_DEV_IS_VF },
	/* MT27500 Family [ConnectX-3] */
	{ PCI_VDEVICE(MELLANOX, 0x1003), 0 },
	/* MT27500 Family [ConnectX-3 Virtual Function] */
	{ PCI_VDEVICE(MELLANOX, 0x1004), MLX4_PCI_DEV_IS_VF },
	{ PCI_VDEVICE(MELLANOX, 0x1005), 0 }, /* MT27510 Family */
	{ PCI_VDEVICE(MELLANOX, 0x1006), 0 }, /* MT27511 Family */
	{ PCI_VDEVICE(MELLANOX, 0x1007), 0 }, /* MT27520 Family */
	{ PCI_VDEVICE(MELLANOX, 0x1008), 0 }, /* MT27521 Family */
	{ PCI_VDEVICE(MELLANOX, 0x1009), 0 }, /* MT27530 Family */
	{ PCI_VDEVICE(MELLANOX, 0x100a), 0 }, /* MT27531 Family */
	{ PCI_VDEVICE(MELLANOX, 0x100b), 0 }, /* MT27540 Family */
	{ PCI_VDEVICE(MELLANOX, 0x100c), 0 }, /* MT27541 Family */
	{ PCI_VDEVICE(MELLANOX, 0x100d), 0 }, /* MT27550 Family */
	{ PCI_VDEVICE(MELLANOX, 0x100e), 0 }, /* MT27551 Family */
	{ PCI_VDEVICE(MELLANOX, 0x100f), 0 }, /* MT27560 Family */
	{ PCI_VDEVICE(MELLANOX, 0x1010), 0 }, /* MT27561 Family */
	{ 0, }
};

MODULE_DEVICE_TABLE(pci, mlx4_pci_table);
static pci_ers_result_t mlx4_pci_err_detected(struct pci_dev *pdev,
					      pci_channel_state_t state)
{
	mlx4_unload_one(pdev);

	return state == pci_channel_io_perm_failure ?
		PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_NEED_RESET;
}

static pci_ers_result_t mlx4_pci_slot_reset(struct pci_dev *pdev)
{
	struct mlx4_dev *dev = pci_get_drvdata(pdev);
	struct mlx4_priv *priv = mlx4_priv(dev);
	int ret;

	ret = __mlx4_init_one(pdev, priv->pci_dev_data, priv);

	return ret ? PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_RECOVERED;
}
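
/* AER recovery flow: on a reported PCI channel error the core calls
 * .error_detected, which unloads the HCA state and asks for a slot reset
 * unless the failure is permanent; after the reset, .slot_reset re-runs
 * the normal init path and reports whether the device recovered.
 */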
static const struct pci_error_handlers mlx4_err_handler = {
	.error_detected = mlx4_pci_err_detected,
	.slot_reset     = mlx4_pci_slot_reset,
};

static struct pci_driver mlx4_driver = {
	.name		= DRV_NAME,
	.id_table	= mlx4_pci_table,
	.probe		= mlx4_init_one,
	.shutdown	= mlx4_unload_one,
	.remove		= mlx4_remove_one,
	.err_handler	= &mlx4_err_handler,
};
static int __init mlx4_verify_params(void)
{
	if ((log_num_mac < 0) || (log_num_mac > 7)) {
		pr_warn("mlx4_core: bad num_mac: %d\n", log_num_mac);
		return -1;
	}

	if (log_num_vlan != 0)
		pr_warn("mlx4_core: log_num_vlan - obsolete module param, using %d\n",
			MLX4_LOG_NUM_VLANS);

	if (use_prio != 0)
		pr_warn("mlx4_core: use_prio - obsolete module param, ignored\n");

	if ((log_mtts_per_seg < 1) || (log_mtts_per_seg > 7)) {
		pr_warn("mlx4_core: bad log_mtts_per_seg: %d\n",
			log_mtts_per_seg);
		return -1;
	}

	/* Check if module param for ports type has legal combination */
	if (port_type_array[0] == false && port_type_array[1] == true) {
		pr_warn("Module parameter configuration ETH/IB is not supported. Switching to default configuration IB/IB\n");
		port_type_array[0] = true;
	}

	if (mlx4_log_num_mgm_entry_size != -1 &&
	    (mlx4_log_num_mgm_entry_size < MLX4_MIN_MGM_LOG_ENTRY_SIZE ||
	     mlx4_log_num_mgm_entry_size > MLX4_MAX_MGM_LOG_ENTRY_SIZE)) {
		pr_warn("mlx4_core: mlx4_log_num_mgm_entry_size (%d) not in legal range (-1 or %d..%d)\n",
			mlx4_log_num_mgm_entry_size,
			MLX4_MIN_MGM_LOG_ENTRY_SIZE,
			MLX4_MAX_MGM_LOG_ENTRY_SIZE);
		return -1;
	}

	return 0;
}
static int __init mlx4_init(void)
{
	int ret;

	if (mlx4_verify_params())
		return -EINVAL;

	mlx4_catas_init();

	mlx4_wq = create_singlethread_workqueue("mlx4");
	if (!mlx4_wq)
		return -ENOMEM;

	ret = pci_register_driver(&mlx4_driver);
	if (ret < 0)
		destroy_workqueue(mlx4_wq);
	return ret < 0 ? ret : 0;
}

static void __exit mlx4_cleanup(void)
{
	pci_unregister_driver(&mlx4_driver);
	destroy_workqueue(mlx4_wq);
}

module_init(mlx4_init);
module_exit(mlx4_cleanup);