 * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved.
 * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc. All rights reserved.
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/pci.h>
#include <linux/errno.h>

#include <linux/mlx4/cmd.h>
#include <linux/mlx4/device.h>
#include <linux/semaphore.h>
#include <rdma/ib_smi.h>
#include <linux/delay.h>
#define CMD_POLL_TOKEN 0xffff
#define INBOX_MASK     0xffffffffffffff00ULL

#define CMD_CHAN_VER 1
#define CMD_CHAN_IF_REV 1
        /* command completed successfully: */
        /* Internal error (such as a bus error) occurred while processing command: */
        CMD_STAT_INTERNAL_ERR   = 0x01,
        /* Operation/command not supported or opcode modifier not supported: */
        CMD_STAT_BAD_OP         = 0x02,
        /* Parameter not supported or parameter out of range: */
        CMD_STAT_BAD_PARAM      = 0x03,
        /* System not enabled or bad system state: */
        CMD_STAT_BAD_SYS_STATE  = 0x04,
        /* Attempt to access reserved or unallocated resource: */
        CMD_STAT_BAD_RESOURCE   = 0x05,
        /* Requested resource is currently executing a command, or is otherwise busy: */
        CMD_STAT_RESOURCE_BUSY  = 0x06,
        /* Required capability exceeds device limits: */
        CMD_STAT_EXCEED_LIM     = 0x08,
        /* Resource is not in the appropriate state or ownership: */
        CMD_STAT_BAD_RES_STATE  = 0x09,
        /* Index out of range: */
        CMD_STAT_BAD_INDEX      = 0x0a,
        /* FW image corrupted: */
        CMD_STAT_BAD_NVMEM      = 0x0b,
        /* Error in ICM mapping (e.g. not enough auxiliary ICM pages to execute command): */
        CMD_STAT_ICM_ERROR      = 0x0c,
        /* Attempt to modify a QP/EE which is not in the presumed state: */
        CMD_STAT_BAD_QP_STATE   = 0x10,
        /* Bad segment parameters (Address/Size): */
        CMD_STAT_BAD_SEG_PARAM  = 0x20,
        /* Memory Region has Memory Windows bound to it: */
        CMD_STAT_REG_BOUND      = 0x21,
        /* HCA local attached memory not present: */
        CMD_STAT_LAM_NOT_PRE    = 0x22,
        /* Bad management packet (silently discarded): */
        CMD_STAT_BAD_PKT        = 0x30,
        /* More outstanding CQEs in CQ than new CQ size: */
        CMD_STAT_BAD_SIZE       = 0x40,
        /* Multi Function device support required: */
        CMD_STAT_MULTI_FUNC_REQ = 0x50,
        HCR_IN_PARAM_OFFSET     = 0x00,
        HCR_IN_MODIFIER_OFFSET  = 0x08,
        HCR_OUT_PARAM_OFFSET    = 0x0c,
        HCR_TOKEN_OFFSET        = 0x14,
        HCR_STATUS_OFFSET       = 0x18,
        HCR_OPMOD_SHIFT         = 12,
        GO_BIT_TIMEOUT_MSECS    = 10000
enum mlx4_vlan_transition {
        MLX4_VLAN_TRANSITION_VST_VST = 0,
        MLX4_VLAN_TRANSITION_VST_VGT = 1,
        MLX4_VLAN_TRANSITION_VGT_VST = 2,
        MLX4_VLAN_TRANSITION_VGT_VGT = 3,
struct mlx4_cmd_context {
        struct completion       done;

static int mlx4_master_process_vhcr(struct mlx4_dev *dev, int slave,
                                    struct mlx4_vhcr_cmd *in_vhcr);
static int mlx4_status_to_errno(u8 status)
        static const int trans_table[] = {
                [CMD_STAT_INTERNAL_ERR]   = -EIO,
                [CMD_STAT_BAD_OP]         = -EPERM,
                [CMD_STAT_BAD_PARAM]      = -EINVAL,
                [CMD_STAT_BAD_SYS_STATE]  = -ENXIO,
                [CMD_STAT_BAD_RESOURCE]   = -EBADF,
                [CMD_STAT_RESOURCE_BUSY]  = -EBUSY,
                [CMD_STAT_EXCEED_LIM]     = -ENOMEM,
                [CMD_STAT_BAD_RES_STATE]  = -EBADF,
                [CMD_STAT_BAD_INDEX]      = -EBADF,
                [CMD_STAT_BAD_NVMEM]      = -EFAULT,
                [CMD_STAT_ICM_ERROR]      = -ENFILE,
                [CMD_STAT_BAD_QP_STATE]   = -EINVAL,
                [CMD_STAT_BAD_SEG_PARAM]  = -EFAULT,
                [CMD_STAT_REG_BOUND]      = -EBUSY,
                [CMD_STAT_LAM_NOT_PRE]    = -EAGAIN,
                [CMD_STAT_BAD_PKT]        = -EINVAL,
                [CMD_STAT_BAD_SIZE]       = -ENOMEM,
                [CMD_STAT_MULTI_FUNC_REQ] = -EACCES,

        if (status >= ARRAY_SIZE(trans_table) ||
            (status != CMD_STAT_OK && trans_table[status] == 0))

        return trans_table[status];
static u8 mlx4_errno_to_status(int errno)
                return CMD_STAT_BAD_OP;
                return CMD_STAT_BAD_PARAM;
                return CMD_STAT_BAD_SYS_STATE;
                return CMD_STAT_RESOURCE_BUSY;
                return CMD_STAT_EXCEED_LIM;
                return CMD_STAT_ICM_ERROR;
                return CMD_STAT_INTERNAL_ERR;
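/*
 * Illustrative note (not in the original source): mlx4_errno_to_status()
 * is the approximate inverse of mlx4_status_to_errno() above; the switch
 * cases elided here map e.g. -EPERM back to CMD_STAT_BAD_OP and -EINVAL
 * back to CMD_STAT_BAD_PARAM, with CMD_STAT_INTERNAL_ERR as the default.
 * The mapping is lossy: -EINVAL can come from CMD_STAT_BAD_PARAM,
 * CMD_STAT_BAD_QP_STATE or CMD_STAT_BAD_PKT, so a round trip is not
 * guaranteed to return the original status.
 */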
static int mlx4_internal_err_ret_value(struct mlx4_dev *dev, u16 op,
        case MLX4_CMD_UNMAP_ICM:
        case MLX4_CMD_UNMAP_ICM_AUX:
        case MLX4_CMD_UNMAP_FA:
        case MLX4_CMD_2RST_QP:
        case MLX4_CMD_HW2SW_EQ:
        case MLX4_CMD_HW2SW_CQ:
        case MLX4_CMD_HW2SW_SRQ:
        case MLX4_CMD_HW2SW_MPT:
        case MLX4_CMD_CLOSE_HCA:
        case MLX4_QP_FLOW_STEERING_DETACH:
        case MLX4_CMD_FREE_RES:
        case MLX4_CMD_CLOSE_PORT:

        case MLX4_CMD_QP_ATTACH:
                /* In the detach case, return success */
                if (op_modifier == 0)
                return mlx4_status_to_errno(CMD_STAT_INTERNAL_ERR);

        return mlx4_status_to_errno(CMD_STAT_INTERNAL_ERR);
static int mlx4_closing_cmd_fatal_error(u16 op, u8 fw_status)
        /* Any error during the closing commands below is considered fatal */
        if (op == MLX4_CMD_CLOSE_HCA ||
            op == MLX4_CMD_HW2SW_EQ ||
            op == MLX4_CMD_HW2SW_CQ ||
            op == MLX4_CMD_2RST_QP ||
            op == MLX4_CMD_HW2SW_SRQ ||
            op == MLX4_CMD_SYNC_TPT ||
            op == MLX4_CMD_UNMAP_ICM ||
            op == MLX4_CMD_UNMAP_ICM_AUX ||
            op == MLX4_CMD_UNMAP_FA)

        /* An error on MLX4_CMD_HW2SW_MPT is fatal except when the fw status
         * equals CMD_STAT_REG_BOUND.  That status indicates that the memory
         * region has memory windows bound to it, which may result from
         * invalid user space usage and is not fatal.
         */
        if (op == MLX4_CMD_HW2SW_MPT && fw_status != CMD_STAT_REG_BOUND)
static int mlx4_cmd_reset_flow(struct mlx4_dev *dev, u16 op, u8 op_modifier,
        /* Only if the reset flow is really active is the return code based
         * on the command; otherwise the current error code is returned.
         */
        if (mlx4_internal_err_reset) {
                mlx4_enter_error_state(dev->persist);
                err = mlx4_internal_err_ret_value(dev, op, op_modifier);
static int comm_pending(struct mlx4_dev *dev)
        struct mlx4_priv *priv = mlx4_priv(dev);
        u32 status = readl(&priv->mfunc.comm->slave_read);

        return (swab32(status) >> 31) != priv->cmd.comm_toggle;
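/*
 * Illustrative note (not in the original source): the comm channel is a
 * single 32-bit word per slave whose top bit is a toggle.  The slave flips
 * its cached priv->cmd.comm_toggle each time it posts a command, and the
 * master echoes the new toggle in slave_read once it has consumed the
 * command.  comm_pending() therefore returns true exactly while a posted
 * command has not yet been acknowledged.
 */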
static int mlx4_comm_cmd_post(struct mlx4_dev *dev, u8 cmd, u16 param)
        struct mlx4_priv *priv = mlx4_priv(dev);

        /* To avoid writing to unknown addresses after the device state was
         * changed to internal error and the function was reset,
         * check the INTERNAL_ERROR flag which is updated under
         * device_state_mutex lock.
         */
        mutex_lock(&dev->persist->device_state_mutex);

        if (dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR) {
                mutex_unlock(&dev->persist->device_state_mutex);

        priv->cmd.comm_toggle ^= 1;
        val = param | (cmd << 16) | (priv->cmd.comm_toggle << 31);
        __raw_writel((__force u32) cpu_to_be32(val),
                     &priv->mfunc.comm->slave_write);

        mutex_unlock(&dev->persist->device_state_mutex);
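/*
 * Worked example (illustrative, not in the original source): assuming the
 * encoding above and MLX4_COMM_CMD_VHCR2 == 0x3, posting cmd = 0x3 with
 * param = 0x1234 while the new toggle is 1 yields
 *
 *     val = 0x1234 | (0x3 << 16) | (1U << 31) = 0x80031234
 *
 * i.e. bits 15:0 carry the parameter, bits 23:16 the command and bit 31
 * the toggle; the value is converted to big-endian before the MMIO write.
 */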
static int mlx4_comm_cmd_poll(struct mlx4_dev *dev, u8 cmd, u16 param,
                              unsigned long timeout)
        struct mlx4_priv *priv = mlx4_priv(dev);
        int ret_from_pending = 0;

        /* First, verify that the master reports correct status */
        if (comm_pending(dev)) {
                mlx4_warn(dev, "Communication channel is not idle - my toggle is %d (cmd:0x%x)\n",
                          priv->cmd.comm_toggle, cmd);

        down(&priv->cmd.poll_sem);
        if (mlx4_comm_cmd_post(dev, cmd, param)) {
                /* Only in case the device state is INTERNAL_ERROR does
                 * mlx4_comm_cmd_post return with an error
                 */
                err = mlx4_status_to_errno(CMD_STAT_INTERNAL_ERR);

        end = msecs_to_jiffies(timeout) + jiffies;
        while (comm_pending(dev) && time_before(jiffies, end))
        ret_from_pending = comm_pending(dev);
        if (ret_from_pending) {
                /* Check if the slave is trying to boot in the middle of the
                 * FLR process.  The only non-zero result in the RESET
                 * command is MLX4_DELAY_RESET_SLAVE.
                 */
                if (cmd == MLX4_COMM_CMD_RESET) {
                        err = MLX4_DELAY_RESET_SLAVE;

                mlx4_warn(dev, "Communication channel command 0x%x timed out\n",
                err = mlx4_status_to_errno(CMD_STAT_INTERNAL_ERR);

        mlx4_enter_error_state(dev->persist);

        up(&priv->cmd.poll_sem);
static int mlx4_comm_cmd_wait(struct mlx4_dev *dev, u8 vhcr_cmd,
                              u16 param, u16 op, unsigned long timeout)
        struct mlx4_cmd *cmd = &mlx4_priv(dev)->cmd;
        struct mlx4_cmd_context *context;

        down(&cmd->event_sem);

        spin_lock(&cmd->context_lock);
        BUG_ON(cmd->free_head < 0);
        context = &cmd->context[cmd->free_head];
        context->token += cmd->token_mask + 1;
        cmd->free_head = context->next;
        spin_unlock(&cmd->context_lock);

        reinit_completion(&context->done);

        if (mlx4_comm_cmd_post(dev, vhcr_cmd, param)) {
                /* Only in case the device state is INTERNAL_ERROR does
                 * mlx4_comm_cmd_post return with an error
                 */
                err = mlx4_status_to_errno(CMD_STAT_INTERNAL_ERR);

        if (!wait_for_completion_timeout(&context->done,
                                         msecs_to_jiffies(timeout))) {
                mlx4_warn(dev, "communication channel command 0x%x (op=0x%x) timed out\n",

        err = context->result;
        if (err && context->fw_status != CMD_STAT_MULTI_FUNC_REQ) {
                mlx4_err(dev, "command 0x%x failed: fw status = 0x%x\n",
                         vhcr_cmd, context->fw_status);
                if (mlx4_closing_cmd_fatal_error(op, context->fw_status))

        /* Wait for the comm channel to become ready.  This is necessary to
         * prevent a race when switching between event and polling modes.
         * Skip this section if the device is in an INTERNAL_ERROR state;
         * in that state, no commands are sent via the comm channel until
         * the device has returned from reset.
         */
        if (!(dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR)) {
                end = msecs_to_jiffies(timeout) + jiffies;
                while (comm_pending(dev) && time_before(jiffies, end))

        err = mlx4_status_to_errno(CMD_STAT_INTERNAL_ERR);
        mlx4_enter_error_state(dev->persist);

        spin_lock(&cmd->context_lock);
        context->next = cmd->free_head;
        cmd->free_head = context - cmd->context;
        spin_unlock(&cmd->context_lock);
int mlx4_comm_cmd(struct mlx4_dev *dev, u8 cmd, u16 param,
                  u16 op, unsigned long timeout)
        if (dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR)
                return mlx4_status_to_errno(CMD_STAT_INTERNAL_ERR);

        if (mlx4_priv(dev)->cmd.use_events)
                return mlx4_comm_cmd_wait(dev, cmd, param, op, timeout);
        return mlx4_comm_cmd_poll(dev, cmd, param, timeout);
static int cmd_pending(struct mlx4_dev *dev)
        if (pci_channel_offline(dev->persist->pdev))

        status = readl(mlx4_priv(dev)->cmd.hcr + HCR_STATUS_OFFSET);

        return (status & swab32(1 << HCR_GO_BIT)) ||
                (mlx4_priv(dev)->cmd.toggle ==
                 !!(status & swab32(1 << HCR_T_BIT)));
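/*
 * Illustrative note (not in the original source): the HCR status word is
 * big-endian in MMIO space, so on a little-endian host swab32(1 << bit)
 * builds the byte-swapped mask that matches the raw readl() value.  The
 * command register is busy while either the firmware still owns it (the
 * go bit is set) or the hardware toggle bit still equals our cached
 * cmd.toggle, meaning our last command has not been consumed yet.
 */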
static int mlx4_cmd_post(struct mlx4_dev *dev, u64 in_param, u64 out_param,
                         u32 in_modifier, u8 op_modifier, u16 op, u16 token,
        struct mlx4_cmd *cmd = &mlx4_priv(dev)->cmd;
        u32 __iomem *hcr = cmd->hcr;

        mutex_lock(&dev->persist->device_state_mutex);
        /* To avoid writing to unknown addresses after the device state was
         * changed to internal error and the chip was reset,
         * check the INTERNAL_ERROR flag which is updated under
         * device_state_mutex lock.
         */
        if (pci_channel_offline(dev->persist->pdev) ||
            (dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR)) {
                /*
                 * Device is going through error recovery
                 * and cannot accept commands.
                 */

        end += msecs_to_jiffies(GO_BIT_TIMEOUT_MSECS);

        while (cmd_pending(dev)) {
                if (pci_channel_offline(dev->persist->pdev)) {
                        /*
                         * Device is going through error recovery
                         * and cannot accept commands.
                         */

                if (time_after_eq(jiffies, end)) {
                        mlx4_err(dev, "%s:cmd_pending failed\n", __func__);

        /*
         * We use writel (instead of something like memcpy_toio)
         * because writes of less than 32 bits to the HCR don't work
         * (and some architectures such as ia64 implement memcpy_toio
         * in terms of writeb).
         */
        __raw_writel((__force u32) cpu_to_be32(in_param >> 32),           hcr + 0);
        __raw_writel((__force u32) cpu_to_be32(in_param & 0xfffffffful),  hcr + 1);
        __raw_writel((__force u32) cpu_to_be32(in_modifier),              hcr + 2);
        __raw_writel((__force u32) cpu_to_be32(out_param >> 32),          hcr + 3);
        __raw_writel((__force u32) cpu_to_be32(out_param & 0xfffffffful), hcr + 4);
        __raw_writel((__force u32) cpu_to_be32(token << 16),              hcr + 5);

        /* __raw_writel may not order writes. */

        __raw_writel((__force u32) cpu_to_be32((1 << HCR_GO_BIT)                |
                                               (cmd->toggle << HCR_T_BIT)       |
                                               (event ? (1 << HCR_E_BIT) : 0)   |
                                               (op_modifier << HCR_OPMOD_SHIFT) |

        /*
         * Make sure that our HCR writes don't get mixed in with
         * writes from another CPU starting a FW command.
         */

        cmd->toggle = cmd->toggle ^ 1;

        mlx4_warn(dev, "Could not post command 0x%x: ret=%d, in_param=0x%llx, in_mod=0x%x, op_mod=0x%x\n",
                  op, ret, in_param, in_modifier, op_modifier);
        mutex_unlock(&dev->persist->device_state_mutex);
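/*
 * Illustrative sketch (not in the original source): the writes above lay
 * out the 28-byte HCR as consecutive big-endian 32-bit words, matching
 * the offsets defined earlier in this file:
 *
 *     hcr + 0  (0x00)  in_param[63:32]
 *     hcr + 1  (0x04)  in_param[31:0]
 *     hcr + 2  (0x08)  in_modifier
 *     hcr + 3  (0x0c)  out_param[63:32]
 *     hcr + 4  (0x10)  out_param[31:0]
 *     hcr + 5  (0x14)  token << 16
 *     hcr + 6  (0x18)  go | toggle | event | op_modifier | opcode
 *
 * The go/toggle word is written last, so the firmware never observes a
 * half-built command; the barrier noted above enforces that ordering.
 */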
static int mlx4_slave_cmd(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
                          int out_is_imm, u32 in_modifier, u8 op_modifier,
                          u16 op, unsigned long timeout)
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct mlx4_vhcr_cmd *vhcr = priv->mfunc.vhcr;

        mutex_lock(&priv->cmd.slave_cmd_mutex);

        vhcr->in_param = cpu_to_be64(in_param);
        vhcr->out_param = out_param ? cpu_to_be64(*out_param) : 0;
        vhcr->in_modifier = cpu_to_be32(in_modifier);
        vhcr->opcode = cpu_to_be16((((u16) op_modifier) << 12) | (op & 0xfff));
        vhcr->token = cpu_to_be16(CMD_POLL_TOKEN);

        vhcr->flags = !!(priv->cmd.use_events) << 6;

        if (mlx4_is_master(dev)) {
                ret = mlx4_master_process_vhcr(dev, dev->caps.function, vhcr);
                                be64_to_cpu(vhcr->out_param);
                                mlx4_err(dev, "response expected while output mailbox is NULL for command 0x%x\n",
                                vhcr->status = CMD_STAT_BAD_PARAM;

                ret = mlx4_status_to_errno(vhcr->status);
                    dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR)
                        ret = mlx4_internal_err_ret_value(dev, op, op_modifier);
                ret = mlx4_comm_cmd(dev, MLX4_COMM_CMD_VHCR_POST, 0, op,
                                    MLX4_COMM_TIME + timeout);
                                be64_to_cpu(vhcr->out_param);
                                mlx4_err(dev, "response expected while output mailbox is NULL for command 0x%x\n",
                                vhcr->status = CMD_STAT_BAD_PARAM;

                        ret = mlx4_status_to_errno(vhcr->status);
                        if (dev->persist->state &
                            MLX4_DEVICE_STATE_INTERNAL_ERROR)
                                ret = mlx4_internal_err_ret_value(dev, op,
                        mlx4_err(dev, "failed execution of VHCR_POST command opcode 0x%x\n", op);

        mutex_unlock(&priv->cmd.slave_cmd_mutex);
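/*
 * Worked example (illustrative, not in the original source): the VHCR
 * opcode field packs the opcode modifier into the top four bits, mirroring
 * HCR_OPMOD_SHIFT.  For a hypothetical op = 0x024 with op_modifier = 0x1:
 *
 *     opcode = (0x1 << 12) | (0x024 & 0xfff) = 0x1024
 *
 * The master side reverses this in mlx4_master_process_vhcr(), taking
 * op = opcode & 0xfff and op_modifier = opcode >> 12.
 */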
static int mlx4_cmd_poll(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
                         int out_is_imm, u32 in_modifier, u8 op_modifier,
                         u16 op, unsigned long timeout)
        struct mlx4_priv *priv = mlx4_priv(dev);
        void __iomem *hcr = priv->cmd.hcr;

        down(&priv->cmd.poll_sem);

        if (dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR) {
                /*
                 * Device is going through error recovery
                 * and cannot accept commands.
                 */
                err = mlx4_internal_err_ret_value(dev, op, op_modifier);

        if (out_is_imm && !out_param) {
                mlx4_err(dev, "response expected while output mailbox is NULL for command 0x%x\n",

        err = mlx4_cmd_post(dev, in_param, out_param ? *out_param : 0,
                            in_modifier, op_modifier, op, CMD_POLL_TOKEN, 0);

        end = msecs_to_jiffies(timeout) + jiffies;
        while (cmd_pending(dev) && time_before(jiffies, end)) {
                if (pci_channel_offline(dev->persist->pdev)) {
                        /*
                         * Device is going through error recovery
                         * and cannot accept commands.
                         */

                if (dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR) {
                        err = mlx4_internal_err_ret_value(dev, op, op_modifier);

        if (cmd_pending(dev)) {
                mlx4_warn(dev, "command 0x%x timed out (go bit not cleared)\n",

                        (u64) be32_to_cpu((__force __be32)
                                          __raw_readl(hcr + HCR_OUT_PARAM_OFFSET)) << 32 |
                        (u64) be32_to_cpu((__force __be32)
                                          __raw_readl(hcr + HCR_OUT_PARAM_OFFSET + 4));
        stat = be32_to_cpu((__force __be32)
                           __raw_readl(hcr + HCR_STATUS_OFFSET)) >> 24;
        err = mlx4_status_to_errno(stat);
                mlx4_err(dev, "command 0x%x failed: fw status = 0x%x\n",
                if (mlx4_closing_cmd_fatal_error(op, stat))

        err = mlx4_cmd_reset_flow(dev, op, op_modifier, err);

        up(&priv->cmd.poll_sem);
void mlx4_cmd_event(struct mlx4_dev *dev, u16 token, u8 status, u64 out_param)
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct mlx4_cmd_context *context =
                &priv->cmd.context[token & priv->cmd.token_mask];

        /* previously timed out command completing at long last */
        if (token != context->token)

        context->fw_status = status;
        context->result    = mlx4_status_to_errno(status);
        context->out_param = out_param;

        complete(&context->done);
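/*
 * Illustrative note (not in the original source): the low bits of the
 * token (token & token_mask) select the context slot, while the high bits
 * act as a generation counter.  A completion EQE that arrives after the
 * command already timed out and its slot was recycled carries a stale
 * generation, fails the token != context->token check above, and is
 * silently dropped instead of completing the wrong command.
 */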
static int mlx4_cmd_wait(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
                         int out_is_imm, u32 in_modifier, u8 op_modifier,
                         u16 op, unsigned long timeout)
        struct mlx4_cmd *cmd = &mlx4_priv(dev)->cmd;
        struct mlx4_cmd_context *context;

        down(&cmd->event_sem);

        spin_lock(&cmd->context_lock);
        BUG_ON(cmd->free_head < 0);
        context = &cmd->context[cmd->free_head];
        context->token += cmd->token_mask + 1;
        cmd->free_head = context->next;
        spin_unlock(&cmd->context_lock);

        if (out_is_imm && !out_param) {
                mlx4_err(dev, "response expected while output mailbox is NULL for command 0x%x\n",

        reinit_completion(&context->done);

        err = mlx4_cmd_post(dev, in_param, out_param ? *out_param : 0,
                            in_modifier, op_modifier, op, context->token, 1);

        if (!wait_for_completion_timeout(&context->done,
                                         msecs_to_jiffies(timeout))) {
                mlx4_warn(dev, "command 0x%x timed out (go bit not cleared)\n",

        err = context->result;

                /* Since we do not want this error message always displayed
                 * at driver start when there are ConnectX-2 HCAs on the
                 * host, we demote the message for this specific
                 * command/input_mod/opcode_mod/fw-status to debug level.
                 */
                if (op == MLX4_CMD_SET_PORT && in_modifier == 1 &&
                    op_modifier == 0 && context->fw_status == CMD_STAT_BAD_SIZE)
                        mlx4_dbg(dev, "command 0x%x failed: fw status = 0x%x\n",
                                 op, context->fw_status);
                        mlx4_err(dev, "command 0x%x failed: fw status = 0x%x\n",
                                 op, context->fw_status);
                if (dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR)
                        err = mlx4_internal_err_ret_value(dev, op, op_modifier);
                else if (mlx4_closing_cmd_fatal_error(op, context->fw_status))

                *out_param = context->out_param;

        err = mlx4_cmd_reset_flow(dev, op, op_modifier, err);

        spin_lock(&cmd->context_lock);
        context->next = cmd->free_head;
        cmd->free_head = context - cmd->context;
        spin_unlock(&cmd->context_lock);
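/*
 * Illustrative note (not in the original source): free command contexts
 * form a singly linked list threaded through context->next, with
 * cmd->free_head as the list head; event_sem counts free slots so at most
 * max_cmds commands are ever in flight.  Allocation advances the slot's
 * token by token_mask + 1, which bumps the generation bits while leaving
 * the slot-index bits (token & token_mask) unchanged.
 */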
int __mlx4_cmd(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
               int out_is_imm, u32 in_modifier, u8 op_modifier,
               u16 op, unsigned long timeout, int native)
        if (pci_channel_offline(dev->persist->pdev))
                return mlx4_cmd_reset_flow(dev, op, op_modifier, -EIO);

        if (!mlx4_is_mfunc(dev) || (native && mlx4_is_master(dev))) {
                if (dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR)
                        return mlx4_internal_err_ret_value(dev, op,
                if (mlx4_priv(dev)->cmd.use_events)
                        return mlx4_cmd_wait(dev, in_param, out_param,
                                             out_is_imm, in_modifier,
                                             op_modifier, op, timeout);
                return mlx4_cmd_poll(dev, in_param, out_param,
                                     out_is_imm, in_modifier,
                                     op_modifier, op, timeout);

        return mlx4_slave_cmd(dev, in_param, out_param, out_is_imm,
                              in_modifier, op_modifier, op, timeout);
EXPORT_SYMBOL_GPL(__mlx4_cmd);
int mlx4_ARM_COMM_CHANNEL(struct mlx4_dev *dev)
        return mlx4_cmd(dev, 0, 0, 0, MLX4_CMD_ARM_COMM_CHANNEL,
                        MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
static int mlx4_ACCESS_MEM(struct mlx4_dev *dev, u64 master_addr,
                           int slave, u64 slave_addr,
                           int size, int is_read)
        if ((slave_addr & 0xfff) | (master_addr & 0xfff) |
            (slave & ~0x7f) | (size & 0xff)) {
                mlx4_err(dev, "Bad access mem params - slave_addr:0x%llx master_addr:0x%llx slave_id:%d size:%d\n",
                         slave_addr, master_addr, slave, size);

                in_param = (u64) slave | slave_addr;
                out_param = (u64) dev->caps.function | master_addr;

                in_param = (u64) dev->caps.function | master_addr;
                out_param = (u64) slave | slave_addr;

        return mlx4_cmd_imm(dev, in_param, &out_param, size, 0,
                            MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
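/*
 * Worked example (illustrative, not in the original source): the sanity
 * check above requires both addresses to be 4 KB aligned, the slave id to
 * fit in 7 bits and the size to be a multiple of 256, because the address
 * and the function id travel in the same 64-bit parameter.  E.g. slave = 5
 * with slave_addr = 0x200000 packs as
 *
 *     in_param = 0x200000 | 5 = 0x200005
 *
 * and the low byte is recovered on the other side as the function id.
 */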
static int query_pkey_block(struct mlx4_dev *dev, u8 port, u16 index, u16 *pkey,
                            struct mlx4_cmd_mailbox *inbox,
                            struct mlx4_cmd_mailbox *outbox)
        struct ib_smp *in_mad = (struct ib_smp *)(inbox->buf);
        struct ib_smp *out_mad = (struct ib_smp *)(outbox->buf);

        in_mad->attr_mod = cpu_to_be32(index / 32);

        err = mlx4_cmd_box(dev, inbox->dma, outbox->dma, port, 3,
                           MLX4_CMD_MAD_IFC, MLX4_CMD_TIME_CLASS_C,

        for (i = 0; i < 32; ++i)
                pkey[i] = be16_to_cpu(((__be16 *) out_mad->data)[i]);
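/*
 * Illustrative note (not in the original source): a PKeyTable SubnGet MAD
 * returns one 32-entry block, selected by attr_mod = index / 32.  A port
 * with pkey_table_len = 128 therefore needs four MADs (blocks 0-3), which
 * is exactly what get_full_pkey_table() below iterates over in steps
 * of 32.
 */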
static int get_full_pkey_table(struct mlx4_dev *dev, u8 port, u16 *table,
                               struct mlx4_cmd_mailbox *inbox,
                               struct mlx4_cmd_mailbox *outbox)
        for (i = 0; i < dev->caps.pkey_table_len[port]; i += 32) {
                err = query_pkey_block(dev, port, i, table + i, inbox, outbox);
#define PORT_CAPABILITY_LOCATION_IN_SMP 20
#define PORT_STATE_OFFSET 32

static enum ib_port_state vf_port_state(struct mlx4_dev *dev, int port, int vf)
        if (mlx4_get_slave_port_state(dev, vf, port) == SLAVE_PORT_UP)
                return IB_PORT_ACTIVE;
static int mlx4_MAD_IFC_wrapper(struct mlx4_dev *dev, int slave,
                                struct mlx4_vhcr *vhcr,
                                struct mlx4_cmd_mailbox *inbox,
                                struct mlx4_cmd_mailbox *outbox,
                                struct mlx4_cmd_info *cmd)
        struct ib_smp *smp = inbox->buf;
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct ib_smp *outsmp = outbox->buf;
        __be16 *outtab = (__be16 *)(outsmp->data);
        __be32 slave_cap_mask;
        __be64 slave_node_guid;

        port = vhcr->in_modifier;

        /* network-view bit is for driver use only, and should not be passed to FW */
        opcode_modifier = vhcr->op_modifier & ~0x8; /* clear netw view bit */
        network_view = !!(vhcr->op_modifier & 0x8);

        if (smp->base_version == 1 &&
            smp->mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED &&
            smp->class_version == 1) {
                /* host view is paravirtualized */
                if (!network_view && smp->method == IB_MGMT_METHOD_GET) {
                        if (smp->attr_id == IB_SMP_ATTR_PKEY_TABLE) {
                                index = be32_to_cpu(smp->attr_mod);
                                if (port < 1 || port > dev->caps.num_ports)
                                table = kcalloc((dev->caps.pkey_table_len[port] / 32) + 1,
                                                sizeof(*table) * 32, GFP_KERNEL);

                                /* need to get the full pkey table because the paravirtualized
                                 * pkeys may be scattered among several pkey blocks.
                                 */
                                err = get_full_pkey_table(dev, port, table, inbox, outbox);
                                        for (vidx = index * 32; vidx < (index + 1) * 32; ++vidx) {
                                                pidx = priv->virt2phys_pkey[slave][port - 1][vidx];
                                                outtab[vidx % 32] = cpu_to_be16(table[pidx]);

                        if (smp->attr_id == IB_SMP_ATTR_PORT_INFO) {
                                /* get the slave-specific caps */
                                err = mlx4_cmd_box(dev, inbox->dma, outbox->dma,
                                                   vhcr->in_modifier, opcode_modifier,
                                                   vhcr->op, MLX4_CMD_TIME_CLASS_C, MLX4_CMD_NATIVE);
                                /* modify the response for slaves */
                                if (!err && slave != mlx4_master_func_num(dev)) {
                                        u8 *state = outsmp->data + PORT_STATE_OFFSET;

                                        *state = (*state & 0xf0) | vf_port_state(dev, port, slave);
                                        slave_cap_mask = priv->mfunc.master.slave_state[slave].ib_cap_mask[port];
                                        memcpy(outsmp->data + PORT_CAPABILITY_LOCATION_IN_SMP, &slave_cap_mask, 4);

                        if (smp->attr_id == IB_SMP_ATTR_GUID_INFO) {
                                /* compute slave's gid block */
                                smp->attr_mod = cpu_to_be32(slave / 8);

                                err = mlx4_cmd_box(dev, inbox->dma, outbox->dma,
                                                   vhcr->in_modifier, opcode_modifier,
                                                   vhcr->op, MLX4_CMD_TIME_CLASS_C, MLX4_CMD_NATIVE);

                                        /* if needed, move slave gid to index 0 */
                                                       outsmp->data + (slave % 8) * 8, 8);
                                        /* delete all other gids */
                                        memset(outsmp->data + 8, 0, 56);

                        if (smp->attr_id == IB_SMP_ATTR_NODE_INFO) {
                                err = mlx4_cmd_box(dev, inbox->dma, outbox->dma,
                                                   vhcr->in_modifier, opcode_modifier,
                                                   vhcr->op, MLX4_CMD_TIME_CLASS_C, MLX4_CMD_NATIVE);
                                        slave_node_guid = mlx4_get_slave_node_guid(dev, slave);
                                        memcpy(outsmp->data + 12, &slave_node_guid, 8);

        /* Non-privileged VFs are only allowed "host" view LID-routed 'Get' MADs.
         * These are the MADs used by ib verbs (such as ib_query_gids).
         */
        if (slave != mlx4_master_func_num(dev) &&
            !mlx4_vf_smi_enabled(dev, slave, port)) {
                if (!(smp->mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED &&
                      smp->method == IB_MGMT_METHOD_GET) || network_view) {
                        mlx4_err(dev, "Unprivileged slave %d is trying to execute a Subnet MGMT MAD, class 0x%x, method 0x%x, view=%s for attr 0x%x. Rejecting\n",
                                 slave, smp->mgmt_class, smp->method,
                                 network_view ? "Network" : "Host",
                                 be16_to_cpu(smp->attr_id));

        return mlx4_cmd_box(dev, inbox->dma, outbox->dma,
                            vhcr->in_modifier, opcode_modifier,
                            vhcr->op, MLX4_CMD_TIME_CLASS_C, MLX4_CMD_NATIVE);
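/*
 * Illustrative note (not in the original source): GUID paravirtualization
 * relies on GuidInfo blocks holding eight 8-byte GUIDs each.  The wrapper
 * rewrites attr_mod to slave / 8 so the MAD fetches the block containing
 * this function's GUID, then copies the entry at byte offset
 * (slave % 8) * 8 down to index 0 and zeroes the remaining 56 bytes, so
 * each VF sees only its own GUID at index 0.
 */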
static int mlx4_CMD_EPERM_wrapper(struct mlx4_dev *dev, int slave,
                                  struct mlx4_vhcr *vhcr,
                                  struct mlx4_cmd_mailbox *inbox,
                                  struct mlx4_cmd_mailbox *outbox,
                                  struct mlx4_cmd_info *cmd)
int mlx4_DMA_wrapper(struct mlx4_dev *dev, int slave,
                     struct mlx4_vhcr *vhcr,
                     struct mlx4_cmd_mailbox *inbox,
                     struct mlx4_cmd_mailbox *outbox,
                     struct mlx4_cmd_info *cmd)
        in_param = cmd->has_inbox ? (u64) inbox->dma : vhcr->in_param;
        out_param = cmd->has_outbox ? (u64) outbox->dma : vhcr->out_param;
        if (cmd->encode_slave_id) {
                in_param &= 0xffffffffffffff00ll;

        err = __mlx4_cmd(dev, in_param, &out_param, cmd->out_is_imm,
                         vhcr->in_modifier, vhcr->op_modifier, vhcr->op,
                         MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);

        if (cmd->out_is_imm)
                vhcr->out_param = out_param;
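/*
 * Illustrative note (not in the original source): when encode_slave_id is
 * set for a command (e.g. SW2HW_EQ in the table below), the low byte of
 * in_param is cleared above and then, in a line elided from this listing,
 * replaced with the slave's function id, so firmware can attribute the
 * resource to the owning function.
 */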
static struct mlx4_cmd_info cmd_info[] = {
        .opcode = MLX4_CMD_QUERY_FW,
        .out_is_imm = false,
        .encode_slave_id = false,
        .wrapper = mlx4_QUERY_FW_wrapper

        .opcode = MLX4_CMD_QUERY_HCA,
        .out_is_imm = false,
        .encode_slave_id = false,

        .opcode = MLX4_CMD_QUERY_DEV_CAP,
        .out_is_imm = false,
        .encode_slave_id = false,
        .wrapper = mlx4_QUERY_DEV_CAP_wrapper

        .opcode = MLX4_CMD_QUERY_FUNC_CAP,
        .out_is_imm = false,
        .encode_slave_id = false,
        .wrapper = mlx4_QUERY_FUNC_CAP_wrapper

        .opcode = MLX4_CMD_QUERY_ADAPTER,
        .out_is_imm = false,
        .encode_slave_id = false,

        .opcode = MLX4_CMD_INIT_PORT,
        .has_outbox = false,
        .out_is_imm = false,
        .encode_slave_id = false,
        .wrapper = mlx4_INIT_PORT_wrapper

        .opcode = MLX4_CMD_CLOSE_PORT,
        .has_outbox = false,
        .out_is_imm = false,
        .encode_slave_id = false,
        .wrapper = mlx4_CLOSE_PORT_wrapper

        .opcode = MLX4_CMD_QUERY_PORT,
        .out_is_imm = false,
        .encode_slave_id = false,
        .wrapper = mlx4_QUERY_PORT_wrapper

        .opcode = MLX4_CMD_SET_PORT,
        .has_outbox = false,
        .out_is_imm = false,
        .encode_slave_id = false,
        .wrapper = mlx4_SET_PORT_wrapper

        .opcode = MLX4_CMD_MAP_EQ,
        .has_outbox = false,
        .out_is_imm = false,
        .encode_slave_id = false,
        .wrapper = mlx4_MAP_EQ_wrapper

        .opcode = MLX4_CMD_SW2HW_EQ,
        .has_outbox = false,
        .out_is_imm = false,
        .encode_slave_id = true,
        .wrapper = mlx4_SW2HW_EQ_wrapper

        .opcode = MLX4_CMD_HW_HEALTH_CHECK,
        .has_outbox = false,
        .out_is_imm = false,
        .encode_slave_id = false,

        .opcode = MLX4_CMD_NOP,
        .has_outbox = false,
        .out_is_imm = false,
        .encode_slave_id = false,

        .opcode = MLX4_CMD_CONFIG_DEV,
        .out_is_imm = false,
        .encode_slave_id = false,
        .wrapper = mlx4_CONFIG_DEV_wrapper

        .opcode = MLX4_CMD_ALLOC_RES,
        .has_outbox = false,
        .encode_slave_id = false,
        .wrapper = mlx4_ALLOC_RES_wrapper

        .opcode = MLX4_CMD_FREE_RES,
        .has_outbox = false,
        .out_is_imm = false,
        .encode_slave_id = false,
        .wrapper = mlx4_FREE_RES_wrapper

        .opcode = MLX4_CMD_SW2HW_MPT,
        .has_outbox = false,
        .out_is_imm = false,
        .encode_slave_id = true,
        .wrapper = mlx4_SW2HW_MPT_wrapper

        .opcode = MLX4_CMD_QUERY_MPT,
        .out_is_imm = false,
        .encode_slave_id = false,
        .wrapper = mlx4_QUERY_MPT_wrapper

        .opcode = MLX4_CMD_HW2SW_MPT,
        .has_outbox = false,
        .out_is_imm = false,
        .encode_slave_id = false,
        .wrapper = mlx4_HW2SW_MPT_wrapper

        .opcode = MLX4_CMD_READ_MTT,
        .out_is_imm = false,
        .encode_slave_id = false,

        .opcode = MLX4_CMD_WRITE_MTT,
        .has_outbox = false,
        .out_is_imm = false,
        .encode_slave_id = false,
        .wrapper = mlx4_WRITE_MTT_wrapper

        .opcode = MLX4_CMD_SYNC_TPT,
        .has_outbox = false,
        .out_is_imm = false,
        .encode_slave_id = false,

        .opcode = MLX4_CMD_HW2SW_EQ,
        .has_outbox = false,
        .out_is_imm = false,
        .encode_slave_id = true,
        .wrapper = mlx4_HW2SW_EQ_wrapper

        .opcode = MLX4_CMD_QUERY_EQ,
        .out_is_imm = false,
        .encode_slave_id = true,
        .wrapper = mlx4_QUERY_EQ_wrapper

        .opcode = MLX4_CMD_SW2HW_CQ,
        .has_outbox = false,
        .out_is_imm = false,
        .encode_slave_id = true,
        .wrapper = mlx4_SW2HW_CQ_wrapper

        .opcode = MLX4_CMD_HW2SW_CQ,
        .has_outbox = false,
        .out_is_imm = false,
        .encode_slave_id = false,
        .wrapper = mlx4_HW2SW_CQ_wrapper

        .opcode = MLX4_CMD_QUERY_CQ,
        .out_is_imm = false,
        .encode_slave_id = false,
        .wrapper = mlx4_QUERY_CQ_wrapper

        .opcode = MLX4_CMD_MODIFY_CQ,
        .has_outbox = false,
        .encode_slave_id = false,
        .wrapper = mlx4_MODIFY_CQ_wrapper

        .opcode = MLX4_CMD_SW2HW_SRQ,
        .has_outbox = false,
        .out_is_imm = false,
        .encode_slave_id = true,
        .wrapper = mlx4_SW2HW_SRQ_wrapper

        .opcode = MLX4_CMD_HW2SW_SRQ,
        .has_outbox = false,
        .out_is_imm = false,
        .encode_slave_id = false,
        .wrapper = mlx4_HW2SW_SRQ_wrapper

        .opcode = MLX4_CMD_QUERY_SRQ,
        .out_is_imm = false,
        .encode_slave_id = false,
        .wrapper = mlx4_QUERY_SRQ_wrapper

        .opcode = MLX4_CMD_ARM_SRQ,
        .has_outbox = false,
        .out_is_imm = false,
        .encode_slave_id = false,
        .wrapper = mlx4_ARM_SRQ_wrapper

        .opcode = MLX4_CMD_RST2INIT_QP,
        .has_outbox = false,
        .out_is_imm = false,
        .encode_slave_id = true,
        .wrapper = mlx4_RST2INIT_QP_wrapper

        .opcode = MLX4_CMD_INIT2INIT_QP,
        .has_outbox = false,
        .out_is_imm = false,
        .encode_slave_id = false,
        .wrapper = mlx4_INIT2INIT_QP_wrapper

        .opcode = MLX4_CMD_INIT2RTR_QP,
        .has_outbox = false,
        .out_is_imm = false,
        .encode_slave_id = false,
        .wrapper = mlx4_INIT2RTR_QP_wrapper

        .opcode = MLX4_CMD_RTR2RTS_QP,
        .has_outbox = false,
        .out_is_imm = false,
        .encode_slave_id = false,
        .wrapper = mlx4_RTR2RTS_QP_wrapper

        .opcode = MLX4_CMD_RTS2RTS_QP,
        .has_outbox = false,
        .out_is_imm = false,
        .encode_slave_id = false,
        .wrapper = mlx4_RTS2RTS_QP_wrapper

        .opcode = MLX4_CMD_SQERR2RTS_QP,
        .has_outbox = false,
        .out_is_imm = false,
        .encode_slave_id = false,
        .wrapper = mlx4_SQERR2RTS_QP_wrapper

        .opcode = MLX4_CMD_2ERR_QP,
        .has_outbox = false,
        .out_is_imm = false,
        .encode_slave_id = false,
        .wrapper = mlx4_GEN_QP_wrapper

        .opcode = MLX4_CMD_RTS2SQD_QP,
        .has_outbox = false,
        .out_is_imm = false,
        .encode_slave_id = false,
        .wrapper = mlx4_GEN_QP_wrapper

        .opcode = MLX4_CMD_SQD2SQD_QP,
        .has_outbox = false,
        .out_is_imm = false,
        .encode_slave_id = false,
        .wrapper = mlx4_SQD2SQD_QP_wrapper

        .opcode = MLX4_CMD_SQD2RTS_QP,
        .has_outbox = false,
        .out_is_imm = false,
        .encode_slave_id = false,
        .wrapper = mlx4_SQD2RTS_QP_wrapper

        .opcode = MLX4_CMD_2RST_QP,
        .has_outbox = false,
        .out_is_imm = false,
        .encode_slave_id = false,
        .wrapper = mlx4_2RST_QP_wrapper

        .opcode = MLX4_CMD_QUERY_QP,
        .out_is_imm = false,
        .encode_slave_id = false,
        .wrapper = mlx4_GEN_QP_wrapper

        .opcode = MLX4_CMD_SUSPEND_QP,
        .has_outbox = false,
        .out_is_imm = false,
        .encode_slave_id = false,
        .wrapper = mlx4_GEN_QP_wrapper

        .opcode = MLX4_CMD_UNSUSPEND_QP,
        .has_outbox = false,
        .out_is_imm = false,
        .encode_slave_id = false,
        .wrapper = mlx4_GEN_QP_wrapper

        .opcode = MLX4_CMD_UPDATE_QP,
        .has_outbox = false,
        .out_is_imm = false,
        .encode_slave_id = false,
        .wrapper = mlx4_UPDATE_QP_wrapper

        .opcode = MLX4_CMD_GET_OP_REQ,
        .has_outbox = false,
        .out_is_imm = false,
        .encode_slave_id = false,
        .wrapper = mlx4_CMD_EPERM_wrapper,

        .opcode = MLX4_CMD_CONF_SPECIAL_QP,
        .has_outbox = false,
        .out_is_imm = false,
        .encode_slave_id = false,
        .verify = NULL, /* XXX verify: only demux can do this */

        .opcode = MLX4_CMD_MAD_IFC,
        .out_is_imm = false,
        .encode_slave_id = false,
        .wrapper = mlx4_MAD_IFC_wrapper

        .opcode = MLX4_CMD_MAD_DEMUX,
        .has_outbox = false,
        .out_is_imm = false,
        .encode_slave_id = false,
        .wrapper = mlx4_CMD_EPERM_wrapper

        .opcode = MLX4_CMD_QUERY_IF_STAT,
        .out_is_imm = false,
        .encode_slave_id = false,
        .wrapper = mlx4_QUERY_IF_STAT_wrapper

        .opcode = MLX4_CMD_ACCESS_REG,
        .out_is_imm = false,
        .encode_slave_id = false,
        .wrapper = mlx4_ACCESS_REG_wrapper,

        /* Native multicast commands are not available for guests */
        .opcode = MLX4_CMD_QP_ATTACH,
        .has_outbox = false,
        .out_is_imm = false,
        .encode_slave_id = false,
        .wrapper = mlx4_QP_ATTACH_wrapper

        .opcode = MLX4_CMD_PROMISC,
        .has_outbox = false,
        .out_is_imm = false,
        .encode_slave_id = false,
        .wrapper = mlx4_PROMISC_wrapper

        /* Ethernet specific commands */
        .opcode = MLX4_CMD_SET_VLAN_FLTR,
        .has_outbox = false,
        .out_is_imm = false,
        .encode_slave_id = false,
        .wrapper = mlx4_SET_VLAN_FLTR_wrapper

        .opcode = MLX4_CMD_SET_MCAST_FLTR,
        .has_outbox = false,
        .out_is_imm = false,
        .encode_slave_id = false,
        .wrapper = mlx4_SET_MCAST_FLTR_wrapper

        .opcode = MLX4_CMD_DUMP_ETH_STATS,
        .out_is_imm = false,
        .encode_slave_id = false,
        .wrapper = mlx4_DUMP_ETH_STATS_wrapper

        .opcode = MLX4_CMD_INFORM_FLR_DONE,
        .has_outbox = false,
        .out_is_imm = false,
        .encode_slave_id = false,

        /* flow steering commands */
        .opcode = MLX4_QP_FLOW_STEERING_ATTACH,
        .has_outbox = false,
        .encode_slave_id = false,
        .wrapper = mlx4_QP_FLOW_STEERING_ATTACH_wrapper

        .opcode = MLX4_QP_FLOW_STEERING_DETACH,
        .has_outbox = false,
        .out_is_imm = false,
        .encode_slave_id = false,
        .wrapper = mlx4_QP_FLOW_STEERING_DETACH_wrapper

        .opcode = MLX4_FLOW_STEERING_IB_UC_QP_RANGE,
        .has_outbox = false,
        .out_is_imm = false,
        .encode_slave_id = false,
        .wrapper = mlx4_CMD_EPERM_wrapper

        .opcode = MLX4_CMD_VIRT_PORT_MAP,
        .has_outbox = false,
        .out_is_imm = false,
        .encode_slave_id = false,
        .wrapper = mlx4_CMD_EPERM_wrapper
static int mlx4_master_process_vhcr(struct mlx4_dev *dev, int slave,
                                    struct mlx4_vhcr_cmd *in_vhcr)
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct mlx4_cmd_info *cmd = NULL;
        struct mlx4_vhcr_cmd *vhcr_cmd = in_vhcr ? in_vhcr : priv->mfunc.vhcr;
        struct mlx4_vhcr *vhcr;
        struct mlx4_cmd_mailbox *inbox = NULL;
        struct mlx4_cmd_mailbox *outbox = NULL;

        /* Create sw representation of Virtual HCR */
        vhcr = kzalloc(sizeof(struct mlx4_vhcr), GFP_KERNEL);

        /* DMA in the vHCR */
                ret = mlx4_ACCESS_MEM(dev, priv->mfunc.vhcr_dma, slave,
                                      priv->mfunc.master.slave_state[slave].vhcr_dma,
                                      ALIGN(sizeof(struct mlx4_vhcr_cmd),
                                            MLX4_ACCESS_MEM_ALIGN), 1);
                        if (!(dev->persist->state &
                              MLX4_DEVICE_STATE_INTERNAL_ERROR))
                                mlx4_err(dev, "%s: Failed reading vhcr ret: 0x%x\n",

        /* Fill SW VHCR fields */
        vhcr->in_param = be64_to_cpu(vhcr_cmd->in_param);
        vhcr->out_param = be64_to_cpu(vhcr_cmd->out_param);
        vhcr->in_modifier = be32_to_cpu(vhcr_cmd->in_modifier);
        vhcr->token = be16_to_cpu(vhcr_cmd->token);
        vhcr->op = be16_to_cpu(vhcr_cmd->opcode) & 0xfff;
        vhcr->op_modifier = (u8) (be16_to_cpu(vhcr_cmd->opcode) >> 12);
        vhcr->e_bit = vhcr_cmd->flags & (1 << 6);

        /* Lookup command */
        for (i = 0; i < ARRAY_SIZE(cmd_info); ++i) {
                if (vhcr->op == cmd_info[i].opcode) {
                mlx4_err(dev, "Unknown command:0x%x accepted from slave:%d\n",
                vhcr_cmd->status = CMD_STAT_BAD_PARAM;

        if (cmd->has_inbox) {
                vhcr->in_param &= INBOX_MASK;
                inbox = mlx4_alloc_cmd_mailbox(dev);
                if (IS_ERR(inbox)) {
                        vhcr_cmd->status = CMD_STAT_BAD_SIZE;

                ret = mlx4_ACCESS_MEM(dev, inbox->dma, slave,
                                      MLX4_MAILBOX_SIZE, 1);
                        if (!(dev->persist->state &
                              MLX4_DEVICE_STATE_INTERNAL_ERROR))
                                mlx4_err(dev, "%s: Failed reading inbox (cmd:0x%x)\n",
                                         __func__, cmd->opcode);
                        vhcr_cmd->status = CMD_STAT_INTERNAL_ERR;

        /* Apply permission and bound checks if applicable */
        if (cmd->verify && cmd->verify(dev, slave, vhcr, inbox)) {
                mlx4_warn(dev, "Command:0x%x from slave: %d failed protection checks for resource_id:%d\n",
                          vhcr->op, slave, vhcr->in_modifier);
                vhcr_cmd->status = CMD_STAT_BAD_OP;

        /* Allocate outbox */
        if (cmd->has_outbox) {
                outbox = mlx4_alloc_cmd_mailbox(dev);
                if (IS_ERR(outbox)) {
                        vhcr_cmd->status = CMD_STAT_BAD_SIZE;

        /* Execute the command! */
                err = cmd->wrapper(dev, slave, vhcr, inbox, outbox,
                if (cmd->out_is_imm)
                        vhcr_cmd->out_param = cpu_to_be64(vhcr->out_param);
                in_param = cmd->has_inbox ? (u64) inbox->dma :
                out_param = cmd->has_outbox ? (u64) outbox->dma :
                err = __mlx4_cmd(dev, in_param, &out_param,
                                 cmd->out_is_imm, vhcr->in_modifier,
                                 vhcr->op_modifier, vhcr->op,
                                 MLX4_CMD_TIME_CLASS_A,

                if (cmd->out_is_imm) {
                        vhcr->out_param = out_param;
                        vhcr_cmd->out_param = cpu_to_be64(vhcr->out_param);

                if (!(dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR))
                        mlx4_warn(dev, "vhcr command:0x%x slave:%d failed with error:%d, status %d\n",
                                  vhcr->op, slave, vhcr->errno, err);
                vhcr_cmd->status = mlx4_errno_to_status(err);

        /* Write outbox if command completed successfully */
        if (cmd->has_outbox && !vhcr_cmd->status) {
                ret = mlx4_ACCESS_MEM(dev, outbox->dma, slave,
                                      MLX4_MAILBOX_SIZE, MLX4_CMD_WRAPPED);
                        /* If we failed to write back the outbox after the
                         * command was successfully executed, we must fail this
                         * slave, as it is now in an undefined state
                         */
                        if (!(dev->persist->state &
                              MLX4_DEVICE_STATE_INTERNAL_ERROR))
                                mlx4_err(dev, "%s:Failed writing outbox\n", __func__);

        /* DMA back vhcr result */
                ret = mlx4_ACCESS_MEM(dev, priv->mfunc.vhcr_dma, slave,
                                      priv->mfunc.master.slave_state[slave].vhcr_dma,
                                      ALIGN(sizeof(struct mlx4_vhcr),
                                            MLX4_ACCESS_MEM_ALIGN),
                        mlx4_err(dev, "%s:Failed writing vhcr result\n",
                else if (vhcr->e_bit &&
                         mlx4_GEN_EQE(dev, slave, &priv->mfunc.master.cmd_eqe))
                        mlx4_warn(dev, "Failed to generate command completion eqe for slave %d\n",

        mlx4_free_cmd_mailbox(dev, inbox);
        mlx4_free_cmd_mailbox(dev, outbox);
static int mlx4_master_immediate_activate_vlan_qos(struct mlx4_priv *priv,
                                                   int slave, int port)
        struct mlx4_vport_oper_state *vp_oper;
        struct mlx4_vport_state *vp_admin;
        struct mlx4_vf_immed_vlan_work *work;
        struct mlx4_dev *dev = &(priv->dev);
        int admin_vlan_ix = NO_INDX;

        vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port];
        vp_admin = &priv->mfunc.master.vf_admin[slave].vport[port];

        if (vp_oper->state.default_vlan == vp_admin->default_vlan &&
            vp_oper->state.default_qos == vp_admin->default_qos &&
            vp_oper->state.link_state == vp_admin->link_state)

        if (!(priv->mfunc.master.slave_state[slave].active &&
              dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_UPDATE_QP)) {
                /* even if the UPDATE_QP command isn't supported, we still want
                 * to set this VF link according to the admin directive
                 */
                vp_oper->state.link_state = vp_admin->link_state;

        mlx4_dbg(dev, "updating immediately admin params slave %d port %d\n",
        mlx4_dbg(dev, "vlan %d QoS %d link down %d\n",
                 vp_admin->default_vlan, vp_admin->default_qos,
                 vp_admin->link_state);

        work = kzalloc(sizeof(*work), GFP_KERNEL);

        if (vp_oper->state.default_vlan != vp_admin->default_vlan) {
                if (MLX4_VGT != vp_admin->default_vlan) {
                        err = __mlx4_register_vlan(&priv->dev, port,
                                                   vp_admin->default_vlan,
                                mlx4_warn(&priv->dev,
                                          "No vlan resources slave %d, port %d\n",
                        admin_vlan_ix = NO_INDX;

                work->flags |= MLX4_VF_IMMED_VLAN_FLAG_VLAN;
                mlx4_dbg(&priv->dev,
                         "alloc vlan %d idx %d slave %d port %d\n",
                         (int)(vp_admin->default_vlan),
                         admin_vlan_ix, slave, port);

        /* save original vlan ix and vlan id */
        work->orig_vlan_id = vp_oper->state.default_vlan;
        work->orig_vlan_ix = vp_oper->vlan_idx;

        /* handle new qos */
        if (vp_oper->state.default_qos != vp_admin->default_qos)
                work->flags |= MLX4_VF_IMMED_VLAN_FLAG_QOS;

        if (work->flags & MLX4_VF_IMMED_VLAN_FLAG_VLAN)
                vp_oper->vlan_idx = admin_vlan_ix;

        vp_oper->state.default_vlan = vp_admin->default_vlan;
        vp_oper->state.default_qos = vp_admin->default_qos;
        vp_oper->state.link_state = vp_admin->link_state;

        if (vp_admin->link_state == IFLA_VF_LINK_STATE_DISABLE)
                work->flags |= MLX4_VF_IMMED_VLAN_FLAG_LINK_DISABLE;

        /* iterate over QPs owned by this slave, using UPDATE_QP */
        work->slave = slave;
        work->qos = vp_oper->state.default_qos;
        work->vlan_id = vp_oper->state.default_vlan;
        work->vlan_ix = vp_oper->vlan_idx;

        INIT_WORK(&work->work, mlx4_vf_immed_vlan_work_handler);
        queue_work(priv->mfunc.master.comm_wq, &work->work);
static int mlx4_master_activate_admin_state(struct mlx4_priv *priv, int slave)
        struct mlx4_vport_state *vp_admin;
        struct mlx4_vport_oper_state *vp_oper;
        struct mlx4_active_ports actv_ports = mlx4_get_active_ports(
        int min_port = find_first_bit(actv_ports.ports,
                                      priv->dev.caps.num_ports) + 1;
        int max_port = min_port - 1 +
                bitmap_weight(actv_ports.ports, priv->dev.caps.num_ports);

        for (port = min_port; port <= max_port; port++) {
                if (!test_bit(port - 1, actv_ports.ports))
                priv->mfunc.master.vf_oper[slave].smi_enabled[port] =
                        priv->mfunc.master.vf_admin[slave].enable_smi[port];
                vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port];
                vp_admin = &priv->mfunc.master.vf_admin[slave].vport[port];
                vp_oper->state = *vp_admin;
                if (MLX4_VGT != vp_admin->default_vlan) {
                        err = __mlx4_register_vlan(&priv->dev, port,
                                                   vp_admin->default_vlan, &(vp_oper->vlan_idx));
                                vp_oper->vlan_idx = NO_INDX;
                                mlx4_warn(&priv->dev,
                                          "No vlan resources slave %d, port %d\n",
                        mlx4_dbg(&priv->dev, "alloc vlan %d idx %d slave %d port %d\n",
                                 (int)(vp_oper->state.default_vlan),
                                 vp_oper->vlan_idx, slave, port);
                if (vp_admin->spoofchk) {
                        vp_oper->mac_idx = __mlx4_register_mac(&priv->dev,
                        if (vp_oper->mac_idx < 0) {
                                err = vp_oper->mac_idx;
                                vp_oper->mac_idx = NO_INDX;
                                mlx4_warn(&priv->dev,
                                          "No mac resources slave %d, port %d\n",
                        mlx4_dbg(&priv->dev, "alloc mac %llx idx %d slave %d port %d\n",
                                 vp_oper->state.mac, vp_oper->mac_idx, slave, port);
static void mlx4_master_deactivate_admin_state(struct mlx4_priv *priv, int slave)
        struct mlx4_vport_oper_state *vp_oper;
        struct mlx4_active_ports actv_ports = mlx4_get_active_ports(
        int min_port = find_first_bit(actv_ports.ports,
                                      priv->dev.caps.num_ports) + 1;
        int max_port = min_port - 1 +
                bitmap_weight(actv_ports.ports, priv->dev.caps.num_ports);

        for (port = min_port; port <= max_port; port++) {
                if (!test_bit(port - 1, actv_ports.ports))
                priv->mfunc.master.vf_oper[slave].smi_enabled[port] =
                        MLX4_VF_SMI_DISABLED;
                vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port];
                if (NO_INDX != vp_oper->vlan_idx) {
                        __mlx4_unregister_vlan(&priv->dev,
                                               port, vp_oper->state.default_vlan);
                        vp_oper->vlan_idx = NO_INDX;
                if (NO_INDX != vp_oper->mac_idx) {
                        __mlx4_unregister_mac(&priv->dev, port, vp_oper->state.mac);
                        vp_oper->mac_idx = NO_INDX;
static void mlx4_master_do_cmd(struct mlx4_dev *dev, int slave, u8 cmd,
                               u16 param, u8 toggle)
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct mlx4_slave_state *slave_state = priv->mfunc.master.slave_state;
        u8 is_going_down = 0;
        unsigned long flags;

        slave_state[slave].comm_toggle ^= 1;
        reply = (u32) slave_state[slave].comm_toggle << 31;
        if (toggle != slave_state[slave].comm_toggle) {
                mlx4_warn(dev, "Incorrect toggle %d from slave %d. *** MASTER STATE COMPROMISED ***\n",

        if (cmd == MLX4_COMM_CMD_RESET) {
                mlx4_warn(dev, "Received reset from slave:%d\n", slave);
                slave_state[slave].active = false;
                slave_state[slave].old_vlan_api = false;
                mlx4_master_deactivate_admin_state(priv, slave);
                for (i = 0; i < MLX4_EVENT_TYPES_NUM; ++i) {
                        slave_state[slave].event_eq[i].eqn = -1;
                        slave_state[slave].event_eq[i].token = 0;
                /* check if we are in the middle of the FLR process,
                 * if so return "retry" status to the slave
                 */
                if (MLX4_COMM_CMD_FLR == slave_state[slave].last_cmd)
                        goto inform_slave_state;

                mlx4_dispatch_event(dev, MLX4_DEV_EVENT_SLAVE_SHUTDOWN, slave);

                /* write the version in the event field */
                reply |= mlx4_comm_get_version();

        /* command from slave in the middle of FLR */
        if (cmd != MLX4_COMM_CMD_RESET &&
            MLX4_COMM_CMD_FLR == slave_state[slave].last_cmd) {
                mlx4_warn(dev, "slave:%d is trying to run cmd(0x%x) in the middle of FLR\n",

        case MLX4_COMM_CMD_VHCR0:
                if (slave_state[slave].last_cmd != MLX4_COMM_CMD_RESET)
                slave_state[slave].vhcr_dma = ((u64) param) << 48;
                priv->mfunc.master.slave_state[slave].cookie = 0;
        case MLX4_COMM_CMD_VHCR1:
                if (slave_state[slave].last_cmd != MLX4_COMM_CMD_VHCR0)
                slave_state[slave].vhcr_dma |= ((u64) param) << 32;
        case MLX4_COMM_CMD_VHCR2:
                if (slave_state[slave].last_cmd != MLX4_COMM_CMD_VHCR1)
                slave_state[slave].vhcr_dma |= ((u64) param) << 16;
        case MLX4_COMM_CMD_VHCR_EN:
                if (slave_state[slave].last_cmd != MLX4_COMM_CMD_VHCR2)
                slave_state[slave].vhcr_dma |= param;
                if (mlx4_master_activate_admin_state(priv, slave))
                slave_state[slave].active = true;
                mlx4_dispatch_event(dev, MLX4_DEV_EVENT_SLAVE_INIT, slave);
        case MLX4_COMM_CMD_VHCR_POST:
                if ((slave_state[slave].last_cmd != MLX4_COMM_CMD_VHCR_EN) &&
                    (slave_state[slave].last_cmd != MLX4_COMM_CMD_VHCR_POST)) {
                        mlx4_warn(dev, "slave:%d is out of sync, cmd=0x%x, last command=0x%x, reset is needed\n",
                                  slave, cmd, slave_state[slave].last_cmd);

                mutex_lock(&priv->cmd.slave_cmd_mutex);
                if (mlx4_master_process_vhcr(dev, slave, NULL)) {
                        mlx4_err(dev, "Failed processing vhcr for slave:%d, resetting slave\n",
                        mutex_unlock(&priv->cmd.slave_cmd_mutex);
                mutex_unlock(&priv->cmd.slave_cmd_mutex);
                mlx4_warn(dev, "Bad comm cmd:%d from slave:%d\n", cmd, slave);

        spin_lock_irqsave(&priv->mfunc.master.slave_state_lock, flags);
        if (!slave_state[slave].is_slave_going_down)
                slave_state[slave].last_cmd = cmd;
        spin_unlock_irqrestore(&priv->mfunc.master.slave_state_lock, flags);
        if (is_going_down) {
                mlx4_warn(dev, "Slave is going down, aborting command(%d) executing from slave:%d\n",

        __raw_writel((__force u32) cpu_to_be32(reply),
                     &priv->mfunc.comm[slave].slave_read);

        /* cleanup any slave resources */
        if (dev->persist->interface_state & MLX4_INTERFACE_STATE_UP)
                mlx4_delete_all_resources_for_slave(dev, slave);

        if (cmd != MLX4_COMM_CMD_RESET) {
                mlx4_warn(dev, "Turn on internal error to force reset, slave=%d, cmd=0x%x\n",
                /* Turn on internal error, letting the slave reset itself
                 * immediately; otherwise it might wait until the command
                 * times out.
                 */
                reply |= ((u32)COMM_CHAN_EVENT_INTERNAL_ERR);

        spin_lock_irqsave(&priv->mfunc.master.slave_state_lock, flags);
        if (!slave_state[slave].is_slave_going_down)
                slave_state[slave].last_cmd = MLX4_COMM_CMD_RESET;
        spin_unlock_irqrestore(&priv->mfunc.master.slave_state_lock, flags);
        /* with the slave in the middle of FLR, no need to clean resources again */
        memset(&slave_state[slave].event_eq, 0,
               sizeof(struct mlx4_slave_event_eq_info));
        __raw_writel((__force u32) cpu_to_be32(reply),
                     &priv->mfunc.comm[slave].slave_read);
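/*
 * Illustrative note (not in the original source): the VHCR0..VHCR_EN
 * sequence above delivers the slave's 64-bit VHCR DMA address 16 bits at
 * a time through the comm channel's u16 param field: VHCR0 carries bits
 * 63:48, VHCR1 bits 47:32, VHCR2 bits 31:16 and VHCR_EN bits 15:0, after
 * which the admin state is activated and VHCR_POST commands can be
 * processed.
 */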
/* master command processing */
void mlx4_master_comm_channel(struct work_struct *work)
        struct mlx4_mfunc_master_ctx *master =
                                 struct mlx4_mfunc_master_ctx,
        struct mlx4_mfunc *mfunc =
                container_of(master, struct mlx4_mfunc, master);
        struct mlx4_priv *priv =
                container_of(mfunc, struct mlx4_priv, mfunc);
        struct mlx4_dev *dev = &priv->dev;

        bit_vec = master->comm_arm_bit_vector;
        for (i = 0; i < COMM_CHANNEL_BIT_ARRAY_SIZE; i++) {
                vec = be32_to_cpu(bit_vec[i]);
                for (j = 0; j < 32; j++) {
                        if (!(vec & (1 << j)))
                        slave = (i * 32) + j;
                        comm_cmd = swab32(readl(
                                          &mfunc->comm[slave].slave_write));
                        slt = swab32(readl(&mfunc->comm[slave].slave_read))
                        toggle = comm_cmd >> 31;
                        if (toggle != slt) {
                                if (master->slave_state[slave].comm_toggle
                                        pr_info("slave %d out of sync. read toggle %d, state toggle %d. Resynching.\n",
                                                master->slave_state[slave].comm_toggle);
                                master->slave_state[slave].comm_toggle =
                                mlx4_master_do_cmd(dev, slave,
                                                   comm_cmd >> 16 & 0xff,
                                                   comm_cmd & 0xffff, toggle);

        if (reported && reported != served)
                mlx4_warn(dev, "Got command event with bitmask from %d slaves but %d were served\n",

        if (mlx4_ARM_COMM_CHANNEL(dev))
                mlx4_warn(dev, "Failed to arm comm channel events\n");
static int sync_toggles(struct mlx4_dev *dev)
        struct mlx4_priv *priv = mlx4_priv(dev);

        wr_toggle = swab32(readl(&priv->mfunc.comm->slave_write));
        if (wr_toggle == 0xffffffff)
                end = jiffies + msecs_to_jiffies(30000);
                end = jiffies + msecs_to_jiffies(5000);

        while (time_before(jiffies, end)) {
                rd_toggle = swab32(readl(&priv->mfunc.comm->slave_read));
                if (wr_toggle == 0xffffffff || rd_toggle == 0xffffffff) {
                        /* PCI might be offline */
                        wr_toggle = swab32(readl(&priv->mfunc.comm->

                if (rd_toggle >> 31 == wr_toggle >> 31) {
                        priv->cmd.comm_toggle = rd_toggle >> 31;

        /*
         * We could reach here if, for example, the previous VM using this
         * function misbehaved and left the channel in an unsynced state.
         * Fix this here and give this VM a chance to use a properly
         * synced channel.
         */
        mlx4_warn(dev, "recovering from previously mis-behaved VM\n");
        __raw_writel((__force u32) 0, &priv->mfunc.comm->slave_read);
        __raw_writel((__force u32) 0, &priv->mfunc.comm->slave_write);
        priv->cmd.comm_toggle = 0;
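/*
 * Illustrative note (not in the original source): sync_toggles() only
 * compares bit 31 of slave_write and slave_read.  When they match, the
 * channel is idle (every posted command was acknowledged) and the slave
 * adopts that value as its starting comm_toggle; a read of 0xffffffff
 * suggests PCI is offline, which is why the wait is extended and the
 * value re-read rather than treated as a real toggle.
 */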
int mlx4_multi_func_init(struct mlx4_dev *dev)
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct mlx4_slave_state *s_state;
        int i, j, err, port;

        if (mlx4_is_master(dev))
                        ioremap(pci_resource_start(dev->persist->pdev,
                                                   priv->fw.comm_bar) +
                                priv->fw.comm_base, MLX4_COMM_PAGESIZE);
                        ioremap(pci_resource_start(dev->persist->pdev, 2) +
                                MLX4_SLAVE_COMM_BASE, MLX4_COMM_PAGESIZE);
        if (!priv->mfunc.comm) {
                mlx4_err(dev, "Couldn't map communication vector\n");

        if (mlx4_is_master(dev)) {
                priv->mfunc.master.slave_state =
                        kzalloc(dev->num_slaves *
                                sizeof(struct mlx4_slave_state), GFP_KERNEL);
                if (!priv->mfunc.master.slave_state)

                priv->mfunc.master.vf_admin =
                        kzalloc(dev->num_slaves *
                                sizeof(struct mlx4_vf_admin_state), GFP_KERNEL);
                if (!priv->mfunc.master.vf_admin)
                        goto err_comm_admin;

                priv->mfunc.master.vf_oper =
                        kzalloc(dev->num_slaves *
                                sizeof(struct mlx4_vf_oper_state), GFP_KERNEL);
                if (!priv->mfunc.master.vf_oper)

                for (i = 0; i < dev->num_slaves; ++i) {
                        s_state = &priv->mfunc.master.slave_state[i];
                        s_state->last_cmd = MLX4_COMM_CMD_RESET;
                        mutex_init(&priv->mfunc.master.gen_eqe_mutex[i]);
                        for (j = 0; j < MLX4_EVENT_TYPES_NUM; ++j)
                                s_state->event_eq[j].eqn = -1;
                        __raw_writel((__force u32) 0,
                                     &priv->mfunc.comm[i].slave_write);
                        __raw_writel((__force u32) 0,
                                     &priv->mfunc.comm[i].slave_read);
                        for (port = 1; port <= MLX4_MAX_PORTS; port++) {
                                s_state->vlan_filter[port] =
                                        kzalloc(sizeof(struct mlx4_vlan_fltr),
                                if (!s_state->vlan_filter[port]) {
                                                kfree(s_state->vlan_filter[port]);
                                INIT_LIST_HEAD(&s_state->mcast_filters[port]);
                                priv->mfunc.master.vf_admin[i].vport[port].default_vlan = MLX4_VGT;
                                priv->mfunc.master.vf_oper[i].vport[port].state.default_vlan = MLX4_VGT;
                                priv->mfunc.master.vf_oper[i].vport[port].vlan_idx = NO_INDX;
                                priv->mfunc.master.vf_oper[i].vport[port].mac_idx = NO_INDX;
                        spin_lock_init(&s_state->lock);

                memset(&priv->mfunc.master.cmd_eqe, 0, dev->caps.eqe_size);
                priv->mfunc.master.cmd_eqe.type = MLX4_EVENT_TYPE_CMD;
                INIT_WORK(&priv->mfunc.master.comm_work,
                          mlx4_master_comm_channel);
                INIT_WORK(&priv->mfunc.master.slave_event_work,
                          mlx4_gen_slave_eqe);
                INIT_WORK(&priv->mfunc.master.slave_flr_event_work,
                          mlx4_master_handle_slave_flr);
                spin_lock_init(&priv->mfunc.master.slave_state_lock);
                spin_lock_init(&priv->mfunc.master.slave_eq.event_lock);
                priv->mfunc.master.comm_wq =
                        create_singlethread_workqueue("mlx4_comm");
                if (!priv->mfunc.master.comm_wq)

                if (mlx4_init_resource_tracker(dev))

                err = sync_toggles(dev);
                        mlx4_err(dev, "Couldn't sync toggles\n");

        flush_workqueue(priv->mfunc.master.comm_wq);
        destroy_workqueue(priv->mfunc.master.comm_wq);
        for (port = 1; port <= MLX4_MAX_PORTS; port++)
                kfree(priv->mfunc.master.slave_state[i].vlan_filter[port]);
        kfree(priv->mfunc.master.vf_oper);
        kfree(priv->mfunc.master.vf_admin);
        kfree(priv->mfunc.master.slave_state);
        iounmap(priv->mfunc.comm);
        dma_free_coherent(&dev->persist->pdev->dev, PAGE_SIZE,
                          priv->mfunc.vhcr_dma);
        priv->mfunc.vhcr = NULL;
int mlx4_cmd_init(struct mlx4_dev *dev)
        struct mlx4_priv *priv = mlx4_priv(dev);

        if (!priv->cmd.initialized) {
                mutex_init(&priv->cmd.slave_cmd_mutex);
                sema_init(&priv->cmd.poll_sem, 1);
                priv->cmd.use_events = 0;
                priv->cmd.toggle = 1;
                priv->cmd.initialized = 1;
                flags |= MLX4_CMD_CLEANUP_STRUCT;

        if (!mlx4_is_slave(dev) && !priv->cmd.hcr) {
                priv->cmd.hcr = ioremap(pci_resource_start(dev->persist->pdev,
                                                           0) + MLX4_HCR_BASE, MLX4_HCR_SIZE);
                if (!priv->cmd.hcr) {
                        mlx4_err(dev, "Couldn't map command register\n");
                flags |= MLX4_CMD_CLEANUP_HCR;

        if (mlx4_is_mfunc(dev) && !priv->mfunc.vhcr) {
                priv->mfunc.vhcr = dma_alloc_coherent(&dev->persist->pdev->dev,
                                                      &priv->mfunc.vhcr_dma,
                if (!priv->mfunc.vhcr)

                flags |= MLX4_CMD_CLEANUP_VHCR;

        if (!priv->cmd.pool) {
                priv->cmd.pool = pci_pool_create("mlx4_cmd",
                                                 MLX4_MAILBOX_SIZE, 0);
                if (!priv->cmd.pool)

                flags |= MLX4_CMD_CLEANUP_POOL;

        mlx4_cmd_cleanup(dev, flags);

void mlx4_report_internal_err_comm_event(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int slave;
	u32 slave_read;

	/* Report an internal error event to all
	 * communication channels.
	 */
	for (slave = 0; slave < dev->num_slaves; slave++) {
		slave_read = swab32(readl(&priv->mfunc.comm[slave].slave_read));
		slave_read |= (u32)COMM_CHAN_EVENT_INTERNAL_ERR;
		__raw_writel((__force u32)cpu_to_be32(slave_read),
			     &priv->mfunc.comm[slave].slave_read);
		/* Make sure that our comm channel write doesn't
		 * get mixed in with writes from another CPU.
		 */
		mmiowb();
	}
}
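
/*
 * The error is delivered by OR-ing COMM_CHAN_EVENT_INTERNAL_ERR into
 * each slave's slave_read word rather than by sending a comm-channel
 * command, so slaves can still notice the fatal state once the normal
 * command flow is no longer usable.
 */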

void mlx4_multi_func_cleanup(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int i, port;

	if (mlx4_is_master(dev)) {
		flush_workqueue(priv->mfunc.master.comm_wq);
		destroy_workqueue(priv->mfunc.master.comm_wq);
		for (i = 0; i < dev->num_slaves; i++) {
			for (port = 1; port <= MLX4_MAX_PORTS; port++)
				kfree(priv->mfunc.master.slave_state[i].vlan_filter[port]);
		}
		kfree(priv->mfunc.master.slave_state);
		kfree(priv->mfunc.master.vf_admin);
		kfree(priv->mfunc.master.vf_oper);
		dev->num_slaves = 0;
	}

	iounmap(priv->mfunc.comm);
}

void mlx4_cmd_cleanup(struct mlx4_dev *dev, int cleanup_mask)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	if (priv->cmd.pool && (cleanup_mask & MLX4_CMD_CLEANUP_POOL)) {
		pci_pool_destroy(priv->cmd.pool);
		priv->cmd.pool = NULL;
	}

	if (!mlx4_is_slave(dev) && priv->cmd.hcr &&
	    (cleanup_mask & MLX4_CMD_CLEANUP_HCR)) {
		iounmap(priv->cmd.hcr);
		priv->cmd.hcr = NULL;
	}
	if (mlx4_is_mfunc(dev) && priv->mfunc.vhcr &&
	    (cleanup_mask & MLX4_CMD_CLEANUP_VHCR)) {
		dma_free_coherent(&dev->persist->pdev->dev, PAGE_SIZE,
				  priv->mfunc.vhcr, priv->mfunc.vhcr_dma);
		priv->mfunc.vhcr = NULL;
	}
	if (priv->cmd.initialized && (cleanup_mask & MLX4_CMD_CLEANUP_STRUCT))
		priv->cmd.initialized = 0;
}
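
/*
 * The cleanup_mask bits deliberately mirror the MLX4_CMD_CLEANUP_* flags
 * accumulated by mlx4_cmd_init(), so the same function serves both full
 * teardown and the partial unwind of a failed init.
 */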

/*
 * Switch to using events to issue FW commands (can only be called
 * after event queue for command events has been initialized).
 */
int mlx4_cmd_use_events(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int i;
	int err = 0;

	priv->cmd.context = kmalloc(priv->cmd.max_cmds *
				    sizeof (struct mlx4_cmd_context),
				    GFP_KERNEL);
	if (!priv->cmd.context)
		return -ENOMEM;

	for (i = 0; i < priv->cmd.max_cmds; ++i) {
		priv->cmd.context[i].token = i;
		priv->cmd.context[i].next  = i + 1;
		/* To support fatal error flow, initialize all
		 * cmd contexts to allow simulating completions
		 * with complete() at any time.
		 */
		init_completion(&priv->cmd.context[i].done);
	}

	priv->cmd.context[priv->cmd.max_cmds - 1].next = -1;
	priv->cmd.free_head = 0;

	sema_init(&priv->cmd.event_sem, priv->cmd.max_cmds);
	spin_lock_init(&priv->cmd.context_lock);

	for (priv->cmd.token_mask = 1;
	     priv->cmd.token_mask < priv->cmd.max_cmds;
	     priv->cmd.token_mask <<= 1)
		; /* nothing */
	--priv->cmd.token_mask;

	down(&priv->cmd.poll_sem);
	priv->cmd.use_events = 1;

	return err;
}
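
/*
 * Worked example for the token_mask loop: with max_cmds == 10 the mask
 * steps through 1, 2, 4, 8, 16 and the final decrement leaves 0xf, the
 * smallest all-ones mask covering every context index. Taking poll_sem
 * afterwards blocks poll-mode issuers while events are in use.
 */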

/*
 * Switch back to polling (used when shutting down the device)
 */
void mlx4_cmd_use_polling(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int i;

	priv->cmd.use_events = 0;

	for (i = 0; i < priv->cmd.max_cmds; ++i)
		down(&priv->cmd.event_sem);

	kfree(priv->cmd.context);

	up(&priv->cmd.poll_sem);
}
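
/*
 * Downing event_sem once per command slot cannot finish until every
 * outstanding event-mode command has released its slot, so the context
 * array is guaranteed unreferenced before it is freed here.
 */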

struct mlx4_cmd_mailbox *mlx4_alloc_cmd_mailbox(struct mlx4_dev *dev)
{
	struct mlx4_cmd_mailbox *mailbox;

	mailbox = kmalloc(sizeof *mailbox, GFP_KERNEL);
	if (!mailbox)
		return ERR_PTR(-ENOMEM);

	mailbox->buf = pci_pool_alloc(mlx4_priv(dev)->cmd.pool, GFP_KERNEL,
				      &mailbox->dma);
	if (!mailbox->buf) {
		kfree(mailbox);
		return ERR_PTR(-ENOMEM);
	}

	memset(mailbox->buf, 0, MLX4_MAILBOX_SIZE);

	return mailbox;
}
EXPORT_SYMBOL_GPL(mlx4_alloc_cmd_mailbox);

void mlx4_free_cmd_mailbox(struct mlx4_dev *dev,
			   struct mlx4_cmd_mailbox *mailbox)
{
	if (!mailbox)
		return;

	pci_pool_free(mlx4_priv(dev)->cmd.pool, mailbox->buf, mailbox->dma);
	kfree(mailbox);
}
EXPORT_SYMBOL_GPL(mlx4_free_cmd_mailbox);
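
/*
 * Illustrative usage sketch (not a call site in this file): callers pair
 * the two mailbox helpers and must check the ERR_PTR-encoded result:
 *
 *	struct mlx4_cmd_mailbox *mailbox = mlx4_alloc_cmd_mailbox(dev);
 *
 *	if (IS_ERR(mailbox))
 *		return PTR_ERR(mailbox);
 *	... fill mailbox->buf, pass mailbox->dma as the command inbox ...
 *	mlx4_free_cmd_mailbox(dev, mailbox);
 */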

u32 mlx4_comm_get_version(void)
{
	return ((u32) CMD_CHAN_IF_REV << 8) | (u32) CMD_CHAN_VER;
}
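
/*
 * The comm-channel interface revision is packed into bits 15:8 and the
 * channel version into bits 7:0, so revision 1 / version 1 reads back
 * as 0x0101.
 */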

static int mlx4_get_slave_indx(struct mlx4_dev *dev, int vf)
{
	if ((vf < 0) || (vf >= dev->persist->num_vfs)) {
		mlx4_err(dev, "Bad vf number:%d (number of activated vf: %d)\n",
			 vf, dev->persist->num_vfs);
		return -EINVAL;
	}

	return vf + 1;
}

int mlx4_get_vf_indx(struct mlx4_dev *dev, int slave)
{
	if (slave < 1 || slave > dev->persist->num_vfs) {
		mlx4_err(dev,
			 "Bad slave number:%d (number of activated slaves: %lu)\n",
			 slave, dev->num_slaves);
		return -EINVAL;
	}
	return slave - 1;
}
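
/*
 * Slave/VF numbering note: slave 0 is the PF, so mlx4_get_slave_indx()
 * maps VF n to slave n + 1 and this helper maps it back.
 */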

void mlx4_cmd_wake_completions(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_cmd_context *context;
	int i;

	spin_lock(&priv->cmd.context_lock);
	if (priv->cmd.context) {
		for (i = 0; i < priv->cmd.max_cmds; ++i) {
			context = &priv->cmd.context[i];
			context->fw_status = CMD_STAT_INTERNAL_ERR;
			context->result    =
				mlx4_status_to_errno(CMD_STAT_INTERNAL_ERR);
			complete(&context->done);
		}
	}
	spin_unlock(&priv->cmd.context_lock);
}

struct mlx4_active_ports mlx4_get_active_ports(struct mlx4_dev *dev, int slave)
{
	struct mlx4_active_ports actv_ports;
	int vf;

	bitmap_zero(actv_ports.ports, MLX4_MAX_PORTS);

	if (slave == 0) {
		bitmap_fill(actv_ports.ports, dev->caps.num_ports);
		return actv_ports;
	}

	vf = mlx4_get_vf_indx(dev, slave);
	if (vf < 0)
		return actv_ports;

	bitmap_set(actv_ports.ports, dev->dev_vfs[vf].min_port - 1,
		   min((int)dev->dev_vfs[mlx4_get_vf_indx(dev, slave)].n_ports,
		       dev->caps.num_ports));

	return actv_ports;
}
EXPORT_SYMBOL_GPL(mlx4_get_active_ports);
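
/*
 * Port ownership summary: slave 0 (the PF) is given every physical
 * port, while a VF gets the contiguous range described by its
 * dev->dev_vfs[] entry (min_port, n_ports), clipped to num_ports.
 */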

int mlx4_slave_convert_port(struct mlx4_dev *dev, int slave, int port)
{
	unsigned n;
	struct mlx4_active_ports actv_ports = mlx4_get_active_ports(dev, slave);
	unsigned m = bitmap_weight(actv_ports.ports, dev->caps.num_ports);

	if (port <= 0 || port > m)
		return -EINVAL;

	n = find_first_bit(actv_ports.ports, dev->caps.num_ports);
	if (port <= n)
		port = n + 1;

	return port;
}
EXPORT_SYMBOL_GPL(mlx4_slave_convert_port);
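
/*
 * Example: a slave whose only active physical port is 2 has m == 1 and
 * first set bit 1, so slave-relative port 1 is converted to physical
 * port 2.
 */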

int mlx4_phys_to_slave_port(struct mlx4_dev *dev, int slave, int port)
{
	struct mlx4_active_ports actv_ports = mlx4_get_active_ports(dev, slave);

	if (test_bit(port - 1, actv_ports.ports))
		return port -
			find_first_bit(actv_ports.ports, dev->caps.num_ports);

	return -1;
}
EXPORT_SYMBOL_GPL(mlx4_phys_to_slave_port);
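
/*
 * And the inverse: for that same slave, physical port 2 yields
 * 2 - 1 == 1, i.e. slave-relative port 1, while a port the slave does
 * not own returns -1.
 */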

struct mlx4_slaves_pport mlx4_phys_to_slaves_pport(struct mlx4_dev *dev,
						   int port)
{
	unsigned i;
	struct mlx4_slaves_pport slaves_pport;

	bitmap_zero(slaves_pport.slaves, MLX4_MFUNC_MAX);

	if (port <= 0 || port > dev->caps.num_ports)
		return slaves_pport;

	for (i = 0; i < dev->persist->num_vfs + 1; i++) {
		struct mlx4_active_ports actv_ports =
			mlx4_get_active_ports(dev, i);
		if (test_bit(port - 1, actv_ports.ports))
			set_bit(i, slaves_pport.slaves);
	}

	return slaves_pport;
}
EXPORT_SYMBOL_GPL(mlx4_phys_to_slaves_pport);

struct mlx4_slaves_pport mlx4_phys_to_slaves_pport_actv(
		struct mlx4_dev *dev,
		const struct mlx4_active_ports *crit_ports)
{
	unsigned i;
	struct mlx4_slaves_pport slaves_pport;

	bitmap_zero(slaves_pport.slaves, MLX4_MFUNC_MAX);

	for (i = 0; i < dev->persist->num_vfs + 1; i++) {
		struct mlx4_active_ports actv_ports =
			mlx4_get_active_ports(dev, i);
		if (bitmap_equal(crit_ports->ports, actv_ports.ports,
				 dev->caps.num_ports))
			set_bit(i, slaves_pport.slaves);
	}

	return slaves_pport;
}
EXPORT_SYMBOL_GPL(mlx4_phys_to_slaves_pport_actv);

static int mlx4_slaves_closest_port(struct mlx4_dev *dev, int slave, int port)
{
	struct mlx4_active_ports actv_ports = mlx4_get_active_ports(dev, slave);
	int min_port = find_first_bit(actv_ports.ports, dev->caps.num_ports)
			+ 1;
	int max_port = min_port +
		bitmap_weight(actv_ports.ports, dev->caps.num_ports);

	if (port < min_port)
		port = min_port;
	else if (port >= max_port)
		port = max_port - 1;

	return port;
}
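
/*
 * Example: for a single-port VF on physical port 2, min_port == 2 and
 * max_port == 3, so every requested port value is clamped to 2 before
 * any vport state is indexed.
 */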

int mlx4_set_vf_mac(struct mlx4_dev *dev, int port, int vf, u64 mac)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_vport_state *s_info;
	int slave;

	if (!mlx4_is_master(dev))
		return -EPROTONOSUPPORT;

	slave = mlx4_get_slave_indx(dev, vf);
	if (slave < 0)
		return -EINVAL;

	port = mlx4_slaves_closest_port(dev, slave, port);
	s_info = &priv->mfunc.master.vf_admin[slave].vport[port];
	s_info->mac = mac;
	mlx4_info(dev, "default mac on vf %d port %d to %llX will take effect only after vf restart\n",
		  vf, port, s_info->mac);
	return 0;
}
EXPORT_SYMBOL_GPL(mlx4_set_vf_mac);

int mlx4_set_vf_vlan(struct mlx4_dev *dev, int port, int vf, u16 vlan, u8 qos)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_vport_state *vf_admin;
	int slave;

	if ((!mlx4_is_master(dev)) ||
	    !(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_VLAN_CONTROL))
		return -EPROTONOSUPPORT;

	if ((vlan > 4095) || (qos > 7))
		return -EINVAL;

	slave = mlx4_get_slave_indx(dev, vf);
	if (slave < 0)
		return -EINVAL;

	port = mlx4_slaves_closest_port(dev, slave, port);
	vf_admin = &priv->mfunc.master.vf_admin[slave].vport[port];

	if ((0 == vlan) && (0 == qos))
		vf_admin->default_vlan = MLX4_VGT;
	else
		vf_admin->default_vlan = vlan;
	vf_admin->default_qos = qos;

	if (mlx4_master_immediate_activate_vlan_qos(priv, slave, port))
		mlx4_info(dev,
			  "updating vf %d port %d config will take effect on next VF restart\n",
			  vf, port);
	return 0;
}
EXPORT_SYMBOL_GPL(mlx4_set_vf_vlan);
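
/*
 * Semantics: vlan == 0 with qos == 0 selects VGT (the guest controls
 * its own tagging); any other pair programs VST. This path is normally
 * reached via the ndo_set_vf_vlan callback, e.g.
 * "ip link set <pf> vf <n> vlan <vid> qos <prio>" (illustrative
 * invocation, not specific to this file).
 */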

/* mlx4_get_slave_default_vlan -
 * Return true if the slave is in VST mode (i.e. has a default vlan).
 * When it is, also report the vlan and qos through the corresponding
 * pointers (if they are not NULL).
 */
bool mlx4_get_slave_default_vlan(struct mlx4_dev *dev, int port, int slave,
				 u16 *vlan, u8 *qos)
{
	struct mlx4_vport_oper_state *vp_oper;
	struct mlx4_priv *priv;

	priv = mlx4_priv(dev);
	port = mlx4_slaves_closest_port(dev, slave, port);
	vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port];

	if (MLX4_VGT != vp_oper->state.default_vlan) {
		if (vlan)
			*vlan = vp_oper->state.default_vlan;
		if (qos)
			*qos = vp_oper->state.default_qos;
		return true;
	}

	return false;
}
EXPORT_SYMBOL_GPL(mlx4_get_slave_default_vlan);

int mlx4_set_vf_spoofchk(struct mlx4_dev *dev, int port, int vf, bool setting)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_vport_state *s_info;
	int slave;

	if ((!mlx4_is_master(dev)) ||
	    !(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_FSM))
		return -EPROTONOSUPPORT;

	slave = mlx4_get_slave_indx(dev, vf);
	if (slave < 0)
		return -EINVAL;

	port = mlx4_slaves_closest_port(dev, slave, port);
	s_info = &priv->mfunc.master.vf_admin[slave].vport[port];
	s_info->spoofchk = setting;

	return 0;
}
EXPORT_SYMBOL_GPL(mlx4_set_vf_spoofchk);

int mlx4_get_vf_config(struct mlx4_dev *dev, int port, int vf, struct ifla_vf_info *ivf)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_vport_state *s_info;
	int slave;

	if (!mlx4_is_master(dev))
		return -EPROTONOSUPPORT;

	slave = mlx4_get_slave_indx(dev, vf);
	if (slave < 0)
		return -EINVAL;

	s_info = &priv->mfunc.master.vf_admin[slave].vport[port];
	ivf->vf = vf;

	/* need to convert it to a func */
	ivf->mac[0] = ((s_info->mac >> (5*8)) & 0xff);
	ivf->mac[1] = ((s_info->mac >> (4*8)) & 0xff);
	ivf->mac[2] = ((s_info->mac >> (3*8)) & 0xff);
	ivf->mac[3] = ((s_info->mac >> (2*8)) & 0xff);
	ivf->mac[4] = ((s_info->mac >> (1*8)) & 0xff);
	ivf->mac[5] = ((s_info->mac) & 0xff);

	ivf->vlan = s_info->default_vlan;
	ivf->qos = s_info->default_qos;
	ivf->max_tx_rate = s_info->tx_rate;
	ivf->min_tx_rate = 0;
	ivf->spoofchk = s_info->spoofchk;
	ivf->linkstate = s_info->link_state;

	return 0;
}
EXPORT_SYMBOL_GPL(mlx4_get_vf_config);

int mlx4_set_vf_link_state(struct mlx4_dev *dev, int port, int vf, int link_state)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_vport_state *s_info;
	int slave;
	u8 link_stat_event;

	slave = mlx4_get_slave_indx(dev, vf);
	if (slave < 0)
		return -EINVAL;

	port = mlx4_slaves_closest_port(dev, slave, port);
	switch (link_state) {
	case IFLA_VF_LINK_STATE_AUTO:
		/* get current link state */
		if (!priv->sense.do_sense_port[port])
			link_stat_event = MLX4_PORT_CHANGE_SUBTYPE_ACTIVE;
		else
			link_stat_event = MLX4_PORT_CHANGE_SUBTYPE_DOWN;
		break;
	case IFLA_VF_LINK_STATE_ENABLE:
		link_stat_event = MLX4_PORT_CHANGE_SUBTYPE_ACTIVE;
		break;
	case IFLA_VF_LINK_STATE_DISABLE:
		link_stat_event = MLX4_PORT_CHANGE_SUBTYPE_DOWN;
		break;
	default:
		mlx4_warn(dev, "unknown value for link_state %02x on slave %d port %d\n",
			  link_state, slave, port);
		return -EINVAL;
	}
	s_info = &priv->mfunc.master.vf_admin[slave].vport[port];
	s_info->link_state = link_state;

	/* send event */
	mlx4_gen_port_state_change_eqe(dev, slave, port, link_stat_event);

	if (mlx4_master_immediate_activate_vlan_qos(priv, slave, port))
		mlx4_dbg(dev,
			 "updating vf %d port %d no link state HW enforcement\n",
			 vf, port);
	return 0;
}
EXPORT_SYMBOL_GPL(mlx4_set_vf_link_state);
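
/*
 * The switch above maps the IFLA_VF_LINK_STATE_* policy onto a
 * port-change EQE subtype: ENABLE and DISABLE force ACTIVE or DOWN
 * toward the VF, while AUTO reports ACTIVE unless the port is still
 * under sensing; the generated EQE is what the VF actually observes as
 * its link state.
 */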

int mlx4_vf_smi_enabled(struct mlx4_dev *dev, int slave, int port)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	if (slave < 1 || slave >= dev->num_slaves ||
	    port < 1 || port > MLX4_MAX_PORTS)
		return 0;

	return priv->mfunc.master.vf_oper[slave].smi_enabled[port] ==
		MLX4_VF_SMI_ENABLED;
}
EXPORT_SYMBOL_GPL(mlx4_vf_smi_enabled);

int mlx4_vf_get_enable_smi_admin(struct mlx4_dev *dev, int slave, int port)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	if (slave == mlx4_master_func_num(dev))
		return 1;

	if (slave < 1 || slave >= dev->num_slaves ||
	    port < 1 || port > MLX4_MAX_PORTS)
		return 0;

	return priv->mfunc.master.vf_admin[slave].enable_smi[port] ==
		MLX4_VF_SMI_ENABLED;
}
EXPORT_SYMBOL_GPL(mlx4_vf_get_enable_smi_admin);

int mlx4_vf_set_enable_smi_admin(struct mlx4_dev *dev, int slave, int port,
				 int enabled)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	if (slave == mlx4_master_func_num(dev))
		return 0;

	if (slave < 1 || slave >= dev->num_slaves ||
	    port < 1 || port > MLX4_MAX_PORTS ||
	    enabled < 0 || enabled > 1)
		return -EINVAL;

	priv->mfunc.master.vf_admin[slave].enable_smi[port] = enabled;
	return 0;
}
EXPORT_SYMBOL_GPL(mlx4_vf_set_enable_smi_admin);