2 * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved.
3 * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. All rights reserved.
4 * Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc. All rights reserved.
6 * This software is available to you under a choice of one of two
7 * licenses. You may choose to be licensed under the terms of the GNU
8 * General Public License (GPL) Version 2, available from the file
9 * COPYING in the main directory of this source tree, or the
10 * OpenIB.org BSD license below:
12 * Redistribution and use in source and binary forms, with or
13 * without modification, are permitted provided that the following
16 * - Redistributions of source code must retain the above
17 * copyright notice, this list of conditions and the following
20 * - Redistributions in binary form must reproduce the above
21 * copyright notice, this list of conditions and the following
22 * disclaimer in the documentation and/or other materials
23 * provided with the distribution.
25 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
26 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
27 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
28 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
29 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
30 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
31 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
35 #include <linux/sched.h>
36 #include <linux/slab.h>
37 #include <linux/export.h>
38 #include <linux/pci.h>
39 #include <linux/errno.h>
41 #include <linux/mlx4/cmd.h>
42 #include <linux/semaphore.h>
/* Token written into the HCR token field for polled (non-event) commands. */
49 #define CMD_POLL_TOKEN 0xffff
/* Mailboxes are 256-byte aligned; masks off the low byte of a vhcr in_param
 * to recover the mailbox address (low bits carry flags). */
50 #define INBOX_MASK 0xffffffffffffff00ULL
/* Version/interface revision of the master<->slave communication channel,
 * packed together by mlx4_comm_get_version(). */
52 #define CMD_CHAN_VER 1
53 #define CMD_CHAN_IF_REV 1
/*
 * Firmware command completion status codes (the status byte read back from
 * the HCR / virtual HCR).  Mapped to Linux errno values in
 * mlx4_status_to_errno()'s trans_table.
 */
56 /* command completed successfully: */
58 /* Internal error (such as a bus error) occurred while processing command: */
59 CMD_STAT_INTERNAL_ERR = 0x01,
60 /* Operation/command not supported or opcode modifier not supported: */
61 CMD_STAT_BAD_OP = 0x02,
62 /* Parameter not supported or parameter out of range: */
63 CMD_STAT_BAD_PARAM = 0x03,
64 /* System not enabled or bad system state: */
65 CMD_STAT_BAD_SYS_STATE = 0x04,
66 /* Attempt to access reserved or unallocated resource: */
67 CMD_STAT_BAD_RESOURCE = 0x05,
68 /* Requested resource is currently executing a command, or is otherwise busy: */
69 CMD_STAT_RESOURCE_BUSY = 0x06,
70 /* Required capability exceeds device limits: */
71 CMD_STAT_EXCEED_LIM = 0x08,
72 /* Resource is not in the appropriate state or ownership: */
73 CMD_STAT_BAD_RES_STATE = 0x09,
74 /* Index out of range: */
75 CMD_STAT_BAD_INDEX = 0x0a,
76 /* FW image corrupted: */
77 CMD_STAT_BAD_NVMEM = 0x0b,
78 /* Error in ICM mapping (e.g. not enough auxiliary ICM pages to execute command): */
79 CMD_STAT_ICM_ERROR = 0x0c,
80 /* Attempt to modify a QP/EE which is not in the presumed state: */
81 CMD_STAT_BAD_QP_STATE = 0x10,
82 /* Bad segment parameters (Address/Size): */
83 CMD_STAT_BAD_SEG_PARAM = 0x20,
84 /* Memory Region has Memory Windows bound to: */
85 CMD_STAT_REG_BOUND = 0x21,
86 /* HCA local attached memory not present: */
87 CMD_STAT_LAM_NOT_PRE = 0x22,
88 /* Bad management packet (silently discarded): */
89 CMD_STAT_BAD_PKT = 0x30,
90 /* More outstanding CQEs in CQ than new CQ size: */
91 CMD_STAT_BAD_SIZE = 0x40,
92 /* Multi Function device support required: */
93 CMD_STAT_MULTI_FUNC_REQ = 0x50,
/*
 * Byte offsets of the fields within the Host Command Register (HCR),
 * used by mlx4_cmd_post()/mlx4_cmd_poll() when reading/writing the HCR,
 * plus the shift of the opcode modifier within the final HCR dword and
 * the timeout used while waiting for the GO bit to clear.
 */
97 HCR_IN_PARAM_OFFSET = 0x00,
98 HCR_IN_MODIFIER_OFFSET = 0x08,
99 HCR_OUT_PARAM_OFFSET = 0x0c,
100 HCR_TOKEN_OFFSET = 0x14,
101 HCR_STATUS_OFFSET = 0x18,
103 HCR_OPMOD_SHIFT = 12,
110 GO_BIT_TIMEOUT_MSECS = 10000
/*
 * Per-outstanding-command context for the event-driven command path.
 * 'done' is completed by mlx4_cmd_event() when the command's completion
 * event arrives; result/fw_status/out_param/token/next members are used
 * elsewhere in this file (struct shown truncated here).
 */
113 struct mlx4_cmd_context {
114 struct completion done;
/* Forward declaration: defined below, after the cmd_info[] wrapper table. */
122 static int mlx4_master_process_vhcr(struct mlx4_dev *dev, int slave,
123 struct mlx4_vhcr_cmd *in_vhcr);
125 static int mlx4_status_to_errno(u8 status)
127 static const int trans_table[] = {
128 [CMD_STAT_INTERNAL_ERR] = -EIO,
129 [CMD_STAT_BAD_OP] = -EPERM,
130 [CMD_STAT_BAD_PARAM] = -EINVAL,
131 [CMD_STAT_BAD_SYS_STATE] = -ENXIO,
132 [CMD_STAT_BAD_RESOURCE] = -EBADF,
133 [CMD_STAT_RESOURCE_BUSY] = -EBUSY,
134 [CMD_STAT_EXCEED_LIM] = -ENOMEM,
135 [CMD_STAT_BAD_RES_STATE] = -EBADF,
136 [CMD_STAT_BAD_INDEX] = -EBADF,
137 [CMD_STAT_BAD_NVMEM] = -EFAULT,
138 [CMD_STAT_ICM_ERROR] = -ENFILE,
139 [CMD_STAT_BAD_QP_STATE] = -EINVAL,
140 [CMD_STAT_BAD_SEG_PARAM] = -EFAULT,
141 [CMD_STAT_REG_BOUND] = -EBUSY,
142 [CMD_STAT_LAM_NOT_PRE] = -EAGAIN,
143 [CMD_STAT_BAD_PKT] = -EINVAL,
144 [CMD_STAT_BAD_SIZE] = -ENOMEM,
145 [CMD_STAT_MULTI_FUNC_REQ] = -EACCES,
148 if (status >= ARRAY_SIZE(trans_table) ||
149 (status != CMD_STAT_OK && trans_table[status] == 0))
152 return trans_table[status];
155 static int comm_pending(struct mlx4_dev *dev)
157 struct mlx4_priv *priv = mlx4_priv(dev);
158 u32 status = readl(&priv->mfunc.comm->slave_read);
160 return (swab32(status) >> 31) != priv->cmd.comm_toggle;
163 static void mlx4_comm_cmd_post(struct mlx4_dev *dev, u8 cmd, u16 param)
165 struct mlx4_priv *priv = mlx4_priv(dev);
168 priv->cmd.comm_toggle ^= 1;
169 val = param | (cmd << 16) | (priv->cmd.comm_toggle << 31);
170 __raw_writel((__force u32) cpu_to_be32(val),
171 &priv->mfunc.comm->slave_write);
/*
 * Generate a command-completion EQE for a slave.
 * Dummy placeholder for this patch (real implementation arrives later);
 * always reports success.
 */
int mlx4_GEN_EQE(struct mlx4_dev *dev, int slave, struct mlx4_eqe *eqe)
{
	return 0;
}
181 static int mlx4_comm_cmd_poll(struct mlx4_dev *dev, u8 cmd, u16 param,
182 unsigned long timeout)
184 struct mlx4_priv *priv = mlx4_priv(dev);
187 int ret_from_pending = 0;
189 /* First, verify that the master reports correct status */
190 if (comm_pending(dev)) {
191 mlx4_warn(dev, "Communication channel is not idle."
192 "my toggle is %d (cmd:0x%x)\n",
193 priv->cmd.comm_toggle, cmd);
198 down(&priv->cmd.poll_sem);
199 mlx4_comm_cmd_post(dev, cmd, param);
201 end = msecs_to_jiffies(timeout) + jiffies;
202 while (comm_pending(dev) && time_before(jiffies, end))
204 ret_from_pending = comm_pending(dev);
205 if (ret_from_pending) {
206 /* check if the slave is trying to boot in the middle of
207 * FLR process. The only non-zero result in the RESET command
208 * is MLX4_DELAY_RESET_SLAVE*/
209 if ((MLX4_COMM_CMD_RESET == cmd)) {
210 mlx4_warn(dev, "Got slave FLRed from Communication"
211 " channel (ret:0x%x)\n", ret_from_pending);
212 err = MLX4_DELAY_RESET_SLAVE;
214 mlx4_warn(dev, "Communication channel timed out\n");
219 up(&priv->cmd.poll_sem);
223 static int mlx4_comm_cmd_wait(struct mlx4_dev *dev, u8 op,
224 u16 param, unsigned long timeout)
226 struct mlx4_cmd *cmd = &mlx4_priv(dev)->cmd;
227 struct mlx4_cmd_context *context;
230 down(&cmd->event_sem);
232 spin_lock(&cmd->context_lock);
233 BUG_ON(cmd->free_head < 0);
234 context = &cmd->context[cmd->free_head];
235 context->token += cmd->token_mask + 1;
236 cmd->free_head = context->next;
237 spin_unlock(&cmd->context_lock);
239 init_completion(&context->done);
241 mlx4_comm_cmd_post(dev, op, param);
243 if (!wait_for_completion_timeout(&context->done,
244 msecs_to_jiffies(timeout))) {
249 err = context->result;
250 if (err && context->fw_status != CMD_STAT_MULTI_FUNC_REQ) {
251 mlx4_err(dev, "command 0x%x failed: fw status = 0x%x\n",
252 op, context->fw_status);
257 spin_lock(&cmd->context_lock);
258 context->next = cmd->free_head;
259 cmd->free_head = context - cmd->context;
260 spin_unlock(&cmd->context_lock);
266 static int mlx4_comm_cmd(struct mlx4_dev *dev, u8 cmd, u16 param,
267 unsigned long timeout)
269 if (mlx4_priv(dev)->cmd.use_events)
270 return mlx4_comm_cmd_wait(dev, cmd, param, timeout);
271 return mlx4_comm_cmd_poll(dev, cmd, param, timeout);
274 static int cmd_pending(struct mlx4_dev *dev)
276 u32 status = readl(mlx4_priv(dev)->cmd.hcr + HCR_STATUS_OFFSET);
278 return (status & swab32(1 << HCR_GO_BIT)) ||
279 (mlx4_priv(dev)->cmd.toggle ==
280 !!(status & swab32(1 << HCR_T_BIT)));
283 static int mlx4_cmd_post(struct mlx4_dev *dev, u64 in_param, u64 out_param,
284 u32 in_modifier, u8 op_modifier, u16 op, u16 token,
287 struct mlx4_cmd *cmd = &mlx4_priv(dev)->cmd;
288 u32 __iomem *hcr = cmd->hcr;
292 mutex_lock(&cmd->hcr_mutex);
296 end += msecs_to_jiffies(GO_BIT_TIMEOUT_MSECS);
298 while (cmd_pending(dev)) {
299 if (time_after_eq(jiffies, end)) {
300 mlx4_err(dev, "%s:cmd_pending failed\n", __func__);
307 * We use writel (instead of something like memcpy_toio)
308 * because writes of less than 32 bits to the HCR don't work
309 * (and some architectures such as ia64 implement memcpy_toio
310 * in terms of writeb).
312 __raw_writel((__force u32) cpu_to_be32(in_param >> 32), hcr + 0);
313 __raw_writel((__force u32) cpu_to_be32(in_param & 0xfffffffful), hcr + 1);
314 __raw_writel((__force u32) cpu_to_be32(in_modifier), hcr + 2);
315 __raw_writel((__force u32) cpu_to_be32(out_param >> 32), hcr + 3);
316 __raw_writel((__force u32) cpu_to_be32(out_param & 0xfffffffful), hcr + 4);
317 __raw_writel((__force u32) cpu_to_be32(token << 16), hcr + 5);
319 /* __raw_writel may not order writes. */
322 __raw_writel((__force u32) cpu_to_be32((1 << HCR_GO_BIT) |
323 (cmd->toggle << HCR_T_BIT) |
324 (event ? (1 << HCR_E_BIT) : 0) |
325 (op_modifier << HCR_OPMOD_SHIFT) |
329 * Make sure that our HCR writes don't get mixed in with
330 * writes from another CPU starting a FW command.
334 cmd->toggle = cmd->toggle ^ 1;
339 mutex_unlock(&cmd->hcr_mutex);
/*
 * Execute a command through the virtual HCR (vhcr), serialised by
 * slave_sem.  The vhcr fields are filled in big-endian; a master
 * processes its own vhcr directly via mlx4_master_process_vhcr(),
 * while a slave rings the master over the comm channel with
 * MLX4_COMM_CMD_VHCR_POST.  For immediate-output commands the result is
 * copied back from vhcr->out_param into *out_param.
 * (Truncated extraction: several error-path/brace lines are missing.)
 */
343 static int mlx4_slave_cmd(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
344 int out_is_imm, u32 in_modifier, u8 op_modifier,
345 u16 op, unsigned long timeout)
347 struct mlx4_priv *priv = mlx4_priv(dev);
348 struct mlx4_vhcr_cmd *vhcr = priv->mfunc.vhcr;
351 down(&priv->cmd.slave_sem);
/* Populate the virtual HCR (all fields big-endian on the wire). */
352 vhcr->in_param = cpu_to_be64(in_param);
353 vhcr->out_param = out_param ? cpu_to_be64(*out_param) : 0;
354 vhcr->in_modifier = cpu_to_be32(in_modifier);
/* Opcode modifier lives in the top 4 bits, opcode in the low 12. */
355 vhcr->opcode = cpu_to_be16((((u16) op_modifier) << 12) | (op & 0xfff));
356 vhcr->token = cpu_to_be16(CMD_POLL_TOKEN);
/* Bit 6 of flags = e_bit: request a completion EQE when using events. */
358 vhcr->flags = !!(priv->cmd.use_events) << 6;
359 if (mlx4_is_master(dev)) {
/* Master: process its own vhcr in place, no comm channel needed. */
360 ret = mlx4_master_process_vhcr(dev, dev->caps.function, vhcr);
365 be64_to_cpu(vhcr->out_param);
367 mlx4_err(dev, "response expected while"
368 "output mailbox is NULL for "
369 "command 0x%x\n", op);
370 vhcr->status = -EINVAL;
/* Slave: notify the master that a vhcr command is posted. */
376 ret = mlx4_comm_cmd(dev, MLX4_COMM_CMD_VHCR_POST, 0,
377 MLX4_COMM_TIME + timeout);
382 be64_to_cpu(vhcr->out_param);
384 mlx4_err(dev, "response expected while"
385 "output mailbox is NULL for "
386 "command 0x%x\n", op);
387 vhcr->status = -EINVAL;
392 mlx4_err(dev, "failed execution of VHCR_POST command"
393 "opcode 0x%x\n", op);
395 up(&priv->cmd.slave_sem);
399 static int mlx4_cmd_poll(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
400 int out_is_imm, u32 in_modifier, u8 op_modifier,
401 u16 op, unsigned long timeout)
403 struct mlx4_priv *priv = mlx4_priv(dev);
404 void __iomem *hcr = priv->cmd.hcr;
409 down(&priv->cmd.poll_sem);
411 err = mlx4_cmd_post(dev, in_param, out_param ? *out_param : 0,
412 in_modifier, op_modifier, op, CMD_POLL_TOKEN, 0);
416 end = msecs_to_jiffies(timeout) + jiffies;
417 while (cmd_pending(dev) && time_before(jiffies, end))
420 if (cmd_pending(dev)) {
427 (u64) be32_to_cpu((__force __be32)
428 __raw_readl(hcr + HCR_OUT_PARAM_OFFSET)) << 32 |
429 (u64) be32_to_cpu((__force __be32)
430 __raw_readl(hcr + HCR_OUT_PARAM_OFFSET + 4));
431 stat = be32_to_cpu((__force __be32)
432 __raw_readl(hcr + HCR_STATUS_OFFSET)) >> 24;
433 err = mlx4_status_to_errno(stat);
435 mlx4_err(dev, "command 0x%x failed: fw status = 0x%x\n",
439 up(&priv->cmd.poll_sem);
443 void mlx4_cmd_event(struct mlx4_dev *dev, u16 token, u8 status, u64 out_param)
445 struct mlx4_priv *priv = mlx4_priv(dev);
446 struct mlx4_cmd_context *context =
447 &priv->cmd.context[token & priv->cmd.token_mask];
449 /* previously timed out command completing at long last */
450 if (token != context->token)
453 context->fw_status = status;
454 context->result = mlx4_status_to_errno(status);
455 context->out_param = out_param;
457 complete(&context->done);
460 static int mlx4_cmd_wait(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
461 int out_is_imm, u32 in_modifier, u8 op_modifier,
462 u16 op, unsigned long timeout)
464 struct mlx4_cmd *cmd = &mlx4_priv(dev)->cmd;
465 struct mlx4_cmd_context *context;
468 down(&cmd->event_sem);
470 spin_lock(&cmd->context_lock);
471 BUG_ON(cmd->free_head < 0);
472 context = &cmd->context[cmd->free_head];
473 context->token += cmd->token_mask + 1;
474 cmd->free_head = context->next;
475 spin_unlock(&cmd->context_lock);
477 init_completion(&context->done);
479 mlx4_cmd_post(dev, in_param, out_param ? *out_param : 0,
480 in_modifier, op_modifier, op, context->token, 1);
482 if (!wait_for_completion_timeout(&context->done,
483 msecs_to_jiffies(timeout))) {
488 err = context->result;
490 mlx4_err(dev, "command 0x%x failed: fw status = 0x%x\n",
491 op, context->fw_status);
496 *out_param = context->out_param;
499 spin_lock(&cmd->context_lock);
500 context->next = cmd->free_head;
501 cmd->free_head = context - cmd->context;
502 spin_unlock(&cmd->context_lock);
508 int __mlx4_cmd(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
509 int out_is_imm, u32 in_modifier, u8 op_modifier,
510 u16 op, unsigned long timeout, int native)
512 if (!mlx4_is_mfunc(dev) || (native && mlx4_is_master(dev))) {
513 if (mlx4_priv(dev)->cmd.use_events)
514 return mlx4_cmd_wait(dev, in_param, out_param,
515 out_is_imm, in_modifier,
516 op_modifier, op, timeout);
518 return mlx4_cmd_poll(dev, in_param, out_param,
519 out_is_imm, in_modifier,
520 op_modifier, op, timeout);
522 return mlx4_slave_cmd(dev, in_param, out_param, out_is_imm,
523 in_modifier, op_modifier, op, timeout);
525 EXPORT_SYMBOL_GPL(__mlx4_cmd);
528 static int mlx4_ARM_COMM_CHANNEL(struct mlx4_dev *dev)
530 return mlx4_cmd(dev, 0, 0, 0, MLX4_CMD_ARM_COMM_CHANNEL,
531 MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
534 static int mlx4_ACCESS_MEM(struct mlx4_dev *dev, u64 master_addr,
535 int slave, u64 slave_addr,
536 int size, int is_read)
541 if ((slave_addr & 0xfff) | (master_addr & 0xfff) |
542 (slave & ~0x7f) | (size & 0xff)) {
543 mlx4_err(dev, "Bad access mem params - slave_addr:0x%llx "
544 "master_addr:0x%llx slave_id:%d size:%d\n",
545 slave_addr, master_addr, slave, size);
550 in_param = (u64) slave | slave_addr;
551 out_param = (u64) dev->caps.function | master_addr;
553 in_param = (u64) dev->caps.function | master_addr;
554 out_param = (u64) slave | slave_addr;
557 return mlx4_cmd_imm(dev, in_param, &out_param, size, 0,
559 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
562 int mlx4_DMA_wrapper(struct mlx4_dev *dev, int slave,
563 struct mlx4_vhcr *vhcr,
564 struct mlx4_cmd_mailbox *inbox,
565 struct mlx4_cmd_mailbox *outbox,
566 struct mlx4_cmd_info *cmd)
572 in_param = cmd->has_inbox ? (u64) inbox->dma : vhcr->in_param;
573 out_param = cmd->has_outbox ? (u64) outbox->dma : vhcr->out_param;
574 if (cmd->encode_slave_id) {
575 in_param &= 0xffffffffffffff00ll;
579 err = __mlx4_cmd(dev, in_param, &out_param, cmd->out_is_imm,
580 vhcr->in_modifier, vhcr->op_modifier, vhcr->op,
581 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
584 vhcr->out_param = out_param;
/*
 * Table of commands the master will execute on behalf of slaves.
 * mlx4_master_process_vhcr() matches the vhcr opcode against .opcode and
 * dispatches accordingly.  (Truncated extraction: the has_inbox/
 * has_outbox/out_is_imm/verify/wrapper initializers and entry braces are
 * missing from this view.)
 */
589 static struct mlx4_cmd_info cmd_info[] = {
591 .opcode = MLX4_CMD_QUERY_FW,
595 .encode_slave_id = false,
600 .opcode = MLX4_CMD_QUERY_HCA,
604 .encode_slave_id = false,
609 .opcode = MLX4_CMD_QUERY_DEV_CAP,
613 .encode_slave_id = false,
/*
 * Master-side execution of one slave command posted through the virtual
 * HCR.  Flow: DMA the slave's vhcr into the master's copy (unless an
 * in-memory vhcr is supplied via in_vhcr), decode it into a SW
 * mlx4_vhcr, look up the opcode in cmd_info[], DMA in the inbox if any,
 * run permission checks, allocate the outbox if any, execute via the
 * command's wrapper or directly via __mlx4_cmd(), then DMA the outbox
 * and the vhcr result back to the slave and optionally raise a
 * completion EQE.  (Truncated extraction: many brace/error-path lines
 * are missing from this view.)
 */
619 static int mlx4_master_process_vhcr(struct mlx4_dev *dev, int slave,
620 struct mlx4_vhcr_cmd *in_vhcr)
622 struct mlx4_priv *priv = mlx4_priv(dev);
623 struct mlx4_cmd_info *cmd = NULL;
624 struct mlx4_vhcr_cmd *vhcr_cmd = in_vhcr ? in_vhcr : priv->mfunc.vhcr;
625 struct mlx4_vhcr *vhcr;
626 struct mlx4_cmd_mailbox *inbox = NULL;
627 struct mlx4_cmd_mailbox *outbox = NULL;
633 /* Create sw representation of Virtual HCR */
634 vhcr = kzalloc(sizeof(struct mlx4_vhcr), GFP_KERNEL);
638 /* DMA in the vHCR */
640 ret = mlx4_ACCESS_MEM(dev, priv->mfunc.vhcr_dma, slave,
641 priv->mfunc.master.slave_state[slave].vhcr_dma,
642 ALIGN(sizeof(struct mlx4_vhcr_cmd),
643 MLX4_ACCESS_MEM_ALIGN), 1);
645 mlx4_err(dev, "%s:Failed reading vhcr"
646 "ret: 0x%x\n", __func__, ret);
652 /* Fill SW VHCR fields */
653 vhcr->in_param = be64_to_cpu(vhcr_cmd->in_param);
654 vhcr->out_param = be64_to_cpu(vhcr_cmd->out_param);
655 vhcr->in_modifier = be32_to_cpu(vhcr_cmd->in_modifier);
656 vhcr->token = be16_to_cpu(vhcr_cmd->token);
/* Opcode is the low 12 bits; opcode modifier the top 4. */
657 vhcr->op = be16_to_cpu(vhcr_cmd->opcode) & 0xfff;
658 vhcr->op_modifier = (u8) (be16_to_cpu(vhcr_cmd->opcode) >> 12);
/* e_bit: slave requested a completion EQE (flags bit 6). */
659 vhcr->e_bit = vhcr_cmd->flags & (1 << 6);
/* Look up the opcode in the wrapper table. */
662 for (i = 0; i < ARRAY_SIZE(cmd_info); ++i) {
663 if (vhcr->op == cmd_info[i].opcode) {
669 mlx4_err(dev, "Unknown command:0x%x accepted from slave:%d\n",
671 vhcr_cmd->status = -EINVAL;
676 if (cmd->has_inbox) {
677 vhcr->in_param &= INBOX_MASK;
678 inbox = mlx4_alloc_cmd_mailbox(dev);
680 ret = PTR_ERR(inbox);
/* DMA the slave's inbox contents into our mailbox. */
685 ret = mlx4_ACCESS_MEM(dev, inbox->dma, slave,
687 MLX4_MAILBOX_SIZE, 1);
689 mlx4_err(dev, "%s: Failed reading inbox (cmd:0x%x)\n",
690 __func__, cmd->opcode);
695 /* Apply permission and bound checks if applicable */
696 if (cmd->verify && cmd->verify(dev, slave, vhcr, inbox)) {
697 mlx4_warn(dev, "Command:0x%x from slave: %d failed protection "
698 "checks for resource_id:%d\n", vhcr->op, slave,
700 vhcr_cmd->status = -EPERM;
704 /* Allocate outbox */
705 if (cmd->has_outbox) {
706 outbox = mlx4_alloc_cmd_mailbox(dev);
707 if (IS_ERR(outbox)) {
708 ret = PTR_ERR(outbox);
714 /* Execute the command! */
716 vhcr_cmd->status = cmd->wrapper(dev, slave, vhcr, inbox, outbox,
719 vhcr_cmd->out_param = cpu_to_be64(vhcr->out_param);
/* No wrapper: issue the command directly on behalf of the slave. */
721 in_param = cmd->has_inbox ? (u64) inbox->dma :
723 out_param = cmd->has_outbox ? (u64) outbox->dma :
725 vhcr_cmd->status = __mlx4_cmd(dev, in_param, &out_param,
726 cmd->out_is_imm, vhcr->in_modifier,
727 vhcr->op_modifier, vhcr->op,
728 MLX4_CMD_TIME_CLASS_A,
731 if (vhcr_cmd->status) {
732 mlx4_warn(dev, "vhcr command:0x%x slave:%d failed with"
733 " error:%d, status %d\n",
734 vhcr->op, slave, vhcr->errno,
736 ret = vhcr_cmd->status;
740 if (cmd->out_is_imm) {
741 vhcr->out_param = out_param;
742 vhcr_cmd->out_param = cpu_to_be64(vhcr->out_param);
746 /* Write outbox if command completed successfully */
747 if (cmd->has_outbox && !vhcr->errno) {
748 ret = mlx4_ACCESS_MEM(dev, outbox->dma, slave,
750 MLX4_MAILBOX_SIZE, MLX4_CMD_WRAPPED);
752 mlx4_err(dev, "%s:Failed writing outbox\n", __func__);
758 /* DMA back vhcr result */
760 ret = mlx4_ACCESS_MEM(dev, priv->mfunc.vhcr_dma, slave,
761 priv->mfunc.master.slave_state[slave].vhcr_dma,
762 ALIGN(sizeof(struct mlx4_vhcr),
763 MLX4_ACCESS_MEM_ALIGN),
766 mlx4_err(dev, "%s:Failed writing vhcr result\n",
/* Raise a completion EQE if the slave asked for one. */
768 else if (vhcr->e_bit &&
769 mlx4_GEN_EQE(dev, slave, &priv->mfunc.master.cmd_eqe))
770 mlx4_warn(dev, "Failed to generate command completion "
771 "eqe for slave %d\n", slave);
/* Cleanup: mlx4_free_cmd_mailbox() tolerates NULL mailboxes. */
776 mlx4_free_cmd_mailbox(dev, inbox);
777 mlx4_free_cmd_mailbox(dev, outbox);
/*
 * Master-side handler for a single comm-channel command from 'slave'.
 * Validates the toggle bit, tracks the slave's command sequence
 * (RESET -> VHCR0..VHCR2 -> VHCR_EN -> VHCR_POST, where VHCR0..EN
 * deliver the slave's vhcr DMA address 16 bits at a time), executes
 * posted vhcr commands, and finally acknowledges the slave by writing
 * the flipped toggle (plus any status/version payload) back into the
 * slave_read register.  (Truncated extraction: switch header, 'reset_slave:'
 * label and several brace/error lines are missing from this view.)
 */
781 static void mlx4_master_do_cmd(struct mlx4_dev *dev, int slave, u8 cmd,
782 u16 param, u8 toggle)
784 struct mlx4_priv *priv = mlx4_priv(dev);
785 struct mlx4_slave_state *slave_state = priv->mfunc.master.slave_state;
787 u32 slave_status = 0;
788 u8 is_going_down = 0;
/* Mirror the slave's toggle flip; the ack carries it in bit 31. */
790 slave_state[slave].comm_toggle ^= 1;
791 reply = (u32) slave_state[slave].comm_toggle << 31;
792 if (toggle != slave_state[slave].comm_toggle) {
793 mlx4_warn(dev, "Incorrect toggle %d from slave %d. *** MASTER"
794 "STATE COMPROMISIED ***\n", toggle, slave);
797 if (cmd == MLX4_COMM_CMD_RESET) {
798 mlx4_warn(dev, "Received reset from slave:%d\n", slave);
799 slave_state[slave].active = false;
800 /*check if we are in the middle of FLR process,
801 if so return "retry" status to the slave*/
802 if (MLX4_COMM_CMD_FLR == slave_state[slave].last_cmd) {
803 slave_status = MLX4_DELAY_RESET_SLAVE;
804 goto inform_slave_state;
807 /* write the version in the event field */
808 reply |= mlx4_comm_get_version();
812 /*command from slave in the middle of FLR*/
813 if (cmd != MLX4_COMM_CMD_RESET &&
814 MLX4_COMM_CMD_FLR == slave_state[slave].last_cmd) {
815 mlx4_warn(dev, "slave:%d is Trying to run cmd(0x%x) "
816 "in the middle of FLR\n", slave, cmd);
/* VHCR0..VHCR_EN: accumulate the vhcr DMA address 16 bits at a time;
 * each step is only legal directly after its predecessor. */
821 case MLX4_COMM_CMD_VHCR0:
822 if (slave_state[slave].last_cmd != MLX4_COMM_CMD_RESET)
824 slave_state[slave].vhcr_dma = ((u64) param) << 48;
825 priv->mfunc.master.slave_state[slave].cookie = 0;
826 mutex_init(&priv->mfunc.master.gen_eqe_mutex[slave]);
828 case MLX4_COMM_CMD_VHCR1:
829 if (slave_state[slave].last_cmd != MLX4_COMM_CMD_VHCR0)
831 slave_state[slave].vhcr_dma |= ((u64) param) << 32;
833 case MLX4_COMM_CMD_VHCR2:
834 if (slave_state[slave].last_cmd != MLX4_COMM_CMD_VHCR1)
836 slave_state[slave].vhcr_dma |= ((u64) param) << 16;
838 case MLX4_COMM_CMD_VHCR_EN:
839 if (slave_state[slave].last_cmd != MLX4_COMM_CMD_VHCR2)
841 slave_state[slave].vhcr_dma |= param;
842 slave_state[slave].active = true;
844 case MLX4_COMM_CMD_VHCR_POST:
845 if ((slave_state[slave].last_cmd != MLX4_COMM_CMD_VHCR_EN) &&
846 (slave_state[slave].last_cmd != MLX4_COMM_CMD_VHCR_POST))
/* slave_sem serialises vhcr processing with mlx4_slave_cmd(). */
848 down(&priv->cmd.slave_sem);
849 if (mlx4_master_process_vhcr(dev, slave, NULL)) {
850 mlx4_err(dev, "Failed processing vhcr for slave:%d,"
851 " reseting slave.\n", slave);
852 up(&priv->cmd.slave_sem);
855 up(&priv->cmd.slave_sem);
858 mlx4_warn(dev, "Bad comm cmd:%d from slave:%d\n", cmd, slave);
/* Record the last command unless the slave is being torn down. */
861 spin_lock(&priv->mfunc.master.slave_state_lock);
862 if (!slave_state[slave].is_slave_going_down)
863 slave_state[slave].last_cmd = cmd;
866 spin_unlock(&priv->mfunc.master.slave_state_lock);
868 mlx4_warn(dev, "Slave is going down aborting command(%d)"
869 " executing from slave:%d\n",
/* Ack the slave: write the flipped toggle back to slave_read. */
873 __raw_writel((__force u32) cpu_to_be32(reply),
874 &priv->mfunc.comm[slave].slave_read);
/* Error path: force the slave back to RESET state. */
880 spin_lock(&priv->mfunc.master.slave_state_lock);
881 if (!slave_state[slave].is_slave_going_down)
882 slave_state[slave].last_cmd = MLX4_COMM_CMD_RESET;
883 spin_unlock(&priv->mfunc.master.slave_state_lock);
884 /*with slave in the middle of flr, no need to clean resources again.*/
886 memset(&slave_state[slave].event_eq, 0,
887 sizeof(struct mlx4_slave_event_eq_info));
888 __raw_writel((__force u32) cpu_to_be32(reply),
889 &priv->mfunc.comm[slave].slave_read);
893 /* master command processing */
/*
 * Workqueue handler invoked when the comm channel fires: scans the
 * armed bit vector, reads each flagged slave's slave_write register,
 * resynchronises the stored toggle if the slave restarted, dispatches
 * the command via mlx4_master_do_cmd(), and finally re-arms comm
 * channel events.  (Truncated extraction: variable declarations, the
 * toggle-compare condition and the reported/served accounting lines are
 * missing from this view.)
 */
894 void mlx4_master_comm_channel(struct work_struct *work)
/* Recover priv/dev from the embedded work_struct. */
896 struct mlx4_mfunc_master_ctx *master =
898 struct mlx4_mfunc_master_ctx,
900 struct mlx4_mfunc *mfunc =
901 container_of(master, struct mlx4_mfunc, master);
902 struct mlx4_priv *priv =
903 container_of(mfunc, struct mlx4_priv, mfunc);
904 struct mlx4_dev *dev = &priv->dev;
914 bit_vec = master->comm_arm_bit_vector;
915 for (i = 0; i < COMM_CHANNEL_BIT_ARRAY_SIZE; i++) {
916 vec = be32_to_cpu(bit_vec[i]);
917 for (j = 0; j < 32; j++) {
918 if (!(vec & (1 << j)))
921 slave = (i * 32) + j;
/* comm_cmd layout: toggle in bit 31, cmd in bits 23:16, param in 15:0. */
922 comm_cmd = swab32(readl(
923 &mfunc->comm[slave].slave_write));
924 slt = swab32(readl(&mfunc->comm[slave].slave_read))
926 toggle = comm_cmd >> 31;
928 if (master->slave_state[slave].comm_toggle
930 printk(KERN_INFO "slave %d out of sync."
931 " read toggle %d, state toggle %d. "
932 "Resynching.\n", slave, slt,
933 master->slave_state[slave].comm_toggle);
934 master->slave_state[slave].comm_toggle =
937 mlx4_master_do_cmd(dev, slave,
938 comm_cmd >> 16 & 0xff,
939 comm_cmd & 0xffff, toggle);
945 if (reported && reported != served)
946 mlx4_warn(dev, "Got command event with bitmask from %d slaves"
947 " but %d were served\n",
950 if (mlx4_ARM_COMM_CHANNEL(dev))
951 mlx4_warn(dev, "Failed to arm comm channel events\n");
/*
 * One-time command-interface initialisation: set up locks and
 * semaphores, start in polling mode, map the HCR (masters only — slaves
 * talk through the vhcr instead), and create the PCI pool used for
 * command mailboxes.  (Truncated extraction: the error-return lines and
 * pool-failure cleanup path are missing from this view.)
 */
954 int mlx4_cmd_init(struct mlx4_dev *dev)
956 struct mlx4_priv *priv = mlx4_priv(dev);
958 mutex_init(&priv->cmd.hcr_mutex);
959 sema_init(&priv->cmd.poll_sem, 1);
/* Start in polling mode; mlx4_cmd_use_events() switches later. */
960 priv->cmd.use_events = 0;
961 priv->cmd.toggle = 1;
963 priv->cmd.hcr = NULL;
964 priv->mfunc.vhcr = NULL;
966 if (!mlx4_is_slave(dev)) {
/* Only functions with direct HW access map the command register. */
967 priv->cmd.hcr = ioremap(pci_resource_start(dev->pdev, 0) +
968 MLX4_HCR_BASE, MLX4_HCR_SIZE);
969 if (!priv->cmd.hcr) {
970 mlx4_err(dev, "Couldn't map command register.\n");
975 priv->cmd.pool = pci_pool_create("mlx4_cmd", dev->pdev,
977 MLX4_MAILBOX_SIZE, 0);
/* Error path: undo the HCR mapping on masters. */
984 if (!mlx4_is_slave(dev))
985 iounmap(priv->cmd.hcr);
989 void mlx4_cmd_cleanup(struct mlx4_dev *dev)
991 struct mlx4_priv *priv = mlx4_priv(dev);
993 pci_pool_destroy(priv->cmd.pool);
995 if (!mlx4_is_slave(dev))
996 iounmap(priv->cmd.hcr);
1000 * Switch to using events to issue FW commands (can only be called
1001 * after event queue for command events has been initialized).
1003 int mlx4_cmd_use_events(struct mlx4_dev *dev)
1005 struct mlx4_priv *priv = mlx4_priv(dev);
1009 priv->cmd.context = kmalloc(priv->cmd.max_cmds *
1010 sizeof (struct mlx4_cmd_context),
1012 if (!priv->cmd.context)
1015 for (i = 0; i < priv->cmd.max_cmds; ++i) {
1016 priv->cmd.context[i].token = i;
1017 priv->cmd.context[i].next = i + 1;
1020 priv->cmd.context[priv->cmd.max_cmds - 1].next = -1;
1021 priv->cmd.free_head = 0;
1023 sema_init(&priv->cmd.event_sem, priv->cmd.max_cmds);
1024 spin_lock_init(&priv->cmd.context_lock);
1026 for (priv->cmd.token_mask = 1;
1027 priv->cmd.token_mask < priv->cmd.max_cmds;
1028 priv->cmd.token_mask <<= 1)
1030 --priv->cmd.token_mask;
1032 down(&priv->cmd.poll_sem);
1033 priv->cmd.use_events = 1;
1039 * Switch back to polling (used when shutting down the device)
1041 void mlx4_cmd_use_polling(struct mlx4_dev *dev)
1043 struct mlx4_priv *priv = mlx4_priv(dev);
1046 priv->cmd.use_events = 0;
1048 for (i = 0; i < priv->cmd.max_cmds; ++i)
1049 down(&priv->cmd.event_sem);
1051 kfree(priv->cmd.context);
1053 up(&priv->cmd.poll_sem);
1056 struct mlx4_cmd_mailbox *mlx4_alloc_cmd_mailbox(struct mlx4_dev *dev)
1058 struct mlx4_cmd_mailbox *mailbox;
1060 mailbox = kmalloc(sizeof *mailbox, GFP_KERNEL);
1062 return ERR_PTR(-ENOMEM);
1064 mailbox->buf = pci_pool_alloc(mlx4_priv(dev)->cmd.pool, GFP_KERNEL,
1066 if (!mailbox->buf) {
1068 return ERR_PTR(-ENOMEM);
1073 EXPORT_SYMBOL_GPL(mlx4_alloc_cmd_mailbox);
1075 void mlx4_free_cmd_mailbox(struct mlx4_dev *dev,
1076 struct mlx4_cmd_mailbox *mailbox)
1081 pci_pool_free(mlx4_priv(dev)->cmd.pool, mailbox->buf, mailbox->dma);
1084 EXPORT_SYMBOL_GPL(mlx4_free_cmd_mailbox);
1086 u32 mlx4_comm_get_version(void)
1088 return ((u32) CMD_CHAN_IF_REV << 8) | (u32) CMD_CHAN_VER;