/*
 * Copyright (c) 2013-2016, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/random.h>
#include <linux/io-mapping.h>
#include <linux/mlx5/driver.h>
#include <linux/debugfs.h>

#include "mlx5_core.h"

enum {
	NUM_LONG_LISTS	  = 2,
	NUM_MED_LISTS	  = 64,
	LONG_LIST_SIZE	  = (2ULL * 1024 * 1024 * 1024 / PAGE_SIZE) * 8 + 16 +
				MLX5_CMD_DATA_BLOCK_SIZE,
	MED_LIST_SIZE	  = 16 + MLX5_CMD_DATA_BLOCK_SIZE,
};

enum {
	MLX5_CMD_DELIVERY_STAT_OK			= 0x0,
	MLX5_CMD_DELIVERY_STAT_SIGNAT_ERR		= 0x1,
	MLX5_CMD_DELIVERY_STAT_TOK_ERR			= 0x2,
	MLX5_CMD_DELIVERY_STAT_BAD_BLK_NUM_ERR		= 0x3,
	MLX5_CMD_DELIVERY_STAT_OUT_PTR_ALIGN_ERR	= 0x4,
	MLX5_CMD_DELIVERY_STAT_IN_PTR_ALIGN_ERR		= 0x5,
	MLX5_CMD_DELIVERY_STAT_FW_ERR			= 0x6,
	MLX5_CMD_DELIVERY_STAT_IN_LENGTH_ERR		= 0x7,
	MLX5_CMD_DELIVERY_STAT_OUT_LENGTH_ERR		= 0x8,
	MLX5_CMD_DELIVERY_STAT_RES_FLD_NOT_CLR_ERR	= 0x9,
	MLX5_CMD_DELIVERY_STAT_CMD_DESCR_ERR		= 0x10,
};
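
/*
 * A delivery status reports whether the transport layer managed to pass
 * the command to firmware at all; it is distinct from the command
 * status returned in the output mailbox once the command has actually
 * executed (see cmd_status_str() below).
 */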

static struct mlx5_cmd_work_ent *alloc_cmd(struct mlx5_cmd *cmd,
					   struct mlx5_cmd_msg *in,
					   struct mlx5_cmd_msg *out,
					   void *uout, int uout_size,
					   mlx5_cmd_cbk_t cbk,
					   void *context, int page_queue)
{
	gfp_t alloc_flags = cbk ? GFP_ATOMIC : GFP_KERNEL;
	struct mlx5_cmd_work_ent *ent;

	ent = kzalloc(sizeof(*ent), alloc_flags);
	if (!ent)
		return ERR_PTR(-ENOMEM);

	ent->in		= in;
	ent->out	= out;
	ent->uout	= uout;
	ent->uout_size	= uout_size;
	ent->callback	= cbk;
	ent->context	= context;
	ent->cmd	= cmd;
	ent->page_queue = page_queue;

	return ent;
}

static u8 alloc_token(struct mlx5_cmd *cmd)
{
	u8 token;

	spin_lock(&cmd->token_lock);
	cmd->token++;
	if (!cmd->token)
		cmd->token++;
	token = cmd->token;
	spin_unlock(&cmd->token_lock);

	return token;
}
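
/*
 * The token is a rolling tag stamped into the command descriptor and
 * into every mailbox block of the command (lay->token, block->token);
 * firmware reports a mismatch as MLX5_CMD_DELIVERY_STAT_TOK_ERR, which
 * helps catch stale or corrupted mailbox chains.
 */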

static int alloc_ent(struct mlx5_cmd *cmd)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&cmd->alloc_lock, flags);
	ret = find_first_bit(&cmd->bitmask, cmd->max_reg_cmds);
	if (ret < cmd->max_reg_cmds)
		clear_bit(ret, &cmd->bitmask);
	spin_unlock_irqrestore(&cmd->alloc_lock, flags);

	return ret < cmd->max_reg_cmds ? ret : -ENOMEM;
}

static void free_ent(struct mlx5_cmd *cmd, int idx)
{
	unsigned long flags;

	spin_lock_irqsave(&cmd->alloc_lock, flags);
	set_bit(idx, &cmd->bitmask);
	spin_unlock_irqrestore(&cmd->alloc_lock, flags);
}
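
/*
 * Command slots are managed with a bitmap in which a set bit means the
 * slot is free.  Slots 0..max_reg_cmds-1 serve regular commands; the
 * last slot (idx == max_reg_cmds) is kept out of the bitmap and is
 * reserved for the page request queue (see cmd_work_handler() below).
 */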

static struct mlx5_cmd_layout *get_inst(struct mlx5_cmd *cmd, int idx)
{
	return cmd->cmd_buf + (idx << cmd->log_stride);
}

static u8 xor8_buf(void *buf, size_t offset, int len)
{
	u8 *ptr = buf;
	u8 sum = 0;
	int i;
	int end = len + offset;

	for (i = offset; i < end; i++)
		sum ^= ptr[i];

	return sum;
}

static int verify_block_sig(struct mlx5_cmd_prot_block *block)
{
	size_t rsvd0_off = offsetof(struct mlx5_cmd_prot_block, rsvd0);
	int xor_len = sizeof(*block) - sizeof(block->data) - 1;

	if (xor8_buf(block, rsvd0_off, xor_len) != 0xff)
		return -EINVAL;

	if (xor8_buf(block, 0, sizeof(*block)) != 0xff)
		return -EINVAL;

	return 0;
}

static void calc_block_sig(struct mlx5_cmd_prot_block *block)
{
	int ctrl_xor_len = sizeof(*block) - sizeof(block->data) - 2;
	size_t rsvd0_off = offsetof(struct mlx5_cmd_prot_block, rsvd0);

	block->ctrl_sig = ~xor8_buf(block, rsvd0_off, ctrl_xor_len);
	block->sig = ~xor8_buf(block, 0, sizeof(*block) - 1);
}
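
/*
 * Signature invariant: each signature byte is written as the bitwise
 * NOT of the XOR of all other bytes it covers, so XOR-ing an entire
 * signed region, signature byte included, must yield 0xff.  This is
 * exactly what verify_block_sig() checks.
 */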

static void calc_chain_sig(struct mlx5_cmd_msg *msg)
{
	struct mlx5_cmd_mailbox *next = msg->next;
	int size = msg->len;
	int blen = size - min_t(int, sizeof(msg->first.data), size);
	int n = (blen + MLX5_CMD_DATA_BLOCK_SIZE - 1)
		/ MLX5_CMD_DATA_BLOCK_SIZE;
	int i = 0;

	for (i = 0; i < n && next; i++) {
		calc_block_sig(next->buf);
		next = next->next;
	}
}

static void set_signature(struct mlx5_cmd_work_ent *ent, int csum)
{
	ent->lay->sig = ~xor8_buf(ent->lay, 0, sizeof(*ent->lay));
	if (csum) {
		calc_chain_sig(ent->in);
		calc_chain_sig(ent->out);
	}
}

static void poll_timeout(struct mlx5_cmd_work_ent *ent)
{
	unsigned long poll_end = jiffies + msecs_to_jiffies(MLX5_CMD_TIMEOUT_MSEC + 1000);
	u8 own;

	do {
		own = ent->lay->status_own;
		if (!(own & CMD_OWNER_HW)) {
			ent->ret = 0;
			return;
		}
		usleep_range(5000, 10000);
	} while (time_before(jiffies, poll_end));

	ent->ret = -ETIMEDOUT;
}

static void free_cmd(struct mlx5_cmd_work_ent *ent)
{
	kfree(ent);
}

static int verify_signature(struct mlx5_cmd_work_ent *ent)
{
	struct mlx5_cmd_mailbox *next = ent->out->next;
	int err;
	u8 sig;
	int size = ent->out->len;
	int blen = size - min_t(int, sizeof(ent->out->first.data), size);
	int n = (blen + MLX5_CMD_DATA_BLOCK_SIZE - 1)
		/ MLX5_CMD_DATA_BLOCK_SIZE;
	int i = 0;

	sig = xor8_buf(ent->lay, 0, sizeof(*ent->lay));
	if (sig != 0xff)
		return -EINVAL;

	for (i = 0; i < n && next; i++) {
		err = verify_block_sig(next->buf);
		if (err)
			return err;

		next = next->next;
	}

	return 0;
}

static void dump_buf(void *buf, int size, int data_only, int offset)
{
	__be32 *p = buf;
	int i;

	for (i = 0; i < size; i += 16) {
		pr_debug("%03x: %08x %08x %08x %08x\n", offset, be32_to_cpu(p[0]),
			 be32_to_cpu(p[1]), be32_to_cpu(p[2]),
			 be32_to_cpu(p[3]));
		p += 4;
		offset += 16;
	}
	if (!data_only)
		pr_debug("\n");
}

enum {
	MLX5_DRIVER_STATUS_ABORTED = 0xfe,
	MLX5_DRIVER_SYND = 0xbadd00de,
};

static int mlx5_internal_err_ret_value(struct mlx5_core_dev *dev, u16 op,
				       u32 *synd, u8 *status)
{
	*synd = 0;
	*status = 0;

	switch (op) {
	case MLX5_CMD_OP_TEARDOWN_HCA:
	case MLX5_CMD_OP_DISABLE_HCA:
	case MLX5_CMD_OP_MANAGE_PAGES:
	case MLX5_CMD_OP_DESTROY_MKEY:
	case MLX5_CMD_OP_DESTROY_EQ:
	case MLX5_CMD_OP_DESTROY_CQ:
	case MLX5_CMD_OP_DESTROY_QP:
	case MLX5_CMD_OP_DESTROY_PSV:
	case MLX5_CMD_OP_DESTROY_SRQ:
	case MLX5_CMD_OP_DESTROY_XRC_SRQ:
	case MLX5_CMD_OP_DESTROY_DCT:
	case MLX5_CMD_OP_DEALLOC_Q_COUNTER:
	case MLX5_CMD_OP_DEALLOC_PD:
	case MLX5_CMD_OP_DEALLOC_UAR:
	case MLX5_CMD_OP_DETACH_FROM_MCG:
	case MLX5_CMD_OP_DEALLOC_XRCD:
	case MLX5_CMD_OP_DEALLOC_TRANSPORT_DOMAIN:
	case MLX5_CMD_OP_DELETE_VXLAN_UDP_DPORT:
	case MLX5_CMD_OP_DELETE_L2_TABLE_ENTRY:
	case MLX5_CMD_OP_DESTROY_LAG:
	case MLX5_CMD_OP_DESTROY_VPORT_LAG:
	case MLX5_CMD_OP_DESTROY_TIR:
	case MLX5_CMD_OP_DESTROY_SQ:
	case MLX5_CMD_OP_DESTROY_RQ:
	case MLX5_CMD_OP_DESTROY_RMP:
	case MLX5_CMD_OP_DESTROY_TIS:
	case MLX5_CMD_OP_DESTROY_RQT:
	case MLX5_CMD_OP_DESTROY_FLOW_TABLE:
	case MLX5_CMD_OP_DESTROY_FLOW_GROUP:
	case MLX5_CMD_OP_DELETE_FLOW_TABLE_ENTRY:
	case MLX5_CMD_OP_DEALLOC_FLOW_COUNTER:
	case MLX5_CMD_OP_2ERR_QP:
	case MLX5_CMD_OP_2RST_QP:
	case MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT:
	case MLX5_CMD_OP_MODIFY_FLOW_TABLE:
	case MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY:
	case MLX5_CMD_OP_SET_FLOW_TABLE_ROOT:
	case MLX5_CMD_OP_DEALLOC_ENCAP_HEADER:
		return MLX5_CMD_STAT_OK;

	case MLX5_CMD_OP_QUERY_HCA_CAP:
	case MLX5_CMD_OP_QUERY_ADAPTER:
	case MLX5_CMD_OP_INIT_HCA:
	case MLX5_CMD_OP_ENABLE_HCA:
	case MLX5_CMD_OP_QUERY_PAGES:
	case MLX5_CMD_OP_SET_HCA_CAP:
	case MLX5_CMD_OP_QUERY_ISSI:
	case MLX5_CMD_OP_SET_ISSI:
	case MLX5_CMD_OP_CREATE_MKEY:
	case MLX5_CMD_OP_QUERY_MKEY:
	case MLX5_CMD_OP_QUERY_SPECIAL_CONTEXTS:
	case MLX5_CMD_OP_PAGE_FAULT_RESUME:
	case MLX5_CMD_OP_CREATE_EQ:
	case MLX5_CMD_OP_QUERY_EQ:
	case MLX5_CMD_OP_GEN_EQE:
	case MLX5_CMD_OP_CREATE_CQ:
	case MLX5_CMD_OP_QUERY_CQ:
	case MLX5_CMD_OP_MODIFY_CQ:
	case MLX5_CMD_OP_CREATE_QP:
	case MLX5_CMD_OP_RST2INIT_QP:
	case MLX5_CMD_OP_INIT2RTR_QP:
	case MLX5_CMD_OP_RTR2RTS_QP:
	case MLX5_CMD_OP_RTS2RTS_QP:
	case MLX5_CMD_OP_SQERR2RTS_QP:
	case MLX5_CMD_OP_QUERY_QP:
	case MLX5_CMD_OP_SQD_RTS_QP:
	case MLX5_CMD_OP_INIT2INIT_QP:
	case MLX5_CMD_OP_CREATE_PSV:
	case MLX5_CMD_OP_CREATE_SRQ:
	case MLX5_CMD_OP_QUERY_SRQ:
	case MLX5_CMD_OP_ARM_RQ:
	case MLX5_CMD_OP_CREATE_XRC_SRQ:
	case MLX5_CMD_OP_QUERY_XRC_SRQ:
	case MLX5_CMD_OP_ARM_XRC_SRQ:
	case MLX5_CMD_OP_CREATE_DCT:
	case MLX5_CMD_OP_DRAIN_DCT:
	case MLX5_CMD_OP_QUERY_DCT:
	case MLX5_CMD_OP_ARM_DCT_FOR_KEY_VIOLATION:
	case MLX5_CMD_OP_QUERY_VPORT_STATE:
	case MLX5_CMD_OP_MODIFY_VPORT_STATE:
	case MLX5_CMD_OP_QUERY_ESW_VPORT_CONTEXT:
	case MLX5_CMD_OP_MODIFY_ESW_VPORT_CONTEXT:
	case MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT:
	case MLX5_CMD_OP_QUERY_ROCE_ADDRESS:
	case MLX5_CMD_OP_SET_ROCE_ADDRESS:
	case MLX5_CMD_OP_QUERY_HCA_VPORT_CONTEXT:
	case MLX5_CMD_OP_MODIFY_HCA_VPORT_CONTEXT:
	case MLX5_CMD_OP_QUERY_HCA_VPORT_GID:
	case MLX5_CMD_OP_QUERY_HCA_VPORT_PKEY:
	case MLX5_CMD_OP_QUERY_VPORT_COUNTER:
	case MLX5_CMD_OP_ALLOC_Q_COUNTER:
	case MLX5_CMD_OP_QUERY_Q_COUNTER:
	case MLX5_CMD_OP_ALLOC_PD:
	case MLX5_CMD_OP_ALLOC_UAR:
	case MLX5_CMD_OP_CONFIG_INT_MODERATION:
	case MLX5_CMD_OP_ACCESS_REG:
	case MLX5_CMD_OP_ATTACH_TO_MCG:
	case MLX5_CMD_OP_GET_DROPPED_PACKET_LOG:
	case MLX5_CMD_OP_MAD_IFC:
	case MLX5_CMD_OP_QUERY_MAD_DEMUX:
	case MLX5_CMD_OP_SET_MAD_DEMUX:
	case MLX5_CMD_OP_NOP:
	case MLX5_CMD_OP_ALLOC_XRCD:
	case MLX5_CMD_OP_ALLOC_TRANSPORT_DOMAIN:
	case MLX5_CMD_OP_QUERY_CONG_STATUS:
	case MLX5_CMD_OP_MODIFY_CONG_STATUS:
	case MLX5_CMD_OP_QUERY_CONG_PARAMS:
	case MLX5_CMD_OP_MODIFY_CONG_PARAMS:
	case MLX5_CMD_OP_QUERY_CONG_STATISTICS:
	case MLX5_CMD_OP_ADD_VXLAN_UDP_DPORT:
	case MLX5_CMD_OP_SET_L2_TABLE_ENTRY:
	case MLX5_CMD_OP_QUERY_L2_TABLE_ENTRY:
	case MLX5_CMD_OP_CREATE_LAG:
	case MLX5_CMD_OP_MODIFY_LAG:
	case MLX5_CMD_OP_QUERY_LAG:
	case MLX5_CMD_OP_CREATE_VPORT_LAG:
	case MLX5_CMD_OP_CREATE_TIR:
	case MLX5_CMD_OP_MODIFY_TIR:
	case MLX5_CMD_OP_QUERY_TIR:
	case MLX5_CMD_OP_CREATE_SQ:
	case MLX5_CMD_OP_MODIFY_SQ:
	case MLX5_CMD_OP_QUERY_SQ:
	case MLX5_CMD_OP_CREATE_RQ:
	case MLX5_CMD_OP_MODIFY_RQ:
	case MLX5_CMD_OP_QUERY_RQ:
	case MLX5_CMD_OP_CREATE_RMP:
	case MLX5_CMD_OP_MODIFY_RMP:
	case MLX5_CMD_OP_QUERY_RMP:
	case MLX5_CMD_OP_CREATE_TIS:
	case MLX5_CMD_OP_MODIFY_TIS:
	case MLX5_CMD_OP_QUERY_TIS:
	case MLX5_CMD_OP_CREATE_RQT:
	case MLX5_CMD_OP_MODIFY_RQT:
	case MLX5_CMD_OP_QUERY_RQT:
	case MLX5_CMD_OP_CREATE_FLOW_TABLE:
	case MLX5_CMD_OP_QUERY_FLOW_TABLE:
	case MLX5_CMD_OP_CREATE_FLOW_GROUP:
	case MLX5_CMD_OP_QUERY_FLOW_GROUP:
	case MLX5_CMD_OP_QUERY_FLOW_TABLE_ENTRY:
	case MLX5_CMD_OP_ALLOC_FLOW_COUNTER:
	case MLX5_CMD_OP_QUERY_FLOW_COUNTER:
	case MLX5_CMD_OP_ALLOC_ENCAP_HEADER:
		*status = MLX5_DRIVER_STATUS_ABORTED;
		*synd = MLX5_DRIVER_SYND;
		return -EIO;
	default:
		mlx5_core_err(dev, "Unknown FW command (%d)\n", op);
		return -EINVAL;
	}
}
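
/*
 * When the device is in an internal error state, the switch above lets
 * teardown-style commands (DESTROY_*, DEALLOC_*, etc.) report success
 * so that resource cleanup can still make progress, while create/query
 * style commands fail with a driver-generated syndrome.
 */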

const char *mlx5_command_str(int command)
{
#define MLX5_COMMAND_STR_CASE(__cmd) case MLX5_CMD_OP_ ## __cmd: return #__cmd

	switch (command) {
	MLX5_COMMAND_STR_CASE(QUERY_HCA_CAP);
	MLX5_COMMAND_STR_CASE(QUERY_ADAPTER);
	MLX5_COMMAND_STR_CASE(INIT_HCA);
	MLX5_COMMAND_STR_CASE(TEARDOWN_HCA);
	MLX5_COMMAND_STR_CASE(ENABLE_HCA);
	MLX5_COMMAND_STR_CASE(DISABLE_HCA);
	MLX5_COMMAND_STR_CASE(QUERY_PAGES);
	MLX5_COMMAND_STR_CASE(MANAGE_PAGES);
	MLX5_COMMAND_STR_CASE(SET_HCA_CAP);
	MLX5_COMMAND_STR_CASE(QUERY_ISSI);
	MLX5_COMMAND_STR_CASE(SET_ISSI);
	MLX5_COMMAND_STR_CASE(CREATE_MKEY);
	MLX5_COMMAND_STR_CASE(QUERY_MKEY);
	MLX5_COMMAND_STR_CASE(DESTROY_MKEY);
	MLX5_COMMAND_STR_CASE(QUERY_SPECIAL_CONTEXTS);
	MLX5_COMMAND_STR_CASE(PAGE_FAULT_RESUME);
	MLX5_COMMAND_STR_CASE(CREATE_EQ);
	MLX5_COMMAND_STR_CASE(DESTROY_EQ);
	MLX5_COMMAND_STR_CASE(QUERY_EQ);
	MLX5_COMMAND_STR_CASE(GEN_EQE);
	MLX5_COMMAND_STR_CASE(CREATE_CQ);
	MLX5_COMMAND_STR_CASE(DESTROY_CQ);
	MLX5_COMMAND_STR_CASE(QUERY_CQ);
	MLX5_COMMAND_STR_CASE(MODIFY_CQ);
	MLX5_COMMAND_STR_CASE(CREATE_QP);
	MLX5_COMMAND_STR_CASE(DESTROY_QP);
	MLX5_COMMAND_STR_CASE(RST2INIT_QP);
	MLX5_COMMAND_STR_CASE(INIT2RTR_QP);
	MLX5_COMMAND_STR_CASE(RTR2RTS_QP);
	MLX5_COMMAND_STR_CASE(RTS2RTS_QP);
	MLX5_COMMAND_STR_CASE(SQERR2RTS_QP);
	MLX5_COMMAND_STR_CASE(2ERR_QP);
	MLX5_COMMAND_STR_CASE(2RST_QP);
	MLX5_COMMAND_STR_CASE(QUERY_QP);
	MLX5_COMMAND_STR_CASE(SQD_RTS_QP);
	MLX5_COMMAND_STR_CASE(INIT2INIT_QP);
	MLX5_COMMAND_STR_CASE(CREATE_PSV);
	MLX5_COMMAND_STR_CASE(DESTROY_PSV);
	MLX5_COMMAND_STR_CASE(CREATE_SRQ);
	MLX5_COMMAND_STR_CASE(DESTROY_SRQ);
	MLX5_COMMAND_STR_CASE(QUERY_SRQ);
	MLX5_COMMAND_STR_CASE(ARM_RQ);
	MLX5_COMMAND_STR_CASE(CREATE_XRC_SRQ);
	MLX5_COMMAND_STR_CASE(DESTROY_XRC_SRQ);
	MLX5_COMMAND_STR_CASE(QUERY_XRC_SRQ);
	MLX5_COMMAND_STR_CASE(ARM_XRC_SRQ);
	MLX5_COMMAND_STR_CASE(CREATE_DCT);
	MLX5_COMMAND_STR_CASE(DESTROY_DCT);
	MLX5_COMMAND_STR_CASE(DRAIN_DCT);
	MLX5_COMMAND_STR_CASE(QUERY_DCT);
	MLX5_COMMAND_STR_CASE(ARM_DCT_FOR_KEY_VIOLATION);
	MLX5_COMMAND_STR_CASE(QUERY_VPORT_STATE);
	MLX5_COMMAND_STR_CASE(MODIFY_VPORT_STATE);
	MLX5_COMMAND_STR_CASE(QUERY_ESW_VPORT_CONTEXT);
	MLX5_COMMAND_STR_CASE(MODIFY_ESW_VPORT_CONTEXT);
	MLX5_COMMAND_STR_CASE(QUERY_NIC_VPORT_CONTEXT);
	MLX5_COMMAND_STR_CASE(MODIFY_NIC_VPORT_CONTEXT);
	MLX5_COMMAND_STR_CASE(QUERY_ROCE_ADDRESS);
	MLX5_COMMAND_STR_CASE(SET_ROCE_ADDRESS);
	MLX5_COMMAND_STR_CASE(QUERY_HCA_VPORT_CONTEXT);
	MLX5_COMMAND_STR_CASE(MODIFY_HCA_VPORT_CONTEXT);
	MLX5_COMMAND_STR_CASE(QUERY_HCA_VPORT_GID);
	MLX5_COMMAND_STR_CASE(QUERY_HCA_VPORT_PKEY);
	MLX5_COMMAND_STR_CASE(QUERY_VPORT_COUNTER);
	MLX5_COMMAND_STR_CASE(ALLOC_Q_COUNTER);
	MLX5_COMMAND_STR_CASE(DEALLOC_Q_COUNTER);
	MLX5_COMMAND_STR_CASE(QUERY_Q_COUNTER);
	MLX5_COMMAND_STR_CASE(ALLOC_PD);
	MLX5_COMMAND_STR_CASE(DEALLOC_PD);
	MLX5_COMMAND_STR_CASE(ALLOC_UAR);
	MLX5_COMMAND_STR_CASE(DEALLOC_UAR);
	MLX5_COMMAND_STR_CASE(CONFIG_INT_MODERATION);
	MLX5_COMMAND_STR_CASE(ACCESS_REG);
	MLX5_COMMAND_STR_CASE(ATTACH_TO_MCG);
	MLX5_COMMAND_STR_CASE(DETACH_FROM_MCG);
	MLX5_COMMAND_STR_CASE(GET_DROPPED_PACKET_LOG);
	MLX5_COMMAND_STR_CASE(MAD_IFC);
	MLX5_COMMAND_STR_CASE(QUERY_MAD_DEMUX);
	MLX5_COMMAND_STR_CASE(SET_MAD_DEMUX);
	MLX5_COMMAND_STR_CASE(NOP);
	MLX5_COMMAND_STR_CASE(ALLOC_XRCD);
	MLX5_COMMAND_STR_CASE(DEALLOC_XRCD);
	MLX5_COMMAND_STR_CASE(ALLOC_TRANSPORT_DOMAIN);
	MLX5_COMMAND_STR_CASE(DEALLOC_TRANSPORT_DOMAIN);
	MLX5_COMMAND_STR_CASE(QUERY_CONG_STATUS);
	MLX5_COMMAND_STR_CASE(MODIFY_CONG_STATUS);
	MLX5_COMMAND_STR_CASE(QUERY_CONG_PARAMS);
	MLX5_COMMAND_STR_CASE(MODIFY_CONG_PARAMS);
	MLX5_COMMAND_STR_CASE(QUERY_CONG_STATISTICS);
	MLX5_COMMAND_STR_CASE(ADD_VXLAN_UDP_DPORT);
	MLX5_COMMAND_STR_CASE(DELETE_VXLAN_UDP_DPORT);
	MLX5_COMMAND_STR_CASE(SET_L2_TABLE_ENTRY);
	MLX5_COMMAND_STR_CASE(QUERY_L2_TABLE_ENTRY);
	MLX5_COMMAND_STR_CASE(DELETE_L2_TABLE_ENTRY);
	MLX5_COMMAND_STR_CASE(SET_WOL_ROL);
	MLX5_COMMAND_STR_CASE(QUERY_WOL_ROL);
	MLX5_COMMAND_STR_CASE(CREATE_LAG);
	MLX5_COMMAND_STR_CASE(MODIFY_LAG);
	MLX5_COMMAND_STR_CASE(QUERY_LAG);
	MLX5_COMMAND_STR_CASE(DESTROY_LAG);
	MLX5_COMMAND_STR_CASE(CREATE_VPORT_LAG);
	MLX5_COMMAND_STR_CASE(DESTROY_VPORT_LAG);
	MLX5_COMMAND_STR_CASE(CREATE_TIR);
	MLX5_COMMAND_STR_CASE(MODIFY_TIR);
	MLX5_COMMAND_STR_CASE(DESTROY_TIR);
	MLX5_COMMAND_STR_CASE(QUERY_TIR);
	MLX5_COMMAND_STR_CASE(CREATE_SQ);
	MLX5_COMMAND_STR_CASE(MODIFY_SQ);
	MLX5_COMMAND_STR_CASE(DESTROY_SQ);
	MLX5_COMMAND_STR_CASE(QUERY_SQ);
	MLX5_COMMAND_STR_CASE(CREATE_RQ);
	MLX5_COMMAND_STR_CASE(MODIFY_RQ);
	MLX5_COMMAND_STR_CASE(DESTROY_RQ);
	MLX5_COMMAND_STR_CASE(QUERY_RQ);
	MLX5_COMMAND_STR_CASE(CREATE_RMP);
	MLX5_COMMAND_STR_CASE(MODIFY_RMP);
	MLX5_COMMAND_STR_CASE(DESTROY_RMP);
	MLX5_COMMAND_STR_CASE(QUERY_RMP);
	MLX5_COMMAND_STR_CASE(CREATE_TIS);
	MLX5_COMMAND_STR_CASE(MODIFY_TIS);
	MLX5_COMMAND_STR_CASE(DESTROY_TIS);
	MLX5_COMMAND_STR_CASE(QUERY_TIS);
	MLX5_COMMAND_STR_CASE(CREATE_RQT);
	MLX5_COMMAND_STR_CASE(MODIFY_RQT);
	MLX5_COMMAND_STR_CASE(DESTROY_RQT);
	MLX5_COMMAND_STR_CASE(QUERY_RQT);
	MLX5_COMMAND_STR_CASE(SET_FLOW_TABLE_ROOT);
	MLX5_COMMAND_STR_CASE(CREATE_FLOW_TABLE);
	MLX5_COMMAND_STR_CASE(DESTROY_FLOW_TABLE);
	MLX5_COMMAND_STR_CASE(QUERY_FLOW_TABLE);
	MLX5_COMMAND_STR_CASE(CREATE_FLOW_GROUP);
	MLX5_COMMAND_STR_CASE(DESTROY_FLOW_GROUP);
	MLX5_COMMAND_STR_CASE(QUERY_FLOW_GROUP);
	MLX5_COMMAND_STR_CASE(SET_FLOW_TABLE_ENTRY);
	MLX5_COMMAND_STR_CASE(QUERY_FLOW_TABLE_ENTRY);
	MLX5_COMMAND_STR_CASE(DELETE_FLOW_TABLE_ENTRY);
	MLX5_COMMAND_STR_CASE(ALLOC_FLOW_COUNTER);
	MLX5_COMMAND_STR_CASE(DEALLOC_FLOW_COUNTER);
	MLX5_COMMAND_STR_CASE(QUERY_FLOW_COUNTER);
	MLX5_COMMAND_STR_CASE(MODIFY_FLOW_TABLE);
	MLX5_COMMAND_STR_CASE(ALLOC_ENCAP_HEADER);
	MLX5_COMMAND_STR_CASE(DEALLOC_ENCAP_HEADER);
	default: return "unknown command opcode";
	}
}

static const char *cmd_status_str(u8 status)
{
	switch (status) {
	case MLX5_CMD_STAT_OK:
		return "OK";
	case MLX5_CMD_STAT_INT_ERR:
		return "internal error";
	case MLX5_CMD_STAT_BAD_OP_ERR:
		return "bad operation";
	case MLX5_CMD_STAT_BAD_PARAM_ERR:
		return "bad parameter";
	case MLX5_CMD_STAT_BAD_SYS_STATE_ERR:
		return "bad system state";
	case MLX5_CMD_STAT_BAD_RES_ERR:
		return "bad resource";
	case MLX5_CMD_STAT_RES_BUSY:
		return "resource busy";
	case MLX5_CMD_STAT_LIM_ERR:
		return "limits exceeded";
	case MLX5_CMD_STAT_BAD_RES_STATE_ERR:
		return "bad resource state";
	case MLX5_CMD_STAT_IX_ERR:
		return "bad index";
	case MLX5_CMD_STAT_NO_RES_ERR:
		return "no resources";
	case MLX5_CMD_STAT_BAD_INP_LEN_ERR:
		return "bad input length";
	case MLX5_CMD_STAT_BAD_OUTP_LEN_ERR:
		return "bad output length";
	case MLX5_CMD_STAT_BAD_QP_STATE_ERR:
		return "bad QP state";
	case MLX5_CMD_STAT_BAD_PKT_ERR:
		return "bad packet (discarded)";
	case MLX5_CMD_STAT_BAD_SIZE_OUTS_CQES_ERR:
		return "bad size too many outstanding CQEs";
	default:
		return "unknown status";
	}
}

static int cmd_status_to_err(u8 status)
{
	switch (status) {
	case MLX5_CMD_STAT_OK:				return 0;
	case MLX5_CMD_STAT_INT_ERR:			return -EIO;
	case MLX5_CMD_STAT_BAD_OP_ERR:			return -EINVAL;
	case MLX5_CMD_STAT_BAD_PARAM_ERR:		return -EINVAL;
	case MLX5_CMD_STAT_BAD_SYS_STATE_ERR:		return -EIO;
	case MLX5_CMD_STAT_BAD_RES_ERR:			return -EINVAL;
	case MLX5_CMD_STAT_RES_BUSY:			return -EBUSY;
	case MLX5_CMD_STAT_LIM_ERR:			return -ENOMEM;
	case MLX5_CMD_STAT_BAD_RES_STATE_ERR:		return -EINVAL;
	case MLX5_CMD_STAT_IX_ERR:			return -EINVAL;
	case MLX5_CMD_STAT_NO_RES_ERR:			return -EAGAIN;
	case MLX5_CMD_STAT_BAD_INP_LEN_ERR:		return -EIO;
	case MLX5_CMD_STAT_BAD_OUTP_LEN_ERR:		return -EIO;
	case MLX5_CMD_STAT_BAD_QP_STATE_ERR:		return -EINVAL;
	case MLX5_CMD_STAT_BAD_PKT_ERR:			return -EINVAL;
	case MLX5_CMD_STAT_BAD_SIZE_OUTS_CQES_ERR:	return -EINVAL;
	default:					return -EIO;
	}
}

struct mlx5_ifc_mbox_out_bits {
	u8	   status[0x8];
	u8	   reserved_at_8[0x18];

	u8	   syndrome[0x20];

	u8	   reserved_at_40[0x40];
};

struct mlx5_ifc_mbox_in_bits {
	u8	   opcode[0x10];
	u8	   reserved_at_10[0x10];

	u8	   reserved_at_20[0x10];
	u8	   op_mod[0x10];

	u8	   reserved_at_40[0x40];
};

void mlx5_cmd_mbox_status(void *out, u8 *status, u32 *syndrome)
{
	*status = MLX5_GET(mbox_out, out, status);
	*syndrome = MLX5_GET(mbox_out, out, syndrome);
}

static int mlx5_cmd_check(struct mlx5_core_dev *dev, void *in, void *out)
{
	u32 syndrome;
	u8  status;
	u16 opcode;
	u16 op_mod;

	mlx5_cmd_mbox_status(out, &status, &syndrome);
	if (!status)
		return 0;

	opcode = MLX5_GET(mbox_in, in, opcode);
	op_mod = MLX5_GET(mbox_in, in, op_mod);

	mlx5_core_err(dev,
		      "%s(0x%x) op_mod(0x%x) failed, status %s(0x%x), syndrome (0x%x)\n",
		      mlx5_command_str(opcode),
		      opcode, op_mod,
		      cmd_status_str(status),
		      status, syndrome);

	return cmd_status_to_err(status);
}

static void dump_command(struct mlx5_core_dev *dev,
			 struct mlx5_cmd_work_ent *ent, int input)
{
	struct mlx5_cmd_msg *msg = input ? ent->in : ent->out;
	u16 op = MLX5_GET(mbox_in, ent->lay->in, opcode);
	struct mlx5_cmd_mailbox *next = msg->next;
	int data_only;
	u32 offset = 0;
	int dump_len;

	data_only = !!(mlx5_core_debug_mask & (1 << MLX5_CMD_DATA));

	if (data_only)
		mlx5_core_dbg_mask(dev, 1 << MLX5_CMD_DATA,
				   "dump command data %s(0x%x) %s\n",
				   mlx5_command_str(op), op,
				   input ? "INPUT" : "OUTPUT");
	else
		mlx5_core_dbg(dev, "dump command %s(0x%x) %s\n",
			      mlx5_command_str(op), op,
			      input ? "INPUT" : "OUTPUT");

	if (data_only) {
		if (input) {
			dump_buf(ent->lay->in, sizeof(ent->lay->in), 1, offset);
			offset += sizeof(ent->lay->in);
		} else {
			dump_buf(ent->lay->out, sizeof(ent->lay->out), 1, offset);
			offset += sizeof(ent->lay->out);
		}
	} else {
		dump_buf(ent->lay, sizeof(*ent->lay), 0, offset);
		offset += sizeof(*ent->lay);
	}

	while (next && offset < msg->len) {
		if (data_only) {
			dump_len = min_t(int, MLX5_CMD_DATA_BLOCK_SIZE, msg->len - offset);
			dump_buf(next->buf, dump_len, 1, offset);
			offset += MLX5_CMD_DATA_BLOCK_SIZE;
		} else {
			mlx5_core_dbg(dev, "command block:\n");
			dump_buf(next->buf, sizeof(struct mlx5_cmd_prot_block), 0, offset);
			offset += sizeof(struct mlx5_cmd_prot_block);
		}
		next = next->next;
	}

	if (data_only)
		pr_debug("\n");
}

static u16 msg_to_opcode(struct mlx5_cmd_msg *in)
{
	return MLX5_GET(mbox_in, in->first.data, opcode);
}

static void cb_timeout_handler(struct work_struct *work)
{
	struct delayed_work *dwork = container_of(work, struct delayed_work,
						  work);
	struct mlx5_cmd_work_ent *ent = container_of(dwork,
						     struct mlx5_cmd_work_ent,
						     cb_timeout_work);
	struct mlx5_core_dev *dev = container_of(ent->cmd, struct mlx5_core_dev,
						 cmd);

	ent->ret = -ETIMEDOUT;
	mlx5_core_warn(dev, "%s(0x%x) timeout. Will cause a leak of a command resource\n",
		       mlx5_command_str(msg_to_opcode(ent->in)),
		       msg_to_opcode(ent->in));
	mlx5_cmd_comp_handler(dev, 1UL << ent->idx);
}

static void cmd_work_handler(struct work_struct *work)
{
	struct mlx5_cmd_work_ent *ent = container_of(work, struct mlx5_cmd_work_ent, work);
	struct mlx5_cmd *cmd = ent->cmd;
	struct mlx5_core_dev *dev = container_of(cmd, struct mlx5_core_dev, cmd);
	unsigned long cb_timeout = msecs_to_jiffies(MLX5_CMD_TIMEOUT_MSEC);
	struct mlx5_cmd_layout *lay;
	struct semaphore *sem;
	unsigned long flags;

	sem = ent->page_queue ? &cmd->pages_sem : &cmd->sem;
	down(sem);
	if (!ent->page_queue) {
		ent->idx = alloc_ent(cmd);
		if (ent->idx < 0) {
			mlx5_core_err(dev, "failed to allocate command entry\n");
			up(sem);
			return;
		}
	} else {
		ent->idx = cmd->max_reg_cmds;
		spin_lock_irqsave(&cmd->alloc_lock, flags);
		clear_bit(ent->idx, &cmd->bitmask);
		spin_unlock_irqrestore(&cmd->alloc_lock, flags);
	}

	cmd->ent_arr[ent->idx] = ent;
	lay = get_inst(cmd, ent->idx);
	ent->lay = lay;
	memset(lay, 0, sizeof(*lay));
	memcpy(lay->in, ent->in->first.data, sizeof(lay->in));
	ent->op = be32_to_cpu(lay->in[0]) >> 16;
	if (ent->in->next)
		lay->in_ptr = cpu_to_be64(ent->in->next->dma);
	lay->inlen = cpu_to_be32(ent->in->len);
	if (ent->out->next)
		lay->out_ptr = cpu_to_be64(ent->out->next->dma);
	lay->outlen = cpu_to_be32(ent->out->len);
	lay->type = MLX5_PCI_CMD_XPORT;
	lay->token = ent->token;
	lay->status_own = CMD_OWNER_HW;
	set_signature(ent, !cmd->checksum_disabled);
	dump_command(dev, ent, 1);
	ent->ts1 = ktime_get_ns();

	if (ent->callback)
		schedule_delayed_work(&ent->cb_timeout_work, cb_timeout);

	/* ring doorbell after the descriptor is valid */
	mlx5_core_dbg(dev, "writing 0x%x to command doorbell\n", 1 << ent->idx);
	wmb();
	iowrite32be(1 << ent->idx, &dev->iseg->cmd_dbell);
	mmiowb();
	/* if not in polling don't use ent after this point */
	if (cmd->mode == CMD_MODE_POLLING) {
		poll_timeout(ent);
		/* make sure we read the descriptor after ownership is SW */
		rmb();
		mlx5_cmd_comp_handler(dev, 1UL << ent->idx);
	}
}
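
/*
 * In polling mode the work handler busy-waits for ownership to return
 * to software and then drives the completion path inline; in events
 * mode the completion EQ interrupt calls mlx5_cmd_comp_handler()
 * instead, which is why the entry must not be touched here once the
 * doorbell has been rung.
 */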

static const char *deliv_status_to_str(u8 status)
{
	switch (status) {
	case MLX5_CMD_DELIVERY_STAT_OK:
		return "no errors";
	case MLX5_CMD_DELIVERY_STAT_SIGNAT_ERR:
		return "signature error";
	case MLX5_CMD_DELIVERY_STAT_TOK_ERR:
		return "token error";
	case MLX5_CMD_DELIVERY_STAT_BAD_BLK_NUM_ERR:
		return "bad block number";
	case MLX5_CMD_DELIVERY_STAT_OUT_PTR_ALIGN_ERR:
		return "output pointer not aligned to block size";
	case MLX5_CMD_DELIVERY_STAT_IN_PTR_ALIGN_ERR:
		return "input pointer not aligned to block size";
	case MLX5_CMD_DELIVERY_STAT_FW_ERR:
		return "firmware internal error";
	case MLX5_CMD_DELIVERY_STAT_IN_LENGTH_ERR:
		return "command input length error";
	case MLX5_CMD_DELIVERY_STAT_OUT_LENGTH_ERR:
		return "command output length error";
	case MLX5_CMD_DELIVERY_STAT_RES_FLD_NOT_CLR_ERR:
		return "reserved fields not cleared";
	case MLX5_CMD_DELIVERY_STAT_CMD_DESCR_ERR:
		return "bad command descriptor type";
	default:
		return "unknown status code";
	}
}

static int wait_func(struct mlx5_core_dev *dev, struct mlx5_cmd_work_ent *ent)
{
	unsigned long timeout = msecs_to_jiffies(MLX5_CMD_TIMEOUT_MSEC);
	struct mlx5_cmd *cmd = &dev->cmd;
	int err;

	if (cmd->mode == CMD_MODE_POLLING) {
		wait_for_completion(&ent->done);
	} else if (!wait_for_completion_timeout(&ent->done, timeout)) {
		ent->ret = -ETIMEDOUT;
		mlx5_cmd_comp_handler(dev, 1UL << ent->idx);
	}

	err = ent->ret;

	if (err == -ETIMEDOUT) {
		mlx5_core_warn(dev, "%s(0x%x) timeout. Will cause a leak of a command resource\n",
			       mlx5_command_str(msg_to_opcode(ent->in)),
			       msg_to_opcode(ent->in));
	}
	mlx5_core_dbg(dev, "err %d, delivery status %s(%d)\n",
		      err, deliv_status_to_str(ent->status), ent->status);

	return err;
}
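
/*
 * A timeout in events mode is resolved by forcing the completion path
 * (mlx5_cmd_comp_handler() with the entry's bit), so waiters are
 * released even though firmware never answered; the command slot
 * itself is leaked, as the warning above notes.
 */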

/*  Notes:
 *	1. Callback functions may not sleep
 *	2. page queue commands do not support asynchronous completion
 */
static int mlx5_cmd_invoke(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *in,
			   struct mlx5_cmd_msg *out, void *uout, int uout_size,
			   mlx5_cmd_cbk_t callback,
			   void *context, int page_queue, u8 *status,
			   u8 token)
{
	struct mlx5_cmd *cmd = &dev->cmd;
	struct mlx5_cmd_work_ent *ent;
	struct mlx5_cmd_stats *stats;
	int err = 0;
	s64 ds;
	u16 op;

	if (callback && page_queue)
		return -EINVAL;

	ent = alloc_cmd(cmd, in, out, uout, uout_size, callback, context,
			page_queue);
	if (IS_ERR(ent))
		return PTR_ERR(ent);

	ent->token = token;

	if (!callback)
		init_completion(&ent->done);

	INIT_DELAYED_WORK(&ent->cb_timeout_work, cb_timeout_handler);
	INIT_WORK(&ent->work, cmd_work_handler);
	if (page_queue) {
		cmd_work_handler(&ent->work);
	} else if (!queue_work(cmd->wq, &ent->work)) {
		mlx5_core_warn(dev, "failed to queue work\n");
		err = -ENOMEM;
		goto out_free;
	}

	if (callback)
		goto out;

	err = wait_func(dev, ent);
	if (err == -ETIMEDOUT)
		goto out_free;

	ds = ent->ts2 - ent->ts1;
	op = MLX5_GET(mbox_in, in->first.data, opcode);
	if (op < ARRAY_SIZE(cmd->stats)) {
		stats = &cmd->stats[op];
		spin_lock_irq(&stats->lock);
		stats->sum += ds;
		++stats->n;
		spin_unlock_irq(&stats->lock);
	}
	mlx5_core_dbg_mask(dev, 1 << MLX5_CMD_TIME,
			   "fw exec time for %s is %lld nsec\n",
			   mlx5_command_str(op), ds);
	*status = ent->status;

out_free:
	free_cmd(ent);
out:
	return err;
}

static ssize_t dbg_write(struct file *filp, const char __user *buf,
			 size_t count, loff_t *pos)
{
	struct mlx5_core_dev *dev = filp->private_data;
	struct mlx5_cmd_debug *dbg = &dev->cmd.dbg;
	char lbuf[3];
	int err;

	if (!dbg->in_msg || !dbg->out_msg)
		return -ENOMEM;

	if (copy_from_user(lbuf, buf, sizeof(lbuf)))
		return -EFAULT;

	lbuf[sizeof(lbuf) - 1] = 0;

	if (strcmp(lbuf, "go"))
		return -EINVAL;

	err = mlx5_cmd_exec(dev, dbg->in_msg, dbg->inlen, dbg->out_msg, dbg->outlen);

	return err ? err : count;
}

static const struct file_operations fops = {
	.owner	= THIS_MODULE,
	.open	= simple_open,
	.write	= dbg_write,
};

static int mlx5_copy_to_msg(struct mlx5_cmd_msg *to, void *from, int size,
			    u8 token)
{
	struct mlx5_cmd_prot_block *block;
	struct mlx5_cmd_mailbox *next;
	int copy;

	if (!to || !from)
		return -ENOMEM;

	copy = min_t(int, size, sizeof(to->first.data));
	memcpy(to->first.data, from, copy);
	size -= copy;
	from += copy;

	next = to->next;
	while (size) {
		if (!next) {
			/* this is a BUG */
			return -ENOMEM;
		}

		copy = min_t(int, size, MLX5_CMD_DATA_BLOCK_SIZE);
		block = next->buf;
		memcpy(block->data, from, copy);
		from += copy;
		size -= copy;
		block->token = token;
		next = next->next;
	}

	return 0;
}

static int mlx5_copy_from_msg(void *to, struct mlx5_cmd_msg *from, int size)
{
	struct mlx5_cmd_prot_block *block;
	struct mlx5_cmd_mailbox *next;
	int copy;

	if (!to || !from)
		return -ENOMEM;

	copy = min_t(int, size, sizeof(from->first.data));
	memcpy(to, from->first.data, copy);
	size -= copy;
	to += copy;

	next = from->next;
	while (size) {
		if (!next) {
			/* this is a BUG */
			return -ENOMEM;
		}

		copy = min_t(int, size, MLX5_CMD_DATA_BLOCK_SIZE);
		block = next->buf;

		memcpy(to, block->data, copy);
		to += copy;
		size -= copy;
		next = next->next;
	}

	return 0;
}

static struct mlx5_cmd_mailbox *alloc_cmd_box(struct mlx5_core_dev *dev,
					      gfp_t flags)
{
	struct mlx5_cmd_mailbox *mailbox;

	mailbox = kmalloc(sizeof(*mailbox), flags);
	if (!mailbox)
		return ERR_PTR(-ENOMEM);

	mailbox->buf = pci_pool_alloc(dev->cmd.pool, flags,
				      &mailbox->dma);
	if (!mailbox->buf) {
		mlx5_core_dbg(dev, "failed allocation\n");
		kfree(mailbox);
		return ERR_PTR(-ENOMEM);
	}
	memset(mailbox->buf, 0, sizeof(struct mlx5_cmd_prot_block));
	mailbox->next = NULL;

	return mailbox;
}

static void free_cmd_box(struct mlx5_core_dev *dev,
			 struct mlx5_cmd_mailbox *mailbox)
{
	pci_pool_free(dev->cmd.pool, mailbox->buf, mailbox->dma);
	kfree(mailbox);
}

static struct mlx5_cmd_msg *mlx5_alloc_cmd_msg(struct mlx5_core_dev *dev,
					       gfp_t flags, int size,
					       u8 token)
{
	struct mlx5_cmd_mailbox *tmp, *head = NULL;
	struct mlx5_cmd_prot_block *block;
	struct mlx5_cmd_msg *msg;
	int blen;
	int err;
	int n;
	int i;

	msg = kzalloc(sizeof(*msg), flags);
	if (!msg)
		return ERR_PTR(-ENOMEM);

	blen = size - min_t(int, sizeof(msg->first.data), size);
	n = (blen + MLX5_CMD_DATA_BLOCK_SIZE - 1) / MLX5_CMD_DATA_BLOCK_SIZE;

	for (i = 0; i < n; i++) {
		tmp = alloc_cmd_box(dev, flags);
		if (IS_ERR(tmp)) {
			mlx5_core_warn(dev, "failed allocating block\n");
			err = PTR_ERR(tmp);
			goto err_alloc;
		}

		block = tmp->buf;
		tmp->next = head;
		block->next = cpu_to_be64(tmp->next ? tmp->next->dma : 0);
		block->block_num = cpu_to_be32(n - i - 1);
		block->token = token;
		head = tmp;
	}
	msg->next = head;
	msg->len = size;
	return msg;

err_alloc:
	while (head) {
		tmp = head->next;
		free_cmd_box(dev, head);
		head = tmp;
	}
	kfree(msg);

	return ERR_PTR(err);
}
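
/*
 * Mailbox blocks are linked newest-first while being allocated: each
 * new block points at the previous head and carries a descending
 * block_num, so the chain firmware walks through block->next sees
 * blocks 0, 1, ..., n-1 in ascending order.
 */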

static void mlx5_free_cmd_msg(struct mlx5_core_dev *dev,
			      struct mlx5_cmd_msg *msg)
{
	struct mlx5_cmd_mailbox *head = msg->next;
	struct mlx5_cmd_mailbox *next;

	while (head) {
		next = head->next;
		free_cmd_box(dev, head);
		head = next;
	}
	kfree(msg);
}

static ssize_t data_write(struct file *filp, const char __user *buf,
			  size_t count, loff_t *pos)
{
	struct mlx5_core_dev *dev = filp->private_data;
	struct mlx5_cmd_debug *dbg = &dev->cmd.dbg;
	void *ptr;

	if (*pos != 0)
		return -EINVAL;

	kfree(dbg->in_msg);
	dbg->in_msg = NULL;
	dbg->inlen = 0;
	ptr = memdup_user(buf, count);
	if (IS_ERR(ptr))
		return PTR_ERR(ptr);
	dbg->in_msg = ptr;
	dbg->inlen = count;

	*pos = count;

	return count;
}

static ssize_t data_read(struct file *filp, char __user *buf, size_t count,
			 loff_t *pos)
{
	struct mlx5_core_dev *dev = filp->private_data;
	struct mlx5_cmd_debug *dbg = &dev->cmd.dbg;
	int copy;

	if (*pos)
		return 0;

	if (!dbg->out_msg)
		return -ENOMEM;

	copy = min_t(int, count, dbg->outlen);
	if (copy_to_user(buf, dbg->out_msg, copy))
		return -EFAULT;

	*pos += copy;

	return copy;
}

static const struct file_operations dfops = {
	.owner	= THIS_MODULE,
	.open	= simple_open,
	.write	= data_write,
	.read	= data_read,
};

static ssize_t outlen_read(struct file *filp, char __user *buf, size_t count,
			   loff_t *pos)
{
	struct mlx5_core_dev *dev = filp->private_data;
	struct mlx5_cmd_debug *dbg = &dev->cmd.dbg;
	char outlen[8];
	int err;

	if (*pos)
		return 0;

	err = snprintf(outlen, sizeof(outlen), "%d", dbg->outlen);
	if (err < 0)
		return err;

	if (copy_to_user(buf, &outlen, err))
		return -EFAULT;

	*pos += err;

	return err;
}

static ssize_t outlen_write(struct file *filp, const char __user *buf,
			    size_t count, loff_t *pos)
{
	struct mlx5_core_dev *dev = filp->private_data;
	struct mlx5_cmd_debug *dbg = &dev->cmd.dbg;
	char outlen_str[8] = {0};
	int outlen;
	void *ptr;
	int err;

	if (*pos != 0 || count > 6)
		return -EINVAL;

	kfree(dbg->out_msg);
	dbg->out_msg = NULL;
	dbg->outlen = 0;

	if (copy_from_user(outlen_str, buf, count))
		return -EFAULT;

	err = sscanf(outlen_str, "%d", &outlen);
	if (err < 0)
		return err;

	ptr = kzalloc(outlen, GFP_KERNEL);
	if (!ptr)
		return -ENOMEM;

	dbg->out_msg = ptr;
	dbg->outlen = outlen;

	*pos = count;

	return count;
}

static const struct file_operations olfops = {
	.owner	= THIS_MODULE,
	.open	= simple_open,
	.write	= outlen_write,
	.read	= outlen_read,
};

static void set_wqname(struct mlx5_core_dev *dev)
{
	struct mlx5_cmd *cmd = &dev->cmd;

	snprintf(cmd->wq_name, sizeof(cmd->wq_name), "mlx5_cmd_%s",
		 dev_name(&dev->pdev->dev));
}

static void clean_debug_files(struct mlx5_core_dev *dev)
{
	struct mlx5_cmd_debug *dbg = &dev->cmd.dbg;

	if (!mlx5_debugfs_root)
		return;

	mlx5_cmdif_debugfs_cleanup(dev);
	debugfs_remove_recursive(dbg->dbg_root);
}

static int create_debugfs_files(struct mlx5_core_dev *dev)
{
	struct mlx5_cmd_debug *dbg = &dev->cmd.dbg;
	int err = -ENOMEM;

	if (!mlx5_debugfs_root)
		return 0;

	dbg->dbg_root = debugfs_create_dir("cmd", dev->priv.dbg_root);
	if (!dbg->dbg_root)
		return err;

	dbg->dbg_in = debugfs_create_file("in", 0400, dbg->dbg_root,
					  dev, &dfops);
	if (!dbg->dbg_in)
		goto err_dbg;

	dbg->dbg_out = debugfs_create_file("out", 0200, dbg->dbg_root,
					   dev, &dfops);
	if (!dbg->dbg_out)
		goto err_dbg;

	dbg->dbg_outlen = debugfs_create_file("out_len", 0600, dbg->dbg_root,
					      dev, &olfops);
	if (!dbg->dbg_outlen)
		goto err_dbg;

	dbg->dbg_status = debugfs_create_u8("status", 0600, dbg->dbg_root,
					    &dbg->status);
	if (!dbg->dbg_status)
		goto err_dbg;

	dbg->dbg_run = debugfs_create_file("run", 0200, dbg->dbg_root, dev, &fops);
	if (!dbg->dbg_run)
		goto err_dbg;

	mlx5_cmdif_debugfs_init(dev);

	return 0;

err_dbg:
	clean_debug_files(dev);
	return err;
}

static void mlx5_cmd_change_mod(struct mlx5_core_dev *dev, int mode)
{
	struct mlx5_cmd *cmd = &dev->cmd;
	int i;

	for (i = 0; i < cmd->max_reg_cmds; i++)
		down(&cmd->sem);
	down(&cmd->pages_sem);

	cmd->mode = mode;

	up(&cmd->pages_sem);
	for (i = 0; i < cmd->max_reg_cmds; i++)
		up(&cmd->sem);
}

void mlx5_cmd_use_events(struct mlx5_core_dev *dev)
{
	mlx5_cmd_change_mod(dev, CMD_MODE_EVENTS);
}

void mlx5_cmd_use_polling(struct mlx5_core_dev *dev)
{
	mlx5_cmd_change_mod(dev, CMD_MODE_POLLING);
}

static void free_msg(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *msg)
{
	unsigned long flags;

	if (msg->cache) {
		spin_lock_irqsave(&msg->cache->lock, flags);
		list_add_tail(&msg->list, &msg->cache->head);
		spin_unlock_irqrestore(&msg->cache->lock, flags);
	} else {
		mlx5_free_cmd_msg(dev, msg);
	}
}

void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u64 vec)
{
	struct mlx5_cmd *cmd = &dev->cmd;
	struct mlx5_cmd_work_ent *ent;
	mlx5_cmd_cbk_t callback;
	void *context;
	int err;
	int i;
	s64 ds;
	struct mlx5_cmd_stats *stats;
	unsigned long flags;
	unsigned long vector;

	/* there can be at most 32 command queues */
	vector = vec & 0xffffffff;
	for (i = 0; i < (1 << cmd->log_sz); i++) {
		if (test_bit(i, &vector)) {
			struct semaphore *sem;

			ent = cmd->ent_arr[i];
			if (ent->callback)
				cancel_delayed_work(&ent->cb_timeout_work);
			if (ent->page_queue)
				sem = &cmd->pages_sem;
			else
				sem = &cmd->sem;
			ent->ts2 = ktime_get_ns();
			memcpy(ent->out->first.data, ent->lay->out, sizeof(ent->lay->out));
			dump_command(dev, ent, 0);
			if (!ent->ret) {
				if (!cmd->checksum_disabled)
					ent->ret = verify_signature(ent);
				else
					ent->ret = 0;
				if (vec & MLX5_TRIGGERED_CMD_COMP)
					ent->status = MLX5_DRIVER_STATUS_ABORTED;
				else
					ent->status = ent->lay->status_own >> 1;

				mlx5_core_dbg(dev, "command completed. ret 0x%x, delivery status %s(0x%x)\n",
					      ent->ret, deliv_status_to_str(ent->status), ent->status);
			}
			free_ent(cmd, ent->idx);

			if (ent->callback) {
				ds = ent->ts2 - ent->ts1;
				if (ent->op < ARRAY_SIZE(cmd->stats)) {
					stats = &cmd->stats[ent->op];
					spin_lock_irqsave(&stats->lock, flags);
					stats->sum += ds;
					++stats->n;
					spin_unlock_irqrestore(&stats->lock, flags);
				}

				callback = ent->callback;
				context = ent->context;
				err = ent->ret;
				if (!err) {
					err = mlx5_copy_from_msg(ent->uout,
								 ent->out,
								 ent->uout_size);

					err = err ? err : mlx5_cmd_check(dev,
									 ent->in->first.data,
									 ent->uout);
				}

				mlx5_free_cmd_msg(dev, ent->out);
				free_msg(dev, ent->in);

				err = err ? err : ent->status;
				free_cmd(ent);
				callback(err, context);
			} else {
				complete(&ent->done);
			}
			up(sem);
		}
	}
}
EXPORT_SYMBOL(mlx5_cmd_comp_handler);
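
/*
 * Each set bit in the low 32 bits of @vec identifies one completed
 * command slot, which is why the command queue is capped at 32 entries;
 * the MLX5_TRIGGERED_CMD_COMP bit outside that mask marks completions
 * forced by the driver rather than reported by firmware.
 */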

static int status_to_err(u8 status)
{
	return status ? -1 : 0; /* TBD more meaningful codes */
}

static struct mlx5_cmd_msg *alloc_msg(struct mlx5_core_dev *dev, int in_size,
				      gfp_t gfp)
{
	struct mlx5_cmd_msg *msg = ERR_PTR(-ENOMEM);
	struct mlx5_cmd *cmd = &dev->cmd;
	struct cache_ent *ent = NULL;

	if (in_size > MED_LIST_SIZE && in_size <= LONG_LIST_SIZE)
		ent = &cmd->cache.large;
	else if (in_size > 16 && in_size <= MED_LIST_SIZE)
		ent = &cmd->cache.med;

	if (ent) {
		spin_lock_irq(&ent->lock);
		if (!list_empty(&ent->head)) {
			msg = list_entry(ent->head.next, typeof(*msg), list);
			/* For cached lists, we must explicitly state what is
			 * the real size
			 */
			msg->len = in_size;
			list_del(&msg->list);
		}
		spin_unlock_irq(&ent->lock);
	}

	if (IS_ERR(msg))
		msg = mlx5_alloc_cmd_msg(dev, gfp, in_size, 0);

	return msg;
}
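
/*
 * Two size-tiered free lists (med and large, pre-populated in
 * create_msg_cache() below) let common command sizes reuse ready-made
 * mailbox chains instead of building a new chain for every command.
 */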

static int is_manage_pages(void *in)
{
	return MLX5_GET(mbox_in, in, opcode) == MLX5_CMD_OP_MANAGE_PAGES;
}

static int cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out,
		    int out_size, mlx5_cmd_cbk_t callback, void *context)
{
	struct mlx5_cmd_msg *inb;
	struct mlx5_cmd_msg *outb;
	int pages_queue;
	gfp_t gfp;
	int err;
	u8 status = 0;
	u32 drv_synd;
	u8 token;

	if (pci_channel_offline(dev->pdev) ||
	    dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) {
		u16 opcode = MLX5_GET(mbox_in, in, opcode);

		err = mlx5_internal_err_ret_value(dev, opcode, &drv_synd, &status);
		MLX5_SET(mbox_out, out, status, status);
		MLX5_SET(mbox_out, out, syndrome, drv_synd);
		return err;
	}

	pages_queue = is_manage_pages(in);
	gfp = callback ? GFP_ATOMIC : GFP_KERNEL;

	inb = alloc_msg(dev, in_size, gfp);
	if (IS_ERR(inb)) {
		err = PTR_ERR(inb);
		return err;
	}

	token = alloc_token(&dev->cmd);

	err = mlx5_copy_to_msg(inb, in, in_size, token);
	if (err) {
		mlx5_core_warn(dev, "err %d\n", err);
		goto out_in;
	}

	outb = mlx5_alloc_cmd_msg(dev, gfp, out_size, token);
	if (IS_ERR(outb)) {
		err = PTR_ERR(outb);
		goto out_in;
	}

	err = mlx5_cmd_invoke(dev, inb, outb, out, out_size, callback, context,
			      pages_queue, &status, token);
	if (err)
		goto out_out;

	mlx5_core_dbg(dev, "err %d, status %d\n", err, status);
	if (status) {
		err = status_to_err(status);
		goto out_out;
	}

	if (!callback)
		err = mlx5_copy_from_msg(out, outb, out_size);

out_out:
	if (!callback)
		mlx5_free_cmd_msg(dev, outb);

out_in:
	if (!callback)
		free_msg(dev, inb);
	return err;
}

int mlx5_cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out,
		  int out_size)
{
	int err;

	err = cmd_exec(dev, in, in_size, out, out_size, NULL, NULL);
	return err ? : mlx5_cmd_check(dev, in, out);
}
EXPORT_SYMBOL(mlx5_cmd_exec);
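
/*
 * Typical caller pattern (illustrative sketch only; ENABLE_HCA is just
 * an example opcode, any mlx5_ifc command layout is used the same way):
 *
 *	u32 in[MLX5_ST_SZ_DW(enable_hca_in)] = {0};
 *	u32 out[MLX5_ST_SZ_DW(enable_hca_out)] = {0};
 *	int err;
 *
 *	MLX5_SET(enable_hca_in, in, opcode, MLX5_CMD_OP_ENABLE_HCA);
 *	err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
 *
 * On success the caller may read any result fields from out with
 * MLX5_GET().
 */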

int mlx5_cmd_exec_cb(struct mlx5_core_dev *dev, void *in, int in_size,
		     void *out, int out_size, mlx5_cmd_cbk_t callback,
		     void *context)
{
	return cmd_exec(dev, in, in_size, out, out_size, callback, context);
}
EXPORT_SYMBOL(mlx5_cmd_exec_cb);

static void destroy_msg_cache(struct mlx5_core_dev *dev)
{
	struct mlx5_cmd *cmd = &dev->cmd;
	struct mlx5_cmd_msg *msg;
	struct mlx5_cmd_msg *n;

	list_for_each_entry_safe(msg, n, &cmd->cache.large.head, list) {
		list_del(&msg->list);
		mlx5_free_cmd_msg(dev, msg);
	}

	list_for_each_entry_safe(msg, n, &cmd->cache.med.head, list) {
		list_del(&msg->list);
		mlx5_free_cmd_msg(dev, msg);
	}
}

static int create_msg_cache(struct mlx5_core_dev *dev)
{
	struct mlx5_cmd *cmd = &dev->cmd;
	struct mlx5_cmd_msg *msg;
	int err;
	int i;

	spin_lock_init(&cmd->cache.large.lock);
	INIT_LIST_HEAD(&cmd->cache.large.head);
	spin_lock_init(&cmd->cache.med.lock);
	INIT_LIST_HEAD(&cmd->cache.med.head);

	for (i = 0; i < NUM_LONG_LISTS; i++) {
		msg = mlx5_alloc_cmd_msg(dev, GFP_KERNEL, LONG_LIST_SIZE, 0);
		if (IS_ERR(msg)) {
			err = PTR_ERR(msg);
			goto ex_err;
		}
		msg->cache = &cmd->cache.large;
		list_add_tail(&msg->list, &cmd->cache.large.head);
	}

	for (i = 0; i < NUM_MED_LISTS; i++) {
		msg = mlx5_alloc_cmd_msg(dev, GFP_KERNEL, MED_LIST_SIZE, 0);
		if (IS_ERR(msg)) {
			err = PTR_ERR(msg);
			goto ex_err;
		}
		msg->cache = &cmd->cache.med;
		list_add_tail(&msg->list, &cmd->cache.med.head);
	}

	return 0;

ex_err:
	destroy_msg_cache(dev);
	return err;
}

static int alloc_cmd_page(struct mlx5_core_dev *dev, struct mlx5_cmd *cmd)
{
	struct device *ddev = &dev->pdev->dev;

	cmd->cmd_alloc_buf = dma_zalloc_coherent(ddev, MLX5_ADAPTER_PAGE_SIZE,
						 &cmd->alloc_dma, GFP_KERNEL);
	if (!cmd->cmd_alloc_buf)
		return -ENOMEM;

	/* make sure it is aligned to 4K */
	if (!((uintptr_t)cmd->cmd_alloc_buf & (MLX5_ADAPTER_PAGE_SIZE - 1))) {
		cmd->cmd_buf = cmd->cmd_alloc_buf;
		cmd->dma = cmd->alloc_dma;
		cmd->alloc_size = MLX5_ADAPTER_PAGE_SIZE;
		return 0;
	}

	dma_free_coherent(ddev, MLX5_ADAPTER_PAGE_SIZE, cmd->cmd_alloc_buf,
			  cmd->alloc_dma);
	cmd->cmd_alloc_buf = dma_zalloc_coherent(ddev,
						 2 * MLX5_ADAPTER_PAGE_SIZE - 1,
						 &cmd->alloc_dma, GFP_KERNEL);
	if (!cmd->cmd_alloc_buf)
		return -ENOMEM;

	cmd->cmd_buf = PTR_ALIGN(cmd->cmd_alloc_buf, MLX5_ADAPTER_PAGE_SIZE);
	cmd->dma = ALIGN(cmd->alloc_dma, MLX5_ADAPTER_PAGE_SIZE);
	cmd->alloc_size = 2 * MLX5_ADAPTER_PAGE_SIZE - 1;
	return 0;
}
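
/*
 * Fallback strategy: if the first coherent allocation does not happen
 * to be 4K aligned (firmware requires the low 12 bits of the queue
 * address to be zero, see the cmd_l check in mlx5_cmd_init()), the
 * function above retries with almost twice the size and aligns the
 * usable window inside it with PTR_ALIGN()/ALIGN().
 */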

static void free_cmd_page(struct mlx5_core_dev *dev, struct mlx5_cmd *cmd)
{
	struct device *ddev = &dev->pdev->dev;

	dma_free_coherent(ddev, cmd->alloc_size, cmd->cmd_alloc_buf,
			  cmd->alloc_dma);
}

int mlx5_cmd_init(struct mlx5_core_dev *dev)
{
	int size = sizeof(struct mlx5_cmd_prot_block);
	int align = roundup_pow_of_two(size);
	struct mlx5_cmd *cmd = &dev->cmd;
	u32 cmd_h, cmd_l;
	u16 cmd_if_rev;
	int err;
	int i;

	memset(cmd, 0, sizeof(*cmd));
	cmd_if_rev = cmdif_rev(dev);
	if (cmd_if_rev != CMD_IF_REV) {
		dev_err(&dev->pdev->dev,
			"Driver cmdif rev(%d) differs from firmware's(%d)\n",
			CMD_IF_REV, cmd_if_rev);
		return -EINVAL;
	}

	cmd->pool = pci_pool_create("mlx5_cmd", dev->pdev, size, align, 0);
	if (!cmd->pool)
		return -ENOMEM;

	err = alloc_cmd_page(dev, cmd);
	if (err)
		goto err_free_pool;

	cmd_l = ioread32be(&dev->iseg->cmdq_addr_l_sz) & 0xff;
	cmd->log_sz = cmd_l >> 4 & 0xf;
	cmd->log_stride = cmd_l & 0xf;
	if (1 << cmd->log_sz > MLX5_MAX_COMMANDS) {
		dev_err(&dev->pdev->dev, "firmware reports too many outstanding commands %d\n",
			1 << cmd->log_sz);
		err = -EINVAL;
		goto err_free_page;
	}

	if (cmd->log_sz + cmd->log_stride > MLX5_ADAPTER_PAGE_SHIFT) {
		dev_err(&dev->pdev->dev, "command queue size overflow\n");
		err = -EINVAL;
		goto err_free_page;
	}

	cmd->checksum_disabled = 1;
	cmd->max_reg_cmds = (1 << cmd->log_sz) - 1;
	cmd->bitmask = (1 << cmd->max_reg_cmds) - 1;

	cmd->cmdif_rev = ioread32be(&dev->iseg->cmdif_rev_fw_sub) >> 16;
	if (cmd->cmdif_rev > CMD_IF_REV) {
		dev_err(&dev->pdev->dev, "driver does not support command interface version. driver %d, firmware %d\n",
			CMD_IF_REV, cmd->cmdif_rev);
		err = -ENOTSUPP;
		goto err_free_page;
	}

	spin_lock_init(&cmd->alloc_lock);
	spin_lock_init(&cmd->token_lock);
	for (i = 0; i < ARRAY_SIZE(cmd->stats); i++)
		spin_lock_init(&cmd->stats[i].lock);

	sema_init(&cmd->sem, cmd->max_reg_cmds);
	sema_init(&cmd->pages_sem, 1);

	cmd_h = (u32)((u64)(cmd->dma) >> 32);
	cmd_l = (u32)(cmd->dma);
	if (cmd_l & 0xfff) {
		dev_err(&dev->pdev->dev, "invalid command queue address\n");
		err = -ENOMEM;
		goto err_free_page;
	}

	iowrite32be(cmd_h, &dev->iseg->cmdq_addr_h);
	iowrite32be(cmd_l, &dev->iseg->cmdq_addr_l_sz);

	/* Make sure firmware sees the complete address before we proceed */
	wmb();

	mlx5_core_dbg(dev, "descriptor at dma 0x%llx\n", (unsigned long long)(cmd->dma));

	cmd->mode = CMD_MODE_POLLING;

	err = create_msg_cache(dev);
	if (err) {
		dev_err(&dev->pdev->dev, "failed to create command cache\n");
		goto err_free_page;
	}

	set_wqname(dev);
	cmd->wq = create_singlethread_workqueue(cmd->wq_name);
	if (!cmd->wq) {
		dev_err(&dev->pdev->dev, "failed to create command workqueue\n");
		err = -ENOMEM;
		goto err_cache;
	}

	err = create_debugfs_files(dev);
	if (err) {
		err = -ENOMEM;
		goto err_wq;
	}

	return 0;

err_wq:
	destroy_workqueue(cmd->wq);

err_cache:
	destroy_msg_cache(dev);

err_free_page:
	free_cmd_page(dev, cmd);

err_free_pool:
	pci_pool_destroy(cmd->pool);

	return err;
}
EXPORT_SYMBOL(mlx5_cmd_init);

void mlx5_cmd_cleanup(struct mlx5_core_dev *dev)
{
	struct mlx5_cmd *cmd = &dev->cmd;

	clean_debug_files(dev);
	destroy_workqueue(cmd->wq);
	destroy_msg_cache(dev);
	free_cmd_page(dev, cmd);
	pci_pool_destroy(cmd->pool);
}
EXPORT_SYMBOL(mlx5_cmd_cleanup);