/*
 * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved.
 * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies.
 * Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * - Redistributions of source code must retain the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and/or other materials
 *   provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/sched.h>
#include <linux/pci.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/mlx4/cmd.h>
#include <linux/mlx4/qp.h>
#include <linux/if_ether.h>
#include <linux/etherdevice.h>

#include "mlx4.h"
#include "fw.h"

#define MLX4_MAC_VALID (1ull << 63)
#define MLX4_PF_COUNTERS_PER_PORT 2
#define MLX4_VF_COUNTERS_PER_PORT 1

struct list_head list;

struct list_head list;

struct list_head list;

struct list_head list;
enum mlx4_protocol prot;
enum mlx4_steer_type steer;

enum {
RES_ANY_BUSY = 1
};

enum res_qp_states {
RES_QP_BUSY = RES_ANY_BUSY,

/* QP number was allocated */
RES_QP_RESERVED,

/* ICM memory for QP context was mapped */
RES_QP_MAPPED,

/* QP is in hw ownership */
RES_QP_HW
};

struct res_common com;
struct list_head mcg_list;
/* saved qp params before VST enforcement in order to restore on VGT */

enum res_mtt_states {
RES_MTT_BUSY = RES_ANY_BUSY,
RES_MTT_ALLOCATED,
};

static inline const char *mtt_states_str(enum res_mtt_states state)
{
switch (state) {
case RES_MTT_BUSY: return "RES_MTT_BUSY";
case RES_MTT_ALLOCATED: return "RES_MTT_ALLOCATED";
default: return "Unknown";
}
}

struct res_common com;

enum res_mpt_states {
RES_MPT_BUSY = RES_ANY_BUSY,
RES_MPT_RESERVED,
RES_MPT_MAPPED,
RES_MPT_HW,
};

struct res_common com;

enum res_eq_states {
RES_EQ_BUSY = RES_ANY_BUSY,
RES_EQ_RESERVED,
RES_EQ_HW,
};

struct res_common com;

enum res_cq_states {
RES_CQ_BUSY = RES_ANY_BUSY,
RES_CQ_ALLOCATED,
RES_CQ_HW,
};

struct res_common com;

enum res_srq_states {
RES_SRQ_BUSY = RES_ANY_BUSY,
RES_SRQ_ALLOCATED,
RES_SRQ_HW,
};

struct res_common com;

enum res_counter_states {
RES_COUNTER_BUSY = RES_ANY_BUSY,
RES_COUNTER_ALLOCATED,
};

struct res_common com;

enum res_xrcdn_states {
RES_XRCD_BUSY = RES_ANY_BUSY,
RES_XRCD_ALLOCATED,
};

struct res_common com;

enum res_fs_rule_states {
RES_FS_RULE_BUSY = RES_ANY_BUSY,
RES_FS_RULE_ALLOCATED,
};

struct res_common com;
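
/* Tracked resources of every type live in a per-type rb-tree keyed by
 * res_id; the lookup and insert below are the standard rb-tree walks.
 */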
static void *res_tracker_lookup(struct rb_root *root, u64 res_id)
{
struct rb_node *node = root->rb_node;

while (node) {
struct res_common *res = container_of(node, struct res_common,
node);

if (res_id < res->res_id)
node = node->rb_left;
else if (res_id > res->res_id)
node = node->rb_right;
else
return res;
}
return NULL;
}

static int res_tracker_insert(struct rb_root *root, struct res_common *res)
{
struct rb_node **new = &(root->rb_node), *parent = NULL;

/* Figure out where to put new node */
while (*new) {
struct res_common *this = container_of(*new, struct res_common,
node);

parent = *new;
if (res->res_id < this->res_id)
new = &((*new)->rb_left);
else if (res->res_id > this->res_id)
new = &((*new)->rb_right);
else
return -EEXIST;
}

/* Add new node and rebalance tree. */
rb_link_node(&res->node, parent, new);
rb_insert_color(&res->node, root);

return 0;
}

static const char *resource_str(enum mlx4_resource rt)
{
switch (rt) {
case RES_QP: return "RES_QP";
case RES_CQ: return "RES_CQ";
case RES_SRQ: return "RES_SRQ";
case RES_MPT: return "RES_MPT";
case RES_MTT: return "RES_MTT";
case RES_MAC: return "RES_MAC";
case RES_VLAN: return "RES_VLAN";
case RES_EQ: return "RES_EQ";
case RES_COUNTER: return "RES_COUNTER";
case RES_FS_RULE: return "RES_FS_RULE";
case RES_XRCD: return "RES_XRCD";
default: return "Unknown resource type !!!";
}
}

static void rem_slave_vlans(struct mlx4_dev *dev, int slave);
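
/* Grant 'count' instances of a resource type to a slave while enforcing
 * its quota. Consumption within the slave's guaranteed share is drawn
 * against the reserved watermark; anything beyond it must leave the
 * shared free pool at or above that watermark, or the request is refused.
 */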
static inline int mlx4_grant_resource(struct mlx4_dev *dev, int slave,
enum mlx4_resource res_type, int count,
int port)
struct mlx4_priv *priv = mlx4_priv(dev);
struct resource_allocator *res_alloc =
&priv->mfunc.master.res_tracker.res_alloc[res_type];
int allocated, free, reserved, guaranteed, from_free;

if (slave > dev->persist->num_vfs)

spin_lock(&res_alloc->alloc_lock);
allocated = (port > 0) ?
res_alloc->allocated[(port - 1) *
(dev->persist->num_vfs + 1) + slave] :
res_alloc->allocated[slave];
free = (port > 0) ? res_alloc->res_port_free[port - 1] :
res_alloc->res_free;
reserved = (port > 0) ? res_alloc->res_port_rsvd[port - 1] :
res_alloc->res_reserved;
guaranteed = res_alloc->guaranteed[slave];

if (allocated + count > res_alloc->quota[slave]) {
mlx4_warn(dev, "VF %d port %d res %s: quota exceeded, count %d alloc %d quota %d\n",
slave, port, resource_str(res_type), count,
allocated, res_alloc->quota[slave]);

if (allocated + count <= guaranteed) {

/* portion may need to be obtained from free area */
if (guaranteed - allocated > 0)
from_free = count - (guaranteed - allocated);

from_rsvd = count - from_free;

if (free - from_free >= reserved)

mlx4_warn(dev, "VF %d port %d res %s: free pool empty, free %d from_free %d rsvd %d\n",
slave, port, resource_str(res_type), free,
from_free, reserved);

/* grant the request */
res_alloc->allocated[(port - 1) *
(dev->persist->num_vfs + 1) + slave] += count;
res_alloc->res_port_free[port - 1] -= count;
res_alloc->res_port_rsvd[port - 1] -= from_rsvd;

res_alloc->allocated[slave] += count;
res_alloc->res_free -= count;
res_alloc->res_reserved -= from_rsvd;

spin_unlock(&res_alloc->alloc_lock);
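
/* Return 'count' instances of a resource from a slave. The free count is
 * restored in full, and the reserved watermark additionally recovers the
 * portion that had been drawn from the slave's guaranteed share.
 */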
static inline void mlx4_release_resource(struct mlx4_dev *dev, int slave,
enum mlx4_resource res_type, int count,
int port)
struct mlx4_priv *priv = mlx4_priv(dev);
struct resource_allocator *res_alloc =
&priv->mfunc.master.res_tracker.res_alloc[res_type];
int allocated, guaranteed, from_rsvd;

if (slave > dev->persist->num_vfs)

spin_lock(&res_alloc->alloc_lock);

allocated = (port > 0) ?
res_alloc->allocated[(port - 1) *
(dev->persist->num_vfs + 1) + slave] :
res_alloc->allocated[slave];
guaranteed = res_alloc->guaranteed[slave];

if (allocated - count >= guaranteed) {

/* portion may need to be returned to reserved area */
if (allocated - guaranteed > 0)
from_rsvd = count - (allocated - guaranteed);

res_alloc->allocated[(port - 1) *
(dev->persist->num_vfs + 1) + slave] -= count;
res_alloc->res_port_free[port - 1] += count;
res_alloc->res_port_rsvd[port - 1] += from_rsvd;

res_alloc->allocated[slave] -= count;
res_alloc->res_free += count;
res_alloc->res_reserved += from_rsvd;

spin_unlock(&res_alloc->alloc_lock);
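
/* Every function (PF plus num_vfs VFs) is guaranteed
 * 1/(2 * (num_vfs + 1)) of the pool and may allocate up to half the pool
 * on top of that: e.g. with 7 VFs, each of the 8 functions is guaranteed
 * 1/16 of the pool and gets a quota of 1/2 + 1/16.
 */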
static inline void initialize_res_quotas(struct mlx4_dev *dev,
struct resource_allocator *res_alloc,
enum mlx4_resource res_type,
int vf, int num_instances)
{
res_alloc->guaranteed[vf] = num_instances /
(2 * (dev->persist->num_vfs + 1));
res_alloc->quota[vf] = (num_instances / 2) + res_alloc->guaranteed[vf];
if (vf == mlx4_master_func_num(dev)) {
res_alloc->res_free = num_instances;
if (res_type == RES_MTT) {
/* reserved mtts will be taken out of the PF allocation */
res_alloc->res_free += dev->caps.reserved_mtts;
res_alloc->guaranteed[vf] += dev->caps.reserved_mtts;
res_alloc->quota[vf] += dev->caps.reserved_mtts;
}
}
}

void mlx4_init_quotas(struct mlx4_dev *dev)
{
struct mlx4_priv *priv = mlx4_priv(dev);
int pf;

/* quotas for VFs are initialized in mlx4_slave_cap */
if (mlx4_is_slave(dev))
return;

if (!mlx4_is_mfunc(dev)) {
dev->quotas.qp = dev->caps.num_qps - dev->caps.reserved_qps -
mlx4_num_reserved_sqps(dev);
dev->quotas.cq = dev->caps.num_cqs - dev->caps.reserved_cqs;
dev->quotas.srq = dev->caps.num_srqs - dev->caps.reserved_srqs;
dev->quotas.mtt = dev->caps.num_mtts - dev->caps.reserved_mtts;
dev->quotas.mpt = dev->caps.num_mpts - dev->caps.reserved_mrws;
return;
}

pf = mlx4_master_func_num(dev);
dev->quotas.qp =
priv->mfunc.master.res_tracker.res_alloc[RES_QP].quota[pf];
dev->quotas.cq =
priv->mfunc.master.res_tracker.res_alloc[RES_CQ].quota[pf];
dev->quotas.srq =
priv->mfunc.master.res_tracker.res_alloc[RES_SRQ].quota[pf];
dev->quotas.mtt =
priv->mfunc.master.res_tracker.res_alloc[RES_MTT].quota[pf];
dev->quotas.mpt =
priv->mfunc.master.res_tracker.res_alloc[RES_MPT].quota[pf];
}
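
/* Whatever remains after the sink counter and the PF's per-port counter
 * guarantees determines how many VFs can still be guaranteed
 * MLX4_VF_COUNTERS_PER_PORT counters per port.
 */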
static int get_max_gauranteed_vfs_counter(struct mlx4_dev *dev)
/* reduce the sink counter */
return (dev->caps.max_counters - 1 -
(MLX4_PF_COUNTERS_PER_PORT * MLX4_MAX_PORTS))

int mlx4_init_resource_tracker(struct mlx4_dev *dev)
struct mlx4_priv *priv = mlx4_priv(dev);
int max_vfs_guarantee_counter = get_max_gauranteed_vfs_counter(dev);

priv->mfunc.master.res_tracker.slave_list =
kzalloc(dev->num_slaves * sizeof(struct slave_list),
GFP_KERNEL);
if (!priv->mfunc.master.res_tracker.slave_list)

for (i = 0; i < dev->num_slaves; i++) {
for (t = 0; t < MLX4_NUM_OF_RESOURCE_TYPE; ++t)
INIT_LIST_HEAD(&priv->mfunc.master.res_tracker.
slave_list[i].res_list[t]);
mutex_init(&priv->mfunc.master.res_tracker.slave_list[i].mutex);

mlx4_dbg(dev, "Started init_resource_tracker: %ld slaves\n",
dev->num_slaves);
for (i = 0; i < MLX4_NUM_OF_RESOURCE_TYPE; i++)
priv->mfunc.master.res_tracker.res_tree[i] = RB_ROOT;

for (i = 0; i < MLX4_NUM_OF_RESOURCE_TYPE; i++) {
struct resource_allocator *res_alloc =
&priv->mfunc.master.res_tracker.res_alloc[i];
res_alloc->quota = kmalloc((dev->persist->num_vfs + 1) *
sizeof(int), GFP_KERNEL);
res_alloc->guaranteed = kmalloc((dev->persist->num_vfs + 1) *
sizeof(int), GFP_KERNEL);
if (i == RES_MAC || i == RES_VLAN)
res_alloc->allocated = kzalloc(MLX4_MAX_PORTS *
(dev->persist->num_vfs + 1) *
sizeof(int), GFP_KERNEL);
else
res_alloc->allocated = kzalloc((dev->persist->
num_vfs + 1) *
sizeof(int), GFP_KERNEL);
/* Reduce the sink counter */
if (i == RES_COUNTER)
res_alloc->res_free = dev->caps.max_counters - 1;

if (!res_alloc->quota || !res_alloc->guaranteed ||
!res_alloc->allocated)

spin_lock_init(&res_alloc->alloc_lock);
for (t = 0; t < dev->persist->num_vfs + 1; t++) {
struct mlx4_active_ports actv_ports =
mlx4_get_active_ports(dev, t);

initialize_res_quotas(dev, res_alloc, RES_QP,
t, dev->caps.num_qps -
dev->caps.reserved_qps -
mlx4_num_reserved_sqps(dev));

initialize_res_quotas(dev, res_alloc, RES_CQ,
t, dev->caps.num_cqs -
dev->caps.reserved_cqs);

initialize_res_quotas(dev, res_alloc, RES_SRQ,
t, dev->caps.num_srqs -
dev->caps.reserved_srqs);

initialize_res_quotas(dev, res_alloc, RES_MPT,
t, dev->caps.num_mpts -
dev->caps.reserved_mrws);

initialize_res_quotas(dev, res_alloc, RES_MTT,
t, dev->caps.num_mtts -
dev->caps.reserved_mtts);

if (t == mlx4_master_func_num(dev)) {
int max_vfs_pport = 0;
/* Calculate the max vfs per port for both ports */
for (j = 0; j < dev->caps.num_ports;
struct mlx4_slaves_pport slaves_pport =
mlx4_phys_to_slaves_pport(dev, j + 1);
unsigned current_slaves =
bitmap_weight(slaves_pport.slaves,
dev->caps.num_ports) - 1;
if (max_vfs_pport < current_slaves)

res_alloc->quota[t] =
res_alloc->guaranteed[t] = 2;
for (j = 0; j < MLX4_MAX_PORTS; j++)
res_alloc->res_port_free[j] =

res_alloc->quota[t] = MLX4_MAX_MAC_NUM;
res_alloc->guaranteed[t] = 2;

if (t == mlx4_master_func_num(dev)) {
res_alloc->quota[t] = MLX4_MAX_VLAN_NUM;
res_alloc->guaranteed[t] = MLX4_MAX_VLAN_NUM / 2;
for (j = 0; j < MLX4_MAX_PORTS; j++)
res_alloc->res_port_free[j] =

res_alloc->quota[t] = MLX4_MAX_VLAN_NUM / 2;
res_alloc->guaranteed[t] = 0;

res_alloc->quota[t] = dev->caps.max_counters;
if (t == mlx4_master_func_num(dev))
res_alloc->guaranteed[t] =
MLX4_PF_COUNTERS_PER_PORT *
else if (t <= max_vfs_guarantee_counter)
res_alloc->guaranteed[t] =
MLX4_VF_COUNTERS_PER_PORT *

res_alloc->guaranteed[t] = 0;
res_alloc->res_free -= res_alloc->guaranteed[t];

if (i == RES_MAC || i == RES_VLAN) {
for (j = 0; j < dev->caps.num_ports; j++)
if (test_bit(j, actv_ports.ports))
res_alloc->res_port_rsvd[j] +=
res_alloc->guaranteed[t];

res_alloc->res_reserved += res_alloc->guaranteed[t];

spin_lock_init(&priv->mfunc.master.res_tracker.lock);

for (i = 0; i < MLX4_NUM_OF_RESOURCE_TYPE; i++) {
kfree(priv->mfunc.master.res_tracker.res_alloc[i].allocated);
priv->mfunc.master.res_tracker.res_alloc[i].allocated = NULL;
kfree(priv->mfunc.master.res_tracker.res_alloc[i].guaranteed);
priv->mfunc.master.res_tracker.res_alloc[i].guaranteed = NULL;
kfree(priv->mfunc.master.res_tracker.res_alloc[i].quota);
priv->mfunc.master.res_tracker.res_alloc[i].quota = NULL;

void mlx4_free_resource_tracker(struct mlx4_dev *dev,
enum mlx4_res_tracker_free_type type)
struct mlx4_priv *priv = mlx4_priv(dev);

if (priv->mfunc.master.res_tracker.slave_list) {
if (type != RES_TR_FREE_STRUCTS_ONLY) {
for (i = 0; i < dev->num_slaves; i++) {
if (type == RES_TR_FREE_ALL ||
dev->caps.function != i)
mlx4_delete_all_resources_for_slave(dev, i);

/* free master's vlans */
i = dev->caps.function;
mlx4_reset_roce_gids(dev, i);
mutex_lock(&priv->mfunc.master.res_tracker.slave_list[i].mutex);
rem_slave_vlans(dev, i);
mutex_unlock(&priv->mfunc.master.res_tracker.slave_list[i].mutex);

if (type != RES_TR_FREE_SLAVES_ONLY) {
for (i = 0; i < MLX4_NUM_OF_RESOURCE_TYPE; i++) {
kfree(priv->mfunc.master.res_tracker.res_alloc[i].allocated);
priv->mfunc.master.res_tracker.res_alloc[i].allocated = NULL;
kfree(priv->mfunc.master.res_tracker.res_alloc[i].guaranteed);
priv->mfunc.master.res_tracker.res_alloc[i].guaranteed = NULL;
kfree(priv->mfunc.master.res_tracker.res_alloc[i].quota);
priv->mfunc.master.res_tracker.res_alloc[i].quota = NULL;

kfree(priv->mfunc.master.res_tracker.slave_list);
priv->mfunc.master.res_tracker.slave_list = NULL;
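
/* The QP context is patched in place inside the command mailbox: the
 * sched_queue byte sits at offset 64 (bit 6 selects the port) and the
 * pkey index at offset 35, which is remapped through the slave's
 * virt2phys_pkey table.
 */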
static void update_pkey_index(struct mlx4_dev *dev, int slave,
struct mlx4_cmd_mailbox *inbox)
u8 sched = *(u8 *)(inbox->buf + 64);
u8 orig_index = *(u8 *)(inbox->buf + 35);
struct mlx4_priv *priv = mlx4_priv(dev);

port = (sched >> 6 & 1) + 1;

new_index = priv->virt2phys_pkey[slave][port - 1][orig_index];
*(u8 *)(inbox->buf + 35) = new_index;
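
/* Slaves use virtual GID indexes, so remap them to physical ones: on
 * Ethernet ports the slave's base GID index is applied, on IB the slave
 * number itself is used; UD QPs additionally get bit 0x80 set in the
 * index.
 */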
static void update_gid(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *inbox,
u8 slave)
struct mlx4_qp_context *qp_ctx = inbox->buf + 8;
enum mlx4_qp_optpar optpar = be32_to_cpu(*(__be32 *) inbox->buf);
u32 ts = (be32_to_cpu(qp_ctx->flags) >> 16) & 0xff;

if (MLX4_QP_ST_UD == ts) {
port = (qp_ctx->pri_path.sched_queue >> 6 & 1) + 1;
if (mlx4_is_eth(dev, port))
qp_ctx->pri_path.mgid_index =
mlx4_get_base_gid_ix(dev, slave, port) | 0x80;

qp_ctx->pri_path.mgid_index = slave | 0x80;

} else if (MLX4_QP_ST_RC == ts || MLX4_QP_ST_XRC == ts || MLX4_QP_ST_UC == ts) {
if (optpar & MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH) {
port = (qp_ctx->pri_path.sched_queue >> 6 & 1) + 1;
if (mlx4_is_eth(dev, port)) {
qp_ctx->pri_path.mgid_index +=
mlx4_get_base_gid_ix(dev, slave, port);
qp_ctx->pri_path.mgid_index &= 0x7f;

qp_ctx->pri_path.mgid_index = slave & 0x7F;

if (optpar & MLX4_QP_OPTPAR_ALT_ADDR_PATH) {
port = (qp_ctx->alt_path.sched_queue >> 6 & 1) + 1;
if (mlx4_is_eth(dev, port)) {
qp_ctx->alt_path.mgid_index +=
mlx4_get_base_gid_ix(dev, slave, port);
qp_ctx->alt_path.mgid_index &= 0x7f;

qp_ctx->alt_path.mgid_index = slave & 0x7F;
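
/* Enforce the administered VST settings on a slave's QP context: force
 * VLAN stripping, insert the operational VLAN index, override the
 * scheduling queue with the default QoS and, if spoof-checking is
 * enabled, force the registered source MAC.
 */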
static int update_vport_qp_param(struct mlx4_dev *dev,
struct mlx4_cmd_mailbox *inbox,
int slave, u32 qpn)
struct mlx4_qp_context *qpc = inbox->buf + 8;
struct mlx4_vport_oper_state *vp_oper;
struct mlx4_priv *priv;

port = (qpc->pri_path.sched_queue & 0x40) ? 2 : 1;
priv = mlx4_priv(dev);
vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port];
qp_type = (be32_to_cpu(qpc->flags) >> 16) & 0xff;

if (MLX4_VGT != vp_oper->state.default_vlan) {
/* the reserved QPs (special, proxy, tunnel)
 * do not operate over vlans
 */
if (mlx4_is_qp_reserved(dev, qpn))
/* force vlan stripping by clearing VSD; an MLX QP here refers to Raw Ethernet */
if (qp_type == MLX4_QP_ST_UD ||
(qp_type == MLX4_QP_ST_MLX && mlx4_is_eth(dev, port))) {
if (dev->caps.bmme_flags & MLX4_BMME_FLAG_VSD_INIT2RTR) {
*(__be32 *)inbox->buf =
cpu_to_be32(be32_to_cpu(*(__be32 *)inbox->buf) |
MLX4_QP_OPTPAR_VLAN_STRIPPING);
qpc->param3 &= ~cpu_to_be32(MLX4_STRIP_VLAN);

struct mlx4_update_qp_params params = {.flags = 0};

err = mlx4_update_qp(dev, qpn, MLX4_UPDATE_QP_VSD, &params);

if (vp_oper->state.link_state == IFLA_VF_LINK_STATE_DISABLE &&
dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_UPDATE_QP) {
qpc->pri_path.vlan_control =
MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
MLX4_VLAN_CTRL_ETH_TX_BLOCK_PRIO_TAGGED |
MLX4_VLAN_CTRL_ETH_TX_BLOCK_UNTAGGED |
MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED |
MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED |
MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED;
} else if (0 != vp_oper->state.default_vlan) {
qpc->pri_path.vlan_control =
MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED |
MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED;
} else { /* priority tagged */
qpc->pri_path.vlan_control =
MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED;

qpc->pri_path.fvl_rx |= MLX4_FVL_RX_FORCE_ETH_VLAN;
qpc->pri_path.vlan_index = vp_oper->vlan_idx;
qpc->pri_path.fl |= MLX4_FL_CV | MLX4_FL_ETH_HIDE_CQE_VLAN;
qpc->pri_path.feup |= MLX4_FEUP_FORCE_ETH_UP | MLX4_FVL_FORCE_ETH_VLAN;
qpc->pri_path.sched_queue &= 0xC7;
qpc->pri_path.sched_queue |= (vp_oper->state.default_qos) << 3;
qpc->qos_vport = vp_oper->state.qos_vport;

if (vp_oper->state.spoofchk) {
qpc->pri_path.feup |= MLX4_FSM_FORCE_ETH_SRC_MAC;
qpc->pri_path.grh_mylmc = (0x80 & qpc->pri_path.grh_mylmc) + vp_oper->mac_idx;

static int mpt_mask(struct mlx4_dev *dev)
return dev->caps.num_mpts - 1;

static void *find_res(struct mlx4_dev *dev, u64 res_id,
enum mlx4_resource type)
struct mlx4_priv *priv = mlx4_priv(dev);

return res_tracker_lookup(&priv->mfunc.master.res_tracker.res_tree[type],
res_id);
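
/* Take exclusive hold of a tracked resource: get_res() checks ownership,
 * saves the current state and parks the entry in RES_ANY_BUSY; put_res()
 * restores the saved state.
 */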
static int get_res(struct mlx4_dev *dev, int slave, u64 res_id,
enum mlx4_resource type,
void *res)
struct res_common *r;

spin_lock_irq(mlx4_tlock(dev));
r = find_res(dev, res_id, type);

if (r->state == RES_ANY_BUSY) {

if (r->owner != slave) {

r->from_state = r->state;
r->state = RES_ANY_BUSY;

*((struct res_common **)res) = r;

spin_unlock_irq(mlx4_tlock(dev));

int mlx4_get_slave_from_resource_id(struct mlx4_dev *dev,
enum mlx4_resource type,
u64 res_id, int *slave)
struct res_common *r;

spin_lock(mlx4_tlock(dev));

r = find_res(dev, id, type);

spin_unlock(mlx4_tlock(dev));

static void put_res(struct mlx4_dev *dev, int slave, u64 res_id,
enum mlx4_resource type)
struct res_common *r;

spin_lock_irq(mlx4_tlock(dev));
r = find_res(dev, res_id, type);
if (r)
r->state = r->from_state;
spin_unlock_irq(mlx4_tlock(dev));

static struct res_common *alloc_qp_tr(int id)
ret = kzalloc(sizeof *ret, GFP_KERNEL);

ret->com.res_id = id;
ret->com.state = RES_QP_RESERVED;

INIT_LIST_HEAD(&ret->mcg_list);
spin_lock_init(&ret->mcg_spl);
atomic_set(&ret->ref_count, 0);

static struct res_common *alloc_mtt_tr(int id, int order)
ret = kzalloc(sizeof *ret, GFP_KERNEL);

ret->com.res_id = id;
ret->com.state = RES_MTT_ALLOCATED;
atomic_set(&ret->ref_count, 0);

static struct res_common *alloc_mpt_tr(int id, int key)
ret = kzalloc(sizeof *ret, GFP_KERNEL);

ret->com.res_id = id;
ret->com.state = RES_MPT_RESERVED;

static struct res_common *alloc_eq_tr(int id)
ret = kzalloc(sizeof *ret, GFP_KERNEL);

ret->com.res_id = id;
ret->com.state = RES_EQ_RESERVED;

static struct res_common *alloc_cq_tr(int id)
ret = kzalloc(sizeof *ret, GFP_KERNEL);

ret->com.res_id = id;
ret->com.state = RES_CQ_ALLOCATED;
atomic_set(&ret->ref_count, 0);

static struct res_common *alloc_srq_tr(int id)
ret = kzalloc(sizeof *ret, GFP_KERNEL);

ret->com.res_id = id;
ret->com.state = RES_SRQ_ALLOCATED;
atomic_set(&ret->ref_count, 0);

static struct res_common *alloc_counter_tr(int id, int port)
struct res_counter *ret;

ret = kzalloc(sizeof *ret, GFP_KERNEL);

ret->com.res_id = id;
ret->com.state = RES_COUNTER_ALLOCATED;

static struct res_common *alloc_xrcdn_tr(int id)
struct res_xrcdn *ret;

ret = kzalloc(sizeof *ret, GFP_KERNEL);

ret->com.res_id = id;
ret->com.state = RES_XRCD_ALLOCATED;

static struct res_common *alloc_fs_rule_tr(u64 id, int qpn)
struct res_fs_rule *ret;

ret = kzalloc(sizeof *ret, GFP_KERNEL);

ret->com.res_id = id;
ret->com.state = RES_FS_RULE_ALLOCATED;

static struct res_common *alloc_tr(u64 id, enum mlx4_resource type, int slave,
struct res_common *ret;

ret = alloc_qp_tr(id);

ret = alloc_mpt_tr(id, extra);

ret = alloc_mtt_tr(id, extra);

ret = alloc_eq_tr(id);

ret = alloc_cq_tr(id);

ret = alloc_srq_tr(id);

pr_err("implementation missing\n");

ret = alloc_counter_tr(id, extra);

ret = alloc_xrcdn_tr(id);

ret = alloc_fs_rule_tr(id, extra);
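
/* Start tracking 'count' consecutive resource ids for a slave: allocate
 * tracker entries, insert them into the per-type rb-tree and the slave's
 * list, and roll everything back if any id is already tracked.
 */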
static int add_res_range(struct mlx4_dev *dev, int slave, u64 base, int count,
enum mlx4_resource type, int extra)
struct mlx4_priv *priv = mlx4_priv(dev);
struct res_common **res_arr;
struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
struct rb_root *root = &tracker->res_tree[type];

res_arr = kzalloc(count * sizeof *res_arr, GFP_KERNEL);

for (i = 0; i < count; ++i) {
res_arr[i] = alloc_tr(base + i, type, slave, extra);

for (--i; i >= 0; --i)

spin_lock_irq(mlx4_tlock(dev));
for (i = 0; i < count; ++i) {
if (find_res(dev, base + i, type)) {

err = res_tracker_insert(root, res_arr[i]);

list_add_tail(&res_arr[i]->list,
&tracker->slave_list[slave].res_list[type]);

spin_unlock_irq(mlx4_tlock(dev));
for (--i; i >= 0; --i)
rb_erase(&res_arr[i]->node, root);

spin_unlock_irq(mlx4_tlock(dev));

for (i = 0; i < count; ++i)

static int remove_qp_ok(struct res_qp *res)
if (res->com.state == RES_QP_BUSY || atomic_read(&res->ref_count) ||
!list_empty(&res->mcg_list)) {
pr_err("resource tracker: fail to remove qp, state %d, ref_count %d\n",
res->com.state, atomic_read(&res->ref_count));
} else if (res->com.state != RES_QP_RESERVED) {

static int remove_mtt_ok(struct res_mtt *res, int order)
if (res->com.state == RES_MTT_BUSY ||
atomic_read(&res->ref_count)) {
pr_devel("%s-%d: state %s, ref_count %d\n",
__func__, __LINE__,
mtt_states_str(res->com.state),
atomic_read(&res->ref_count));
} else if (res->com.state != RES_MTT_ALLOCATED)
else if (res->order != order)

static int remove_mpt_ok(struct res_mpt *res)
if (res->com.state == RES_MPT_BUSY)
else if (res->com.state != RES_MPT_RESERVED)

static int remove_eq_ok(struct res_eq *res)
if (res->com.state == RES_EQ_BUSY)
else if (res->com.state != RES_EQ_RESERVED)

static int remove_counter_ok(struct res_counter *res)
if (res->com.state == RES_COUNTER_BUSY)
else if (res->com.state != RES_COUNTER_ALLOCATED)

static int remove_xrcdn_ok(struct res_xrcdn *res)
if (res->com.state == RES_XRCD_BUSY)
else if (res->com.state != RES_XRCD_ALLOCATED)

static int remove_fs_rule_ok(struct res_fs_rule *res)
if (res->com.state == RES_FS_RULE_BUSY)
else if (res->com.state != RES_FS_RULE_ALLOCATED)

static int remove_cq_ok(struct res_cq *res)
if (res->com.state == RES_CQ_BUSY)
else if (res->com.state != RES_CQ_ALLOCATED)

static int remove_srq_ok(struct res_srq *res)
if (res->com.state == RES_SRQ_BUSY)
else if (res->com.state != RES_SRQ_ALLOCATED)

static int remove_ok(struct res_common *res, enum mlx4_resource type, int extra)
return remove_qp_ok((struct res_qp *)res);

return remove_cq_ok((struct res_cq *)res);

return remove_srq_ok((struct res_srq *)res);

return remove_mpt_ok((struct res_mpt *)res);

return remove_mtt_ok((struct res_mtt *)res, extra);

return remove_eq_ok((struct res_eq *)res);

return remove_counter_ok((struct res_counter *)res);

return remove_xrcdn_ok((struct res_xrcdn *)res);

return remove_fs_rule_ok((struct res_fs_rule *)res);
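
/* Stop tracking a range of ids: every id must exist, be owned by the
 * slave and pass its type's remove_ok() check before anything is erased.
 */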
static int rem_res_range(struct mlx4_dev *dev, int slave, u64 base, int count,
enum mlx4_resource type, int extra)
struct mlx4_priv *priv = mlx4_priv(dev);
struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
struct res_common *r;

spin_lock_irq(mlx4_tlock(dev));
for (i = base; i < base + count; ++i) {
r = res_tracker_lookup(&tracker->res_tree[type], i);

if (r->owner != slave) {

err = remove_ok(r, type, extra);

for (i = base; i < base + count; ++i) {
r = res_tracker_lookup(&tracker->res_tree[type], i);
rb_erase(&r->node, &tracker->res_tree[type]);

spin_unlock_irq(mlx4_tlock(dev));
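
/* State transitions use a two-phase protocol: *_res_start_move_to()
 * validates the transition and parks the resource in its BUSY state,
 * after which res_end_move() commits to_state and res_abort_move()
 * restores from_state.
 */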
static int qp_res_start_move_to(struct mlx4_dev *dev, int slave, int qpn,
enum res_qp_states state, struct res_qp **qp,
struct mlx4_priv *priv = mlx4_priv(dev);
struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;

spin_lock_irq(mlx4_tlock(dev));
r = res_tracker_lookup(&tracker->res_tree[RES_QP], qpn);

else if (r->com.owner != slave)

mlx4_dbg(dev, "%s: failed RES_QP, 0x%llx\n",
__func__, r->com.res_id);

case RES_QP_RESERVED:
if (r->com.state == RES_QP_MAPPED && !alloc)

mlx4_dbg(dev, "failed RES_QP, 0x%llx\n", r->com.res_id);

if ((r->com.state == RES_QP_RESERVED && alloc) ||
r->com.state == RES_QP_HW)

mlx4_dbg(dev, "failed RES_QP, 0x%llx\n",

if (r->com.state != RES_QP_MAPPED)

r->com.from_state = r->com.state;
r->com.to_state = state;
r->com.state = RES_QP_BUSY;

spin_unlock_irq(mlx4_tlock(dev));

static int mr_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
enum res_mpt_states state, struct res_mpt **mpt)
struct mlx4_priv *priv = mlx4_priv(dev);
struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;

spin_lock_irq(mlx4_tlock(dev));
r = res_tracker_lookup(&tracker->res_tree[RES_MPT], index);

else if (r->com.owner != slave)

case RES_MPT_RESERVED:
if (r->com.state != RES_MPT_MAPPED)

case RES_MPT_MAPPED:
if (r->com.state != RES_MPT_RESERVED &&
r->com.state != RES_MPT_HW)

if (r->com.state != RES_MPT_MAPPED)

r->com.from_state = r->com.state;
r->com.to_state = state;
r->com.state = RES_MPT_BUSY;

spin_unlock_irq(mlx4_tlock(dev));

static int eq_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
enum res_eq_states state, struct res_eq **eq)
struct mlx4_priv *priv = mlx4_priv(dev);
struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;

spin_lock_irq(mlx4_tlock(dev));
r = res_tracker_lookup(&tracker->res_tree[RES_EQ], index);

else if (r->com.owner != slave)

case RES_EQ_RESERVED:
if (r->com.state != RES_EQ_HW)

if (r->com.state != RES_EQ_RESERVED)

r->com.from_state = r->com.state;
r->com.to_state = state;
r->com.state = RES_EQ_BUSY;

spin_unlock_irq(mlx4_tlock(dev));

static int cq_res_start_move_to(struct mlx4_dev *dev, int slave, int cqn,
enum res_cq_states state, struct res_cq **cq)
struct mlx4_priv *priv = mlx4_priv(dev);
struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;

spin_lock_irq(mlx4_tlock(dev));
r = res_tracker_lookup(&tracker->res_tree[RES_CQ], cqn);

} else if (r->com.owner != slave) {

} else if (state == RES_CQ_ALLOCATED) {
if (r->com.state != RES_CQ_HW)
else if (atomic_read(&r->ref_count))

} else if (state != RES_CQ_HW || r->com.state != RES_CQ_ALLOCATED) {

r->com.from_state = r->com.state;
r->com.to_state = state;
r->com.state = RES_CQ_BUSY;

spin_unlock_irq(mlx4_tlock(dev));

static int srq_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
enum res_srq_states state, struct res_srq **srq)
struct mlx4_priv *priv = mlx4_priv(dev);
struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;

spin_lock_irq(mlx4_tlock(dev));
r = res_tracker_lookup(&tracker->res_tree[RES_SRQ], index);

} else if (r->com.owner != slave) {

} else if (state == RES_SRQ_ALLOCATED) {
if (r->com.state != RES_SRQ_HW)
else if (atomic_read(&r->ref_count))

} else if (state != RES_SRQ_HW || r->com.state != RES_SRQ_ALLOCATED) {

r->com.from_state = r->com.state;
r->com.to_state = state;
r->com.state = RES_SRQ_BUSY;

spin_unlock_irq(mlx4_tlock(dev));

static void res_abort_move(struct mlx4_dev *dev, int slave,
enum mlx4_resource type, int id)
struct mlx4_priv *priv = mlx4_priv(dev);
struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
struct res_common *r;

spin_lock_irq(mlx4_tlock(dev));
r = res_tracker_lookup(&tracker->res_tree[type], id);
if (r && (r->owner == slave))
r->state = r->from_state;
spin_unlock_irq(mlx4_tlock(dev));

static void res_end_move(struct mlx4_dev *dev, int slave,
enum mlx4_resource type, int id)
struct mlx4_priv *priv = mlx4_priv(dev);
struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
struct res_common *r;

spin_lock_irq(mlx4_tlock(dev));
r = res_tracker_lookup(&tracker->res_tree[type], id);
if (r && (r->owner == slave))
r->state = r->to_state;
spin_unlock_irq(mlx4_tlock(dev));

static int valid_reserved(struct mlx4_dev *dev, int slave, int qpn)
return mlx4_is_qp_reserved(dev, qpn) &&
(mlx4_is_master(dev) || mlx4_is_guest_proxy(dev, slave, qpn));

static int fw_reserved(struct mlx4_dev *dev, int qpn)
return qpn < dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW];

static int qp_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
u64 in_param, u64 *out_param)

case RES_OP_RESERVE:
count = get_param_l(&in_param) & 0xffffff;
/* Turn off all unsupported QP allocation flags that the
 * slave tries to set.
 */
flags = (get_param_l(&in_param) >> 24) & dev->caps.alloc_res_qp_mask;
align = get_param_h(&in_param);
err = mlx4_grant_resource(dev, slave, RES_QP, count, 0);

err = __mlx4_qp_reserve_range(dev, count, align, &base, flags);

mlx4_release_resource(dev, slave, RES_QP, count, 0);

err = add_res_range(dev, slave, base, count, RES_QP, 0);

mlx4_release_resource(dev, slave, RES_QP, count, 0);
__mlx4_qp_release_range(dev, base, count);

set_param_l(out_param, base);

case RES_OP_MAP_ICM:
qpn = get_param_l(&in_param) & 0x7fffff;
if (valid_reserved(dev, slave, qpn)) {
err = add_res_range(dev, slave, qpn, 1, RES_QP, 0);

err = qp_res_start_move_to(dev, slave, qpn, RES_QP_MAPPED,

if (!fw_reserved(dev, qpn)) {
err = __mlx4_qp_alloc_icm(dev, qpn, GFP_KERNEL);

res_abort_move(dev, slave, RES_QP, qpn);

res_end_move(dev, slave, RES_QP, qpn);

static int mtt_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
u64 in_param, u64 *out_param)

if (op != RES_OP_RESERVE_AND_MAP)

order = get_param_l(&in_param);

err = mlx4_grant_resource(dev, slave, RES_MTT, 1 << order, 0);

base = __mlx4_alloc_mtt_range(dev, order);

mlx4_release_resource(dev, slave, RES_MTT, 1 << order, 0);

err = add_res_range(dev, slave, base, 1, RES_MTT, order);

mlx4_release_resource(dev, slave, RES_MTT, 1 << order, 0);
__mlx4_free_mtt_range(dev, base, order);

set_param_l(out_param, base);

static int mpt_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
u64 in_param, u64 *out_param)
struct res_mpt *mpt;

case RES_OP_RESERVE:
err = mlx4_grant_resource(dev, slave, RES_MPT, 1, 0);

index = __mlx4_mpt_reserve(dev);

mlx4_release_resource(dev, slave, RES_MPT, 1, 0);

id = index & mpt_mask(dev);

err = add_res_range(dev, slave, id, 1, RES_MPT, index);

mlx4_release_resource(dev, slave, RES_MPT, 1, 0);
__mlx4_mpt_release(dev, index);

set_param_l(out_param, index);

case RES_OP_MAP_ICM:
index = get_param_l(&in_param);
id = index & mpt_mask(dev);
err = mr_res_start_move_to(dev, slave, id,
RES_MPT_MAPPED, &mpt);

err = __mlx4_mpt_alloc_icm(dev, mpt->key, GFP_KERNEL);

res_abort_move(dev, slave, RES_MPT, id);

res_end_move(dev, slave, RES_MPT, id);

static int cq_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
u64 in_param, u64 *out_param)

case RES_OP_RESERVE_AND_MAP:
err = mlx4_grant_resource(dev, slave, RES_CQ, 1, 0);

err = __mlx4_cq_alloc_icm(dev, &cqn);

mlx4_release_resource(dev, slave, RES_CQ, 1, 0);

err = add_res_range(dev, slave, cqn, 1, RES_CQ, 0);

mlx4_release_resource(dev, slave, RES_CQ, 1, 0);
__mlx4_cq_free_icm(dev, cqn);

set_param_l(out_param, cqn);

static int srq_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
u64 in_param, u64 *out_param)

case RES_OP_RESERVE_AND_MAP:
err = mlx4_grant_resource(dev, slave, RES_SRQ, 1, 0);

err = __mlx4_srq_alloc_icm(dev, &srqn);

mlx4_release_resource(dev, slave, RES_SRQ, 1, 0);

err = add_res_range(dev, slave, srqn, 1, RES_SRQ, 0);

mlx4_release_resource(dev, slave, RES_SRQ, 1, 0);
__mlx4_srq_free_icm(dev, srqn);

set_param_l(out_param, srqn);

static int mac_find_smac_ix_in_slave(struct mlx4_dev *dev, int slave, int port,
u8 smac_index, u64 *mac)
struct mlx4_priv *priv = mlx4_priv(dev);
struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
struct list_head *mac_list =
&tracker->slave_list[slave].res_list[RES_MAC];
struct mac_res *res, *tmp;

list_for_each_entry_safe(res, tmp, mac_list, list) {
if (res->smac_index == smac_index && res->port == (u8) port) {

static int mac_add_to_slave(struct mlx4_dev *dev, int slave, u64 mac, int port, u8 smac_index)
struct mlx4_priv *priv = mlx4_priv(dev);
struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
struct list_head *mac_list =
&tracker->slave_list[slave].res_list[RES_MAC];
struct mac_res *res, *tmp;

list_for_each_entry_safe(res, tmp, mac_list, list) {
if (res->mac == mac && res->port == (u8) port) {
/* mac found. update ref count */

if (mlx4_grant_resource(dev, slave, RES_MAC, 1, port))

res = kzalloc(sizeof *res, GFP_KERNEL);

mlx4_release_resource(dev, slave, RES_MAC, 1, port);

res->port = (u8) port;
res->smac_index = smac_index;

list_add_tail(&res->list,
&tracker->slave_list[slave].res_list[RES_MAC]);

static void mac_del_from_slave(struct mlx4_dev *dev, int slave, u64 mac,
struct mlx4_priv *priv = mlx4_priv(dev);
struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
struct list_head *mac_list =
&tracker->slave_list[slave].res_list[RES_MAC];
struct mac_res *res, *tmp;

list_for_each_entry_safe(res, tmp, mac_list, list) {
if (res->mac == mac && res->port == (u8) port) {
if (!--res->ref_count) {
list_del(&res->list);
mlx4_release_resource(dev, slave, RES_MAC, 1, port);

static void rem_slave_macs(struct mlx4_dev *dev, int slave)
struct mlx4_priv *priv = mlx4_priv(dev);
struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
struct list_head *mac_list =
&tracker->slave_list[slave].res_list[RES_MAC];
struct mac_res *res, *tmp;

list_for_each_entry_safe(res, tmp, mac_list, list) {
list_del(&res->list);
/* dereference the MAC the number of times the slave referenced it */
for (i = 0; i < res->ref_count; i++)
__mlx4_unregister_mac(dev, res->port, res->mac);
mlx4_release_resource(dev, slave, RES_MAC, 1, res->port);

static int mac_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
u64 in_param, u64 *out_param, int in_port)

if (op != RES_OP_RESERVE_AND_MAP)

port = !in_port ? get_param_l(out_param) : in_port;
port = mlx4_slave_convert_port(

err = __mlx4_register_mac(dev, port, mac);

set_param_l(out_param, err);

err = mac_add_to_slave(dev, slave, mac, port, smac_index);

__mlx4_unregister_mac(dev, port, mac);

static int vlan_add_to_slave(struct mlx4_dev *dev, int slave, u16 vlan,
int port, int vlan_index)
struct mlx4_priv *priv = mlx4_priv(dev);
struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
struct list_head *vlan_list =
&tracker->slave_list[slave].res_list[RES_VLAN];
struct vlan_res *res, *tmp;

list_for_each_entry_safe(res, tmp, vlan_list, list) {
if (res->vlan == vlan && res->port == (u8) port) {
/* vlan found. update ref count */

if (mlx4_grant_resource(dev, slave, RES_VLAN, 1, port))

res = kzalloc(sizeof(*res), GFP_KERNEL);

mlx4_release_resource(dev, slave, RES_VLAN, 1, port);

res->port = (u8) port;
res->vlan_index = vlan_index;

list_add_tail(&res->list,
&tracker->slave_list[slave].res_list[RES_VLAN]);

static void vlan_del_from_slave(struct mlx4_dev *dev, int slave, u16 vlan,
struct mlx4_priv *priv = mlx4_priv(dev);
struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
struct list_head *vlan_list =
&tracker->slave_list[slave].res_list[RES_VLAN];
struct vlan_res *res, *tmp;

list_for_each_entry_safe(res, tmp, vlan_list, list) {
if (res->vlan == vlan && res->port == (u8) port) {
if (!--res->ref_count) {
list_del(&res->list);
mlx4_release_resource(dev, slave, RES_VLAN,

static void rem_slave_vlans(struct mlx4_dev *dev, int slave)
struct mlx4_priv *priv = mlx4_priv(dev);
struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
struct list_head *vlan_list =
&tracker->slave_list[slave].res_list[RES_VLAN];
struct vlan_res *res, *tmp;

list_for_each_entry_safe(res, tmp, vlan_list, list) {
list_del(&res->list);
/* dereference the vlan the number of times the slave referenced it */
for (i = 0; i < res->ref_count; i++)
__mlx4_unregister_vlan(dev, res->port, res->vlan);
mlx4_release_resource(dev, slave, RES_VLAN, 1, res->port);

static int vlan_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
u64 in_param, u64 *out_param, int in_port)
struct mlx4_priv *priv = mlx4_priv(dev);
struct mlx4_slave_state *slave_state = priv->mfunc.master.slave_state;

port = !in_port ? get_param_l(out_param) : in_port;

if (!port || op != RES_OP_RESERVE_AND_MAP)

port = mlx4_slave_convert_port(

/* upstream kernels had NOP for reg/unreg vlan. Continue this. */
if (!in_port && port > 0 && port <= dev->caps.num_ports) {
slave_state[slave].old_vlan_api = true;

vlan = (u16) in_param;

err = __mlx4_register_vlan(dev, port, vlan, &vlan_index);

set_param_l(out_param, (u32) vlan_index);
err = vlan_add_to_slave(dev, slave, vlan, port, vlan_index);

__mlx4_unregister_vlan(dev, port, vlan);

static int counter_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
u64 in_param, u64 *out_param)

if (op != RES_OP_RESERVE)

err = mlx4_grant_resource(dev, slave, RES_COUNTER, 1, 0);

err = __mlx4_counter_alloc(dev, &index);

mlx4_release_resource(dev, slave, RES_COUNTER, 1, 0);

err = add_res_range(dev, slave, index, 1, RES_COUNTER, 0);

__mlx4_counter_free(dev, index);
mlx4_release_resource(dev, slave, RES_COUNTER, 1, 0);

set_param_l(out_param, index);

static int xrcdn_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
u64 in_param, u64 *out_param)

if (op != RES_OP_RESERVE)

err = __mlx4_xrcd_alloc(dev, &xrcdn);

err = add_res_range(dev, slave, xrcdn, 1, RES_XRCD, 0);

__mlx4_xrcd_free(dev, xrcdn);

set_param_l(out_param, xrcdn);

int mlx4_ALLOC_RES_wrapper(struct mlx4_dev *dev, int slave,
struct mlx4_vhcr *vhcr,
struct mlx4_cmd_mailbox *inbox,
struct mlx4_cmd_mailbox *outbox,
struct mlx4_cmd_info *cmd)
int alop = vhcr->op_modifier;

switch (vhcr->in_modifier & 0xFF) {

err = qp_alloc_res(dev, slave, vhcr->op_modifier, alop,
vhcr->in_param, &vhcr->out_param);

err = mtt_alloc_res(dev, slave, vhcr->op_modifier, alop,
vhcr->in_param, &vhcr->out_param);

err = mpt_alloc_res(dev, slave, vhcr->op_modifier, alop,
vhcr->in_param, &vhcr->out_param);

err = cq_alloc_res(dev, slave, vhcr->op_modifier, alop,
vhcr->in_param, &vhcr->out_param);

err = srq_alloc_res(dev, slave, vhcr->op_modifier, alop,
vhcr->in_param, &vhcr->out_param);

err = mac_alloc_res(dev, slave, vhcr->op_modifier, alop,
vhcr->in_param, &vhcr->out_param,
(vhcr->in_modifier >> 8) & 0xFF);

err = vlan_alloc_res(dev, slave, vhcr->op_modifier, alop,
vhcr->in_param, &vhcr->out_param,
(vhcr->in_modifier >> 8) & 0xFF);

err = counter_alloc_res(dev, slave, vhcr->op_modifier, alop,
vhcr->in_param, &vhcr->out_param);

err = xrcdn_alloc_res(dev, slave, vhcr->op_modifier, alop,
vhcr->in_param, &vhcr->out_param);

static int qp_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,

case RES_OP_RESERVE:
base = get_param_l(&in_param) & 0x7fffff;
count = get_param_h(&in_param);
err = rem_res_range(dev, slave, base, count, RES_QP, 0);

mlx4_release_resource(dev, slave, RES_QP, count, 0);
__mlx4_qp_release_range(dev, base, count);

case RES_OP_MAP_ICM:
qpn = get_param_l(&in_param) & 0x7fffff;
err = qp_res_start_move_to(dev, slave, qpn, RES_QP_RESERVED,

if (!fw_reserved(dev, qpn))
__mlx4_qp_free_icm(dev, qpn);

res_end_move(dev, slave, RES_QP, qpn);

if (valid_reserved(dev, slave, qpn))
err = rem_res_range(dev, slave, qpn, 1, RES_QP, 0);

static int mtt_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
u64 in_param, u64 *out_param)

if (op != RES_OP_RESERVE_AND_MAP)

base = get_param_l(&in_param);
order = get_param_h(&in_param);
err = rem_res_range(dev, slave, base, 1, RES_MTT, order);

mlx4_release_resource(dev, slave, RES_MTT, 1 << order, 0);
__mlx4_free_mtt_range(dev, base, order);

static int mpt_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
struct res_mpt *mpt;

case RES_OP_RESERVE:
index = get_param_l(&in_param);
id = index & mpt_mask(dev);
err = get_res(dev, slave, id, RES_MPT, &mpt);

put_res(dev, slave, id, RES_MPT);

err = rem_res_range(dev, slave, id, 1, RES_MPT, 0);

mlx4_release_resource(dev, slave, RES_MPT, 1, 0);
__mlx4_mpt_release(dev, index);

case RES_OP_MAP_ICM:
index = get_param_l(&in_param);
id = index & mpt_mask(dev);
err = mr_res_start_move_to(dev, slave, id,
RES_MPT_RESERVED, &mpt);

__mlx4_mpt_free_icm(dev, mpt->key);
res_end_move(dev, slave, RES_MPT, id);

static int cq_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
u64 in_param, u64 *out_param)

case RES_OP_RESERVE_AND_MAP:
cqn = get_param_l(&in_param);
err = rem_res_range(dev, slave, cqn, 1, RES_CQ, 0);

mlx4_release_resource(dev, slave, RES_CQ, 1, 0);
__mlx4_cq_free_icm(dev, cqn);

static int srq_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
u64 in_param, u64 *out_param)

case RES_OP_RESERVE_AND_MAP:
srqn = get_param_l(&in_param);
err = rem_res_range(dev, slave, srqn, 1, RES_SRQ, 0);

mlx4_release_resource(dev, slave, RES_SRQ, 1, 0);
__mlx4_srq_free_icm(dev, srqn);

static int mac_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
u64 in_param, u64 *out_param, int in_port)

case RES_OP_RESERVE_AND_MAP:
port = !in_port ? get_param_l(out_param) : in_port;
port = mlx4_slave_convert_port(

mac_del_from_slave(dev, slave, in_param, port);
__mlx4_unregister_mac(dev, port, in_param);

static int vlan_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
u64 in_param, u64 *out_param, int port)
struct mlx4_priv *priv = mlx4_priv(dev);
struct mlx4_slave_state *slave_state = priv->mfunc.master.slave_state;

port = mlx4_slave_convert_port(

case RES_OP_RESERVE_AND_MAP:
if (slave_state[slave].old_vlan_api)

vlan_del_from_slave(dev, slave, in_param, port);
__mlx4_unregister_vlan(dev, port, in_param);

static int counter_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
u64 in_param, u64 *out_param)

if (op != RES_OP_RESERVE)

index = get_param_l(&in_param);
if (index == MLX4_SINK_COUNTER_INDEX(dev))

err = rem_res_range(dev, slave, index, 1, RES_COUNTER, 0);

__mlx4_counter_free(dev, index);
mlx4_release_resource(dev, slave, RES_COUNTER, 1, 0);

static int xrcdn_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
u64 in_param, u64 *out_param)

if (op != RES_OP_RESERVE)

xrcdn = get_param_l(&in_param);
err = rem_res_range(dev, slave, xrcdn, 1, RES_XRCD, 0);

__mlx4_xrcd_free(dev, xrcdn);

int mlx4_FREE_RES_wrapper(struct mlx4_dev *dev, int slave,
struct mlx4_vhcr *vhcr,
struct mlx4_cmd_mailbox *inbox,
struct mlx4_cmd_mailbox *outbox,
struct mlx4_cmd_info *cmd)
int alop = vhcr->op_modifier;

switch (vhcr->in_modifier & 0xFF) {

err = qp_free_res(dev, slave, vhcr->op_modifier, alop,

err = mtt_free_res(dev, slave, vhcr->op_modifier, alop,
vhcr->in_param, &vhcr->out_param);

err = mpt_free_res(dev, slave, vhcr->op_modifier, alop,

err = cq_free_res(dev, slave, vhcr->op_modifier, alop,
vhcr->in_param, &vhcr->out_param);

err = srq_free_res(dev, slave, vhcr->op_modifier, alop,
vhcr->in_param, &vhcr->out_param);

err = mac_free_res(dev, slave, vhcr->op_modifier, alop,
vhcr->in_param, &vhcr->out_param,
(vhcr->in_modifier >> 8) & 0xFF);

err = vlan_free_res(dev, slave, vhcr->op_modifier, alop,
vhcr->in_param, &vhcr->out_param,
(vhcr->in_modifier >> 8) & 0xFF);

err = counter_free_res(dev, slave, vhcr->op_modifier, alop,
vhcr->in_param, &vhcr->out_param);

err = xrcdn_free_res(dev, slave, vhcr->op_modifier, alop,
vhcr->in_param, &vhcr->out_param);

/* ugly but other choices are uglier */
static int mr_phys_mpt(struct mlx4_mpt_entry *mpt)
return (be32_to_cpu(mpt->flags) >> 9) & 1;

static int mr_get_mtt_addr(struct mlx4_mpt_entry *mpt)
return (int)be64_to_cpu(mpt->mtt_addr) & 0xfffffff8;

static int mr_get_mtt_size(struct mlx4_mpt_entry *mpt)
return be32_to_cpu(mpt->mtt_sz);

static u32 mr_get_pd(struct mlx4_mpt_entry *mpt)
return be32_to_cpu(mpt->pd_flags) & 0x00ffffff;

static int mr_is_fmr(struct mlx4_mpt_entry *mpt)
return be32_to_cpu(mpt->pd_flags) & MLX4_MPT_PD_FLAG_FAST_REG;

static int mr_is_bind_enabled(struct mlx4_mpt_entry *mpt)
return be32_to_cpu(mpt->flags) & MLX4_MPT_FLAG_BIND_ENABLE;

static int mr_is_region(struct mlx4_mpt_entry *mpt)
return be32_to_cpu(mpt->flags) & MLX4_MPT_FLAG_REGION;

static int qp_get_mtt_addr(struct mlx4_qp_context *qpc)
return be32_to_cpu(qpc->mtt_base_addr_l) & 0xfffffff8;

static int srq_get_mtt_addr(struct mlx4_srq_context *srqc)
return be32_to_cpu(srqc->mtt_base_addr_l) & 0xfffffff8;
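
/* MTT entries needed by a QP: SQ bytes (1 << (log_sq_size + stride + 4))
 * plus RQ bytes (zero for SRQ/RSS/XRC QPs, which have no RQ), plus the
 * page offset, rounded up to a power-of-two number of pages of
 * (1 << page_shift) bytes.
 */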
static int qp_get_mtt_size(struct mlx4_qp_context *qpc)
int page_shift = (qpc->log_page_size & 0x3f) + 12;
int log_sq_size = (qpc->sq_size_stride >> 3) & 0xf;
int log_sq_sride = qpc->sq_size_stride & 7;
int log_rq_size = (qpc->rq_size_stride >> 3) & 0xf;
int log_rq_stride = qpc->rq_size_stride & 7;
int srq = (be32_to_cpu(qpc->srqn) >> 24) & 1;
int rss = (be32_to_cpu(qpc->flags) >> 13) & 1;
u32 ts = (be32_to_cpu(qpc->flags) >> 16) & 0xff;
int xrc = (ts == MLX4_QP_ST_XRC) ? 1 : 0;

int page_offset = (be32_to_cpu(qpc->params2) >> 6) & 0x3f;

sq_size = 1 << (log_sq_size + log_sq_sride + 4);
rq_size = (srq|rss|xrc) ? 0 : (1 << (log_rq_size + log_rq_stride + 4));
total_mem = sq_size + rq_size;

roundup_pow_of_two((total_mem + (page_offset << 6)) >>

static int check_mtt_range(struct mlx4_dev *dev, int slave, int start,
int size, struct res_mtt *mtt)
int res_start = mtt->com.res_id;
int res_size = (1 << mtt->order);

if (start < res_start || start + size > res_start + res_size)

int mlx4_SW2HW_MPT_wrapper(struct mlx4_dev *dev, int slave,
struct mlx4_vhcr *vhcr,
struct mlx4_cmd_mailbox *inbox,
struct mlx4_cmd_mailbox *outbox,
struct mlx4_cmd_info *cmd)
int index = vhcr->in_modifier;
struct res_mtt *mtt;
struct res_mpt *mpt;
int mtt_base = mr_get_mtt_addr(inbox->buf) / dev->caps.mtt_entry_sz;

id = index & mpt_mask(dev);
err = mr_res_start_move_to(dev, slave, id, RES_MPT_HW, &mpt);

/* Disable memory windows for VFs. */
if (!mr_is_region(inbox->buf)) {

/* Make sure that the PD bits related to the slave id are zeros. */
pd = mr_get_pd(inbox->buf);
pd_slave = (pd >> 17) & 0x7f;
if (pd_slave != 0 && --pd_slave != slave) {

if (mr_is_fmr(inbox->buf)) {
/* FMR and Bind Enable are forbidden in slave devices. */
if (mr_is_bind_enabled(inbox->buf)) {

/* FMR and Memory Windows are also forbidden. */
if (!mr_is_region(inbox->buf)) {

phys = mr_phys_mpt(inbox->buf);

err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);

err = check_mtt_range(dev, slave, mtt_base,
mr_get_mtt_size(inbox->buf), mtt);

err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);

atomic_inc(&mtt->ref_count);
put_res(dev, slave, mtt->com.res_id, RES_MTT);

res_end_move(dev, slave, RES_MPT, id);

put_res(dev, slave, mtt->com.res_id, RES_MTT);

res_abort_move(dev, slave, RES_MPT, id);

int mlx4_HW2SW_MPT_wrapper(struct mlx4_dev *dev, int slave,
struct mlx4_vhcr *vhcr,
struct mlx4_cmd_mailbox *inbox,
struct mlx4_cmd_mailbox *outbox,
struct mlx4_cmd_info *cmd)
int index = vhcr->in_modifier;
struct res_mpt *mpt;

id = index & mpt_mask(dev);
err = mr_res_start_move_to(dev, slave, id, RES_MPT_MAPPED, &mpt);

err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);

atomic_dec(&mpt->mtt->ref_count);

res_end_move(dev, slave, RES_MPT, id);

res_abort_move(dev, slave, RES_MPT, id);

int mlx4_QUERY_MPT_wrapper(struct mlx4_dev *dev, int slave,
struct mlx4_vhcr *vhcr,
struct mlx4_cmd_mailbox *inbox,
struct mlx4_cmd_mailbox *outbox,
struct mlx4_cmd_info *cmd)
int index = vhcr->in_modifier;
struct res_mpt *mpt;

id = index & mpt_mask(dev);
err = get_res(dev, slave, id, RES_MPT, &mpt);

if (mpt->com.from_state == RES_MPT_MAPPED) {
/* In order to allow rereg in SRIOV, we need to alter the MPT entry. To do
 * that, the VF must read the MPT. But since the MPT entry memory is not
 * in the VF's virtual memory space, it must use QUERY_MPT to obtain the
 * entry contents. To guarantee that the MPT cannot be changed, the driver
 * must perform HW2SW_MPT before this query and return the MPT entry to HW
 * ownership following the change. The change here allows the VF to
 * perform QUERY_MPT also when the entry is in SW ownership.
 */
struct mlx4_mpt_entry *mpt_entry = mlx4_table_find(
&mlx4_priv(dev)->mr_table.dmpt_table,

if (NULL == mpt_entry || NULL == outbox->buf) {

memcpy(outbox->buf, mpt_entry, sizeof(*mpt_entry));

} else if (mpt->com.from_state == RES_MPT_HW) {
err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);

put_res(dev, slave, id, RES_MPT);

static int qp_get_rcqn(struct mlx4_qp_context *qpc)
return be32_to_cpu(qpc->cqn_recv) & 0xffffff;

static int qp_get_scqn(struct mlx4_qp_context *qpc)
return be32_to_cpu(qpc->cqn_send) & 0xffffff;

static u32 qp_get_srqn(struct mlx4_qp_context *qpc)
return be32_to_cpu(qpc->srqn) & 0x1ffffff;

static void adjust_proxy_tun_qkey(struct mlx4_dev *dev, struct mlx4_vhcr *vhcr,
struct mlx4_qp_context *context)
u32 qpn = vhcr->in_modifier & 0xffffff;

if (mlx4_get_parav_qkey(dev, qpn, &qkey))

/* adjust qkey in qp context */
context->qkey = cpu_to_be32(qkey);

static int adjust_qp_sched_queue(struct mlx4_dev *dev, int slave,
struct mlx4_qp_context *qpc,
struct mlx4_cmd_mailbox *inbox);

int mlx4_RST2INIT_QP_wrapper(struct mlx4_dev *dev, int slave,
struct mlx4_vhcr *vhcr,
struct mlx4_cmd_mailbox *inbox,
struct mlx4_cmd_mailbox *outbox,
struct mlx4_cmd_info *cmd)
int qpn = vhcr->in_modifier & 0x7fffff;
struct res_mtt *mtt;
struct mlx4_qp_context *qpc = inbox->buf + 8;
int mtt_base = qp_get_mtt_addr(qpc) / dev->caps.mtt_entry_sz;
int mtt_size = qp_get_mtt_size(qpc);
int rcqn = qp_get_rcqn(qpc);
int scqn = qp_get_scqn(qpc);
u32 srqn = qp_get_srqn(qpc) & 0xffffff;
int use_srq = (qp_get_srqn(qpc) >> 24) & 1;
struct res_srq *srq;
int local_qpn = be32_to_cpu(qpc->local_qpn) & 0xffffff;

err = adjust_qp_sched_queue(dev, slave, qpc, inbox);

err = qp_res_start_move_to(dev, slave, qpn, RES_QP_HW, &qp, 0);

qp->local_qpn = local_qpn;
qp->sched_queue = 0;
qp->vlan_control = 0;
qp->pri_path_fl = 0;
qp->qpc_flags = be32_to_cpu(qpc->flags);

err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);

err = check_mtt_range(dev, slave, mtt_base, mtt_size, mtt);

err = get_res(dev, slave, rcqn, RES_CQ, &rcq);

err = get_res(dev, slave, scqn, RES_CQ, &scq);

err = get_res(dev, slave, srqn, RES_SRQ, &srq);

adjust_proxy_tun_qkey(dev, vhcr, qpc);
update_pkey_index(dev, slave, inbox);
err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);

atomic_inc(&mtt->ref_count);
atomic_inc(&rcq->ref_count);
atomic_inc(&scq->ref_count);

put_res(dev, slave, scqn, RES_CQ);

atomic_inc(&srq->ref_count);
put_res(dev, slave, srqn, RES_SRQ);

put_res(dev, slave, rcqn, RES_CQ);
put_res(dev, slave, mtt_base, RES_MTT);
res_end_move(dev, slave, RES_QP, qpn);

put_res(dev, slave, srqn, RES_SRQ);

put_res(dev, slave, scqn, RES_CQ);

put_res(dev, slave, rcqn, RES_CQ);

put_res(dev, slave, mtt_base, RES_MTT);

res_abort_move(dev, slave, RES_QP, qpn);
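
/* EQ and CQ entries are 32 bytes each, hence the "+ 5" when converting
 * the log queue size into the number of pages covered by the MTT range.
 */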
2843 static int eq_get_mtt_addr(struct mlx4_eq_context *eqc)
2845 return be32_to_cpu(eqc->mtt_base_addr_l) & 0xfffffff8;
2848 static int eq_get_mtt_size(struct mlx4_eq_context *eqc)
2850 int log_eq_size = eqc->log_eq_size & 0x1f;
2851 int page_shift = (eqc->log_page_size & 0x3f) + 12;
2853 if (log_eq_size + 5 < page_shift)
2856 return 1 << (log_eq_size + 5 - page_shift);
2859 static int cq_get_mtt_addr(struct mlx4_cq_context *cqc)
2861 return be32_to_cpu(cqc->mtt_base_addr_l) & 0xfffffff8;
2864 static int cq_get_mtt_size(struct mlx4_cq_context *cqc)
2866 int log_cq_size = (be32_to_cpu(cqc->logsize_usrpage) >> 24) & 0x1f;
2867 int page_shift = (cqc->log_page_size & 0x3f) + 12;
2869 if (log_cq_size + 5 < page_shift)
2872 return 1 << (log_cq_size + 5 - page_shift);
2875 int mlx4_SW2HW_EQ_wrapper(struct mlx4_dev *dev, int slave,
2876 struct mlx4_vhcr *vhcr,
2877 struct mlx4_cmd_mailbox *inbox,
2878 struct mlx4_cmd_mailbox *outbox,
2879 struct mlx4_cmd_info *cmd)
2882 int eqn = vhcr->in_modifier;
2883 int res_id = (slave << 10) | eqn;
2884 struct mlx4_eq_context *eqc = inbox->buf;
2885 int mtt_base = eq_get_mtt_addr(eqc) / dev->caps.mtt_entry_sz;
2886 int mtt_size = eq_get_mtt_size(eqc);
2888 struct res_mtt *mtt;
2890 err = add_res_range(dev, slave, res_id, 1, RES_EQ, 0);
2893 err = eq_res_start_move_to(dev, slave, res_id, RES_EQ_HW, &eq);
2897 err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
2901 err = check_mtt_range(dev, slave, mtt_base, mtt_size, mtt);
2905 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2909 atomic_inc(&mtt->ref_count);
2911 put_res(dev, slave, mtt->com.res_id, RES_MTT);
2912 res_end_move(dev, slave, RES_EQ, res_id);
2916 put_res(dev, slave, mtt->com.res_id, RES_MTT);
2918 res_abort_move(dev, slave, RES_EQ, res_id);
2920 rem_res_range(dev, slave, res_id, 1, RES_EQ, 0);
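/*
 * EQ numbers are only unique per function, so the resource tracker key
 * embeds the owner: res_id = (slave << 10) | eqn, leaving 10 bits for
 * the EQN (the teardown paths below mask with "eqn & 0x3ff" to recover
 * it). E.g. slave 3, EQN 5 maps to res_id 0xc05.
 */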
2924 int mlx4_CONFIG_DEV_wrapper(struct mlx4_dev *dev, int slave,
2925 struct mlx4_vhcr *vhcr,
2926 struct mlx4_cmd_mailbox *inbox,
2927 struct mlx4_cmd_mailbox *outbox,
2928 struct mlx4_cmd_info *cmd)
2931 u8 get = vhcr->op_modifier;
2936 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2941 static int get_containing_mtt(struct mlx4_dev *dev, int slave, int start,
2942 int len, struct res_mtt **res)
2944 struct mlx4_priv *priv = mlx4_priv(dev);
2945 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
2946 struct res_mtt *mtt;
2949 spin_lock_irq(mlx4_tlock(dev));
2950 list_for_each_entry(mtt, &tracker->slave_list[slave].res_list[RES_MTT],
2952 if (!check_mtt_range(dev, slave, start, len, mtt)) {
2954 mtt->com.from_state = mtt->com.state;
2955 mtt->com.state = RES_MTT_BUSY;
2960 spin_unlock_irq(mlx4_tlock(dev));
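/*
 * A successful lookup leaves the containing MTT range marked
 * RES_MTT_BUSY (its previous state saved in com.from_state), so the
 * range cannot be freed or reassigned while the caller operates on it;
 * the caller is expected to release it again with put_res(), as
 * mlx4_WRITE_MTT_wrapper does below.
 */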
2965 static int verify_qp_parameters(struct mlx4_dev *dev,
2966 struct mlx4_vhcr *vhcr,
2967 struct mlx4_cmd_mailbox *inbox,
2968 enum qp_transition transition, u8 slave)
2972 struct mlx4_qp_context *qp_ctx;
2973 enum mlx4_qp_optpar optpar;
2977 qp_ctx = inbox->buf + 8;
2978 qp_type = (be32_to_cpu(qp_ctx->flags) >> 16) & 0xff;
2979 optpar = be32_to_cpu(*(__be32 *) inbox->buf);
2981 if (slave != mlx4_master_func_num(dev)) {
2982 qp_ctx->params2 &= ~MLX4_QP_BIT_FPP;
2983 /* setting QP rate-limit is disallowed for VFs */
2984 if (qp_ctx->rate_limit_params)
2990 case MLX4_QP_ST_XRC:
2992 switch (transition) {
2993 case QP_TRANS_INIT2RTR:
2994 case QP_TRANS_RTR2RTS:
2995 case QP_TRANS_RTS2RTS:
2996 case QP_TRANS_SQD2SQD:
2997 case QP_TRANS_SQD2RTS:
2998 if (slave != mlx4_master_func_num(dev))
2999 if (optpar & MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH) {
3000 port = (qp_ctx->pri_path.sched_queue >> 6 & 1) + 1;
3001 if (dev->caps.port_mask[port] != MLX4_PORT_TYPE_IB)
3002 num_gids = mlx4_get_slave_num_gids(dev, slave, port);
3005 if (qp_ctx->pri_path.mgid_index >= num_gids)
3008 if (optpar & MLX4_QP_OPTPAR_ALT_ADDR_PATH) {
3009 port = (qp_ctx->alt_path.sched_queue >> 6 & 1) + 1;
3010 if (dev->caps.port_mask[port] != MLX4_PORT_TYPE_IB)
3011 num_gids = mlx4_get_slave_num_gids(dev, slave, port);
3014 if (qp_ctx->alt_path.mgid_index >= num_gids)
3023 case MLX4_QP_ST_MLX:
3024 qpn = vhcr->in_modifier & 0x7fffff;
3025 port = (qp_ctx->pri_path.sched_queue >> 6 & 1) + 1;
3026 if (transition == QP_TRANS_INIT2RTR &&
3027 slave != mlx4_master_func_num(dev) &&
3028 mlx4_is_qp_reserved(dev, qpn) &&
3029 !mlx4_vf_smi_enabled(dev, slave, port)) {
3030 /* only enabled VFs may create MLX proxy QPs */
3031 mlx4_err(dev, "%s: unprivileged slave %d attempting to create an MLX proxy special QP on port %d\n",
3032 __func__, slave, port);
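/*
 * The mgid_index checks above enforce the per-function GID window on
 * IB ports: each slave is assigned a slice of the physical GID table
 * (mlx4_get_slave_num_gids()), so a VF naming a GID index at or beyond
 * its allotment is rejected before the firmware ever sees the command.
 */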
3044 int mlx4_WRITE_MTT_wrapper(struct mlx4_dev *dev, int slave,
3045 struct mlx4_vhcr *vhcr,
3046 struct mlx4_cmd_mailbox *inbox,
3047 struct mlx4_cmd_mailbox *outbox,
3048 struct mlx4_cmd_info *cmd)
3050 struct mlx4_mtt mtt;
3051 __be64 *page_list = inbox->buf;
3052 u64 *pg_list = (u64 *)page_list;
3054 struct res_mtt *rmtt = NULL;
3055 int start = be64_to_cpu(page_list[0]);
3056 int npages = vhcr->in_modifier;
3059 err = get_containing_mtt(dev, slave, start, npages, &rmtt);
3063 /* Call the SW implementation of write_mtt:
3064 * - Prepare a dummy mtt struct
3065 * - Translate inbox contents to simple addresses in host endianness */
3066 mtt.offset = 0; /* TBD: offset handling is broken, but the offset
3067 is not actually used here */
3070 for (i = 0; i < npages; ++i)
3071 pg_list[i + 2] = (be64_to_cpu(page_list[i + 2]) & ~1ULL);
3073 err = __mlx4_write_mtt(dev, &mtt, be64_to_cpu(page_list[0]), npages,
3074 ((u64 *)page_list + 2));
3077 put_res(dev, slave, rmtt->com.res_id, RES_MTT);
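/*
 * Mailbox layout assumed by the loop above: page_list[0] holds the
 * starting MTT index, page_list[1] is reserved, and the page addresses
 * begin at page_list[2]. Bit 0 of each address is a present/valid flag
 * set by the guest, which is cleared (& ~1ULL) before the plain
 * host-endian addresses are handed to __mlx4_write_mtt().
 */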
3082 int mlx4_HW2SW_EQ_wrapper(struct mlx4_dev *dev, int slave,
3083 struct mlx4_vhcr *vhcr,
3084 struct mlx4_cmd_mailbox *inbox,
3085 struct mlx4_cmd_mailbox *outbox,
3086 struct mlx4_cmd_info *cmd)
3088 int eqn = vhcr->in_modifier;
3089 int res_id = eqn | (slave << 10);
3093 err = eq_res_start_move_to(dev, slave, res_id, RES_EQ_RESERVED, &eq);
3097 err = get_res(dev, slave, eq->mtt->com.res_id, RES_MTT, NULL);
3101 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3105 atomic_dec(&eq->mtt->ref_count);
3106 put_res(dev, slave, eq->mtt->com.res_id, RES_MTT);
3107 res_end_move(dev, slave, RES_EQ, res_id);
3108 rem_res_range(dev, slave, res_id, 1, RES_EQ, 0);
3113 put_res(dev, slave, eq->mtt->com.res_id, RES_MTT);
3115 res_abort_move(dev, slave, RES_EQ, res_id);
3120 int mlx4_GEN_EQE(struct mlx4_dev *dev, int slave, struct mlx4_eqe *eqe)
3122 struct mlx4_priv *priv = mlx4_priv(dev);
3123 struct mlx4_slave_event_eq_info *event_eq;
3124 struct mlx4_cmd_mailbox *mailbox;
3125 u32 in_modifier = 0;
3130 if (!priv->mfunc.master.slave_state)
3133 /* check that the slave is valid, is not the PF, and is active */
3134 if (slave < 0 || slave > dev->persist->num_vfs ||
3135 slave == dev->caps.function ||
3136 !priv->mfunc.master.slave_state[slave].active)
3139 event_eq = &priv->mfunc.master.slave_state[slave].event_eq[eqe->type];
3141 /* Create the event only if the slave is registered */
3142 if (event_eq->eqn < 0)
3145 mutex_lock(&priv->mfunc.master.gen_eqe_mutex[slave]);
3146 res_id = (slave << 10) | event_eq->eqn;
3147 err = get_res(dev, slave, res_id, RES_EQ, &req);
3151 if (req->com.from_state != RES_EQ_HW) {
3156 mailbox = mlx4_alloc_cmd_mailbox(dev);
3157 if (IS_ERR(mailbox)) {
3158 err = PTR_ERR(mailbox);
3162 if (eqe->type == MLX4_EVENT_TYPE_CMD) {
3164 eqe->event.cmd.token = cpu_to_be16(event_eq->token);
3167 memcpy(mailbox->buf, (u8 *) eqe, 28);
3169 in_modifier = (slave & 0xff) | ((event_eq->eqn & 0x3ff) << 16);
3171 err = mlx4_cmd(dev, mailbox->dma, in_modifier, 0,
3172 MLX4_CMD_GEN_EQE, MLX4_CMD_TIME_CLASS_B,
3175 put_res(dev, slave, res_id, RES_EQ);
3176 mutex_unlock(&priv->mfunc.master.gen_eqe_mutex[slave]);
3177 mlx4_free_cmd_mailbox(dev, mailbox);
3181 put_res(dev, slave, res_id, RES_EQ);
3184 mutex_unlock(&priv->mfunc.master.gen_eqe_mutex[slave]);
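/*
 * The GEN_EQE in_modifier packs the target slave in bits 0..7 and the
 * slave's EQN in bits 16..25, matching the encoding built above. Only
 * the first 28 bytes of the 32-byte EQE are copied into the mailbox;
 * the remaining bytes, which include the ownership bit, are presumably
 * filled in by the firmware when the event is delivered.
 */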
3188 int mlx4_QUERY_EQ_wrapper(struct mlx4_dev *dev, int slave,
3189 struct mlx4_vhcr *vhcr,
3190 struct mlx4_cmd_mailbox *inbox,
3191 struct mlx4_cmd_mailbox *outbox,
3192 struct mlx4_cmd_info *cmd)
3194 int eqn = vhcr->in_modifier;
3195 int res_id = eqn | (slave << 10);
3199 err = get_res(dev, slave, res_id, RES_EQ, &eq);
3203 if (eq->com.from_state != RES_EQ_HW) {
3208 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3211 put_res(dev, slave, res_id, RES_EQ);
3215 int mlx4_SW2HW_CQ_wrapper(struct mlx4_dev *dev, int slave,
3216 struct mlx4_vhcr *vhcr,
3217 struct mlx4_cmd_mailbox *inbox,
3218 struct mlx4_cmd_mailbox *outbox,
3219 struct mlx4_cmd_info *cmd)
3222 int cqn = vhcr->in_modifier;
3223 struct mlx4_cq_context *cqc = inbox->buf;
3224 int mtt_base = cq_get_mtt_addr(cqc) / dev->caps.mtt_entry_sz;
3225 struct res_cq *cq = NULL;
3226 struct res_mtt *mtt;
3228 err = cq_res_start_move_to(dev, slave, cqn, RES_CQ_HW, &cq);
3231 err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
3234 err = check_mtt_range(dev, slave, mtt_base, cq_get_mtt_size(cqc), mtt);
3237 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3240 atomic_inc(&mtt->ref_count);
3242 put_res(dev, slave, mtt->com.res_id, RES_MTT);
3243 res_end_move(dev, slave, RES_CQ, cqn);
3247 put_res(dev, slave, mtt->com.res_id, RES_MTT);
3249 res_abort_move(dev, slave, RES_CQ, cqn);
3253 int mlx4_HW2SW_CQ_wrapper(struct mlx4_dev *dev, int slave,
3254 struct mlx4_vhcr *vhcr,
3255 struct mlx4_cmd_mailbox *inbox,
3256 struct mlx4_cmd_mailbox *outbox,
3257 struct mlx4_cmd_info *cmd)
3260 int cqn = vhcr->in_modifier;
3261 struct res_cq *cq = NULL;
3263 err = cq_res_start_move_to(dev, slave, cqn, RES_CQ_ALLOCATED, &cq);
3266 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3269 atomic_dec(&cq->mtt->ref_count);
3270 res_end_move(dev, slave, RES_CQ, cqn);
3274 res_abort_move(dev, slave, RES_CQ, cqn);
3278 int mlx4_QUERY_CQ_wrapper(struct mlx4_dev *dev, int slave,
3279 struct mlx4_vhcr *vhcr,
3280 struct mlx4_cmd_mailbox *inbox,
3281 struct mlx4_cmd_mailbox *outbox,
3282 struct mlx4_cmd_info *cmd)
3284 int cqn = vhcr->in_modifier;
3288 err = get_res(dev, slave, cqn, RES_CQ, &cq);
3292 if (cq->com.from_state != RES_CQ_HW)
3295 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3297 put_res(dev, slave, cqn, RES_CQ);
3302 static int handle_resize(struct mlx4_dev *dev, int slave,
3303 struct mlx4_vhcr *vhcr,
3304 struct mlx4_cmd_mailbox *inbox,
3305 struct mlx4_cmd_mailbox *outbox,
3306 struct mlx4_cmd_info *cmd,
3310 struct res_mtt *orig_mtt;
3311 struct res_mtt *mtt;
3312 struct mlx4_cq_context *cqc = inbox->buf;
3313 int mtt_base = cq_get_mtt_addr(cqc) / dev->caps.mtt_entry_sz;
3315 err = get_res(dev, slave, cq->mtt->com.res_id, RES_MTT, &orig_mtt);
3319 if (orig_mtt != cq->mtt) {
3324 err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
3328 err = check_mtt_range(dev, slave, mtt_base, cq_get_mtt_size(cqc), mtt);
3331 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3334 atomic_dec(&orig_mtt->ref_count);
3335 put_res(dev, slave, orig_mtt->com.res_id, RES_MTT);
3336 atomic_inc(&mtt->ref_count);
3338 put_res(dev, slave, mtt->com.res_id, RES_MTT);
3342 put_res(dev, slave, mtt->com.res_id, RES_MTT);
3344 put_res(dev, slave, orig_mtt->com.res_id, RES_MTT);
3350 int mlx4_MODIFY_CQ_wrapper(struct mlx4_dev *dev, int slave,
3351 struct mlx4_vhcr *vhcr,
3352 struct mlx4_cmd_mailbox *inbox,
3353 struct mlx4_cmd_mailbox *outbox,
3354 struct mlx4_cmd_info *cmd)
3356 int cqn = vhcr->in_modifier;
3360 err = get_res(dev, slave, cqn, RES_CQ, &cq);
3364 if (cq->com.from_state != RES_CQ_HW)
3367 if (vhcr->op_modifier == 0) {
3368 err = handle_resize(dev, slave, vhcr, inbox, outbox, cmd, cq);
3372 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3374 put_res(dev, slave, cqn, RES_CQ);
3379 static int srq_get_mtt_size(struct mlx4_srq_context *srqc)
3381 int log_srq_size = (be32_to_cpu(srqc->state_logsize_srqn) >> 24) & 0xf;
3382 int log_rq_stride = srqc->logstride & 7;
3383 int page_shift = (srqc->log_page_size & 0x3f) + 12;
3385 if (log_srq_size + log_rq_stride + 4 < page_shift)
3388 return 1 << (log_srq_size + log_rq_stride + 4 - page_shift);
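/*
 * Same page-count arithmetic as for EQs and CQs above, except that an
 * SRQ entry is 2^(log_rq_stride + 4) bytes (a minimum stride of 16
 * bytes), so the queue occupies 2^(log_srq_size + log_rq_stride + 4)
 * bytes in total.
 */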
3391 int mlx4_SW2HW_SRQ_wrapper(struct mlx4_dev *dev, int slave,
3392 struct mlx4_vhcr *vhcr,
3393 struct mlx4_cmd_mailbox *inbox,
3394 struct mlx4_cmd_mailbox *outbox,
3395 struct mlx4_cmd_info *cmd)
3398 int srqn = vhcr->in_modifier;
3399 struct res_mtt *mtt;
3400 struct res_srq *srq = NULL;
3401 struct mlx4_srq_context *srqc = inbox->buf;
3402 int mtt_base = srq_get_mtt_addr(srqc) / dev->caps.mtt_entry_sz;
3404 if (srqn != (be32_to_cpu(srqc->state_logsize_srqn) & 0xffffff))
3407 err = srq_res_start_move_to(dev, slave, srqn, RES_SRQ_HW, &srq);
3410 err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
3413 err = check_mtt_range(dev, slave, mtt_base, srq_get_mtt_size(srqc),
3418 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3422 atomic_inc(&mtt->ref_count);
3424 put_res(dev, slave, mtt->com.res_id, RES_MTT);
3425 res_end_move(dev, slave, RES_SRQ, srqn);
3429 put_res(dev, slave, mtt->com.res_id, RES_MTT);
3431 res_abort_move(dev, slave, RES_SRQ, srqn);
3436 int mlx4_HW2SW_SRQ_wrapper(struct mlx4_dev *dev, int slave,
3437 struct mlx4_vhcr *vhcr,
3438 struct mlx4_cmd_mailbox *inbox,
3439 struct mlx4_cmd_mailbox *outbox,
3440 struct mlx4_cmd_info *cmd)
3443 int srqn = vhcr->in_modifier;
3444 struct res_srq *srq = NULL;
3446 err = srq_res_start_move_to(dev, slave, srqn, RES_SRQ_ALLOCATED, &srq);
3449 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3452 atomic_dec(&srq->mtt->ref_count);
3454 atomic_dec(&srq->cq->ref_count);
3455 res_end_move(dev, slave, RES_SRQ, srqn);
3460 res_abort_move(dev, slave, RES_SRQ, srqn);
3465 int mlx4_QUERY_SRQ_wrapper(struct mlx4_dev *dev, int slave,
3466 struct mlx4_vhcr *vhcr,
3467 struct mlx4_cmd_mailbox *inbox,
3468 struct mlx4_cmd_mailbox *outbox,
3469 struct mlx4_cmd_info *cmd)
3472 int srqn = vhcr->in_modifier;
3473 struct res_srq *srq;
3475 err = get_res(dev, slave, srqn, RES_SRQ, &srq);
3478 if (srq->com.from_state != RES_SRQ_HW) {
3482 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3484 put_res(dev, slave, srqn, RES_SRQ);
3488 int mlx4_ARM_SRQ_wrapper(struct mlx4_dev *dev, int slave,
3489 struct mlx4_vhcr *vhcr,
3490 struct mlx4_cmd_mailbox *inbox,
3491 struct mlx4_cmd_mailbox *outbox,
3492 struct mlx4_cmd_info *cmd)
3495 int srqn = vhcr->in_modifier;
3496 struct res_srq *srq;
3498 err = get_res(dev, slave, srqn, RES_SRQ, &srq);
3502 if (srq->com.from_state != RES_SRQ_HW) {
3507 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3509 put_res(dev, slave, srqn, RES_SRQ);
3513 int mlx4_GEN_QP_wrapper(struct mlx4_dev *dev, int slave,
3514 struct mlx4_vhcr *vhcr,
3515 struct mlx4_cmd_mailbox *inbox,
3516 struct mlx4_cmd_mailbox *outbox,
3517 struct mlx4_cmd_info *cmd)
3520 int qpn = vhcr->in_modifier & 0x7fffff;
3523 err = get_res(dev, slave, qpn, RES_QP, &qp);
3526 if (qp->com.from_state != RES_QP_HW) {
3531 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3533 put_res(dev, slave, qpn, RES_QP);
3537 int mlx4_INIT2INIT_QP_wrapper(struct mlx4_dev *dev, int slave,
3538 struct mlx4_vhcr *vhcr,
3539 struct mlx4_cmd_mailbox *inbox,
3540 struct mlx4_cmd_mailbox *outbox,
3541 struct mlx4_cmd_info *cmd)
3543 struct mlx4_qp_context *context = inbox->buf + 8;
3544 adjust_proxy_tun_qkey(dev, vhcr, context);
3545 update_pkey_index(dev, slave, inbox);
3546 return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3549 static int adjust_qp_sched_queue(struct mlx4_dev *dev, int slave,
3550 struct mlx4_qp_context *qpc,
3551 struct mlx4_cmd_mailbox *inbox)
3553 enum mlx4_qp_optpar optpar = be32_to_cpu(*(__be32 *)inbox->buf);
3555 int port = mlx4_slave_convert_port(
3556 dev, slave, (qpc->pri_path.sched_queue >> 6 & 1) + 1) - 1;
3561 pri_sched_queue = (qpc->pri_path.sched_queue & ~(1 << 6)) |
3564 if (optpar & (MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH | MLX4_QP_OPTPAR_SCHED_QUEUE) ||
3565 qpc->pri_path.sched_queue || mlx4_is_eth(dev, port + 1)) {
3566 qpc->pri_path.sched_queue = pri_sched_queue;
3569 if (optpar & MLX4_QP_OPTPAR_ALT_ADDR_PATH) {
3570 port = mlx4_slave_convert_port(
3571 dev, slave, (qpc->alt_path.sched_queue >> 6 & 1)
3575 qpc->alt_path.sched_queue =
3576 (qpc->alt_path.sched_queue & ~(1 << 6)) |
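/*
 * Bit 6 of sched_queue selects the physical port (port = bit + 1).
 * For a VF the port number in the QP context is a logical one, so both
 * the primary and the alternate path are rewritten here with the
 * physical port returned by mlx4_slave_convert_port() before the
 * command reaches the firmware.
 */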
3582 static int roce_verify_mac(struct mlx4_dev *dev, int slave,
3583 struct mlx4_qp_context *qpc,
3584 struct mlx4_cmd_mailbox *inbox)
3588 u32 ts = (be32_to_cpu(qpc->flags) >> 16) & 0xff;
3589 u8 sched = *(u8 *)(inbox->buf + 64);
3592 port = (sched >> 6 & 1) + 1;
3593 if (mlx4_is_eth(dev, port) && (ts != MLX4_QP_ST_MLX)) {
3594 smac_ix = qpc->pri_path.grh_mylmc & 0x7f;
3595 if (mac_find_smac_ix_in_slave(dev, slave, port, smac_ix, &mac))
3601 int mlx4_INIT2RTR_QP_wrapper(struct mlx4_dev *dev, int slave,
3602 struct mlx4_vhcr *vhcr,
3603 struct mlx4_cmd_mailbox *inbox,
3604 struct mlx4_cmd_mailbox *outbox,
3605 struct mlx4_cmd_info *cmd)
3608 struct mlx4_qp_context *qpc = inbox->buf + 8;
3609 int qpn = vhcr->in_modifier & 0x7fffff;
3611 u8 orig_sched_queue;
3612 __be32 orig_param3 = qpc->param3;
3613 u8 orig_vlan_control = qpc->pri_path.vlan_control;
3614 u8 orig_fvl_rx = qpc->pri_path.fvl_rx;
3615 u8 orig_pri_path_fl = qpc->pri_path.fl;
3616 u8 orig_vlan_index = qpc->pri_path.vlan_index;
3617 u8 orig_feup = qpc->pri_path.feup;
3619 err = adjust_qp_sched_queue(dev, slave, qpc, inbox);
3622 err = verify_qp_parameters(dev, vhcr, inbox, QP_TRANS_INIT2RTR, slave);
3626 if (roce_verify_mac(dev, slave, qpc, inbox))
3629 update_pkey_index(dev, slave, inbox);
3630 update_gid(dev, inbox, (u8)slave);
3631 adjust_proxy_tun_qkey(dev, vhcr, qpc);
3632 orig_sched_queue = qpc->pri_path.sched_queue;
3633 err = update_vport_qp_param(dev, inbox, slave, qpn);
3637 err = get_res(dev, slave, qpn, RES_QP, &qp);
3640 if (qp->com.from_state != RES_QP_HW) {
3645 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3647 /* if no error, save sched queue value passed in by VF. This is
3648 * essentially the QOS value provided by the VF. This will be useful
3649 * if we allow dynamic changes from VST back to VGT
3652 qp->sched_queue = orig_sched_queue;
3653 qp->param3 = orig_param3;
3654 qp->vlan_control = orig_vlan_control;
3655 qp->fvl_rx = orig_fvl_rx;
3656 qp->pri_path_fl = orig_pri_path_fl;
3657 qp->vlan_index = orig_vlan_index;
3658 qp->feup = orig_feup;
3660 put_res(dev, slave, qpn, RES_QP);
3664 int mlx4_RTR2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
3665 struct mlx4_vhcr *vhcr,
3666 struct mlx4_cmd_mailbox *inbox,
3667 struct mlx4_cmd_mailbox *outbox,
3668 struct mlx4_cmd_info *cmd)
3671 struct mlx4_qp_context *context = inbox->buf + 8;
3673 err = adjust_qp_sched_queue(dev, slave, context, inbox);
3676 err = verify_qp_parameters(dev, vhcr, inbox, QP_TRANS_RTR2RTS, slave);
3680 update_pkey_index(dev, slave, inbox);
3681 update_gid(dev, inbox, (u8)slave);
3682 adjust_proxy_tun_qkey(dev, vhcr, context);
3683 return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3686 int mlx4_RTS2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
3687 struct mlx4_vhcr *vhcr,
3688 struct mlx4_cmd_mailbox *inbox,
3689 struct mlx4_cmd_mailbox *outbox,
3690 struct mlx4_cmd_info *cmd)
3693 struct mlx4_qp_context *context = inbox->buf + 8;
3695 err = adjust_qp_sched_queue(dev, slave, context, inbox);
3698 err = verify_qp_parameters(dev, vhcr, inbox, QP_TRANS_RTS2RTS, slave);
3702 update_pkey_index(dev, slave, inbox);
3703 update_gid(dev, inbox, (u8)slave);
3704 adjust_proxy_tun_qkey(dev, vhcr, context);
3705 return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3709 int mlx4_SQERR2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
3710 struct mlx4_vhcr *vhcr,
3711 struct mlx4_cmd_mailbox *inbox,
3712 struct mlx4_cmd_mailbox *outbox,
3713 struct mlx4_cmd_info *cmd)
3715 struct mlx4_qp_context *context = inbox->buf + 8;
3716 int err = adjust_qp_sched_queue(dev, slave, context, inbox);
3719 adjust_proxy_tun_qkey(dev, vhcr, context);
3720 return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3723 int mlx4_SQD2SQD_QP_wrapper(struct mlx4_dev *dev, int slave,
3724 struct mlx4_vhcr *vhcr,
3725 struct mlx4_cmd_mailbox *inbox,
3726 struct mlx4_cmd_mailbox *outbox,
3727 struct mlx4_cmd_info *cmd)
3730 struct mlx4_qp_context *context = inbox->buf + 8;
3732 err = adjust_qp_sched_queue(dev, slave, context, inbox);
3735 err = verify_qp_parameters(dev, vhcr, inbox, QP_TRANS_SQD2SQD, slave);
3739 adjust_proxy_tun_qkey(dev, vhcr, context);
3740 update_gid(dev, inbox, (u8)slave);
3741 update_pkey_index(dev, slave, inbox);
3742 return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3745 int mlx4_SQD2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
3746 struct mlx4_vhcr *vhcr,
3747 struct mlx4_cmd_mailbox *inbox,
3748 struct mlx4_cmd_mailbox *outbox,
3749 struct mlx4_cmd_info *cmd)
3752 struct mlx4_qp_context *context = inbox->buf + 8;
3754 err = adjust_qp_sched_queue(dev, slave, context, inbox);
3757 err = verify_qp_parameters(dev, vhcr, inbox, QP_TRANS_SQD2RTS, slave);
3761 adjust_proxy_tun_qkey(dev, vhcr, context);
3762 update_gid(dev, inbox, (u8)slave);
3763 update_pkey_index(dev, slave, inbox);
3764 return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
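/*
 * The transition wrappers above all follow the same pattern: translate
 * the port in sched_queue for the slave, verify the QP parameters for
 * the specific transition where required, paravirtualize the pkey
 * index, GID index and proxy/tunnel qkey, and only then forward the
 * command to the firmware through mlx4_GEN_QP_wrapper(), which also
 * checks that the QP is in hardware ownership.
 */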
3767 int mlx4_2RST_QP_wrapper(struct mlx4_dev *dev, int slave,
3768 struct mlx4_vhcr *vhcr,
3769 struct mlx4_cmd_mailbox *inbox,
3770 struct mlx4_cmd_mailbox *outbox,
3771 struct mlx4_cmd_info *cmd)
3774 int qpn = vhcr->in_modifier & 0x7fffff;
3777 err = qp_res_start_move_to(dev, slave, qpn, RES_QP_MAPPED, &qp, 0);
3780 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3784 atomic_dec(&qp->mtt->ref_count);
3785 atomic_dec(&qp->rcq->ref_count);
3786 atomic_dec(&qp->scq->ref_count);
3788 atomic_dec(&qp->srq->ref_count);
3789 res_end_move(dev, slave, RES_QP, qpn);
3793 res_abort_move(dev, slave, RES_QP, qpn);
3798 static struct res_gid *find_gid(struct mlx4_dev *dev, int slave,
3799 struct res_qp *rqp, u8 *gid)
3801 struct res_gid *res;
3803 list_for_each_entry(res, &rqp->mcg_list, list) {
3804 if (!memcmp(res->gid, gid, 16))
3810 static int add_mcg_res(struct mlx4_dev *dev, int slave, struct res_qp *rqp,
3811 u8 *gid, enum mlx4_protocol prot,
3812 enum mlx4_steer_type steer, u64 reg_id)
3814 struct res_gid *res;
3817 res = kzalloc(sizeof(*res), GFP_KERNEL);
3821 spin_lock_irq(&rqp->mcg_spl);
3822 if (find_gid(dev, slave, rqp, gid)) {
3826 memcpy(res->gid, gid, 16);
3829 res->reg_id = reg_id;
3830 list_add_tail(&res->list, &rqp->mcg_list);
3833 spin_unlock_irq(&rqp->mcg_spl);
3838 static int rem_mcg_res(struct mlx4_dev *dev, int slave, struct res_qp *rqp,
3839 u8 *gid, enum mlx4_protocol prot,
3840 enum mlx4_steer_type steer, u64 *reg_id)
3842 struct res_gid *res;
3845 spin_lock_irq(&rqp->mcg_spl);
3846 res = find_gid(dev, slave, rqp, gid);
3847 if (!res || res->prot != prot || res->steer != steer)
3850 *reg_id = res->reg_id;
3851 list_del(&res->list);
3855 spin_unlock_irq(&rqp->mcg_spl);
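/*
 * add_mcg_res()/rem_mcg_res() keep a per-QP list of attached GIDs
 * (rqp->mcg_list) together with the protocol, steering type and
 * registration id, so that detach_qp() below can undo any attachments
 * a slave left behind when it is being cleaned up.
 */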
3860 static int qp_attach(struct mlx4_dev *dev, int slave, struct mlx4_qp *qp,
3861 u8 gid[16], int block_loopback, enum mlx4_protocol prot,
3862 enum mlx4_steer_type type, u64 *reg_id)
3864 switch (dev->caps.steering_mode) {
3865 case MLX4_STEERING_MODE_DEVICE_MANAGED: {
3866 int port = mlx4_slave_convert_port(dev, slave, gid[5]);
3869 return mlx4_trans_to_dmfs_attach(dev, qp, gid, port,
3870 block_loopback, prot,
3873 case MLX4_STEERING_MODE_B0:
3874 if (prot == MLX4_PROT_ETH) {
3875 int port = mlx4_slave_convert_port(dev, slave, gid[5]);
3880 return mlx4_qp_attach_common(dev, qp, gid,
3881 block_loopback, prot, type);
3887 static int qp_detach(struct mlx4_dev *dev, struct mlx4_qp *qp,
3888 u8 gid[16], enum mlx4_protocol prot,
3889 enum mlx4_steer_type type, u64 reg_id)
3891 switch (dev->caps.steering_mode) {
3892 case MLX4_STEERING_MODE_DEVICE_MANAGED:
3893 return mlx4_flow_detach(dev, reg_id);
3894 case MLX4_STEERING_MODE_B0:
3895 return mlx4_qp_detach_common(dev, qp, gid, prot, type);
3901 static int mlx4_adjust_port(struct mlx4_dev *dev, int slave,
3902 u8 *gid, enum mlx4_protocol prot)
3906 if (prot != MLX4_PROT_ETH)
3909 if (dev->caps.steering_mode == MLX4_STEERING_MODE_B0 ||
3910 dev->caps.steering_mode == MLX4_STEERING_MODE_DEVICE_MANAGED) {
3911 real_port = mlx4_slave_convert_port(dev, slave, gid[5]);
3920 int mlx4_QP_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
3921 struct mlx4_vhcr *vhcr,
3922 struct mlx4_cmd_mailbox *inbox,
3923 struct mlx4_cmd_mailbox *outbox,
3924 struct mlx4_cmd_info *cmd)
3926 struct mlx4_qp qp; /* dummy for calling attach/detach */
3927 u8 *gid = inbox->buf;
3928 enum mlx4_protocol prot = (vhcr->in_modifier >> 28) & 0x7;
3933 int attach = vhcr->op_modifier;
3934 int block_loopback = vhcr->in_modifier >> 31;
3935 u8 steer_type_mask = 2;
3936 enum mlx4_steer_type type = (gid[7] & steer_type_mask) >> 1;
3938 qpn = vhcr->in_modifier & 0xffffff;
3939 err = get_res(dev, slave, qpn, RES_QP, &rqp);
3945 err = qp_attach(dev, slave, &qp, gid, block_loopback, prot,
3948 pr_err("Failed to attach rule to qp 0x%x\n", qpn);
3951 err = add_mcg_res(dev, slave, rqp, gid, prot, type, reg_id);
3955 err = mlx4_adjust_port(dev, slave, gid, prot);
3959 err = rem_mcg_res(dev, slave, rqp, gid, prot, type, &reg_id);
3963 err = qp_detach(dev, &qp, gid, prot, type, reg_id);
3965 pr_err("Failed to detach rule from qp 0x%x, reg_id = 0x%llx\n",
3968 put_res(dev, slave, qpn, RES_QP);
3972 qp_detach(dev, &qp, gid, prot, type, reg_id);
3974 put_res(dev, slave, qpn, RES_QP);
3979 * MAC validation for Flow Steering rules.
3980 * A VF may attach rules only with a MAC address that is assigned to it.
3982 static int validate_eth_header_mac(int slave, struct _rule_hw *eth_header,
3983 struct list_head *rlist)
3985 struct mac_res *res, *tmp;
3988 /* make sure it isn't a multicast or broadcast MAC */
3989 if (!is_multicast_ether_addr(eth_header->eth.dst_mac) &&
3990 !is_broadcast_ether_addr(eth_header->eth.dst_mac)) {
3991 list_for_each_entry_safe(res, tmp, rlist, list) {
3992 be_mac = cpu_to_be64(res->mac << 16);
3993 if (ether_addr_equal((u8 *)&be_mac, eth_header->eth.dst_mac))
3996 pr_err("MAC %pM doesn't belong to VF %d; steering rule rejected\n",
3997 eth_header->eth.dst_mac, slave);
4003 static void handle_eth_header_mcast_prio(struct mlx4_net_trans_rule_hw_ctrl *ctrl,
4004 struct _rule_hw *eth_header)
4006 if (is_multicast_ether_addr(eth_header->eth.dst_mac) ||
4007 is_broadcast_ether_addr(eth_header->eth.dst_mac)) {
4008 struct mlx4_net_trans_rule_hw_eth *eth =
4009 (struct mlx4_net_trans_rule_hw_eth *)eth_header;
4010 struct _rule_hw *next_rule = (struct _rule_hw *)(eth + 1);
4011 bool last_rule = next_rule->size == 0 && next_rule->id == 0 &&
4012 next_rule->rsvd == 0;
4015 ctrl->prio = cpu_to_be16(MLX4_DOMAIN_NIC);
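/*
 * When the rule consists of nothing but an eth header matching a
 * multicast or broadcast MAC (i.e. the next rule slot is empty), the
 * control segment's priority is overwritten with MLX4_DOMAIN_NIC,
 * presumably so such rules always land in the NIC domain regardless of
 * the priority the slave requested.
 */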
4020 * If the eth header is missing, append an eth header with a MAC address
4021 * assigned to the VF.
4023 static int add_eth_header(struct mlx4_dev *dev, int slave,
4024 struct mlx4_cmd_mailbox *inbox,
4025 struct list_head *rlist, int header_id)
4027 struct mac_res *res, *tmp;
4029 struct mlx4_net_trans_rule_hw_ctrl *ctrl;
4030 struct mlx4_net_trans_rule_hw_eth *eth_header;
4031 struct mlx4_net_trans_rule_hw_ipv4 *ip_header;
4032 struct mlx4_net_trans_rule_hw_tcp_udp *l4_header;
4034 __be64 mac_msk = cpu_to_be64(MLX4_MAC_MASK << 16);
4036 ctrl = (struct mlx4_net_trans_rule_hw_ctrl *)inbox->buf;
4038 eth_header = (struct mlx4_net_trans_rule_hw_eth *)(ctrl + 1);
4040 /* Clear a space in the inbox for eth header */
4041 switch (header_id) {
4042 case MLX4_NET_TRANS_RULE_ID_IPV4:
4044 (struct mlx4_net_trans_rule_hw_ipv4 *)(eth_header + 1);
4045 memmove(ip_header, eth_header,
4046 sizeof(*ip_header) + sizeof(*l4_header));
4048 case MLX4_NET_TRANS_RULE_ID_TCP:
4049 case MLX4_NET_TRANS_RULE_ID_UDP:
4050 l4_header = (struct mlx4_net_trans_rule_hw_tcp_udp *)
4052 memmove(l4_header, eth_header, sizeof(*l4_header));
4057 list_for_each_entry_safe(res, tmp, rlist, list) {
4058 if (port == res->port) {
4059 be_mac = cpu_to_be64(res->mac << 16);
4064 pr_err("Failed adding eth header to FS rule; can't find a matching MAC for port %d\n",
4069 memset(eth_header, 0, sizeof(*eth_header));
4070 eth_header->size = sizeof(*eth_header) >> 2;
4071 eth_header->id = cpu_to_be16(__sw_id_hw[MLX4_NET_TRANS_RULE_ID_ETH]);
4072 memcpy(eth_header->dst_mac, &be_mac, ETH_ALEN);
4073 memcpy(eth_header->dst_mac_msk, &mac_msk, ETH_ALEN);
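/*
 * The rebuilt rule now starts with a full eth segment: dst_mac is the
 * slave's registered MAC for the rule's port and dst_mac_msk matches
 * all 48 bits. The caller (mlx4_QP_FLOW_STEERING_ATTACH_wrapper)
 * accounts for the insertion by growing vhcr->in_modifier by the eth
 * segment size in dwords.
 */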
4079 #define MLX4_UPD_QP_PATH_MASK_SUPPORTED (1ULL << MLX4_UPD_QP_PATH_MASK_MAC_INDEX)
4080 int mlx4_UPDATE_QP_wrapper(struct mlx4_dev *dev, int slave,
4081 struct mlx4_vhcr *vhcr,
4082 struct mlx4_cmd_mailbox *inbox,
4083 struct mlx4_cmd_mailbox *outbox,
4084 struct mlx4_cmd_info *cmd_info)
4087 u32 qpn = vhcr->in_modifier & 0xffffff;
4091 u64 pri_addr_path_mask;
4092 struct mlx4_update_qp_context *cmd;
4095 cmd = (struct mlx4_update_qp_context *)inbox->buf;
4097 pri_addr_path_mask = be64_to_cpu(cmd->primary_addr_path_mask);
4098 if (cmd->qp_mask || cmd->secondary_addr_path_mask ||
4099 (pri_addr_path_mask & ~MLX4_UPD_QP_PATH_MASK_SUPPORTED))
4102 /* Just change the smac for the QP */
4103 err = get_res(dev, slave, qpn, RES_QP, &rqp);
4105 mlx4_err(dev, "Updating qpn 0x%x for slave %d rejected\n", qpn, slave);
4109 port = (rqp->sched_queue >> 6 & 1) + 1;
4111 if (pri_addr_path_mask & (1ULL << MLX4_UPD_QP_PATH_MASK_MAC_INDEX)) {
4112 smac_index = cmd->qp_context.pri_path.grh_mylmc;
4113 err = mac_find_smac_ix_in_slave(dev, slave, port,
4117 mlx4_err(dev, "Failed to update qpn 0x%x, MAC is invalid. smac_ix: %d\n",
4123 err = mlx4_cmd(dev, inbox->dma,
4124 vhcr->in_modifier, 0,
4125 MLX4_CMD_UPDATE_QP, MLX4_CMD_TIME_CLASS_A,
4128 mlx4_err(dev, "Failed to update qpn 0x%x, command failed\n", qpn);
4133 put_res(dev, slave, qpn, RES_QP);
4137 int mlx4_QP_FLOW_STEERING_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
4138 struct mlx4_vhcr *vhcr,
4139 struct mlx4_cmd_mailbox *inbox,
4140 struct mlx4_cmd_mailbox *outbox,
4141 struct mlx4_cmd_info *cmd)
4144 struct mlx4_priv *priv = mlx4_priv(dev);
4145 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
4146 struct list_head *rlist = &tracker->slave_list[slave].res_list[RES_MAC];
4150 struct mlx4_net_trans_rule_hw_ctrl *ctrl;
4151 struct _rule_hw *rule_header;
4154 if (dev->caps.steering_mode !=
4155 MLX4_STEERING_MODE_DEVICE_MANAGED)
4158 ctrl = (struct mlx4_net_trans_rule_hw_ctrl *)inbox->buf;
4159 ctrl->port = mlx4_slave_convert_port(dev, slave, ctrl->port);
4160 if (ctrl->port <= 0)
4162 qpn = be32_to_cpu(ctrl->qpn) & 0xffffff;
4163 err = get_res(dev, slave, qpn, RES_QP, &rqp);
4165 pr_err("Steering rule with qpn 0x%x rejected\n", qpn);
4168 rule_header = (struct _rule_hw *)(ctrl + 1);
4169 header_id = map_hw_to_sw_id(be16_to_cpu(rule_header->id));
4171 if (header_id == MLX4_NET_TRANS_RULE_ID_ETH)
4172 handle_eth_header_mcast_prio(ctrl, rule_header);
4174 if (slave == dev->caps.function)
4177 switch (header_id) {
4178 case MLX4_NET_TRANS_RULE_ID_ETH:
4179 if (validate_eth_header_mac(slave, rule_header, rlist)) {
4184 case MLX4_NET_TRANS_RULE_ID_IB:
4186 case MLX4_NET_TRANS_RULE_ID_IPV4:
4187 case MLX4_NET_TRANS_RULE_ID_TCP:
4188 case MLX4_NET_TRANS_RULE_ID_UDP:
4189 pr_warn("Can't attach FS rule without L2 headers; adding an L2 header\n");
4190 if (add_eth_header(dev, slave, inbox, rlist, header_id)) {
4194 vhcr->in_modifier +=
4195 sizeof(struct mlx4_net_trans_rule_hw_eth) >> 2;
4198 pr_err("Corrupted mailbox\n");
4204 err = mlx4_cmd_imm(dev, inbox->dma, &vhcr->out_param,
4205 vhcr->in_modifier, 0,
4206 MLX4_QP_FLOW_STEERING_ATTACH, MLX4_CMD_TIME_CLASS_A,
4211 err = add_res_range(dev, slave, vhcr->out_param, 1, RES_FS_RULE, qpn);
4213 mlx4_err(dev, "Failed to add flow steering resources\n");
4215 mlx4_cmd(dev, vhcr->out_param, 0, 0,
4216 MLX4_QP_FLOW_STEERING_DETACH, MLX4_CMD_TIME_CLASS_A,
4220 atomic_inc(&rqp->ref_count);
4222 put_res(dev, slave, qpn, RES_QP);
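/*
 * On success the rule's registration id (vhcr->out_param) is recorded
 * in the tracker as a RES_FS_RULE with the QP number as auxiliary data,
 * and the QP's ref_count is raised; the detach wrapper below drops
 * both, and rem_slave_fs_rule() uses the tracked ids to detach anything
 * a dying slave left behind.
 */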
4226 int mlx4_QP_FLOW_STEERING_DETACH_wrapper(struct mlx4_dev *dev, int slave,
4227 struct mlx4_vhcr *vhcr,
4228 struct mlx4_cmd_mailbox *inbox,
4229 struct mlx4_cmd_mailbox *outbox,
4230 struct mlx4_cmd_info *cmd)
4234 struct res_fs_rule *rrule;
4236 if (dev->caps.steering_mode !=
4237 MLX4_STEERING_MODE_DEVICE_MANAGED)
4240 err = get_res(dev, slave, vhcr->in_param, RES_FS_RULE, &rrule);
4243 /* Release the rule from busy state before removal */
4244 put_res(dev, slave, vhcr->in_param, RES_FS_RULE);
4245 err = get_res(dev, slave, rrule->qpn, RES_QP, &rqp);
4249 err = rem_res_range(dev, slave, vhcr->in_param, 1, RES_FS_RULE, 0);
4251 mlx4_err(dev, "Failed to remove flow steering resources\n");
4255 err = mlx4_cmd(dev, vhcr->in_param, 0, 0,
4256 MLX4_QP_FLOW_STEERING_DETACH, MLX4_CMD_TIME_CLASS_A,
4259 atomic_dec(&rqp->ref_count);
4261 put_res(dev, slave, rrule->qpn, RES_QP);
4266 BUSY_MAX_RETRIES = 10
4269 int mlx4_QUERY_IF_STAT_wrapper(struct mlx4_dev *dev, int slave,
4270 struct mlx4_vhcr *vhcr,
4271 struct mlx4_cmd_mailbox *inbox,
4272 struct mlx4_cmd_mailbox *outbox,
4273 struct mlx4_cmd_info *cmd)
4276 int index = vhcr->in_modifier & 0xffff;
4278 err = get_res(dev, slave, index, RES_COUNTER, NULL);
4282 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
4283 put_res(dev, slave, index, RES_COUNTER);
4287 static void detach_qp(struct mlx4_dev *dev, int slave, struct res_qp *rqp)
4289 struct res_gid *rgid;
4290 struct res_gid *tmp;
4291 struct mlx4_qp qp; /* dummy for calling attach/detach */
4293 list_for_each_entry_safe(rgid, tmp, &rqp->mcg_list, list) {
4294 switch (dev->caps.steering_mode) {
4295 case MLX4_STEERING_MODE_DEVICE_MANAGED:
4296 mlx4_flow_detach(dev, rgid->reg_id);
4298 case MLX4_STEERING_MODE_B0:
4299 qp.qpn = rqp->local_qpn;
4300 (void) mlx4_qp_detach_common(dev, &qp, rgid->gid,
4301 rgid->prot, rgid->steer);
4304 list_del(&rgid->list);
4309 static int _move_all_busy(struct mlx4_dev *dev, int slave,
4310 enum mlx4_resource type, int print)
4312 struct mlx4_priv *priv = mlx4_priv(dev);
4313 struct mlx4_resource_tracker *tracker =
4314 &priv->mfunc.master.res_tracker;
4315 struct list_head *rlist = &tracker->slave_list[slave].res_list[type];
4316 struct res_common *r;
4317 struct res_common *tmp;
4321 spin_lock_irq(mlx4_tlock(dev));
4322 list_for_each_entry_safe(r, tmp, rlist, list) {
4323 if (r->owner == slave) {
4325 if (r->state == RES_ANY_BUSY) {
4328 "%s id 0x%llx is busy\n",
4333 r->from_state = r->state;
4334 r->state = RES_ANY_BUSY;
4340 spin_unlock_irq(mlx4_tlock(dev));
4345 static int move_all_busy(struct mlx4_dev *dev, int slave,
4346 enum mlx4_resource type)
4348 unsigned long begin;
4353 busy = _move_all_busy(dev, slave, type, 0);
4354 if (time_after(jiffies, begin + 5 * HZ))
4361 busy = _move_all_busy(dev, slave, type, 1);
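/*
 * move_all_busy() keeps retrying the non-printing pass until either no
 * resource owned by the slave is busy or roughly five seconds have
 * elapsed (time_after(jiffies, begin + 5 * HZ)); only then is a final
 * pass run with printing enabled so the stuck resources are logged.
 */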
4365 static void rem_slave_qps(struct mlx4_dev *dev, int slave)
4367 struct mlx4_priv *priv = mlx4_priv(dev);
4368 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
4369 struct list_head *qp_list =
4370 &tracker->slave_list[slave].res_list[RES_QP];
4378 err = move_all_busy(dev, slave, RES_QP);
4380 mlx4_warn(dev, "rem_slave_qps: Could not move all qps to busy for slave %d\n",
4383 spin_lock_irq(mlx4_tlock(dev));
4384 list_for_each_entry_safe(qp, tmp, qp_list, com.list) {
4385 spin_unlock_irq(mlx4_tlock(dev));
4386 if (qp->com.owner == slave) {
4387 qpn = qp->com.res_id;
4388 detach_qp(dev, slave, qp);
4389 state = qp->com.from_state;
4390 while (state != 0) {
4392 case RES_QP_RESERVED:
4393 spin_lock_irq(mlx4_tlock(dev));
4394 rb_erase(&qp->com.node,
4395 &tracker->res_tree[RES_QP]);
4396 list_del(&qp->com.list);
4397 spin_unlock_irq(mlx4_tlock(dev));
4398 if (!valid_reserved(dev, slave, qpn)) {
4399 __mlx4_qp_release_range(dev, qpn, 1);
4400 mlx4_release_resource(dev, slave,
4407 if (!valid_reserved(dev, slave, qpn))
4408 __mlx4_qp_free_icm(dev, qpn);
4409 state = RES_QP_RESERVED;
4413 err = mlx4_cmd(dev, in_param,
4416 MLX4_CMD_TIME_CLASS_A,
4419 mlx4_dbg(dev, "rem_slave_qps: failed to move slave %d qpn %d to reset\n",
4420 slave, qp->local_qpn);
4421 atomic_dec(&qp->rcq->ref_count);
4422 atomic_dec(&qp->scq->ref_count);
4423 atomic_dec(&qp->mtt->ref_count);
4425 atomic_dec(&qp->srq->ref_count);
4426 state = RES_QP_MAPPED;
4433 spin_lock_irq(mlx4_tlock(dev));
4435 spin_unlock_irq(mlx4_tlock(dev));
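/*
 * Each rem_slave_*() walker below follows the same unwinding pattern
 * seen here: force everything the slave owns into the busy state, then
 * step each resource down through its state machine (e.g. RES_QP_HW ->
 * RES_QP_MAPPED -> RES_QP_RESERVED), issuing the matching firmware
 * command and dropping reference counts at each step until the entry
 * can be erased from the tracker and its HW resources freed.
 */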
4438 static void rem_slave_srqs(struct mlx4_dev *dev, int slave)
4440 struct mlx4_priv *priv = mlx4_priv(dev);
4441 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
4442 struct list_head *srq_list =
4443 &tracker->slave_list[slave].res_list[RES_SRQ];
4444 struct res_srq *srq;
4445 struct res_srq *tmp;
4452 err = move_all_busy(dev, slave, RES_SRQ);
4454 mlx4_warn(dev, "rem_slave_srqs: Could not move all srqs - too busy for slave %d\n",
4457 spin_lock_irq(mlx4_tlock(dev));
4458 list_for_each_entry_safe(srq, tmp, srq_list, com.list) {
4459 spin_unlock_irq(mlx4_tlock(dev));
4460 if (srq->com.owner == slave) {
4461 srqn = srq->com.res_id;
4462 state = srq->com.from_state;
4463 while (state != 0) {
4465 case RES_SRQ_ALLOCATED:
4466 __mlx4_srq_free_icm(dev, srqn);
4467 spin_lock_irq(mlx4_tlock(dev));
4468 rb_erase(&srq->com.node,
4469 &tracker->res_tree[RES_SRQ]);
4470 list_del(&srq->com.list);
4471 spin_unlock_irq(mlx4_tlock(dev));
4472 mlx4_release_resource(dev, slave,
4480 err = mlx4_cmd(dev, in_param, srqn, 1,
4482 MLX4_CMD_TIME_CLASS_A,
4485 mlx4_dbg(dev, "rem_slave_srqs: failed to move slave %d srq %d to SW ownership\n",
4488 atomic_dec(&srq->mtt->ref_count);
4490 atomic_dec(&srq->cq->ref_count);
4491 state = RES_SRQ_ALLOCATED;
4499 spin_lock_irq(mlx4_tlock(dev));
4501 spin_unlock_irq(mlx4_tlock(dev));
4504 static void rem_slave_cqs(struct mlx4_dev *dev, int slave)
4506 struct mlx4_priv *priv = mlx4_priv(dev);
4507 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
4508 struct list_head *cq_list =
4509 &tracker->slave_list[slave].res_list[RES_CQ];
4518 err = move_all_busy(dev, slave, RES_CQ);
4520 mlx4_warn(dev, "rem_slave_cqs: Could not move all cqs - too busy for slave %d\n",
4523 spin_lock_irq(mlx4_tlock(dev));
4524 list_for_each_entry_safe(cq, tmp, cq_list, com.list) {
4525 spin_unlock_irq(mlx4_tlock(dev));
4526 if (cq->com.owner == slave && !atomic_read(&cq->ref_count)) {
4527 cqn = cq->com.res_id;
4528 state = cq->com.from_state;
4529 while (state != 0) {
4531 case RES_CQ_ALLOCATED:
4532 __mlx4_cq_free_icm(dev, cqn);
4533 spin_lock_irq(mlx4_tlock(dev));
4534 rb_erase(&cq->com.node,
4535 &tracker->res_tree[RES_CQ]);
4536 list_del(&cq->com.list);
4537 spin_unlock_irq(mlx4_tlock(dev));
4538 mlx4_release_resource(dev, slave,
4546 err = mlx4_cmd(dev, in_param, cqn, 1,
4548 MLX4_CMD_TIME_CLASS_A,
4551 mlx4_dbg(dev, "rem_slave_cqs: failed to move slave %d cq %d to SW ownership\n",
4553 atomic_dec(&cq->mtt->ref_count);
4554 state = RES_CQ_ALLOCATED;
4562 spin_lock_irq(mlx4_tlock(dev));
4564 spin_unlock_irq(mlx4_tlock(dev));
4567 static void rem_slave_mrs(struct mlx4_dev *dev, int slave)
4569 struct mlx4_priv *priv = mlx4_priv(dev);
4570 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
4571 struct list_head *mpt_list =
4572 &tracker->slave_list[slave].res_list[RES_MPT];
4573 struct res_mpt *mpt;
4574 struct res_mpt *tmp;
4581 err = move_all_busy(dev, slave, RES_MPT);
4583 mlx4_warn(dev, "rem_slave_mrs: Could not move all mpts - too busy for slave %d\n",
4586 spin_lock_irq(mlx4_tlock(dev));
4587 list_for_each_entry_safe(mpt, tmp, mpt_list, com.list) {
4588 spin_unlock_irq(mlx4_tlock(dev));
4589 if (mpt->com.owner == slave) {
4590 mptn = mpt->com.res_id;
4591 state = mpt->com.from_state;
4592 while (state != 0) {
4594 case RES_MPT_RESERVED:
4595 __mlx4_mpt_release(dev, mpt->key);
4596 spin_lock_irq(mlx4_tlock(dev));
4597 rb_erase(&mpt->com.node,
4598 &tracker->res_tree[RES_MPT]);
4599 list_del(&mpt->com.list);
4600 spin_unlock_irq(mlx4_tlock(dev));
4601 mlx4_release_resource(dev, slave,
4607 case RES_MPT_MAPPED:
4608 __mlx4_mpt_free_icm(dev, mpt->key);
4609 state = RES_MPT_RESERVED;
4614 err = mlx4_cmd(dev, in_param, mptn, 0,
4616 MLX4_CMD_TIME_CLASS_A,
4619 mlx4_dbg(dev, "rem_slave_mrs: failed to move slave %d mpt %d to SW ownership\n",
4622 atomic_dec(&mpt->mtt->ref_count);
4623 state = RES_MPT_MAPPED;
4630 spin_lock_irq(mlx4_tlock(dev));
4632 spin_unlock_irq(mlx4_tlock(dev));
4635 static void rem_slave_mtts(struct mlx4_dev *dev, int slave)
4637 struct mlx4_priv *priv = mlx4_priv(dev);
4638 struct mlx4_resource_tracker *tracker =
4639 &priv->mfunc.master.res_tracker;
4640 struct list_head *mtt_list =
4641 &tracker->slave_list[slave].res_list[RES_MTT];
4642 struct res_mtt *mtt;
4643 struct res_mtt *tmp;
4649 err = move_all_busy(dev, slave, RES_MTT);
4651 mlx4_warn(dev, "rem_slave_mtts: Could not move all mtts - too busy for slave %d\n",
4654 spin_lock_irq(mlx4_tlock(dev));
4655 list_for_each_entry_safe(mtt, tmp, mtt_list, com.list) {
4656 spin_unlock_irq(mlx4_tlock(dev));
4657 if (mtt->com.owner == slave) {
4658 base = mtt->com.res_id;
4659 state = mtt->com.from_state;
4660 while (state != 0) {
4662 case RES_MTT_ALLOCATED:
4663 __mlx4_free_mtt_range(dev, base,
4665 spin_lock_irq(mlx4_tlock(dev));
4666 rb_erase(&mtt->com.node,
4667 &tracker->res_tree[RES_MTT]);
4668 list_del(&mtt->com.list);
4669 spin_unlock_irq(mlx4_tlock(dev));
4670 mlx4_release_resource(dev, slave, RES_MTT,
4671 1 << mtt->order, 0);
4681 spin_lock_irq(mlx4_tlock(dev));
4683 spin_unlock_irq(mlx4_tlock(dev));
4686 static void rem_slave_fs_rule(struct mlx4_dev *dev, int slave)
4688 struct mlx4_priv *priv = mlx4_priv(dev);
4689 struct mlx4_resource_tracker *tracker =
4690 &priv->mfunc.master.res_tracker;
4691 struct list_head *fs_rule_list =
4692 &tracker->slave_list[slave].res_list[RES_FS_RULE];
4693 struct res_fs_rule *fs_rule;
4694 struct res_fs_rule *tmp;
4699 err = move_all_busy(dev, slave, RES_FS_RULE);
4701 mlx4_warn(dev, "rem_slave_fs_rule: Could not move all fs rules to busy for slave %d\n",
4704 spin_lock_irq(mlx4_tlock(dev));
4705 list_for_each_entry_safe(fs_rule, tmp, fs_rule_list, com.list) {
4706 spin_unlock_irq(mlx4_tlock(dev));
4707 if (fs_rule->com.owner == slave) {
4708 base = fs_rule->com.res_id;
4709 state = fs_rule->com.from_state;
4710 while (state != 0) {
4712 case RES_FS_RULE_ALLOCATED:
4714 err = mlx4_cmd(dev, base, 0, 0,
4715 MLX4_QP_FLOW_STEERING_DETACH,
4716 MLX4_CMD_TIME_CLASS_A,
4719 spin_lock_irq(mlx4_tlock(dev));
4720 rb_erase(&fs_rule->com.node,
4721 &tracker->res_tree[RES_FS_RULE]);
4722 list_del(&fs_rule->com.list);
4723 spin_unlock_irq(mlx4_tlock(dev));
4733 spin_lock_irq(mlx4_tlock(dev));
4735 spin_unlock_irq(mlx4_tlock(dev));
4738 static void rem_slave_eqs(struct mlx4_dev *dev, int slave)
4740 struct mlx4_priv *priv = mlx4_priv(dev);
4741 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
4742 struct list_head *eq_list =
4743 &tracker->slave_list[slave].res_list[RES_EQ];
4751 err = move_all_busy(dev, slave, RES_EQ);
4753 mlx4_warn(dev, "rem_slave_eqs: Could not move all eqs - too busy for slave %d\n",
4756 spin_lock_irq(mlx4_tlock(dev));
4757 list_for_each_entry_safe(eq, tmp, eq_list, com.list) {
4758 spin_unlock_irq(mlx4_tlock(dev));
4759 if (eq->com.owner == slave) {
4760 eqn = eq->com.res_id;
4761 state = eq->com.from_state;
4762 while (state != 0) {
4764 case RES_EQ_RESERVED:
4765 spin_lock_irq(mlx4_tlock(dev));
4766 rb_erase(&eq->com.node,
4767 &tracker->res_tree[RES_EQ]);
4768 list_del(&eq->com.list);
4769 spin_unlock_irq(mlx4_tlock(dev));
4775 err = mlx4_cmd(dev, slave, eqn & 0x3ff,
4776 1, MLX4_CMD_HW2SW_EQ,
4777 MLX4_CMD_TIME_CLASS_A,
4780 mlx4_dbg(dev, "rem_slave_eqs: failed to move slave %d eqs %d to SW ownership\n",
4781 slave, eqn & 0x3ff);
4782 atomic_dec(&eq->mtt->ref_count);
4783 state = RES_EQ_RESERVED;
4791 spin_lock_irq(mlx4_tlock(dev));
4793 spin_unlock_irq(mlx4_tlock(dev));
4796 static void rem_slave_counters(struct mlx4_dev *dev, int slave)
4798 struct mlx4_priv *priv = mlx4_priv(dev);
4799 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
4800 struct list_head *counter_list =
4801 &tracker->slave_list[slave].res_list[RES_COUNTER];
4802 struct res_counter *counter;
4803 struct res_counter *tmp;
4807 err = move_all_busy(dev, slave, RES_COUNTER);
4809 mlx4_warn(dev, "rem_slave_counters: Could not move all counters - too busy for slave %d\n",
4812 spin_lock_irq(mlx4_tlock(dev));
4813 list_for_each_entry_safe(counter, tmp, counter_list, com.list) {
4814 if (counter->com.owner == slave) {
4815 index = counter->com.res_id;
4816 rb_erase(&counter->com.node,
4817 &tracker->res_tree[RES_COUNTER]);
4818 list_del(&counter->com.list);
4820 __mlx4_counter_free(dev, index);
4821 mlx4_release_resource(dev, slave, RES_COUNTER, 1, 0);
4824 spin_unlock_irq(mlx4_tlock(dev));
4827 static void rem_slave_xrcdns(struct mlx4_dev *dev, int slave)
4829 struct mlx4_priv *priv = mlx4_priv(dev);
4830 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
4831 struct list_head *xrcdn_list =
4832 &tracker->slave_list[slave].res_list[RES_XRCD];
4833 struct res_xrcdn *xrcd;
4834 struct res_xrcdn *tmp;
4838 err = move_all_busy(dev, slave, RES_XRCD);
4840 mlx4_warn(dev, "rem_slave_xrcdns: Could not move all xrcdns - too busy for slave %d\n",
4843 spin_lock_irq(mlx4_tlock(dev));
4844 list_for_each_entry_safe(xrcd, tmp, xrcdn_list, com.list) {
4845 if (xrcd->com.owner == slave) {
4846 xrcdn = xrcd->com.res_id;
4847 rb_erase(&xrcd->com.node, &tracker->res_tree[RES_XRCD]);
4848 list_del(&xrcd->com.list);
4850 __mlx4_xrcd_free(dev, xrcdn);
4853 spin_unlock_irq(mlx4_tlock(dev));
4856 void mlx4_delete_all_resources_for_slave(struct mlx4_dev *dev, int slave)
4858 struct mlx4_priv *priv = mlx4_priv(dev);
4859 mlx4_reset_roce_gids(dev, slave);
4860 mutex_lock(&priv->mfunc.master.res_tracker.slave_list[slave].mutex);
4861 rem_slave_vlans(dev, slave);
4862 rem_slave_macs(dev, slave);
4863 rem_slave_fs_rule(dev, slave);
4864 rem_slave_qps(dev, slave);
4865 rem_slave_srqs(dev, slave);
4866 rem_slave_cqs(dev, slave);
4867 rem_slave_mrs(dev, slave);
4868 rem_slave_eqs(dev, slave);
4869 rem_slave_mtts(dev, slave);
4870 rem_slave_counters(dev, slave);
4871 rem_slave_xrcdns(dev, slave);
4872 mutex_unlock(&priv->mfunc.master.res_tracker.slave_list[slave].mutex);
4875 void mlx4_vf_immed_vlan_work_handler(struct work_struct *_work)
4877 struct mlx4_vf_immed_vlan_work *work =
4878 container_of(_work, struct mlx4_vf_immed_vlan_work, work);
4879 struct mlx4_cmd_mailbox *mailbox;
4880 struct mlx4_update_qp_context *upd_context;
4881 struct mlx4_dev *dev = &work->priv->dev;
4882 struct mlx4_resource_tracker *tracker =
4883 &work->priv->mfunc.master.res_tracker;
4884 struct list_head *qp_list =
4885 &tracker->slave_list[work->slave].res_list[RES_QP];
4888 u64 qp_path_mask_vlan_ctrl =
4889 ((1ULL << MLX4_UPD_QP_PATH_MASK_ETH_TX_BLOCK_UNTAGGED) |
4890 (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_TX_BLOCK_1P) |
4891 (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_TX_BLOCK_TAGGED) |
4892 (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_RX_BLOCK_UNTAGGED) |
4893 (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_RX_BLOCK_1P) |
4894 (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_RX_BLOCK_TAGGED));
4896 u64 qp_path_mask = ((1ULL << MLX4_UPD_QP_PATH_MASK_VLAN_INDEX) |
4897 (1ULL << MLX4_UPD_QP_PATH_MASK_FVL) |
4898 (1ULL << MLX4_UPD_QP_PATH_MASK_CV) |
4899 (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_HIDE_CQE_VLAN) |
4900 (1ULL << MLX4_UPD_QP_PATH_MASK_FEUP) |
4901 (1ULL << MLX4_UPD_QP_PATH_MASK_FVL_RX) |
4902 (1ULL << MLX4_UPD_QP_PATH_MASK_SCHED_QUEUE));
4905 int port, errors = 0;
4908 if (mlx4_is_slave(dev)) {
4909 mlx4_warn(dev, "Trying to update QP in slave %d\n",
4914 mailbox = mlx4_alloc_cmd_mailbox(dev);
4915 if (IS_ERR(mailbox))
4917 if (work->flags & MLX4_VF_IMMED_VLAN_FLAG_LINK_DISABLE) /* block all */
4918 vlan_control = MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
4919 MLX4_VLAN_CTRL_ETH_TX_BLOCK_PRIO_TAGGED |
4920 MLX4_VLAN_CTRL_ETH_TX_BLOCK_UNTAGGED |
4921 MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED |
4922 MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED |
4923 MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED;
4924 else if (!work->vlan_id)
4925 vlan_control = MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
4926 MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED;
4928 vlan_control = MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
4929 MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED |
4930 MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED;
4932 upd_context = mailbox->buf;
4933 upd_context->qp_mask = cpu_to_be64(1ULL << MLX4_UPD_QP_MASK_VSD);
4935 spin_lock_irq(mlx4_tlock(dev));
4936 list_for_each_entry_safe(qp, tmp, qp_list, com.list) {
4937 spin_unlock_irq(mlx4_tlock(dev));
4938 if (qp->com.owner == work->slave) {
4939 if (qp->com.from_state != RES_QP_HW ||
4940 !qp->sched_queue || /* no INIT2RTR trans yet */
4941 mlx4_is_qp_reserved(dev, qp->local_qpn) ||
4942 qp->qpc_flags & (1 << MLX4_RSS_QPC_FLAG_OFFSET)) {
4943 spin_lock_irq(mlx4_tlock(dev));
4946 port = (qp->sched_queue >> 6 & 1) + 1;
4947 if (port != work->port) {
4948 spin_lock_irq(mlx4_tlock(dev));
4951 if (MLX4_QP_ST_RC == ((qp->qpc_flags >> 16) & 0xff))
4952 upd_context->primary_addr_path_mask = cpu_to_be64(qp_path_mask);
4954 upd_context->primary_addr_path_mask =
4955 cpu_to_be64(qp_path_mask | qp_path_mask_vlan_ctrl);
4956 if (work->vlan_id == MLX4_VGT) {
4957 upd_context->qp_context.param3 = qp->param3;
4958 upd_context->qp_context.pri_path.vlan_control = qp->vlan_control;
4959 upd_context->qp_context.pri_path.fvl_rx = qp->fvl_rx;
4960 upd_context->qp_context.pri_path.vlan_index = qp->vlan_index;
4961 upd_context->qp_context.pri_path.fl = qp->pri_path_fl;
4962 upd_context->qp_context.pri_path.feup = qp->feup;
4963 upd_context->qp_context.pri_path.sched_queue =
4966 upd_context->qp_context.param3 = qp->param3 & ~cpu_to_be32(MLX4_STRIP_VLAN);
4967 upd_context->qp_context.pri_path.vlan_control = vlan_control;
4968 upd_context->qp_context.pri_path.vlan_index = work->vlan_ix;
4969 upd_context->qp_context.pri_path.fvl_rx =
4970 qp->fvl_rx | MLX4_FVL_RX_FORCE_ETH_VLAN;
4971 upd_context->qp_context.pri_path.fl =
4972 qp->pri_path_fl | MLX4_FL_CV | MLX4_FL_ETH_HIDE_CQE_VLAN;
4973 upd_context->qp_context.pri_path.feup =
4974 qp->feup | MLX4_FEUP_FORCE_ETH_UP | MLX4_FVL_FORCE_ETH_VLAN;
4975 upd_context->qp_context.pri_path.sched_queue =
4976 qp->sched_queue & 0xC7;
4977 upd_context->qp_context.pri_path.sched_queue |=
4978 ((work->qos & 0x7) << 3);
4979 upd_context->qp_mask |=
4981 MLX4_UPD_QP_MASK_QOS_VPP);
4982 upd_context->qp_context.qos_vport =
4986 err = mlx4_cmd(dev, mailbox->dma,
4987 qp->local_qpn & 0xffffff,
4988 0, MLX4_CMD_UPDATE_QP,
4989 MLX4_CMD_TIME_CLASS_C, MLX4_CMD_NATIVE);
4991 mlx4_info(dev, "UPDATE_QP failed for slave %d, port %d, qpn %d (%d)\n",
4992 work->slave, port, qp->local_qpn, err);
4996 spin_lock_irq(mlx4_tlock(dev));
4998 spin_unlock_irq(mlx4_tlock(dev));
4999 mlx4_free_cmd_mailbox(dev, mailbox);
5002 mlx4_err(dev, "%d UPDATE_QP failures for slave %d, port %d\n",
5003 errors, work->slave, work->port);
5005 /* unregister the previous vlan_id, if needed and only if there were
5006 * no errors while updating the QPs
5008 if (work->flags & MLX4_VF_IMMED_VLAN_FLAG_VLAN && !errors &&
5009 NO_INDX != work->orig_vlan_ix)
5010 __mlx4_unregister_vlan(&work->priv->dev, work->port,
5011 work->orig_vlan_id);