/*
 * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved.
 * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies.
 * Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/sched.h>
#include <linux/pci.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/mlx4/cmd.h>
#include <linux/mlx4/qp.h>
#include <linux/if_ether.h>
#include <linux/etherdevice.h>

#include "mlx4.h"
#include "fw.h"

#define MLX4_MAC_VALID		(1ull << 63)
	struct list_head list;

	struct list_head list;

	struct list_head list;
	enum mlx4_protocol prot;
	enum mlx4_steer_type steer;

	RES_QP_BUSY = RES_ANY_BUSY,

	/* QP number was allocated */

	/* ICM memory for QP context was mapped */

	/* QP is in hw ownership */

	struct res_common com;
	struct list_head mcg_list;
enum res_mtt_states {
	RES_MTT_BUSY = RES_ANY_BUSY,

static inline const char *mtt_states_str(enum res_mtt_states state)
	case RES_MTT_BUSY: return "RES_MTT_BUSY";
	case RES_MTT_ALLOCATED: return "RES_MTT_ALLOCATED";
	default: return "Unknown";
	struct res_common com;

enum res_mpt_states {
	RES_MPT_BUSY = RES_ANY_BUSY,

	struct res_common com;

	RES_EQ_BUSY = RES_ANY_BUSY,

	struct res_common com;

	RES_CQ_BUSY = RES_ANY_BUSY,

	struct res_common com;

enum res_srq_states {
	RES_SRQ_BUSY = RES_ANY_BUSY,

	struct res_common com;

enum res_counter_states {
	RES_COUNTER_BUSY = RES_ANY_BUSY,
	RES_COUNTER_ALLOCATED,

	struct res_common com;

enum res_xrcdn_states {
	RES_XRCD_BUSY = RES_ANY_BUSY,

	struct res_common com;

enum res_fs_rule_states {
	RES_FS_RULE_BUSY = RES_ANY_BUSY,
	RES_FS_RULE_ALLOCATED,

	struct res_common com;
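
/*
 * The tracker keeps one red-black tree per resource type, keyed by
 * resource id; lookup is a plain binary search over res_id.
 */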
static void *res_tracker_lookup(struct rb_root *root, u64 res_id)
	struct rb_node *node = root->rb_node;

		struct res_common *res = container_of(node, struct res_common,

		if (res_id < res->res_id)
			node = node->rb_left;
		else if (res_id > res->res_id)
			node = node->rb_right;
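
/*
 * Standard rbtree insertion: walk down to the leaf link the new node
 * belongs at, then link it in and rebalance.  An entry whose res_id is
 * already present makes the insert fail.
 */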
static int res_tracker_insert(struct rb_root *root, struct res_common *res)
	struct rb_node **new = &(root->rb_node), *parent = NULL;

	/* Figure out where to put new node */
		struct res_common *this = container_of(*new, struct res_common,

		if (res->res_id < this->res_id)
			new = &((*new)->rb_left);
		else if (res->res_id > this->res_id)
			new = &((*new)->rb_right);

	/* Add new node and rebalance tree. */
	rb_link_node(&res->node, parent, new);
	rb_insert_color(&res->node, root);
static const char *ResourceType(enum mlx4_resource rt)
	case RES_QP: return "RES_QP";
	case RES_CQ: return "RES_CQ";
	case RES_SRQ: return "RES_SRQ";
	case RES_MPT: return "RES_MPT";
	case RES_MTT: return "RES_MTT";
	case RES_MAC: return "RES_MAC";
	case RES_EQ: return "RES_EQ";
	case RES_COUNTER: return "RES_COUNTER";
	case RES_FS_RULE: return "RES_FS_RULE";
	case RES_XRCD: return "RES_XRCD";
	default: return "Unknown resource type !!!";
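
/*
 * Set up the tracker on the master: one per-slave list of owned
 * resources for each resource type, one rb-tree per resource type, and
 * the spinlock that protects them.
 */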
int mlx4_init_resource_tracker(struct mlx4_dev *dev)
	struct mlx4_priv *priv = mlx4_priv(dev);

	priv->mfunc.master.res_tracker.slave_list =
		kzalloc(dev->num_slaves * sizeof(struct slave_list),
	if (!priv->mfunc.master.res_tracker.slave_list)

	for (i = 0; i < dev->num_slaves; i++) {
		for (t = 0; t < MLX4_NUM_OF_RESOURCE_TYPE; ++t)
			INIT_LIST_HEAD(&priv->mfunc.master.res_tracker.
				       slave_list[i].res_list[t]);
		mutex_init(&priv->mfunc.master.res_tracker.slave_list[i].mutex);

	mlx4_dbg(dev, "Started init_resource_tracker: %ld slaves\n",
	for (i = 0; i < MLX4_NUM_OF_RESOURCE_TYPE; i++)
		priv->mfunc.master.res_tracker.res_tree[i] = RB_ROOT;

	spin_lock_init(&priv->mfunc.master.res_tracker.lock);
void mlx4_free_resource_tracker(struct mlx4_dev *dev,
				enum mlx4_res_tracker_free_type type)
	struct mlx4_priv *priv = mlx4_priv(dev);

	if (priv->mfunc.master.res_tracker.slave_list) {
		if (type != RES_TR_FREE_STRUCTS_ONLY)
			for (i = 0; i < dev->num_slaves; i++)
				if (type == RES_TR_FREE_ALL ||
				    dev->caps.function != i)
					mlx4_delete_all_resources_for_slave(dev, i);

		if (type != RES_TR_FREE_SLAVES_ONLY) {
			kfree(priv->mfunc.master.res_tracker.slave_list);
			priv->mfunc.master.res_tracker.slave_list = NULL;
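
/*
 * QP context fixup applied to commands wrapped on behalf of a slave:
 * the pkey index the slave wrote (byte 35 of the mailbox) is virtual;
 * translate it through virt2phys_pkey for the port selected by bit 6
 * of the sched_queue byte (byte 64).
 */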
static void update_pkey_index(struct mlx4_dev *dev, int slave,
			      struct mlx4_cmd_mailbox *inbox)
	u8 sched = *(u8 *)(inbox->buf + 64);
	u8 orig_index = *(u8 *)(inbox->buf + 35);

	struct mlx4_priv *priv = mlx4_priv(dev);

	port = (sched >> 6 & 1) + 1;

	new_index = priv->virt2phys_pkey[slave][port - 1][orig_index];
	*(u8 *)(inbox->buf + 35) = new_index;
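
/*
 * Slaves own only gid index 0, so force the mgid_index fields of the
 * QP context to the slave's own index; UD QPs get 0x80 | slave for the
 * proxy GID range.
 */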
static void update_gid(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *inbox,
	struct mlx4_qp_context *qp_ctx = inbox->buf + 8;
	enum mlx4_qp_optpar optpar = be32_to_cpu(*(__be32 *) inbox->buf);
	u32 ts = (be32_to_cpu(qp_ctx->flags) >> 16) & 0xff;

	if (MLX4_QP_ST_UD == ts)
		qp_ctx->pri_path.mgid_index = 0x80 | slave;

	if (MLX4_QP_ST_RC == ts || MLX4_QP_ST_UC == ts) {
		if (optpar & MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH)
			qp_ctx->pri_path.mgid_index = slave & 0x7F;
		if (optpar & MLX4_QP_OPTPAR_ALT_ADDR_PATH)
			qp_ctx->alt_path.mgid_index = slave & 0x7F;
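
/*
 * Enforce the administrator's per-vport policy on a slave QP context:
 * force the operational VLAN (unless the vport is in VGT mode) and the
 * default QoS, and, when spoof-checking is on, pin the QP to the MAC
 * index assigned to the VF.
 */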
static int update_vport_qp_param(struct mlx4_dev *dev,
				 struct mlx4_cmd_mailbox *inbox,
	struct mlx4_qp_context *qpc = inbox->buf + 8;
	struct mlx4_vport_oper_state *vp_oper;
	struct mlx4_priv *priv;

	port = (qpc->pri_path.sched_queue & 0x40) ? 2 : 1;
	priv = mlx4_priv(dev);
	vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port];

	if (MLX4_VGT != vp_oper->state.default_vlan) {
		qp_type = (be32_to_cpu(qpc->flags) >> 16) & 0xff;
		if (MLX4_QP_ST_RC == qp_type)

		qpc->pri_path.vlan_index = vp_oper->vlan_idx;
		qpc->pri_path.fl = (1 << 6) | (1 << 2); /* set cv bit and hide_cqe_vlan bit */
		qpc->pri_path.feup |= 1 << 3; /* set fvl bit */
		qpc->pri_path.sched_queue &= 0xC7;
		qpc->pri_path.sched_queue |= (vp_oper->state.default_qos) << 3;
		mlx4_dbg(dev, "qp %d port %d Q 0x%x set vlan to %d vidx %d feup %x fl %x\n",
			 be32_to_cpu(qpc->local_qpn) & 0xffffff, port,
			 (int)(qpc->pri_path.sched_queue), vp_oper->state.default_vlan,
			 vp_oper->vlan_idx, (int)(qpc->pri_path.feup),
			 (int)(qpc->pri_path.fl));

	if (vp_oper->state.spoofchk) {
		qpc->pri_path.feup |= 1 << 5; /* set fsm bit */
		qpc->pri_path.grh_mylmc = (0x80 & qpc->pri_path.grh_mylmc) + vp_oper->mac_idx;
		mlx4_dbg(dev, "spoof qp %d port %d feup 0x%x, myLmc 0x%x mindx %d\n",
			 be32_to_cpu(qpc->local_qpn) & 0xffffff, port,
			 (int)qpc->pri_path.feup, (int)qpc->pri_path.grh_mylmc,
static int mpt_mask(struct mlx4_dev *dev)
	return dev->caps.num_mpts - 1;

static void *find_res(struct mlx4_dev *dev, u64 res_id,
		      enum mlx4_resource type)
	struct mlx4_priv *priv = mlx4_priv(dev);

	return res_tracker_lookup(&priv->mfunc.master.res_tracker.res_tree[type],
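
/*
 * get_res()/put_res() form a simple busy-bit protocol under the tracker
 * lock: get_res() saves the current state in from_state and parks the
 * resource in RES_ANY_BUSY; put_res() restores the saved state.  A
 * resource that is already busy, or owned by a different slave, cannot
 * be taken.
 */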
static int get_res(struct mlx4_dev *dev, int slave, u64 res_id,
		   enum mlx4_resource type,
	struct res_common *r;

	spin_lock_irq(mlx4_tlock(dev));
	r = find_res(dev, res_id, type);

	if (r->state == RES_ANY_BUSY) {

	if (r->owner != slave) {

	r->from_state = r->state;
	r->state = RES_ANY_BUSY;

		*((struct res_common **)res) = r;

	spin_unlock_irq(mlx4_tlock(dev));

int mlx4_get_slave_from_resource_id(struct mlx4_dev *dev,
				    enum mlx4_resource type,
				    u64 res_id, int *slave)
	struct res_common *r;

	spin_lock(mlx4_tlock(dev));

	r = find_res(dev, id, type);

	spin_unlock(mlx4_tlock(dev));

static void put_res(struct mlx4_dev *dev, int slave, u64 res_id,
		    enum mlx4_resource type)
	struct res_common *r;

	spin_lock_irq(mlx4_tlock(dev));
	r = find_res(dev, res_id, type);
	r->state = r->from_state;
	spin_unlock_irq(mlx4_tlock(dev));
static struct res_common *alloc_qp_tr(int id)
	ret = kzalloc(sizeof *ret, GFP_KERNEL);

	ret->com.res_id = id;
	ret->com.state = RES_QP_RESERVED;
	INIT_LIST_HEAD(&ret->mcg_list);
	spin_lock_init(&ret->mcg_spl);
	atomic_set(&ret->ref_count, 0);

static struct res_common *alloc_mtt_tr(int id, int order)
	ret = kzalloc(sizeof *ret, GFP_KERNEL);

	ret->com.res_id = id;
	ret->com.state = RES_MTT_ALLOCATED;
	atomic_set(&ret->ref_count, 0);

static struct res_common *alloc_mpt_tr(int id, int key)
	ret = kzalloc(sizeof *ret, GFP_KERNEL);

	ret->com.res_id = id;
	ret->com.state = RES_MPT_RESERVED;

static struct res_common *alloc_eq_tr(int id)
	ret = kzalloc(sizeof *ret, GFP_KERNEL);

	ret->com.res_id = id;
	ret->com.state = RES_EQ_RESERVED;

static struct res_common *alloc_cq_tr(int id)
	ret = kzalloc(sizeof *ret, GFP_KERNEL);

	ret->com.res_id = id;
	ret->com.state = RES_CQ_ALLOCATED;
	atomic_set(&ret->ref_count, 0);

static struct res_common *alloc_srq_tr(int id)
	ret = kzalloc(sizeof *ret, GFP_KERNEL);

	ret->com.res_id = id;
	ret->com.state = RES_SRQ_ALLOCATED;
	atomic_set(&ret->ref_count, 0);

static struct res_common *alloc_counter_tr(int id)
	struct res_counter *ret;

	ret = kzalloc(sizeof *ret, GFP_KERNEL);

	ret->com.res_id = id;
	ret->com.state = RES_COUNTER_ALLOCATED;

static struct res_common *alloc_xrcdn_tr(int id)
	struct res_xrcdn *ret;

	ret = kzalloc(sizeof *ret, GFP_KERNEL);

	ret->com.res_id = id;
	ret->com.state = RES_XRCD_ALLOCATED;

static struct res_common *alloc_fs_rule_tr(u64 id, int qpn)
	struct res_fs_rule *ret;

	ret = kzalloc(sizeof *ret, GFP_KERNEL);

	ret->com.res_id = id;
	ret->com.state = RES_FS_RULE_ALLOCATED;
static struct res_common *alloc_tr(u64 id, enum mlx4_resource type, int slave,
	struct res_common *ret;

		ret = alloc_qp_tr(id);

		ret = alloc_mpt_tr(id, extra);

		ret = alloc_mtt_tr(id, extra);

		ret = alloc_eq_tr(id);

		ret = alloc_cq_tr(id);

		ret = alloc_srq_tr(id);

		printk(KERN_ERR "implementation missing\n");

		ret = alloc_counter_tr(id);

		ret = alloc_xrcdn_tr(id);

		ret = alloc_fs_rule_tr(id, extra);
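
/*
 * Track [base, base + count) for a slave.  Tracker entries are
 * allocated outside the lock; the rb-tree inserts are then done under
 * the lock, with a full rollback if any id in the range already exists.
 */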
static int add_res_range(struct mlx4_dev *dev, int slave, u64 base, int count,
			 enum mlx4_resource type, int extra)
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct res_common **res_arr;
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct rb_root *root = &tracker->res_tree[type];

	res_arr = kzalloc(count * sizeof *res_arr, GFP_KERNEL);

	for (i = 0; i < count; ++i) {
		res_arr[i] = alloc_tr(base + i, type, slave, extra);
			for (--i; i >= 0; --i)

	spin_lock_irq(mlx4_tlock(dev));
	for (i = 0; i < count; ++i) {
		if (find_res(dev, base + i, type)) {

		err = res_tracker_insert(root, res_arr[i]);

		list_add_tail(&res_arr[i]->list,
			      &tracker->slave_list[slave].res_list[type]);
	spin_unlock_irq(mlx4_tlock(dev));

	for (--i; i >= 0; --i)
		rb_erase(&res_arr[i]->node, root);

	spin_unlock_irq(mlx4_tlock(dev));

	for (i = 0; i < count; ++i)
static int remove_qp_ok(struct res_qp *res)
	if (res->com.state == RES_QP_BUSY || atomic_read(&res->ref_count) ||
	    !list_empty(&res->mcg_list)) {
		pr_err("resource tracker: fail to remove qp, state %d, ref_count %d\n",
		       res->com.state, atomic_read(&res->ref_count));
	} else if (res->com.state != RES_QP_RESERVED) {

static int remove_mtt_ok(struct res_mtt *res, int order)
	if (res->com.state == RES_MTT_BUSY ||
	    atomic_read(&res->ref_count)) {
		printk(KERN_DEBUG "%s-%d: state %s, ref_count %d\n",
		       mtt_states_str(res->com.state),
		       atomic_read(&res->ref_count));
	} else if (res->com.state != RES_MTT_ALLOCATED)
	else if (res->order != order)

static int remove_mpt_ok(struct res_mpt *res)
	if (res->com.state == RES_MPT_BUSY)
	else if (res->com.state != RES_MPT_RESERVED)

static int remove_eq_ok(struct res_eq *res)
	if (res->com.state == RES_EQ_BUSY)
	else if (res->com.state != RES_EQ_RESERVED)

static int remove_counter_ok(struct res_counter *res)
	if (res->com.state == RES_COUNTER_BUSY)
	else if (res->com.state != RES_COUNTER_ALLOCATED)

static int remove_xrcdn_ok(struct res_xrcdn *res)
	if (res->com.state == RES_XRCD_BUSY)
	else if (res->com.state != RES_XRCD_ALLOCATED)

static int remove_fs_rule_ok(struct res_fs_rule *res)
	if (res->com.state == RES_FS_RULE_BUSY)
	else if (res->com.state != RES_FS_RULE_ALLOCATED)

static int remove_cq_ok(struct res_cq *res)
	if (res->com.state == RES_CQ_BUSY)
	else if (res->com.state != RES_CQ_ALLOCATED)

static int remove_srq_ok(struct res_srq *res)
	if (res->com.state == RES_SRQ_BUSY)
	else if (res->com.state != RES_SRQ_ALLOCATED)

static int remove_ok(struct res_common *res, enum mlx4_resource type, int extra)
		return remove_qp_ok((struct res_qp *)res);

		return remove_cq_ok((struct res_cq *)res);

		return remove_srq_ok((struct res_srq *)res);

		return remove_mpt_ok((struct res_mpt *)res);

		return remove_mtt_ok((struct res_mtt *)res, extra);

		return remove_eq_ok((struct res_eq *)res);

		return remove_counter_ok((struct res_counter *)res);

		return remove_xrcdn_ok((struct res_xrcdn *)res);

		return remove_fs_rule_ok((struct res_fs_rule *)res);
static int rem_res_range(struct mlx4_dev *dev, int slave, u64 base, int count,
			 enum mlx4_resource type, int extra)
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct res_common *r;

	spin_lock_irq(mlx4_tlock(dev));
	for (i = base; i < base + count; ++i) {
		r = res_tracker_lookup(&tracker->res_tree[type], i);

		if (r->owner != slave) {

		err = remove_ok(r, type, extra);

	for (i = base; i < base + count; ++i) {
		r = res_tracker_lookup(&tracker->res_tree[type], i);
		rb_erase(&r->node, &tracker->res_tree[type]);

	spin_unlock_irq(mlx4_tlock(dev));
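
/*
 * The *_res_start_move_to() helpers begin a guarded state transition:
 * under the tracker lock they check that the requested transition is
 * legal from the current state, record from_state/to_state and park the
 * resource as BUSY.  The caller then issues the firmware command and
 * completes with res_end_move() on success or res_abort_move() on
 * failure.
 */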
static int qp_res_start_move_to(struct mlx4_dev *dev, int slave, int qpn,
				enum res_qp_states state, struct res_qp **qp,
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;

	spin_lock_irq(mlx4_tlock(dev));
	r = res_tracker_lookup(&tracker->res_tree[RES_QP], qpn);
	else if (r->com.owner != slave)

			mlx4_dbg(dev, "%s: failed RES_QP, 0x%llx\n",
				 __func__, r->com.res_id);

		case RES_QP_RESERVED:
			if (r->com.state == RES_QP_MAPPED && !alloc)

			mlx4_dbg(dev, "failed RES_QP, 0x%llx\n", r->com.res_id);

			if ((r->com.state == RES_QP_RESERVED && alloc) ||
			    r->com.state == RES_QP_HW)

				mlx4_dbg(dev, "failed RES_QP, 0x%llx\n",

			if (r->com.state != RES_QP_MAPPED)

			r->com.from_state = r->com.state;
			r->com.to_state = state;
			r->com.state = RES_QP_BUSY;

	spin_unlock_irq(mlx4_tlock(dev));
static int mr_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
				enum res_mpt_states state, struct res_mpt **mpt)
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;

	spin_lock_irq(mlx4_tlock(dev));
	r = res_tracker_lookup(&tracker->res_tree[RES_MPT], index);
	else if (r->com.owner != slave)

		case RES_MPT_RESERVED:
			if (r->com.state != RES_MPT_MAPPED)

			if (r->com.state != RES_MPT_RESERVED &&
			    r->com.state != RES_MPT_HW)

			if (r->com.state != RES_MPT_MAPPED)

			r->com.from_state = r->com.state;
			r->com.to_state = state;
			r->com.state = RES_MPT_BUSY;

	spin_unlock_irq(mlx4_tlock(dev));

static int eq_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
				enum res_eq_states state, struct res_eq **eq)
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;

	spin_lock_irq(mlx4_tlock(dev));
	r = res_tracker_lookup(&tracker->res_tree[RES_EQ], index);
	else if (r->com.owner != slave)

		case RES_EQ_RESERVED:
			if (r->com.state != RES_EQ_HW)

			if (r->com.state != RES_EQ_RESERVED)

			r->com.from_state = r->com.state;
			r->com.to_state = state;
			r->com.state = RES_EQ_BUSY;

	spin_unlock_irq(mlx4_tlock(dev));
static int cq_res_start_move_to(struct mlx4_dev *dev, int slave, int cqn,
				enum res_cq_states state, struct res_cq **cq)
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;

	spin_lock_irq(mlx4_tlock(dev));
	r = res_tracker_lookup(&tracker->res_tree[RES_CQ], cqn);
	else if (r->com.owner != slave)

		case RES_CQ_ALLOCATED:
			if (r->com.state != RES_CQ_HW)
			else if (atomic_read(&r->ref_count))

			if (r->com.state != RES_CQ_ALLOCATED)

			r->com.from_state = r->com.state;
			r->com.to_state = state;
			r->com.state = RES_CQ_BUSY;

	spin_unlock_irq(mlx4_tlock(dev));

static int srq_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
				 enum res_srq_states state, struct res_srq **srq)
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;

	spin_lock_irq(mlx4_tlock(dev));
	r = res_tracker_lookup(&tracker->res_tree[RES_SRQ], index);
	else if (r->com.owner != slave)

		case RES_SRQ_ALLOCATED:
			if (r->com.state != RES_SRQ_HW)
			else if (atomic_read(&r->ref_count))

			if (r->com.state != RES_SRQ_ALLOCATED)

			r->com.from_state = r->com.state;
			r->com.to_state = state;
			r->com.state = RES_SRQ_BUSY;

	spin_unlock_irq(mlx4_tlock(dev));
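
/* Roll back or commit a transition begun by a *_res_start_move_to() helper. */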
static void res_abort_move(struct mlx4_dev *dev, int slave,
			   enum mlx4_resource type, int id)
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct res_common *r;

	spin_lock_irq(mlx4_tlock(dev));
	r = res_tracker_lookup(&tracker->res_tree[type], id);
	if (r && (r->owner == slave))
		r->state = r->from_state;
	spin_unlock_irq(mlx4_tlock(dev));

static void res_end_move(struct mlx4_dev *dev, int slave,
			 enum mlx4_resource type, int id)
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct res_common *r;

	spin_lock_irq(mlx4_tlock(dev));
	r = res_tracker_lookup(&tracker->res_tree[type], id);
	if (r && (r->owner == slave))
		r->state = r->to_state;
	spin_unlock_irq(mlx4_tlock(dev));

static int valid_reserved(struct mlx4_dev *dev, int slave, int qpn)
	return mlx4_is_qp_reserved(dev, qpn) &&
	       (mlx4_is_master(dev) || mlx4_is_guest_proxy(dev, slave, qpn));

static int fw_reserved(struct mlx4_dev *dev, int qpn)
	return qpn < dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW];
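
/*
 * ALLOC_RES for QPs is two-phase, mirroring the native driver:
 * RES_OP_RESERVE grabs an aligned range of QP numbers, and
 * RES_OP_MAP_ICM maps ICM for one QP, moving it RESERVED -> MAPPED
 * (FW-reserved QPs already have ICM and are skipped).
 */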
static int qp_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			u64 in_param, u64 *out_param)
	case RES_OP_RESERVE:
		count = get_param_l(&in_param);
		align = get_param_h(&in_param);
		err = __mlx4_qp_reserve_range(dev, count, align, &base);

		err = add_res_range(dev, slave, base, count, RES_QP, 0);
			__mlx4_qp_release_range(dev, base, count);

		set_param_l(out_param, base);
	case RES_OP_MAP_ICM:
		qpn = get_param_l(&in_param) & 0x7fffff;
		if (valid_reserved(dev, slave, qpn)) {
			err = add_res_range(dev, slave, qpn, 1, RES_QP, 0);

		err = qp_res_start_move_to(dev, slave, qpn, RES_QP_MAPPED,

		if (!fw_reserved(dev, qpn)) {
			err = __mlx4_qp_alloc_icm(dev, qpn);
				res_abort_move(dev, slave, RES_QP, qpn);

		res_end_move(dev, slave, RES_QP, qpn);
static int mtt_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			 u64 in_param, u64 *out_param)
	if (op != RES_OP_RESERVE_AND_MAP)

	order = get_param_l(&in_param);
	base = __mlx4_alloc_mtt_range(dev, order);

	err = add_res_range(dev, slave, base, 1, RES_MTT, order);
		__mlx4_free_mtt_range(dev, base, order);

		set_param_l(out_param, base);

static int mpt_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			 u64 in_param, u64 *out_param)
	struct res_mpt *mpt;

	case RES_OP_RESERVE:
		index = __mlx4_mpt_reserve(dev);
		id = index & mpt_mask(dev);

		err = add_res_range(dev, slave, id, 1, RES_MPT, index);
			__mlx4_mpt_release(dev, index);

		set_param_l(out_param, index);
	case RES_OP_MAP_ICM:
		index = get_param_l(&in_param);
		id = index & mpt_mask(dev);
		err = mr_res_start_move_to(dev, slave, id,
					   RES_MPT_MAPPED, &mpt);

		err = __mlx4_mpt_alloc_icm(dev, mpt->key);
			res_abort_move(dev, slave, RES_MPT, id);

		res_end_move(dev, slave, RES_MPT, id);

static int cq_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			u64 in_param, u64 *out_param)
	case RES_OP_RESERVE_AND_MAP:
		err = __mlx4_cq_alloc_icm(dev, &cqn);

		err = add_res_range(dev, slave, cqn, 1, RES_CQ, 0);
			__mlx4_cq_free_icm(dev, cqn);

		set_param_l(out_param, cqn);

static int srq_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			 u64 in_param, u64 *out_param)
	case RES_OP_RESERVE_AND_MAP:
		err = __mlx4_srq_alloc_icm(dev, &srqn);

		err = add_res_range(dev, slave, srqn, 1, RES_SRQ, 0);
			__mlx4_srq_free_icm(dev, srqn);

		set_param_l(out_param, srqn);
static int mac_add_to_slave(struct mlx4_dev *dev, int slave, u64 mac, int port)
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct mac_res *res;

	res = kzalloc(sizeof *res, GFP_KERNEL);

	res->port = (u8) port;
	list_add_tail(&res->list,
		      &tracker->slave_list[slave].res_list[RES_MAC]);

static void mac_del_from_slave(struct mlx4_dev *dev, int slave, u64 mac,
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *mac_list =
		&tracker->slave_list[slave].res_list[RES_MAC];
	struct mac_res *res, *tmp;

	list_for_each_entry_safe(res, tmp, mac_list, list) {
		if (res->mac == mac && res->port == (u8) port) {
			list_del(&res->list);

static void rem_slave_macs(struct mlx4_dev *dev, int slave)
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *mac_list =
		&tracker->slave_list[slave].res_list[RES_MAC];
	struct mac_res *res, *tmp;

	list_for_each_entry_safe(res, tmp, mac_list, list) {
		list_del(&res->list);
		__mlx4_unregister_mac(dev, res->port, res->mac);
static int mac_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			 u64 in_param, u64 *out_param)
	if (op != RES_OP_RESERVE_AND_MAP)

	port = get_param_l(out_param);

	err = __mlx4_register_mac(dev, port, mac);
		set_param_l(out_param, err);

	err = mac_add_to_slave(dev, slave, mac, port);
		__mlx4_unregister_mac(dev, port, mac);

static int vlan_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			  u64 in_param, u64 *out_param)

static int counter_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			     u64 in_param, u64 *out_param)
	if (op != RES_OP_RESERVE)

	err = __mlx4_counter_alloc(dev, &index);

	err = add_res_range(dev, slave, index, 1, RES_COUNTER, 0);
		__mlx4_counter_free(dev, index);

	set_param_l(out_param, index);

static int xrcdn_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			   u64 in_param, u64 *out_param)
	if (op != RES_OP_RESERVE)

	err = __mlx4_xrcd_alloc(dev, &xrcdn);

	err = add_res_range(dev, slave, xrcdn, 1, RES_XRCD, 0);
		__mlx4_xrcd_free(dev, xrcdn);

	set_param_l(out_param, xrcdn);
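
/*
 * Dispatch a slave's ALLOC_RES command: vhcr->in_modifier selects the
 * resource type, vhcr->op_modifier the operation (reserve and/or map),
 * and the result is returned through vhcr->out_param.
 */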
int mlx4_ALLOC_RES_wrapper(struct mlx4_dev *dev, int slave,
			   struct mlx4_vhcr *vhcr,
			   struct mlx4_cmd_mailbox *inbox,
			   struct mlx4_cmd_mailbox *outbox,
			   struct mlx4_cmd_info *cmd)
	int alop = vhcr->op_modifier;

	switch (vhcr->in_modifier) {
		err = qp_alloc_res(dev, slave, vhcr->op_modifier, alop,
				   vhcr->in_param, &vhcr->out_param);

		err = mtt_alloc_res(dev, slave, vhcr->op_modifier, alop,
				    vhcr->in_param, &vhcr->out_param);

		err = mpt_alloc_res(dev, slave, vhcr->op_modifier, alop,
				    vhcr->in_param, &vhcr->out_param);

		err = cq_alloc_res(dev, slave, vhcr->op_modifier, alop,
				   vhcr->in_param, &vhcr->out_param);

		err = srq_alloc_res(dev, slave, vhcr->op_modifier, alop,
				    vhcr->in_param, &vhcr->out_param);

		err = mac_alloc_res(dev, slave, vhcr->op_modifier, alop,
				    vhcr->in_param, &vhcr->out_param);

		err = vlan_alloc_res(dev, slave, vhcr->op_modifier, alop,
				     vhcr->in_param, &vhcr->out_param);

		err = counter_alloc_res(dev, slave, vhcr->op_modifier, alop,
					vhcr->in_param, &vhcr->out_param);

		err = xrcdn_alloc_res(dev, slave, vhcr->op_modifier, alop,
				      vhcr->in_param, &vhcr->out_param);
static int qp_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
	case RES_OP_RESERVE:
		base = get_param_l(&in_param) & 0x7fffff;
		count = get_param_h(&in_param);
		err = rem_res_range(dev, slave, base, count, RES_QP, 0);

		__mlx4_qp_release_range(dev, base, count);
	case RES_OP_MAP_ICM:
		qpn = get_param_l(&in_param) & 0x7fffff;
		err = qp_res_start_move_to(dev, slave, qpn, RES_QP_RESERVED,

		if (!fw_reserved(dev, qpn))
			__mlx4_qp_free_icm(dev, qpn);

		res_end_move(dev, slave, RES_QP, qpn);

		if (valid_reserved(dev, slave, qpn))
			err = rem_res_range(dev, slave, qpn, 1, RES_QP, 0);

static int mtt_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			u64 in_param, u64 *out_param)
	if (op != RES_OP_RESERVE_AND_MAP)

	base = get_param_l(&in_param);
	order = get_param_h(&in_param);
	err = rem_res_range(dev, slave, base, 1, RES_MTT, order);
		__mlx4_free_mtt_range(dev, base, order);

static int mpt_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
	struct res_mpt *mpt;

	case RES_OP_RESERVE:
		index = get_param_l(&in_param);
		id = index & mpt_mask(dev);
		err = get_res(dev, slave, id, RES_MPT, &mpt);

		put_res(dev, slave, id, RES_MPT);

		err = rem_res_range(dev, slave, id, 1, RES_MPT, 0);

		__mlx4_mpt_release(dev, index);
	case RES_OP_MAP_ICM:
		index = get_param_l(&in_param);
		id = index & mpt_mask(dev);
		err = mr_res_start_move_to(dev, slave, id,
					   RES_MPT_RESERVED, &mpt);

		__mlx4_mpt_free_icm(dev, mpt->key);
		res_end_move(dev, slave, RES_MPT, id);
static int cq_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
		       u64 in_param, u64 *out_param)
	case RES_OP_RESERVE_AND_MAP:
		cqn = get_param_l(&in_param);
		err = rem_res_range(dev, slave, cqn, 1, RES_CQ, 0);

		__mlx4_cq_free_icm(dev, cqn);

static int srq_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			u64 in_param, u64 *out_param)
	case RES_OP_RESERVE_AND_MAP:
		srqn = get_param_l(&in_param);
		err = rem_res_range(dev, slave, srqn, 1, RES_SRQ, 0);

		__mlx4_srq_free_icm(dev, srqn);

static int mac_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			u64 in_param, u64 *out_param)
	case RES_OP_RESERVE_AND_MAP:
		port = get_param_l(out_param);
		mac_del_from_slave(dev, slave, in_param, port);
		__mlx4_unregister_mac(dev, port, in_param);

static int vlan_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			 u64 in_param, u64 *out_param)

static int counter_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			    u64 in_param, u64 *out_param)
	if (op != RES_OP_RESERVE)

	index = get_param_l(&in_param);
	err = rem_res_range(dev, slave, index, 1, RES_COUNTER, 0);

	__mlx4_counter_free(dev, index);

static int xrcdn_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			  u64 in_param, u64 *out_param)
	if (op != RES_OP_RESERVE)

	xrcdn = get_param_l(&in_param);
	err = rem_res_range(dev, slave, xrcdn, 1, RES_XRCD, 0);

	__mlx4_xrcd_free(dev, xrcdn);
int mlx4_FREE_RES_wrapper(struct mlx4_dev *dev, int slave,
			  struct mlx4_vhcr *vhcr,
			  struct mlx4_cmd_mailbox *inbox,
			  struct mlx4_cmd_mailbox *outbox,
			  struct mlx4_cmd_info *cmd)
	int alop = vhcr->op_modifier;

	switch (vhcr->in_modifier) {
		err = qp_free_res(dev, slave, vhcr->op_modifier, alop,

		err = mtt_free_res(dev, slave, vhcr->op_modifier, alop,
				   vhcr->in_param, &vhcr->out_param);

		err = mpt_free_res(dev, slave, vhcr->op_modifier, alop,

		err = cq_free_res(dev, slave, vhcr->op_modifier, alop,
				  vhcr->in_param, &vhcr->out_param);

		err = srq_free_res(dev, slave, vhcr->op_modifier, alop,
				   vhcr->in_param, &vhcr->out_param);

		err = mac_free_res(dev, slave, vhcr->op_modifier, alop,
				   vhcr->in_param, &vhcr->out_param);

		err = vlan_free_res(dev, slave, vhcr->op_modifier, alop,
				    vhcr->in_param, &vhcr->out_param);

		err = counter_free_res(dev, slave, vhcr->op_modifier, alop,
				       vhcr->in_param, &vhcr->out_param);

		err = xrcdn_free_res(dev, slave, vhcr->op_modifier, alop,
				     vhcr->in_param, &vhcr->out_param);
/* ugly but other choices are uglier */
static int mr_phys_mpt(struct mlx4_mpt_entry *mpt)
	return (be32_to_cpu(mpt->flags) >> 9) & 1;

static int mr_get_mtt_addr(struct mlx4_mpt_entry *mpt)
	return (int)be64_to_cpu(mpt->mtt_addr) & 0xfffffff8;

static int mr_get_mtt_size(struct mlx4_mpt_entry *mpt)
	return be32_to_cpu(mpt->mtt_sz);

static u32 mr_get_pd(struct mlx4_mpt_entry *mpt)
	return be32_to_cpu(mpt->pd_flags) & 0x00ffffff;

static int mr_is_fmr(struct mlx4_mpt_entry *mpt)
	return be32_to_cpu(mpt->pd_flags) & MLX4_MPT_PD_FLAG_FAST_REG;

static int mr_is_bind_enabled(struct mlx4_mpt_entry *mpt)
	return be32_to_cpu(mpt->flags) & MLX4_MPT_FLAG_BIND_ENABLE;

static int mr_is_region(struct mlx4_mpt_entry *mpt)
	return be32_to_cpu(mpt->flags) & MLX4_MPT_FLAG_REGION;

static int qp_get_mtt_addr(struct mlx4_qp_context *qpc)
	return be32_to_cpu(qpc->mtt_base_addr_l) & 0xfffffff8;

static int srq_get_mtt_addr(struct mlx4_srq_context *srqc)
	return be32_to_cpu(srqc->mtt_base_addr_l) & 0xfffffff8;
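
/*
 * Number of MTT entries a QP consumes: the SQ and RQ each take
 * 2^(log_size + log_stride + 4) bytes (the RQ is absent for
 * SRQ/RSS/XRC QPs); the total, shifted by the page offset, is rounded
 * up to a power-of-two page count.  For example, log_sq_size = 6,
 * log_sq_stride = 2 and page_shift = 12 give a 4KB SQ and, with no RQ
 * and zero page offset, a single MTT entry.
 */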
static int qp_get_mtt_size(struct mlx4_qp_context *qpc)
	int page_shift = (qpc->log_page_size & 0x3f) + 12;
	int log_sq_size = (qpc->sq_size_stride >> 3) & 0xf;
	int log_sq_stride = qpc->sq_size_stride & 7;
	int log_rq_size = (qpc->rq_size_stride >> 3) & 0xf;
	int log_rq_stride = qpc->rq_size_stride & 7;
	int srq = (be32_to_cpu(qpc->srqn) >> 24) & 1;
	int rss = (be32_to_cpu(qpc->flags) >> 13) & 1;
	int xrc = (be32_to_cpu(qpc->local_qpn) >> 23) & 1;

	int page_offset = (be32_to_cpu(qpc->params2) >> 6) & 0x3f;

	sq_size = 1 << (log_sq_size + log_sq_stride + 4);
	rq_size = (srq|rss|xrc) ? 0 : (1 << (log_rq_size + log_rq_stride + 4));
	total_mem = sq_size + rq_size;
		roundup_pow_of_two((total_mem + (page_offset << 6)) >>
static int check_mtt_range(struct mlx4_dev *dev, int slave, int start,
			   int size, struct res_mtt *mtt)
	int res_start = mtt->com.res_id;
	int res_size = (1 << mtt->order);

	if (start < res_start || start + size > res_start + res_size)
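
/*
 * SW2HW_MPT on behalf of a slave: validate that the MPT's MTT range
 * belongs to the slave, that the PD's slave bits match, and that VFs do
 * not use memory windows, FMR or bind-enabled regions, before passing
 * the command on to firmware.
 */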
int mlx4_SW2HW_MPT_wrapper(struct mlx4_dev *dev, int slave,
			   struct mlx4_vhcr *vhcr,
			   struct mlx4_cmd_mailbox *inbox,
			   struct mlx4_cmd_mailbox *outbox,
			   struct mlx4_cmd_info *cmd)
	int index = vhcr->in_modifier;
	struct res_mtt *mtt;
	struct res_mpt *mpt;
	int mtt_base = mr_get_mtt_addr(inbox->buf) / dev->caps.mtt_entry_sz;

	id = index & mpt_mask(dev);
	err = mr_res_start_move_to(dev, slave, id, RES_MPT_HW, &mpt);

	/* Disable memory windows for VFs. */
	if (!mr_is_region(inbox->buf)) {

	/* Make sure that the PD bits related to the slave id are zeros. */
	pd = mr_get_pd(inbox->buf);
	pd_slave = (pd >> 17) & 0x7f;
	if (pd_slave != 0 && pd_slave != slave) {

	if (mr_is_fmr(inbox->buf)) {
		/* FMR and Bind Enable are forbidden in slave devices. */
		if (mr_is_bind_enabled(inbox->buf)) {

		/* FMR and Memory Windows are also forbidden. */
		if (!mr_is_region(inbox->buf)) {

	phys = mr_phys_mpt(inbox->buf);

		err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);

		err = check_mtt_range(dev, slave, mtt_base,
				      mr_get_mtt_size(inbox->buf), mtt);

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);

		atomic_inc(&mtt->ref_count);
		put_res(dev, slave, mtt->com.res_id, RES_MTT);

	res_end_move(dev, slave, RES_MPT, id);

		put_res(dev, slave, mtt->com.res_id, RES_MTT);

	res_abort_move(dev, slave, RES_MPT, id);
int mlx4_HW2SW_MPT_wrapper(struct mlx4_dev *dev, int slave,
			   struct mlx4_vhcr *vhcr,
			   struct mlx4_cmd_mailbox *inbox,
			   struct mlx4_cmd_mailbox *outbox,
			   struct mlx4_cmd_info *cmd)
	int index = vhcr->in_modifier;
	struct res_mpt *mpt;

	id = index & mpt_mask(dev);
	err = mr_res_start_move_to(dev, slave, id, RES_MPT_MAPPED, &mpt);

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);

		atomic_dec(&mpt->mtt->ref_count);

	res_end_move(dev, slave, RES_MPT, id);

	res_abort_move(dev, slave, RES_MPT, id);

int mlx4_QUERY_MPT_wrapper(struct mlx4_dev *dev, int slave,
			   struct mlx4_vhcr *vhcr,
			   struct mlx4_cmd_mailbox *inbox,
			   struct mlx4_cmd_mailbox *outbox,
			   struct mlx4_cmd_info *cmd)
	int index = vhcr->in_modifier;
	struct res_mpt *mpt;

	id = index & mpt_mask(dev);
	err = get_res(dev, slave, id, RES_MPT, &mpt);

	if (mpt->com.from_state != RES_MPT_HW) {

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);

	put_res(dev, slave, id, RES_MPT);

static int qp_get_rcqn(struct mlx4_qp_context *qpc)
	return be32_to_cpu(qpc->cqn_recv) & 0xffffff;

static int qp_get_scqn(struct mlx4_qp_context *qpc)
	return be32_to_cpu(qpc->cqn_send) & 0xffffff;

static u32 qp_get_srqn(struct mlx4_qp_context *qpc)
	return be32_to_cpu(qpc->srqn) & 0x1ffffff;

static void adjust_proxy_tun_qkey(struct mlx4_dev *dev, struct mlx4_vhcr *vhcr,
				  struct mlx4_qp_context *context)
	u32 qpn = vhcr->in_modifier & 0xffffff;

	if (mlx4_get_parav_qkey(dev, qpn, &qkey))

	/* adjust qkey in qp context */
	context->qkey = cpu_to_be32(qkey);
int mlx4_RST2INIT_QP_wrapper(struct mlx4_dev *dev, int slave,
			     struct mlx4_vhcr *vhcr,
			     struct mlx4_cmd_mailbox *inbox,
			     struct mlx4_cmd_mailbox *outbox,
			     struct mlx4_cmd_info *cmd)
	int qpn = vhcr->in_modifier & 0x7fffff;
	struct res_mtt *mtt;

	struct mlx4_qp_context *qpc = inbox->buf + 8;
	int mtt_base = qp_get_mtt_addr(qpc) / dev->caps.mtt_entry_sz;
	int mtt_size = qp_get_mtt_size(qpc);

	int rcqn = qp_get_rcqn(qpc);
	int scqn = qp_get_scqn(qpc);
	u32 srqn = qp_get_srqn(qpc) & 0xffffff;
	int use_srq = (qp_get_srqn(qpc) >> 24) & 1;
	struct res_srq *srq;
	int local_qpn = be32_to_cpu(qpc->local_qpn) & 0xffffff;

	err = qp_res_start_move_to(dev, slave, qpn, RES_QP_HW, &qp, 0);
	qp->local_qpn = local_qpn;

	err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);

	err = check_mtt_range(dev, slave, mtt_base, mtt_size, mtt);

	err = get_res(dev, slave, rcqn, RES_CQ, &rcq);

		err = get_res(dev, slave, scqn, RES_CQ, &scq);

		err = get_res(dev, slave, srqn, RES_SRQ, &srq);

	adjust_proxy_tun_qkey(dev, vhcr, qpc);
	update_pkey_index(dev, slave, inbox);
	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);

	atomic_inc(&mtt->ref_count);

	atomic_inc(&rcq->ref_count);

	atomic_inc(&scq->ref_count);

		put_res(dev, slave, scqn, RES_CQ);

		atomic_inc(&srq->ref_count);
		put_res(dev, slave, srqn, RES_SRQ);

	put_res(dev, slave, rcqn, RES_CQ);
	put_res(dev, slave, mtt_base, RES_MTT);
	res_end_move(dev, slave, RES_QP, qpn);

		put_res(dev, slave, srqn, RES_SRQ);

		put_res(dev, slave, scqn, RES_CQ);

	put_res(dev, slave, rcqn, RES_CQ);

	put_res(dev, slave, mtt_base, RES_MTT);

	res_abort_move(dev, slave, RES_QP, qpn);
static int eq_get_mtt_addr(struct mlx4_eq_context *eqc)
	return be32_to_cpu(eqc->mtt_base_addr_l) & 0xfffffff8;

static int eq_get_mtt_size(struct mlx4_eq_context *eqc)
	int log_eq_size = eqc->log_eq_size & 0x1f;
	int page_shift = (eqc->log_page_size & 0x3f) + 12;

	if (log_eq_size + 5 < page_shift)

	return 1 << (log_eq_size + 5 - page_shift);

static int cq_get_mtt_addr(struct mlx4_cq_context *cqc)
	return be32_to_cpu(cqc->mtt_base_addr_l) & 0xfffffff8;

static int cq_get_mtt_size(struct mlx4_cq_context *cqc)
	int log_cq_size = (be32_to_cpu(cqc->logsize_usrpage) >> 24) & 0x1f;
	int page_shift = (cqc->log_page_size & 0x3f) + 12;

	if (log_cq_size + 5 < page_shift)

	return 1 << (log_cq_size + 5 - page_shift);
int mlx4_SW2HW_EQ_wrapper(struct mlx4_dev *dev, int slave,
			  struct mlx4_vhcr *vhcr,
			  struct mlx4_cmd_mailbox *inbox,
			  struct mlx4_cmd_mailbox *outbox,
			  struct mlx4_cmd_info *cmd)
	int eqn = vhcr->in_modifier;
	int res_id = (slave << 8) | eqn;
	struct mlx4_eq_context *eqc = inbox->buf;
	int mtt_base = eq_get_mtt_addr(eqc) / dev->caps.mtt_entry_sz;
	int mtt_size = eq_get_mtt_size(eqc);
	struct res_mtt *mtt;

	err = add_res_range(dev, slave, res_id, 1, RES_EQ, 0);

	err = eq_res_start_move_to(dev, slave, res_id, RES_EQ_HW, &eq);

	err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);

	err = check_mtt_range(dev, slave, mtt_base, mtt_size, mtt);

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);

	atomic_inc(&mtt->ref_count);
	put_res(dev, slave, mtt->com.res_id, RES_MTT);
	res_end_move(dev, slave, RES_EQ, res_id);

	put_res(dev, slave, mtt->com.res_id, RES_MTT);

	res_abort_move(dev, slave, RES_EQ, res_id);

	rem_res_range(dev, slave, res_id, 1, RES_EQ, 0);
static int get_containing_mtt(struct mlx4_dev *dev, int slave, int start,
			      int len, struct res_mtt **res)
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct res_mtt *mtt;

	spin_lock_irq(mlx4_tlock(dev));
	list_for_each_entry(mtt, &tracker->slave_list[slave].res_list[RES_MTT],
		if (!check_mtt_range(dev, slave, start, len, mtt)) {
			mtt->com.from_state = mtt->com.state;
			mtt->com.state = RES_MTT_BUSY;

	spin_unlock_irq(mlx4_tlock(dev));
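
/*
 * Reject QP transitions in which a slave asks for a GID index other
 * than 0: slaves have only gid index 0, so any other mgid_index in the
 * primary or alternate path is invalid.
 */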
static int verify_qp_parameters(struct mlx4_dev *dev,
				struct mlx4_cmd_mailbox *inbox,
				enum qp_transition transition, u8 slave)
	struct mlx4_qp_context *qp_ctx;
	enum mlx4_qp_optpar optpar;

	qp_ctx = inbox->buf + 8;
	qp_type = (be32_to_cpu(qp_ctx->flags) >> 16) & 0xff;
	optpar = be32_to_cpu(*(__be32 *) inbox->buf);

	switch (transition) {
	case QP_TRANS_INIT2RTR:
	case QP_TRANS_RTR2RTS:
	case QP_TRANS_RTS2RTS:
	case QP_TRANS_SQD2SQD:
	case QP_TRANS_SQD2RTS:
		if (slave != mlx4_master_func_num(dev)) {
			/* slaves have only gid index 0 */
			if (optpar & MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH)
				if (qp_ctx->pri_path.mgid_index)

			if (optpar & MLX4_QP_OPTPAR_ALT_ADDR_PATH)
				if (qp_ctx->alt_path.mgid_index)
		}
int mlx4_WRITE_MTT_wrapper(struct mlx4_dev *dev, int slave,
			   struct mlx4_vhcr *vhcr,
			   struct mlx4_cmd_mailbox *inbox,
			   struct mlx4_cmd_mailbox *outbox,
			   struct mlx4_cmd_info *cmd)
	struct mlx4_mtt mtt;
	__be64 *page_list = inbox->buf;
	u64 *pg_list = (u64 *)page_list;

	struct res_mtt *rmtt = NULL;
	int start = be64_to_cpu(page_list[0]);
	int npages = vhcr->in_modifier;

	err = get_containing_mtt(dev, slave, start, npages, &rmtt);

	/* Call the SW implementation of write_mtt:
	 * - Prepare a dummy mtt struct
	 * - Translate inbox contents to simple addresses in host endianness */
	mtt.offset = 0; /* TBD this is broken but I don't handle it since
			 * we don't really use it */

	for (i = 0; i < npages; ++i)
		pg_list[i + 2] = (be64_to_cpu(page_list[i + 2]) & ~1ULL);

	err = __mlx4_write_mtt(dev, &mtt, be64_to_cpu(page_list[0]), npages,
			       ((u64 *)page_list + 2));

		put_res(dev, slave, rmtt->com.res_id, RES_MTT);
int mlx4_HW2SW_EQ_wrapper(struct mlx4_dev *dev, int slave,
			  struct mlx4_vhcr *vhcr,
			  struct mlx4_cmd_mailbox *inbox,
			  struct mlx4_cmd_mailbox *outbox,
			  struct mlx4_cmd_info *cmd)
	int eqn = vhcr->in_modifier;
	int res_id = eqn | (slave << 8);

	err = eq_res_start_move_to(dev, slave, res_id, RES_EQ_RESERVED, &eq);

	err = get_res(dev, slave, eq->mtt->com.res_id, RES_MTT, NULL);

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);

	atomic_dec(&eq->mtt->ref_count);
	put_res(dev, slave, eq->mtt->com.res_id, RES_MTT);
	res_end_move(dev, slave, RES_EQ, res_id);
	rem_res_range(dev, slave, res_id, 1, RES_EQ, 0);

	put_res(dev, slave, eq->mtt->com.res_id, RES_MTT);

	res_abort_move(dev, slave, RES_EQ, res_id);
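
/*
 * Forward an asynchronous event to a slave: if the slave registered an
 * EQ for this event type, copy the EQE into a mailbox (patching in the
 * slave's command token for command events) and ask firmware, via the
 * GEN_EQE command, to generate the event on the slave's EQ.
 */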
int mlx4_GEN_EQE(struct mlx4_dev *dev, int slave, struct mlx4_eqe *eqe)
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_slave_event_eq_info *event_eq;
	struct mlx4_cmd_mailbox *mailbox;
	u32 in_modifier = 0;

	if (!priv->mfunc.master.slave_state)

	event_eq = &priv->mfunc.master.slave_state[slave].event_eq[eqe->type];

	/* Create the event only if the slave is registered */
	if (event_eq->eqn < 0)

	mutex_lock(&priv->mfunc.master.gen_eqe_mutex[slave]);
	res_id = (slave << 8) | event_eq->eqn;
	err = get_res(dev, slave, res_id, RES_EQ, &req);

	if (req->com.from_state != RES_EQ_HW) {

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox)) {
		err = PTR_ERR(mailbox);

	if (eqe->type == MLX4_EVENT_TYPE_CMD) {
		eqe->event.cmd.token = cpu_to_be16(event_eq->token);

	memcpy(mailbox->buf, (u8 *) eqe, 28);

	in_modifier = (slave & 0xff) | ((event_eq->eqn & 0xff) << 16);

	err = mlx4_cmd(dev, mailbox->dma, in_modifier, 0,
		       MLX4_CMD_GEN_EQE, MLX4_CMD_TIME_CLASS_B,

	put_res(dev, slave, res_id, RES_EQ);
	mutex_unlock(&priv->mfunc.master.gen_eqe_mutex[slave]);
	mlx4_free_cmd_mailbox(dev, mailbox);

	put_res(dev, slave, res_id, RES_EQ);

	mutex_unlock(&priv->mfunc.master.gen_eqe_mutex[slave]);
int mlx4_QUERY_EQ_wrapper(struct mlx4_dev *dev, int slave,
			  struct mlx4_vhcr *vhcr,
			  struct mlx4_cmd_mailbox *inbox,
			  struct mlx4_cmd_mailbox *outbox,
			  struct mlx4_cmd_info *cmd)
	int eqn = vhcr->in_modifier;
	int res_id = eqn | (slave << 8);

	err = get_res(dev, slave, res_id, RES_EQ, &eq);

	if (eq->com.from_state != RES_EQ_HW) {

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);

	put_res(dev, slave, res_id, RES_EQ);

int mlx4_SW2HW_CQ_wrapper(struct mlx4_dev *dev, int slave,
			  struct mlx4_vhcr *vhcr,
			  struct mlx4_cmd_mailbox *inbox,
			  struct mlx4_cmd_mailbox *outbox,
			  struct mlx4_cmd_info *cmd)
	int cqn = vhcr->in_modifier;
	struct mlx4_cq_context *cqc = inbox->buf;
	int mtt_base = cq_get_mtt_addr(cqc) / dev->caps.mtt_entry_sz;
	struct res_mtt *mtt;

	err = cq_res_start_move_to(dev, slave, cqn, RES_CQ_HW, &cq);

	err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);

	err = check_mtt_range(dev, slave, mtt_base, cq_get_mtt_size(cqc), mtt);

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);

	atomic_inc(&mtt->ref_count);
	put_res(dev, slave, mtt->com.res_id, RES_MTT);
	res_end_move(dev, slave, RES_CQ, cqn);

	put_res(dev, slave, mtt->com.res_id, RES_MTT);

	res_abort_move(dev, slave, RES_CQ, cqn);

int mlx4_HW2SW_CQ_wrapper(struct mlx4_dev *dev, int slave,
			  struct mlx4_vhcr *vhcr,
			  struct mlx4_cmd_mailbox *inbox,
			  struct mlx4_cmd_mailbox *outbox,
			  struct mlx4_cmd_info *cmd)
	int cqn = vhcr->in_modifier;

	err = cq_res_start_move_to(dev, slave, cqn, RES_CQ_ALLOCATED, &cq);

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);

	atomic_dec(&cq->mtt->ref_count);
	res_end_move(dev, slave, RES_CQ, cqn);

	res_abort_move(dev, slave, RES_CQ, cqn);

int mlx4_QUERY_CQ_wrapper(struct mlx4_dev *dev, int slave,
			  struct mlx4_vhcr *vhcr,
			  struct mlx4_cmd_mailbox *inbox,
			  struct mlx4_cmd_mailbox *outbox,
			  struct mlx4_cmd_info *cmd)
	int cqn = vhcr->in_modifier;

	err = get_res(dev, slave, cqn, RES_CQ, &cq);

	if (cq->com.from_state != RES_CQ_HW)

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);

	put_res(dev, slave, cqn, RES_CQ);
static int handle_resize(struct mlx4_dev *dev, int slave,
			 struct mlx4_vhcr *vhcr,
			 struct mlx4_cmd_mailbox *inbox,
			 struct mlx4_cmd_mailbox *outbox,
			 struct mlx4_cmd_info *cmd,
	struct res_mtt *orig_mtt;
	struct res_mtt *mtt;
	struct mlx4_cq_context *cqc = inbox->buf;
	int mtt_base = cq_get_mtt_addr(cqc) / dev->caps.mtt_entry_sz;

	err = get_res(dev, slave, cq->mtt->com.res_id, RES_MTT, &orig_mtt);

	if (orig_mtt != cq->mtt) {

	err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);

	err = check_mtt_range(dev, slave, mtt_base, cq_get_mtt_size(cqc), mtt);

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);

	atomic_dec(&orig_mtt->ref_count);
	put_res(dev, slave, orig_mtt->com.res_id, RES_MTT);
	atomic_inc(&mtt->ref_count);
	put_res(dev, slave, mtt->com.res_id, RES_MTT);

	put_res(dev, slave, mtt->com.res_id, RES_MTT);

	put_res(dev, slave, orig_mtt->com.res_id, RES_MTT);

int mlx4_MODIFY_CQ_wrapper(struct mlx4_dev *dev, int slave,
			   struct mlx4_vhcr *vhcr,
			   struct mlx4_cmd_mailbox *inbox,
			   struct mlx4_cmd_mailbox *outbox,
			   struct mlx4_cmd_info *cmd)
	int cqn = vhcr->in_modifier;

	err = get_res(dev, slave, cqn, RES_CQ, &cq);

	if (cq->com.from_state != RES_CQ_HW)

	if (vhcr->op_modifier == 0) {
		err = handle_resize(dev, slave, vhcr, inbox, outbox, cmd, cq);

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);

	put_res(dev, slave, cqn, RES_CQ);
static int srq_get_mtt_size(struct mlx4_srq_context *srqc)
	int log_srq_size = (be32_to_cpu(srqc->state_logsize_srqn) >> 24) & 0xf;
	int log_rq_stride = srqc->logstride & 7;
	int page_shift = (srqc->log_page_size & 0x3f) + 12;

	if (log_srq_size + log_rq_stride + 4 < page_shift)

	return 1 << (log_srq_size + log_rq_stride + 4 - page_shift);

int mlx4_SW2HW_SRQ_wrapper(struct mlx4_dev *dev, int slave,
			   struct mlx4_vhcr *vhcr,
			   struct mlx4_cmd_mailbox *inbox,
			   struct mlx4_cmd_mailbox *outbox,
			   struct mlx4_cmd_info *cmd)
	int srqn = vhcr->in_modifier;
	struct res_mtt *mtt;
	struct res_srq *srq;
	struct mlx4_srq_context *srqc = inbox->buf;
	int mtt_base = srq_get_mtt_addr(srqc) / dev->caps.mtt_entry_sz;

	if (srqn != (be32_to_cpu(srqc->state_logsize_srqn) & 0xffffff))

	err = srq_res_start_move_to(dev, slave, srqn, RES_SRQ_HW, &srq);

	err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);

	err = check_mtt_range(dev, slave, mtt_base, srq_get_mtt_size(srqc),

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);

	atomic_inc(&mtt->ref_count);
	put_res(dev, slave, mtt->com.res_id, RES_MTT);
	res_end_move(dev, slave, RES_SRQ, srqn);

	put_res(dev, slave, mtt->com.res_id, RES_MTT);

	res_abort_move(dev, slave, RES_SRQ, srqn);
int mlx4_HW2SW_SRQ_wrapper(struct mlx4_dev *dev, int slave,
			   struct mlx4_vhcr *vhcr,
			   struct mlx4_cmd_mailbox *inbox,
			   struct mlx4_cmd_mailbox *outbox,
			   struct mlx4_cmd_info *cmd)
	int srqn = vhcr->in_modifier;
	struct res_srq *srq;

	err = srq_res_start_move_to(dev, slave, srqn, RES_SRQ_ALLOCATED, &srq);

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);

	atomic_dec(&srq->mtt->ref_count);

		atomic_dec(&srq->cq->ref_count);
	res_end_move(dev, slave, RES_SRQ, srqn);

	res_abort_move(dev, slave, RES_SRQ, srqn);

int mlx4_QUERY_SRQ_wrapper(struct mlx4_dev *dev, int slave,
			   struct mlx4_vhcr *vhcr,
			   struct mlx4_cmd_mailbox *inbox,
			   struct mlx4_cmd_mailbox *outbox,
			   struct mlx4_cmd_info *cmd)
	int srqn = vhcr->in_modifier;
	struct res_srq *srq;

	err = get_res(dev, slave, srqn, RES_SRQ, &srq);

	if (srq->com.from_state != RES_SRQ_HW) {

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);

	put_res(dev, slave, srqn, RES_SRQ);

int mlx4_ARM_SRQ_wrapper(struct mlx4_dev *dev, int slave,
			 struct mlx4_vhcr *vhcr,
			 struct mlx4_cmd_mailbox *inbox,
			 struct mlx4_cmd_mailbox *outbox,
			 struct mlx4_cmd_info *cmd)
	int srqn = vhcr->in_modifier;
	struct res_srq *srq;

	err = get_res(dev, slave, srqn, RES_SRQ, &srq);

	if (srq->com.from_state != RES_SRQ_HW) {

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);

	put_res(dev, slave, srqn, RES_SRQ);

int mlx4_GEN_QP_wrapper(struct mlx4_dev *dev, int slave,
			struct mlx4_vhcr *vhcr,
			struct mlx4_cmd_mailbox *inbox,
			struct mlx4_cmd_mailbox *outbox,
			struct mlx4_cmd_info *cmd)
	int qpn = vhcr->in_modifier & 0x7fffff;

	err = get_res(dev, slave, qpn, RES_QP, &qp);

	if (qp->com.from_state != RES_QP_HW) {

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);

	put_res(dev, slave, qpn, RES_QP);
int mlx4_INIT2INIT_QP_wrapper(struct mlx4_dev *dev, int slave,
			      struct mlx4_vhcr *vhcr,
			      struct mlx4_cmd_mailbox *inbox,
			      struct mlx4_cmd_mailbox *outbox,
			      struct mlx4_cmd_info *cmd)
	struct mlx4_qp_context *context = inbox->buf + 8;
	adjust_proxy_tun_qkey(dev, vhcr, context);
	update_pkey_index(dev, slave, inbox);
	return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);

int mlx4_INIT2RTR_QP_wrapper(struct mlx4_dev *dev, int slave,
			     struct mlx4_vhcr *vhcr,
			     struct mlx4_cmd_mailbox *inbox,
			     struct mlx4_cmd_mailbox *outbox,
			     struct mlx4_cmd_info *cmd)
	struct mlx4_qp_context *qpc = inbox->buf + 8;

	err = verify_qp_parameters(dev, inbox, QP_TRANS_INIT2RTR, slave);

	update_pkey_index(dev, slave, inbox);
	update_gid(dev, inbox, (u8)slave);
	adjust_proxy_tun_qkey(dev, vhcr, qpc);
	err = update_vport_qp_param(dev, inbox, slave);

	return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);

int mlx4_RTR2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
			    struct mlx4_vhcr *vhcr,
			    struct mlx4_cmd_mailbox *inbox,
			    struct mlx4_cmd_mailbox *outbox,
			    struct mlx4_cmd_info *cmd)
	struct mlx4_qp_context *context = inbox->buf + 8;

	err = verify_qp_parameters(dev, inbox, QP_TRANS_RTR2RTS, slave);

	update_pkey_index(dev, slave, inbox);
	update_gid(dev, inbox, (u8)slave);
	adjust_proxy_tun_qkey(dev, vhcr, context);
	return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);

int mlx4_RTS2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
			    struct mlx4_vhcr *vhcr,
			    struct mlx4_cmd_mailbox *inbox,
			    struct mlx4_cmd_mailbox *outbox,
			    struct mlx4_cmd_info *cmd)
	struct mlx4_qp_context *context = inbox->buf + 8;

	err = verify_qp_parameters(dev, inbox, QP_TRANS_RTS2RTS, slave);

	update_pkey_index(dev, slave, inbox);
	update_gid(dev, inbox, (u8)slave);
	adjust_proxy_tun_qkey(dev, vhcr, context);
	return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);

int mlx4_SQERR2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
			      struct mlx4_vhcr *vhcr,
			      struct mlx4_cmd_mailbox *inbox,
			      struct mlx4_cmd_mailbox *outbox,
			      struct mlx4_cmd_info *cmd)
	struct mlx4_qp_context *context = inbox->buf + 8;
	adjust_proxy_tun_qkey(dev, vhcr, context);
	return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);

int mlx4_SQD2SQD_QP_wrapper(struct mlx4_dev *dev, int slave,
			    struct mlx4_vhcr *vhcr,
			    struct mlx4_cmd_mailbox *inbox,
			    struct mlx4_cmd_mailbox *outbox,
			    struct mlx4_cmd_info *cmd)
	struct mlx4_qp_context *context = inbox->buf + 8;

	err = verify_qp_parameters(dev, inbox, QP_TRANS_SQD2SQD, slave);

	adjust_proxy_tun_qkey(dev, vhcr, context);
	update_gid(dev, inbox, (u8)slave);
	update_pkey_index(dev, slave, inbox);
	return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);

int mlx4_SQD2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
			    struct mlx4_vhcr *vhcr,
			    struct mlx4_cmd_mailbox *inbox,
			    struct mlx4_cmd_mailbox *outbox,
			    struct mlx4_cmd_info *cmd)
	struct mlx4_qp_context *context = inbox->buf + 8;

	err = verify_qp_parameters(dev, inbox, QP_TRANS_SQD2RTS, slave);

	adjust_proxy_tun_qkey(dev, vhcr, context);
	update_gid(dev, inbox, (u8)slave);
	update_pkey_index(dev, slave, inbox);
	return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);

int mlx4_2RST_QP_wrapper(struct mlx4_dev *dev, int slave,
			 struct mlx4_vhcr *vhcr,
			 struct mlx4_cmd_mailbox *inbox,
			 struct mlx4_cmd_mailbox *outbox,
			 struct mlx4_cmd_info *cmd)
	int qpn = vhcr->in_modifier & 0x7fffff;

	err = qp_res_start_move_to(dev, slave, qpn, RES_QP_MAPPED, &qp, 0);

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);

	atomic_dec(&qp->mtt->ref_count);
	atomic_dec(&qp->rcq->ref_count);
	atomic_dec(&qp->scq->ref_count);
		atomic_dec(&qp->srq->ref_count);
	res_end_move(dev, slave, RES_QP, qpn);

	res_abort_move(dev, slave, RES_QP, qpn);
static struct res_gid *find_gid(struct mlx4_dev *dev, int slave,
				struct res_qp *rqp, u8 *gid)
	struct res_gid *res;

	list_for_each_entry(res, &rqp->mcg_list, list) {
		if (!memcmp(res->gid, gid, 16))

static int add_mcg_res(struct mlx4_dev *dev, int slave, struct res_qp *rqp,
		       u8 *gid, enum mlx4_protocol prot,
		       enum mlx4_steer_type steer, u64 reg_id)
	struct res_gid *res;

	res = kzalloc(sizeof *res, GFP_KERNEL);

	spin_lock_irq(&rqp->mcg_spl);
	if (find_gid(dev, slave, rqp, gid)) {

		memcpy(res->gid, gid, 16);

		res->reg_id = reg_id;
		list_add_tail(&res->list, &rqp->mcg_list);

	spin_unlock_irq(&rqp->mcg_spl);

static int rem_mcg_res(struct mlx4_dev *dev, int slave, struct res_qp *rqp,
		       u8 *gid, enum mlx4_protocol prot,
		       enum mlx4_steer_type steer, u64 *reg_id)
	struct res_gid *res;

	spin_lock_irq(&rqp->mcg_spl);
	res = find_gid(dev, slave, rqp, gid);
	if (!res || res->prot != prot || res->steer != steer)

		*reg_id = res->reg_id;
		list_del(&res->list);

	spin_unlock_irq(&rqp->mcg_spl);
static int qp_attach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
		     int block_loopback, enum mlx4_protocol prot,
		     enum mlx4_steer_type type, u64 *reg_id)
	switch (dev->caps.steering_mode) {
	case MLX4_STEERING_MODE_DEVICE_MANAGED:
		return mlx4_trans_to_dmfs_attach(dev, qp, gid, gid[5],
						 block_loopback, prot,
	case MLX4_STEERING_MODE_B0:
		return mlx4_qp_attach_common(dev, qp, gid,
					     block_loopback, prot, type);

static int qp_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
		     enum mlx4_protocol prot, enum mlx4_steer_type type,
	switch (dev->caps.steering_mode) {
	case MLX4_STEERING_MODE_DEVICE_MANAGED:
		return mlx4_flow_detach(dev, reg_id);
	case MLX4_STEERING_MODE_B0:
		return mlx4_qp_detach_common(dev, qp, gid, prot, type);
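
/*
 * Multicast attach/detach for a slave QP: op_modifier selects attach
 * (non-zero) or detach.  Each attached (gid, prot, steer, reg_id) tuple
 * is recorded on the QP's mcg_list so the rule can be detached again,
 * e.g. when the slave's resources are cleaned up.
 */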
3061 int mlx4_QP_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
3062 struct mlx4_vhcr *vhcr,
3063 struct mlx4_cmd_mailbox *inbox,
3064 struct mlx4_cmd_mailbox *outbox,
3065 struct mlx4_cmd_info *cmd)
3066 {
3067 struct mlx4_qp qp; /* dummy for calling attach/detach */
3068 u8 *gid = inbox->buf;
3069 enum mlx4_protocol prot = (vhcr->in_modifier >> 28) & 0x7;
3070 int err;
3071 int qpn;
3072 struct res_qp *rqp;
3073 u64 reg_id = 0;
3074 int attach = vhcr->op_modifier;
3075 int block_loopback = vhcr->in_modifier >> 31;
3076 u8 steer_type_mask = 2;
3077 enum mlx4_steer_type type = (gid[7] & steer_type_mask) >> 1;
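/*
 * Editorial note on the decoding above: in_modifier carries the QP number
 * in bits 23:0, the protocol in bits 30:28 and the block-loopback flag in
 * bit 31; op_modifier distinguishes attach (non-zero) from detach (zero),
 * and bit 1 of gid[7] carries the steering type.
 */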
3079 qpn = vhcr->in_modifier & 0xffffff;
3080 err = get_res(dev, slave, qpn, RES_QP, &rqp);
3081 if (err)
3082 return err;
3084 qp.qpn = qpn;
3085 if (attach) {
3086 err = qp_attach(dev, &qp, gid, block_loopback, prot,
3087 type, &reg_id);
3088 if (err) {
3089 pr_err("Failed to attach rule to qp 0x%x\n", qpn);
3090 goto ex_put;
3091 }
3092 err = add_mcg_res(dev, slave, rqp, gid, prot, type, reg_id);
3093 if (err)
3094 goto ex_detach;
3095 } else {
3096 err = rem_mcg_res(dev, slave, rqp, gid, prot, type, &reg_id);
3097 if (err)
3098 goto ex_put;
3100 err = qp_detach(dev, &qp, gid, prot, type, reg_id);
3102 pr_err("Fail to detach rule from qp 0x%x reg_id = 0x%llx\n",
3105 put_res(dev, slave, qpn, RES_QP);
3109 qp_detach(dev, &qp, gid, prot, type, reg_id);
3111 put_res(dev, slave, qpn, RES_QP);
3115 /*
3116  * MAC validation for Flow Steering rules.
3117  * VF can attach rules only with a MAC address which is assigned to it.
3118  */
3119 static int validate_eth_header_mac(int slave, struct _rule_hw *eth_header,
3120 struct list_head *rlist)
3121 {
3122 struct mac_res *res, *tmp;
3123 __be64 be_mac;
3125 /* make sure it isn't multicast or broadcast MAC */
3126 if (!is_multicast_ether_addr(eth_header->eth.dst_mac) &&
3127 !is_broadcast_ether_addr(eth_header->eth.dst_mac)) {
3128 list_for_each_entry_safe(res, tmp, rlist, list) {
3129 be_mac = cpu_to_be64(res->mac << 16);
3130 if (!memcmp(&be_mac, eth_header->eth.dst_mac, ETH_ALEN))
3131 return 0;
3132 }
3133 pr_err("MAC %pM doesn't belong to VF %d, steering rule rejected\n",
3134 eth_header->eth.dst_mac, slave);
3135 return -EINVAL;
3136 }
3137 return 0;
3138 }
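/*
 * Editorial sketch of the big-endian trick used above, as a standalone
 * helper (illustrative, not part of the original driver).  A tracker MAC
 * lives in the low 48 bits of a u64; shifting left by 16 and converting
 * to big endian lines the six MAC bytes up at the start of the __be64,
 * so the first ETH_ALEN bytes compare directly against the wire-format
 * dst_mac field.
 */
static bool __maybe_unused mac_matches_wire(u64 mac, const u8 *dst_mac)
{
        __be64 be_mac = cpu_to_be64(mac << 16);

        return !memcmp(&be_mac, dst_mac, ETH_ALEN);
}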
3140 /*
3141  * In case of missing eth header, append eth header with a MAC address
3142  * assigned to the VF.
3143  */
3144 static int add_eth_header(struct mlx4_dev *dev, int slave,
3145 struct mlx4_cmd_mailbox *inbox,
3146 struct list_head *rlist, int header_id)
3147 {
3148 struct mac_res *res, *tmp;
3149 u8 port;
3150 struct mlx4_net_trans_rule_hw_ctrl *ctrl;
3151 struct mlx4_net_trans_rule_hw_eth *eth_header;
3152 struct mlx4_net_trans_rule_hw_ipv4 *ip_header;
3153 struct mlx4_net_trans_rule_hw_tcp_udp *l4_header;
3154 __be64 be_mac = 0;
3155 __be64 mac_msk = cpu_to_be64(MLX4_MAC_MASK << 16);
3157 ctrl = (struct mlx4_net_trans_rule_hw_ctrl *)inbox->buf;
3158 port = ctrl->port;
3159 eth_header = (struct mlx4_net_trans_rule_hw_eth *)(ctrl + 1);
3161 /* Clear a space in the inbox for eth header */
3162 switch (header_id) {
3163 case MLX4_NET_TRANS_RULE_ID_IPV4:
3164 ip_header =
3165 (struct mlx4_net_trans_rule_hw_ipv4 *)(eth_header + 1);
3166 memmove(ip_header, eth_header,
3167 sizeof(*ip_header) + sizeof(*l4_header));
3168 break;
3169 case MLX4_NET_TRANS_RULE_ID_TCP:
3170 case MLX4_NET_TRANS_RULE_ID_UDP:
3171 l4_header = (struct mlx4_net_trans_rule_hw_tcp_udp *)
3172 (eth_header + 1);
3173 memmove(l4_header, eth_header, sizeof(*l4_header));
3174 break;
3175 default:
3176 return -EINVAL;
3177 }
3178 list_for_each_entry_safe(res, tmp, rlist, list) {
3179 if (port == res->port) {
3180 be_mac = cpu_to_be64(res->mac << 16);
3181 break;
3182 }
3183 }
3184 if (!be_mac) {
3185 pr_err("Failed adding eth header to FS rule, can't find matching MAC for port %d\n",
3186 port);
3187 return -EINVAL;
3188 }
3190 memset(eth_header, 0, sizeof(*eth_header));
3191 eth_header->size = sizeof(*eth_header) >> 2;
3192 eth_header->id = cpu_to_be16(__sw_id_hw[MLX4_NET_TRANS_RULE_ID_ETH]);
3193 memcpy(eth_header->dst_mac, &be_mac, ETH_ALEN);
3194 memcpy(eth_header->dst_mac_msk, &mac_msk, ETH_ALEN);
3196 return 0;
3197 }
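/*
 * Editorial sketch of the transformation performed by add_eth_header()
 * for an IPv4 rule (each box is one hardware spec in the mailbox):
 *
 *   before:  [ctrl][ipv4][tcp/udp]
 *   after:   [ctrl][eth][ipv4][tcp/udp]
 *
 * The L3/L4 specs are shifted up by one eth spec and the vacated space is
 * filled with an L2 spec matching the VF's MAC exactly
 * (dst_mac_msk = ff:ff:ff:ff:ff:ff); the caller then grows the command's
 * in_modifier by the eth spec size in dwords.
 */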
3200 int mlx4_QP_FLOW_STEERING_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
3201 struct mlx4_vhcr *vhcr,
3202 struct mlx4_cmd_mailbox *inbox,
3203 struct mlx4_cmd_mailbox *outbox,
3204 struct mlx4_cmd_info *cmd)
3205 {
3207 struct mlx4_priv *priv = mlx4_priv(dev);
3208 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
3209 struct list_head *rlist = &tracker->slave_list[slave].res_list[RES_MAC];
3210 int err;
3211 int qpn;
3212 struct res_qp *rqp;
3213 struct mlx4_net_trans_rule_hw_ctrl *ctrl;
3214 struct _rule_hw *rule_header;
3215 int header_id;
3217 if (dev->caps.steering_mode !=
3218 MLX4_STEERING_MODE_DEVICE_MANAGED)
3219 return -EOPNOTSUPP;
3221 ctrl = (struct mlx4_net_trans_rule_hw_ctrl *)inbox->buf;
3222 qpn = be32_to_cpu(ctrl->qpn) & 0xffffff;
3223 err = get_res(dev, slave, qpn, RES_QP, &rqp);
3224 if (err) {
3225 pr_err("Steering rule with qpn 0x%x rejected\n", qpn);
3226 return err;
3227 }
3228 rule_header = (struct _rule_hw *)(ctrl + 1);
3229 header_id = map_hw_to_sw_id(be16_to_cpu(rule_header->id));
3231 switch (header_id) {
3232 case MLX4_NET_TRANS_RULE_ID_ETH:
3233 if (validate_eth_header_mac(slave, rule_header, rlist)) {
3234 err = -EINVAL;
3235 goto err_put;
3236 }
3237 break;
3238 case MLX4_NET_TRANS_RULE_ID_IB:
3239 break;
3240 case MLX4_NET_TRANS_RULE_ID_IPV4:
3241 case MLX4_NET_TRANS_RULE_ID_TCP:
3242 case MLX4_NET_TRANS_RULE_ID_UDP:
3243 pr_warn("Can't attach FS rule without L2 headers, adding L2 header.\n");
3244 if (add_eth_header(dev, slave, inbox, rlist, header_id)) {
3245 err = -EINVAL;
3246 goto err_put;
3247 }
3248 vhcr->in_modifier +=
3249 sizeof(struct mlx4_net_trans_rule_hw_eth) >> 2;
3250 break;
3251 default:
3252 pr_err("Corrupted mailbox\n");
3253 err = -EINVAL;
3254 goto err_put;
3255 }
3257 err = mlx4_cmd_imm(dev, inbox->dma, &vhcr->out_param,
3258 vhcr->in_modifier, 0,
3259 MLX4_QP_FLOW_STEERING_ATTACH, MLX4_CMD_TIME_CLASS_A,
3260 MLX4_CMD_NATIVE);
3261 if (err)
3262 goto err_put;
3264 err = add_res_range(dev, slave, vhcr->out_param, 1, RES_FS_RULE, qpn);
3265 if (err) {
3266 mlx4_err(dev, "Failed to add flow steering resources\n");
3267 /* detach rule */
3268 mlx4_cmd(dev, vhcr->out_param, 0, 0,
3269 MLX4_QP_FLOW_STEERING_DETACH, MLX4_CMD_TIME_CLASS_A,
3270 MLX4_CMD_NATIVE);
3271 goto err_put;
3272 }
3273 atomic_inc(&rqp->ref_count);
3274 err_put:
3275 put_res(dev, slave, qpn, RES_QP);
3276 return err;
3277 }
3279 int mlx4_QP_FLOW_STEERING_DETACH_wrapper(struct mlx4_dev *dev, int slave,
3280 struct mlx4_vhcr *vhcr,
3281 struct mlx4_cmd_mailbox *inbox,
3282 struct mlx4_cmd_mailbox *outbox,
3283 struct mlx4_cmd_info *cmd)
3284 {
3285 int err;
3286 struct res_qp *rqp;
3287 struct res_fs_rule *rrule;
3289 if (dev->caps.steering_mode !=
3290 MLX4_STEERING_MODE_DEVICE_MANAGED)
3291 return -EOPNOTSUPP;
3293 err = get_res(dev, slave, vhcr->in_param, RES_FS_RULE, &rrule);
3294 if (err)
3295 return err;
3296 /* Release the rule from busy state before removal */
3297 put_res(dev, slave, vhcr->in_param, RES_FS_RULE);
3298 err = get_res(dev, slave, rrule->qpn, RES_QP, &rqp);
3299 if (err)
3300 return err;
3302 err = rem_res_range(dev, slave, vhcr->in_param, 1, RES_FS_RULE, 0);
3303 if (err) {
3304 mlx4_err(dev, "Failed to remove flow steering resources\n");
3305 goto out;
3306 }
3308 err = mlx4_cmd(dev, vhcr->in_param, 0, 0,
3309 MLX4_QP_FLOW_STEERING_DETACH, MLX4_CMD_TIME_CLASS_A,
3310 MLX4_CMD_NATIVE);
3311 if (!err)
3312 atomic_dec(&rqp->ref_count);
3313 out:
3314 put_res(dev, slave, rrule->qpn, RES_QP);
3315 return err;
3316 }
3318 enum {
3319 BUSY_MAX_RETRIES = 10
3320 };
3322 int mlx4_QUERY_IF_STAT_wrapper(struct mlx4_dev *dev, int slave,
3323 struct mlx4_vhcr *vhcr,
3324 struct mlx4_cmd_mailbox *inbox,
3325 struct mlx4_cmd_mailbox *outbox,
3326 struct mlx4_cmd_info *cmd)
3327 {
3328 int err;
3329 int index = vhcr->in_modifier & 0xffff;
3331 err = get_res(dev, slave, index, RES_COUNTER, NULL);
3332 if (err)
3333 return err;
3335 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3336 put_res(dev, slave, index, RES_COUNTER);
3337 return err;
3338 }
3340 static void detach_qp(struct mlx4_dev *dev, int slave, struct res_qp *rqp)
3341 {
3342 struct res_gid *rgid;
3343 struct res_gid *tmp;
3344 struct mlx4_qp qp; /* dummy for calling attach/detach */
3346 list_for_each_entry_safe(rgid, tmp, &rqp->mcg_list, list) {
3347 switch (dev->caps.steering_mode) {
3348 case MLX4_STEERING_MODE_DEVICE_MANAGED:
3349 mlx4_flow_detach(dev, rgid->reg_id);
3350 break;
3351 case MLX4_STEERING_MODE_B0:
3352 qp.qpn = rqp->local_qpn;
3353 (void) mlx4_qp_detach_common(dev, &qp, rgid->gid,
3354 rgid->prot, rgid->steer);
3355 break;
3356 }
3357 list_del(&rgid->list);
3358 kfree(rgid);
3359 }
3360 }
3362 static int _move_all_busy(struct mlx4_dev *dev, int slave,
3363 enum mlx4_resource type, int print)
3364 {
3365 struct mlx4_priv *priv = mlx4_priv(dev);
3366 struct mlx4_resource_tracker *tracker =
3367 &priv->mfunc.master.res_tracker;
3368 struct list_head *rlist = &tracker->slave_list[slave].res_list[type];
3369 struct res_common *r;
3370 struct res_common *tmp;
3371 int busy;
3373 busy = 0;
3374 spin_lock_irq(mlx4_tlock(dev));
3375 list_for_each_entry_safe(r, tmp, rlist, list) {
3376 if (r->owner == slave) {
3377 if (!r->removing) {
3378 if (r->state == RES_ANY_BUSY) {
3379 if (print)
3380 mlx4_dbg(dev,
3381 "%s id 0x%llx is busy\n",
3382 ResourceType(type),
3383 r->res_id);
3384 ++busy;
3385 } else {
3386 r->from_state = r->state;
3387 r->state = RES_ANY_BUSY;
3388 r->removing = 1;
3389 }
3390 }
3391 }
3392 }
3393 spin_unlock_irq(mlx4_tlock(dev));
3395 return busy;
3396 }
3398 static int move_all_busy(struct mlx4_dev *dev, int slave,
3399 enum mlx4_resource type)
3400 {
3401 unsigned long begin;
3402 int busy;
3404 begin = jiffies;
3405 do {
3406 busy = _move_all_busy(dev, slave, type, 0);
3407 if (time_after(jiffies, begin + 5 * HZ))
3408 break;
3409 if (busy)
3410 cond_resched();
3411 } while (busy);
3413 if (busy)
3414 busy = _move_all_busy(dev, slave, type, 1);
3416 return busy;
3417 }
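/*
 * Editorial note: each rem_slave_*() function below follows the same
 * pattern -- move_all_busy() first fences the slave's resources (retrying
 * for up to five seconds), then every entry is unwound through its state
 * machine (e.g. for QPs: RES_QP_HW -> RES_QP_MAPPED -> RES_QP_RESERVED ->
 * freed), issuing the same firmware commands a well-behaved guest would
 * have issued itself.
 */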
3418 static void rem_slave_qps(struct mlx4_dev *dev, int slave)
3419 {
3420 struct mlx4_priv *priv = mlx4_priv(dev);
3421 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
3422 struct list_head *qp_list =
3423 &tracker->slave_list[slave].res_list[RES_QP];
3424 struct res_qp *qp;
3425 struct res_qp *tmp;
3426 int state;
3427 u64 in_param;
3428 int qpn;
3429 int err;
3431 err = move_all_busy(dev, slave, RES_QP);
3432 if (err)
3433 mlx4_warn(dev, "rem_slave_qps: Could not move all qps to busy for slave %d\n",
3434 slave);
3436 spin_lock_irq(mlx4_tlock(dev));
3437 list_for_each_entry_safe(qp, tmp, qp_list, com.list) {
3438 spin_unlock_irq(mlx4_tlock(dev));
3439 if (qp->com.owner == slave) {
3440 qpn = qp->com.res_id;
3441 detach_qp(dev, slave, qp);
3442 state = qp->com.from_state;
3443 while (state != 0) {
3444 switch (state) {
3445 case RES_QP_RESERVED:
3446 spin_lock_irq(mlx4_tlock(dev));
3447 rb_erase(&qp->com.node,
3448 &tracker->res_tree[RES_QP]);
3449 list_del(&qp->com.list);
3450 spin_unlock_irq(mlx4_tlock(dev));
3451 kfree(qp);
3452 state = 0;
3453 break;
3454 case RES_QP_MAPPED:
3455 if (!valid_reserved(dev, slave, qpn))
3456 __mlx4_qp_free_icm(dev, qpn);
3457 state = RES_QP_RESERVED;
3458 break;
3459 case RES_QP_HW:
3460 in_param = slave;
3461 err = mlx4_cmd(dev, in_param,
3462 qp->local_qpn, 2,
3463 MLX4_CMD_2RST_QP,
3464 MLX4_CMD_TIME_CLASS_A,
3465 MLX4_CMD_NATIVE);
3466 if (err)
3467 mlx4_dbg(dev, "rem_slave_qps: failed"
3468 " to move slave %d qpn %d to"
3469 " reset\n", slave,
3470 qp->local_qpn);
3471 atomic_dec(&qp->rcq->ref_count);
3472 atomic_dec(&qp->scq->ref_count);
3473 atomic_dec(&qp->mtt->ref_count);
3474 if (qp->srq)
3475 atomic_dec(&qp->srq->ref_count);
3476 state = RES_QP_MAPPED;
3477 break;
3478 default:
3479 state = 0;
3480 }
3481 }
3482 }
3483 spin_lock_irq(mlx4_tlock(dev));
3484 }
3485 spin_unlock_irq(mlx4_tlock(dev));
3486 }
3488 static void rem_slave_srqs(struct mlx4_dev *dev, int slave)
3489 {
3490 struct mlx4_priv *priv = mlx4_priv(dev);
3491 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
3492 struct list_head *srq_list =
3493 &tracker->slave_list[slave].res_list[RES_SRQ];
3494 struct res_srq *srq;
3495 struct res_srq *tmp;
3496 int state;
3497 u64 in_param;
3498 LIST_HEAD(tlist);
3499 int srqn;
3500 int err;
3502 err = move_all_busy(dev, slave, RES_SRQ);
3503 if (err)
3504 mlx4_warn(dev, "rem_slave_srqs: Could not move all srqs to busy for slave %d\n",
3505 slave);
3507 spin_lock_irq(mlx4_tlock(dev));
3508 list_for_each_entry_safe(srq, tmp, srq_list, com.list) {
3509 spin_unlock_irq(mlx4_tlock(dev));
3510 if (srq->com.owner == slave) {
3511 srqn = srq->com.res_id;
3512 state = srq->com.from_state;
3513 while (state != 0) {
3514 switch (state) {
3515 case RES_SRQ_ALLOCATED:
3516 __mlx4_srq_free_icm(dev, srqn);
3517 spin_lock_irq(mlx4_tlock(dev));
3518 rb_erase(&srq->com.node,
3519 &tracker->res_tree[RES_SRQ]);
3520 list_del(&srq->com.list);
3521 spin_unlock_irq(mlx4_tlock(dev));
3522 kfree(srq);
3523 state = 0;
3524 break;
3526 case RES_SRQ_HW:
3527 in_param = slave;
3528 err = mlx4_cmd(dev, in_param, srqn, 1,
3529 MLX4_CMD_HW2SW_SRQ,
3530 MLX4_CMD_TIME_CLASS_A,
3531 MLX4_CMD_NATIVE);
3532 if (err)
3533 mlx4_dbg(dev, "rem_slave_srqs: failed"
3534 " to move slave %d srq %d to"
3535 " SW ownership\n",
3536 slave, srqn);
3538 atomic_dec(&srq->mtt->ref_count);
3539 if (srq->cq)
3540 atomic_dec(&srq->cq->ref_count);
3541 state = RES_SRQ_ALLOCATED;
3542 break;
3544 default:
3545 state = 0;
3546 }
3547 }
3548 }
3549 spin_lock_irq(mlx4_tlock(dev));
3550 }
3551 spin_unlock_irq(mlx4_tlock(dev));
3552 }
3554 static void rem_slave_cqs(struct mlx4_dev *dev, int slave)
3555 {
3556 struct mlx4_priv *priv = mlx4_priv(dev);
3557 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
3558 struct list_head *cq_list =
3559 &tracker->slave_list[slave].res_list[RES_CQ];
3560 struct res_cq *cq;
3561 struct res_cq *tmp;
3562 int state;
3563 u64 in_param;
3564 LIST_HEAD(tlist);
3565 int cqn;
3566 int err;
3568 err = move_all_busy(dev, slave, RES_CQ);
3569 if (err)
3570 mlx4_warn(dev, "rem_slave_cqs: Could not move all cqs to busy for slave %d\n",
3571 slave);
3573 spin_lock_irq(mlx4_tlock(dev));
3574 list_for_each_entry_safe(cq, tmp, cq_list, com.list) {
3575 spin_unlock_irq(mlx4_tlock(dev));
3576 if (cq->com.owner == slave && !atomic_read(&cq->ref_count)) {
3577 cqn = cq->com.res_id;
3578 state = cq->com.from_state;
3579 while (state != 0) {
3580 switch (state) {
3581 case RES_CQ_ALLOCATED:
3582 __mlx4_cq_free_icm(dev, cqn);
3583 spin_lock_irq(mlx4_tlock(dev));
3584 rb_erase(&cq->com.node,
3585 &tracker->res_tree[RES_CQ]);
3586 list_del(&cq->com.list);
3587 spin_unlock_irq(mlx4_tlock(dev));
3588 kfree(cq);
3589 state = 0;
3590 break;
3592 case RES_CQ_HW:
3593 in_param = slave;
3594 err = mlx4_cmd(dev, in_param, cqn, 1,
3595 MLX4_CMD_HW2SW_CQ,
3596 MLX4_CMD_TIME_CLASS_A,
3597 MLX4_CMD_NATIVE);
3598 if (err)
3599 mlx4_dbg(dev, "rem_slave_cqs: failed"
3600 " to move slave %d cq %d to"
3601 " SW ownership\n",
3602 slave, cqn);
3603 atomic_dec(&cq->mtt->ref_count);
3604 state = RES_CQ_ALLOCATED;
3605 break;
3607 default:
3608 state = 0;
3609 }
3610 }
3611 }
3612 spin_lock_irq(mlx4_tlock(dev));
3613 }
3614 spin_unlock_irq(mlx4_tlock(dev));
3615 }
3617 static void rem_slave_mrs(struct mlx4_dev *dev, int slave)
3618 {
3619 struct mlx4_priv *priv = mlx4_priv(dev);
3620 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
3621 struct list_head *mpt_list =
3622 &tracker->slave_list[slave].res_list[RES_MPT];
3623 struct res_mpt *mpt;
3624 struct res_mpt *tmp;
3625 int state;
3626 u64 in_param;
3627 LIST_HEAD(tlist);
3628 int mptn;
3629 int err;
3631 err = move_all_busy(dev, slave, RES_MPT);
3632 if (err)
3633 mlx4_warn(dev, "rem_slave_mrs: Could not move all mpts to busy for slave %d\n",
3634 slave);
3636 spin_lock_irq(mlx4_tlock(dev));
3637 list_for_each_entry_safe(mpt, tmp, mpt_list, com.list) {
3638 spin_unlock_irq(mlx4_tlock(dev));
3639 if (mpt->com.owner == slave) {
3640 mptn = mpt->com.res_id;
3641 state = mpt->com.from_state;
3642 while (state != 0) {
3643 switch (state) {
3644 case RES_MPT_RESERVED:
3645 __mlx4_mpt_release(dev, mpt->key);
3646 spin_lock_irq(mlx4_tlock(dev));
3647 rb_erase(&mpt->com.node,
3648 &tracker->res_tree[RES_MPT]);
3649 list_del(&mpt->com.list);
3650 spin_unlock_irq(mlx4_tlock(dev));
3651 kfree(mpt);
3652 state = 0;
3653 break;
3655 case RES_MPT_MAPPED:
3656 __mlx4_mpt_free_icm(dev, mpt->key);
3657 state = RES_MPT_RESERVED;
3658 break;
3660 case RES_MPT_HW:
3661 in_param = slave;
3662 err = mlx4_cmd(dev, in_param, mptn, 0,
3663 MLX4_CMD_HW2SW_MPT,
3664 MLX4_CMD_TIME_CLASS_A,
3665 MLX4_CMD_NATIVE);
3666 if (err)
3667 mlx4_dbg(dev, "rem_slave_mrs: failed"
3668 " to move slave %d mpt %d to"
3669 " SW ownership\n",
3670 slave, mptn);
3671 if (mpt->mtt)
3672 atomic_dec(&mpt->mtt->ref_count);
3673 state = RES_MPT_MAPPED;
3674 break;
3675 default:
3676 state = 0;
3677 }
3678 }
3679 }
3680 spin_lock_irq(mlx4_tlock(dev));
3681 }
3682 spin_unlock_irq(mlx4_tlock(dev));
3683 }
3685 static void rem_slave_mtts(struct mlx4_dev *dev, int slave)
3686 {
3687 struct mlx4_priv *priv = mlx4_priv(dev);
3688 struct mlx4_resource_tracker *tracker =
3689 &priv->mfunc.master.res_tracker;
3690 struct list_head *mtt_list =
3691 &tracker->slave_list[slave].res_list[RES_MTT];
3692 struct res_mtt *mtt;
3693 struct res_mtt *tmp;
3694 int state;
3695 LIST_HEAD(tlist);
3696 int base;
3697 int err;
3699 err = move_all_busy(dev, slave, RES_MTT);
3700 if (err)
3701 mlx4_warn(dev, "rem_slave_mtts: Could not move all mtts to busy for slave %d\n",
3702 slave);
3704 spin_lock_irq(mlx4_tlock(dev));
3705 list_for_each_entry_safe(mtt, tmp, mtt_list, com.list) {
3706 spin_unlock_irq(mlx4_tlock(dev));
3707 if (mtt->com.owner == slave) {
3708 base = mtt->com.res_id;
3709 state = mtt->com.from_state;
3710 while (state != 0) {
3711 switch (state) {
3712 case RES_MTT_ALLOCATED:
3713 __mlx4_free_mtt_range(dev, base,
3714 mtt->order);
3715 spin_lock_irq(mlx4_tlock(dev));
3716 rb_erase(&mtt->com.node,
3717 &tracker->res_tree[RES_MTT]);
3718 list_del(&mtt->com.list);
3719 spin_unlock_irq(mlx4_tlock(dev));
3720 kfree(mtt);
3721 state = 0;
3722 break;
3724 default:
3725 state = 0;
3726 }
3727 }
3728 }
3729 spin_lock_irq(mlx4_tlock(dev));
3730 }
3731 spin_unlock_irq(mlx4_tlock(dev));
3732 }
3734 static void rem_slave_fs_rule(struct mlx4_dev *dev, int slave)
3735 {
3736 struct mlx4_priv *priv = mlx4_priv(dev);
3737 struct mlx4_resource_tracker *tracker =
3738 &priv->mfunc.master.res_tracker;
3739 struct list_head *fs_rule_list =
3740 &tracker->slave_list[slave].res_list[RES_FS_RULE];
3741 struct res_fs_rule *fs_rule;
3742 struct res_fs_rule *tmp;
3743 int state;
3744 u64 base;
3745 int err;
3747 err = move_all_busy(dev, slave, RES_FS_RULE);
3748 if (err)
3749 mlx4_warn(dev, "rem_slave_fs_rule: Could not move all fs rules to busy for slave %d\n",
3750 slave);
3752 spin_lock_irq(mlx4_tlock(dev));
3753 list_for_each_entry_safe(fs_rule, tmp, fs_rule_list, com.list) {
3754 spin_unlock_irq(mlx4_tlock(dev));
3755 if (fs_rule->com.owner == slave) {
3756 base = fs_rule->com.res_id;
3757 state = fs_rule->com.from_state;
3758 while (state != 0) {
3759 switch (state) {
3760 case RES_FS_RULE_ALLOCATED:
3761 /* detach rule */
3762 err = mlx4_cmd(dev, base, 0, 0,
3763 MLX4_QP_FLOW_STEERING_DETACH,
3764 MLX4_CMD_TIME_CLASS_A,
3765 MLX4_CMD_NATIVE);
3767 spin_lock_irq(mlx4_tlock(dev));
3768 rb_erase(&fs_rule->com.node,
3769 &tracker->res_tree[RES_FS_RULE]);
3770 list_del(&fs_rule->com.list);
3771 spin_unlock_irq(mlx4_tlock(dev));
3772 kfree(fs_rule);
3773 state = 0;
3774 break;
3776 default:
3777 state = 0;
3778 }
3779 }
3780 }
3781 spin_lock_irq(mlx4_tlock(dev));
3782 }
3783 spin_unlock_irq(mlx4_tlock(dev));
3784 }
3786 static void rem_slave_eqs(struct mlx4_dev *dev, int slave)
3787 {
3788 struct mlx4_priv *priv = mlx4_priv(dev);
3789 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
3790 struct list_head *eq_list =
3791 &tracker->slave_list[slave].res_list[RES_EQ];
3792 struct res_eq *eq;
3793 struct res_eq *tmp;
3794 int err;
3795 int state;
3796 LIST_HEAD(tlist);
3797 int eqn;
3798 struct mlx4_cmd_mailbox *mailbox;
3800 err = move_all_busy(dev, slave, RES_EQ);
3801 if (err)
3802 mlx4_warn(dev, "rem_slave_eqs: Could not move all eqs to busy for slave %d\n",
3803 slave);
3805 spin_lock_irq(mlx4_tlock(dev));
3806 list_for_each_entry_safe(eq, tmp, eq_list, com.list) {
3807 spin_unlock_irq(mlx4_tlock(dev));
3808 if (eq->com.owner == slave) {
3809 eqn = eq->com.res_id;
3810 state = eq->com.from_state;
3811 while (state != 0) {
3812 switch (state) {
3813 case RES_EQ_RESERVED:
3814 spin_lock_irq(mlx4_tlock(dev));
3815 rb_erase(&eq->com.node,
3816 &tracker->res_tree[RES_EQ]);
3817 list_del(&eq->com.list);
3818 spin_unlock_irq(mlx4_tlock(dev));
3819 kfree(eq);
3820 state = 0;
3821 break;
3823 case RES_EQ_HW:
3824 mailbox = mlx4_alloc_cmd_mailbox(dev);
3825 if (IS_ERR(mailbox)) {
3826 cond_resched();
3827 continue;
3828 }
3829 err = mlx4_cmd_box(dev, slave, 0,
3830 eqn & 0xff, 0,
3831 MLX4_CMD_HW2SW_EQ,
3832 MLX4_CMD_TIME_CLASS_A,
3833 MLX4_CMD_NATIVE);
3834 if (err)
3835 mlx4_dbg(dev, "rem_slave_eqs: failed"
3836 " to move slave %d eqs %d to"
3837 " SW ownership\n", slave, eqn);
3838 mlx4_free_cmd_mailbox(dev, mailbox);
3839 atomic_dec(&eq->mtt->ref_count);
3840 state = RES_EQ_RESERVED;
3848 spin_lock_irq(mlx4_tlock(dev));
3850 spin_unlock_irq(mlx4_tlock(dev));
3853 static void rem_slave_counters(struct mlx4_dev *dev, int slave)
3854 {
3855 struct mlx4_priv *priv = mlx4_priv(dev);
3856 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
3857 struct list_head *counter_list =
3858 &tracker->slave_list[slave].res_list[RES_COUNTER];
3859 struct res_counter *counter;
3860 struct res_counter *tmp;
3861 int index;
3862 int err;
3864 err = move_all_busy(dev, slave, RES_COUNTER);
3865 if (err)
3866 mlx4_warn(dev, "rem_slave_counters: Could not move all counters to busy for slave %d\n",
3867 slave);
3869 spin_lock_irq(mlx4_tlock(dev));
3870 list_for_each_entry_safe(counter, tmp, counter_list, com.list) {
3871 if (counter->com.owner == slave) {
3872 index = counter->com.res_id;
3873 rb_erase(&counter->com.node,
3874 &tracker->res_tree[RES_COUNTER]);
3875 list_del(&counter->com.list);
3876 kfree(counter);
3877 __mlx4_counter_free(dev, index);
3878 }
3879 }
3880 spin_unlock_irq(mlx4_tlock(dev));
3881 }
3883 static void rem_slave_xrcdns(struct mlx4_dev *dev, int slave)
3884 {
3885 struct mlx4_priv *priv = mlx4_priv(dev);
3886 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
3887 struct list_head *xrcdn_list =
3888 &tracker->slave_list[slave].res_list[RES_XRCD];
3889 struct res_xrcdn *xrcd;
3890 struct res_xrcdn *tmp;
3891 int xrcdn;
3892 int err;
3894 err = move_all_busy(dev, slave, RES_XRCD);
3895 if (err)
3896 mlx4_warn(dev, "rem_slave_xrcdns: Could not move all xrcdns to busy for slave %d\n",
3897 slave);
3899 spin_lock_irq(mlx4_tlock(dev));
3900 list_for_each_entry_safe(xrcd, tmp, xrcdn_list, com.list) {
3901 if (xrcd->com.owner == slave) {
3902 xrcdn = xrcd->com.res_id;
3903 rb_erase(&xrcd->com.node, &tracker->res_tree[RES_XRCD]);
3904 list_del(&xrcd->com.list);
3905 kfree(xrcd);
3906 __mlx4_xrcd_free(dev, xrcdn);
3907 }
3908 }
3909 spin_unlock_irq(mlx4_tlock(dev));
3910 }
3912 void mlx4_delete_all_resources_for_slave(struct mlx4_dev *dev, int slave)
3913 {
3914 struct mlx4_priv *priv = mlx4_priv(dev);
3916 mutex_lock(&priv->mfunc.master.res_tracker.slave_list[slave].mutex);
3918 rem_slave_macs(dev, slave);
3919 rem_slave_fs_rule(dev, slave);
3920 rem_slave_qps(dev, slave);
3921 rem_slave_srqs(dev, slave);
3922 rem_slave_cqs(dev, slave);
3923 rem_slave_mrs(dev, slave);
3924 rem_slave_eqs(dev, slave);
3925 rem_slave_mtts(dev, slave);
3926 rem_slave_counters(dev, slave);
3927 rem_slave_xrcdns(dev, slave);
3928 mutex_unlock(&priv->mfunc.master.res_tracker.slave_list[slave].mutex);
3929 }
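/*
 * Editorial note on ordering: flow steering rules and multicast
 * attachments are removed before the QPs they reference, QPs before the
 * CQs, SRQs and MTTs whose reference counts they hold, and MTTs near the
 * end because QPs, SRQs, CQs, MRs and EQs may all still point at them.
 */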