/*
 * Copyright (c) 2004, 2005 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/sched.h>
#include <linux/pci.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/mlx4/cmd.h>
#include <linux/mlx4/qp.h>
#include <linux/if_ether.h>
#include <linux/etherdevice.h>

#include "mlx4.h"
#include "fw.h"
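/*
 * The resource tracker runs on the PF (master) and shadows every HCA
 * resource a VF (slave) owns -- QPs, CQs, SRQs, MPTs, MTTs, EQs,
 * counters, XRC domains, MACs and flow steering rules -- so that
 * firmware commands issued on behalf of guests can be validated, and
 * everything can be reclaimed if a guest dies without cleaning up.
 */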
#define MLX4_MAC_VALID		(1ull << 63)

struct mac_res {
	struct list_head list;
	u64 mac;
	u8 port;
};

struct res_common {
	struct list_head list;
	struct rb_node node;
	u64 res_id;
	int owner;
	int state;
	int from_state;
	int to_state;
	int removing;
};

enum {
	RES_ANY_BUSY = 1
};

struct res_gid {
	struct list_head list;
	u8 gid[16];
	enum mlx4_protocol prot;
	enum mlx4_steer_type steer;
};

enum res_qp_states {
	RES_QP_BUSY = RES_ANY_BUSY,

	/* QP number was allocated */
	RES_QP_RESERVED,

	/* ICM memory for QP context was mapped */
	RES_QP_MAPPED,

	/* QP is in hw ownership */
	RES_QP_HW
};

struct res_qp {
	struct res_common com;
	struct res_mtt *mtt;
	struct res_cq *rcq;
	struct res_cq *scq;
	struct res_srq *srq;
	struct list_head mcg_list;
	spinlock_t mcg_spl;
	int local_qpn;
};

enum res_mtt_states {
	RES_MTT_BUSY = RES_ANY_BUSY,
	RES_MTT_ALLOCATED,
};

static inline const char *mtt_states_str(enum res_mtt_states state)
{
	switch (state) {
	case RES_MTT_BUSY: return "RES_MTT_BUSY";
	case RES_MTT_ALLOCATED: return "RES_MTT_ALLOCATED";
	default: return "Unknown";
	}
}

struct res_mtt {
	struct res_common com;
	int order;
	atomic_t ref_count;
};

enum res_mpt_states {
	RES_MPT_BUSY = RES_ANY_BUSY,
	RES_MPT_RESERVED,
	RES_MPT_MAPPED,
	RES_MPT_HW,
};

struct res_mpt {
	struct res_common com;
	struct res_mtt *mtt;
	int key;
};

enum res_eq_states {
	RES_EQ_BUSY = RES_ANY_BUSY,
	RES_EQ_RESERVED,
	RES_EQ_HW,
};

struct res_eq {
	struct res_common com;
	struct res_mtt *mtt;
};

enum res_cq_states {
	RES_CQ_BUSY = RES_ANY_BUSY,
	RES_CQ_ALLOCATED,
	RES_CQ_HW,
};

struct res_cq {
	struct res_common com;
	struct res_mtt *mtt;
	atomic_t ref_count;
};

enum res_srq_states {
	RES_SRQ_BUSY = RES_ANY_BUSY,
	RES_SRQ_ALLOCATED,
	RES_SRQ_HW,
};

struct res_srq {
	struct res_common com;
	struct res_mtt *mtt;
	struct res_cq *cq;
	atomic_t ref_count;
};

enum res_counter_states {
	RES_COUNTER_BUSY = RES_ANY_BUSY,
	RES_COUNTER_ALLOCATED,
};

struct res_counter {
	struct res_common com;
};

enum res_xrcdn_states {
	RES_XRCD_BUSY = RES_ANY_BUSY,
	RES_XRCD_ALLOCATED,
};

struct res_xrcdn {
	struct res_common com;
};

enum res_fs_rule_states {
	RES_FS_RULE_BUSY = RES_ANY_BUSY,
	RES_FS_RULE_ALLOCATED,
};

struct res_fs_rule {
	struct res_common com;
};
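/*
 * Every tracked resource lives in two places at once: a per-type
 * red-black tree keyed by res_id (for fast lookup on the command path)
 * and a per-slave, per-type linked list (for bulk cleanup when a slave
 * goes away).  The two helpers below are a plain rb-tree lookup/insert.
 */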
static void *res_tracker_lookup(struct rb_root *root, u64 res_id)
{
	struct rb_node *node = root->rb_node;

	while (node) {
		struct res_common *res = container_of(node, struct res_common,
						      node);

		if (res_id < res->res_id)
			node = node->rb_left;
		else if (res_id > res->res_id)
			node = node->rb_right;
		else
			return res;
	}
	return NULL;
}

static int res_tracker_insert(struct rb_root *root, struct res_common *res)
{
	struct rb_node **new = &(root->rb_node), *parent = NULL;

	/* Figure out where to put new node */
	while (*new) {
		struct res_common *this = container_of(*new, struct res_common,
						       node);

		parent = *new;
		if (res->res_id < this->res_id)
			new = &((*new)->rb_left);
		else if (res->res_id > this->res_id)
			new = &((*new)->rb_right);
		else
			return -EEXIST;
	}

	/* Add new node and rebalance tree. */
	rb_link_node(&res->node, parent, new);
	rb_insert_color(&res->node, root);

	return 0;
}
static const char *ResourceType(enum mlx4_resource rt)
{
	switch (rt) {
	case RES_QP: return "RES_QP";
	case RES_CQ: return "RES_CQ";
	case RES_SRQ: return "RES_SRQ";
	case RES_MPT: return "RES_MPT";
	case RES_MTT: return "RES_MTT";
	case RES_MAC: return "RES_MAC";
	case RES_EQ: return "RES_EQ";
	case RES_COUNTER: return "RES_COUNTER";
	case RES_FS_RULE: return "RES_FS_RULE";
	case RES_XRCD: return "RES_XRCD";
	default: return "Unknown resource type !!!";
	}
}
int mlx4_init_resource_tracker(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int i;
	int t;

	priv->mfunc.master.res_tracker.slave_list =
		kzalloc(dev->num_slaves * sizeof(struct slave_list),
			GFP_KERNEL);
	if (!priv->mfunc.master.res_tracker.slave_list)
		return -ENOMEM;

	for (i = 0; i < dev->num_slaves; i++) {
		for (t = 0; t < MLX4_NUM_OF_RESOURCE_TYPE; ++t)
			INIT_LIST_HEAD(&priv->mfunc.master.res_tracker.
				       slave_list[i].res_list[t]);
		mutex_init(&priv->mfunc.master.res_tracker.slave_list[i].mutex);
	}

	mlx4_dbg(dev, "Started init_resource_tracker: %ld slaves\n",
		 dev->num_slaves);
	for (i = 0; i < MLX4_NUM_OF_RESOURCE_TYPE; i++)
		priv->mfunc.master.res_tracker.res_tree[i] = RB_ROOT;

	spin_lock_init(&priv->mfunc.master.res_tracker.lock);
	return 0;
}
void mlx4_free_resource_tracker(struct mlx4_dev *dev,
				enum mlx4_res_tracker_free_type type)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int i;

	if (priv->mfunc.master.res_tracker.slave_list) {
		if (type != RES_TR_FREE_STRUCTS_ONLY)
			for (i = 0; i < dev->num_slaves; i++)
				if (type == RES_TR_FREE_ALL ||
				    dev->caps.function != i)
					mlx4_delete_all_resources_for_slave(dev, i);

		if (type != RES_TR_FREE_SLAVES_ONLY) {
			kfree(priv->mfunc.master.res_tracker.slave_list);
			priv->mfunc.master.res_tracker.slave_list = NULL;
		}
	}
}
static void update_ud_gid(struct mlx4_dev *dev,
			  struct mlx4_qp_context *qp_ctx, u8 slave)
{
	u32 ts = (be32_to_cpu(qp_ctx->flags) >> 16) & 0xff;

	if (MLX4_QP_ST_UD == ts)
		qp_ctx->pri_path.mgid_index = 0x80 | slave;

	mlx4_dbg(dev, "slave %d, new gid index: 0x%x\n",
		 slave, qp_ctx->pri_path.mgid_index);
}
static int mpt_mask(struct mlx4_dev *dev)
{
	return dev->caps.num_mpts - 1;
}

static void *find_res(struct mlx4_dev *dev, int res_id,
		      enum mlx4_resource type)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	return res_tracker_lookup(&priv->mfunc.master.res_tracker.res_tree[type],
				  res_id);
}
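/*
 * get_res()/put_res() implement a simple busy-bit protocol: taking a
 * reference parks the resource in RES_ANY_BUSY (so concurrent commands
 * and slave cleanup cannot race on it) and remembers the previous state
 * in from_state; put_res() restores that state.
 */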
static int get_res(struct mlx4_dev *dev, int slave, u64 res_id,
		   enum mlx4_resource type,
		   void *res)
{
	struct res_common *r;
	int err = 0;

	spin_lock_irq(mlx4_tlock(dev));
	r = find_res(dev, res_id, type);
	if (!r) {
		err = -ENOENT;
		goto exit;
	}

	if (r->state == RES_ANY_BUSY) {
		err = -EBUSY;
		goto exit;
	}

	if (r->owner != slave) {
		err = -EPERM;
		goto exit;
	}

	r->from_state = r->state;
	r->state = RES_ANY_BUSY;
	mlx4_dbg(dev, "res %s id 0x%llx to busy\n",
		 ResourceType(type), r->res_id);

	if (res)
		*((struct res_common **)res) = r;

exit:
	spin_unlock_irq(mlx4_tlock(dev));
	return err;
}
int mlx4_get_slave_from_resource_id(struct mlx4_dev *dev,
				    enum mlx4_resource type,
				    u64 res_id, int *slave)
{
	struct res_common *r;
	int err = -ENOENT;
	int id = res_id;

	if (type == RES_QP)
		id &= 0x7fffff;
	spin_lock(mlx4_tlock(dev));

	r = find_res(dev, id, type);
	if (r) {
		*slave = r->owner;
		err = 0;
	}
	spin_unlock(mlx4_tlock(dev));

	return err;
}

static void put_res(struct mlx4_dev *dev, int slave, u64 res_id,
		    enum mlx4_resource type)
{
	struct res_common *r;

	spin_lock_irq(mlx4_tlock(dev));
	r = find_res(dev, res_id, type);
	if (r)
		r->state = r->from_state;
	spin_unlock_irq(mlx4_tlock(dev));
}
static struct res_common *alloc_qp_tr(int id)
{
	struct res_qp *ret;

	ret = kzalloc(sizeof *ret, GFP_KERNEL);
	if (!ret)
		return NULL;

	ret->com.res_id = id;
	ret->com.state = RES_QP_RESERVED;
	INIT_LIST_HEAD(&ret->mcg_list);
	spin_lock_init(&ret->mcg_spl);

	return &ret->com;
}

static struct res_common *alloc_mtt_tr(int id, int order)
{
	struct res_mtt *ret;

	ret = kzalloc(sizeof *ret, GFP_KERNEL);
	if (!ret)
		return NULL;

	ret->com.res_id = id;
	ret->order = order;
	ret->com.state = RES_MTT_ALLOCATED;
	atomic_set(&ret->ref_count, 0);

	return &ret->com;
}

static struct res_common *alloc_mpt_tr(int id, int key)
{
	struct res_mpt *ret;

	ret = kzalloc(sizeof *ret, GFP_KERNEL);
	if (!ret)
		return NULL;

	ret->com.res_id = id;
	ret->com.state = RES_MPT_RESERVED;
	ret->key = key;

	return &ret->com;
}

static struct res_common *alloc_eq_tr(int id)
{
	struct res_eq *ret;

	ret = kzalloc(sizeof *ret, GFP_KERNEL);
	if (!ret)
		return NULL;

	ret->com.res_id = id;
	ret->com.state = RES_EQ_RESERVED;

	return &ret->com;
}

static struct res_common *alloc_cq_tr(int id)
{
	struct res_cq *ret;

	ret = kzalloc(sizeof *ret, GFP_KERNEL);
	if (!ret)
		return NULL;

	ret->com.res_id = id;
	ret->com.state = RES_CQ_ALLOCATED;
	atomic_set(&ret->ref_count, 0);

	return &ret->com;
}

static struct res_common *alloc_srq_tr(int id)
{
	struct res_srq *ret;

	ret = kzalloc(sizeof *ret, GFP_KERNEL);
	if (!ret)
		return NULL;

	ret->com.res_id = id;
	ret->com.state = RES_SRQ_ALLOCATED;
	atomic_set(&ret->ref_count, 0);

	return &ret->com;
}

static struct res_common *alloc_counter_tr(int id)
{
	struct res_counter *ret;

	ret = kzalloc(sizeof *ret, GFP_KERNEL);
	if (!ret)
		return NULL;

	ret->com.res_id = id;
	ret->com.state = RES_COUNTER_ALLOCATED;

	return &ret->com;
}

static struct res_common *alloc_xrcdn_tr(int id)
{
	struct res_xrcdn *ret;

	ret = kzalloc(sizeof *ret, GFP_KERNEL);
	if (!ret)
		return NULL;

	ret->com.res_id = id;
	ret->com.state = RES_XRCD_ALLOCATED;

	return &ret->com;
}

static struct res_common *alloc_fs_rule_tr(u64 id)
{
	struct res_fs_rule *ret;

	ret = kzalloc(sizeof *ret, GFP_KERNEL);
	if (!ret)
		return NULL;

	ret->com.res_id = id;
	ret->com.state = RES_FS_RULE_ALLOCATED;

	return &ret->com;
}
static struct res_common *alloc_tr(u64 id, enum mlx4_resource type, int slave,
				   int extra)
{
	struct res_common *ret;

	switch (type) {
	case RES_QP:
		ret = alloc_qp_tr(id);
		break;
	case RES_MPT:
		ret = alloc_mpt_tr(id, extra);
		break;
	case RES_MTT:
		ret = alloc_mtt_tr(id, extra);
		break;
	case RES_EQ:
		ret = alloc_eq_tr(id);
		break;
	case RES_CQ:
		ret = alloc_cq_tr(id);
		break;
	case RES_SRQ:
		ret = alloc_srq_tr(id);
		break;
	case RES_MAC:
		printk(KERN_ERR "implementation missing\n");
		return NULL;
	case RES_COUNTER:
		ret = alloc_counter_tr(id);
		break;
	case RES_XRCD:
		ret = alloc_xrcdn_tr(id);
		break;
	case RES_FS_RULE:
		ret = alloc_fs_rule_tr(id);
		break;
	default:
		return NULL;
	}
	if (ret)
		ret->owner = slave;

	return ret;
}
static int add_res_range(struct mlx4_dev *dev, int slave, u64 base, int count,
			 enum mlx4_resource type, int extra)
{
	int i;
	int err;
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct res_common **res_arr;
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct rb_root *root = &tracker->res_tree[type];

	res_arr = kzalloc(count * sizeof *res_arr, GFP_KERNEL);
	if (!res_arr)
		return -ENOMEM;

	for (i = 0; i < count; ++i) {
		res_arr[i] = alloc_tr(base + i, type, slave, extra);
		if (!res_arr[i]) {
			for (--i; i >= 0; --i)
				kfree(res_arr[i]);

			kfree(res_arr);
			return -ENOMEM;
		}
	}

	spin_lock_irq(mlx4_tlock(dev));
	for (i = 0; i < count; ++i) {
		if (find_res(dev, base + i, type)) {
			err = -EEXIST;
			goto undo;
		}
		err = res_tracker_insert(root, res_arr[i]);
		if (err)
			goto undo;
		list_add_tail(&res_arr[i]->list,
			      &tracker->slave_list[slave].res_list[type]);
	}
	spin_unlock_irq(mlx4_tlock(dev));
	kfree(res_arr);

	return 0;

undo:
	for (--i; i >= 0; --i)
		rb_erase(&res_arr[i]->node, root);

	spin_unlock_irq(mlx4_tlock(dev));

	for (i = 0; i < count; ++i)
		kfree(res_arr[i]);

	kfree(res_arr);

	return err;
}
static int remove_qp_ok(struct res_qp *res)
{
	if (res->com.state == RES_QP_BUSY)
		return -EBUSY;
	else if (res->com.state != RES_QP_RESERVED)
		return -EPERM;

	return 0;
}

static int remove_mtt_ok(struct res_mtt *res, int order)
{
	if (res->com.state == RES_MTT_BUSY ||
	    atomic_read(&res->ref_count)) {
		printk(KERN_DEBUG "%s-%d: state %s, ref_count %d\n",
		       __func__, __LINE__,
		       mtt_states_str(res->com.state),
		       atomic_read(&res->ref_count));
		return -EBUSY;
	} else if (res->com.state != RES_MTT_ALLOCATED)
		return -EPERM;
	else if (res->order != order)
		return -EINVAL;

	return 0;
}

static int remove_mpt_ok(struct res_mpt *res)
{
	if (res->com.state == RES_MPT_BUSY)
		return -EBUSY;
	else if (res->com.state != RES_MPT_RESERVED)
		return -EPERM;

	return 0;
}

static int remove_eq_ok(struct res_eq *res)
{
	if (res->com.state == RES_EQ_BUSY)
		return -EBUSY;
	else if (res->com.state != RES_EQ_RESERVED)
		return -EPERM;

	return 0;
}

static int remove_counter_ok(struct res_counter *res)
{
	if (res->com.state == RES_COUNTER_BUSY)
		return -EBUSY;
	else if (res->com.state != RES_COUNTER_ALLOCATED)
		return -EPERM;

	return 0;
}

static int remove_xrcdn_ok(struct res_xrcdn *res)
{
	if (res->com.state == RES_XRCD_BUSY)
		return -EBUSY;
	else if (res->com.state != RES_XRCD_ALLOCATED)
		return -EPERM;

	return 0;
}

static int remove_fs_rule_ok(struct res_fs_rule *res)
{
	if (res->com.state == RES_FS_RULE_BUSY)
		return -EBUSY;
	else if (res->com.state != RES_FS_RULE_ALLOCATED)
		return -EPERM;

	return 0;
}

static int remove_cq_ok(struct res_cq *res)
{
	if (res->com.state == RES_CQ_BUSY)
		return -EBUSY;
	else if (res->com.state != RES_CQ_ALLOCATED)
		return -EPERM;

	return 0;
}

static int remove_srq_ok(struct res_srq *res)
{
	if (res->com.state == RES_SRQ_BUSY)
		return -EBUSY;
	else if (res->com.state != RES_SRQ_ALLOCATED)
		return -EPERM;

	return 0;
}
static int remove_ok(struct res_common *res, enum mlx4_resource type, int extra)
{
	switch (type) {
	case RES_QP:
		return remove_qp_ok((struct res_qp *)res);
	case RES_CQ:
		return remove_cq_ok((struct res_cq *)res);
	case RES_SRQ:
		return remove_srq_ok((struct res_srq *)res);
	case RES_MPT:
		return remove_mpt_ok((struct res_mpt *)res);
	case RES_MTT:
		return remove_mtt_ok((struct res_mtt *)res, extra);
	case RES_MAC:
		return -ENOSYS;
	case RES_EQ:
		return remove_eq_ok((struct res_eq *)res);
	case RES_COUNTER:
		return remove_counter_ok((struct res_counter *)res);
	case RES_XRCD:
		return remove_xrcdn_ok((struct res_xrcdn *)res);
	case RES_FS_RULE:
		return remove_fs_rule_ok((struct res_fs_rule *)res);
	default:
		return -EINVAL;
	}
}
static int rem_res_range(struct mlx4_dev *dev, int slave, u64 base, int count,
			 enum mlx4_resource type, int extra)
{
	u64 i;
	int err;
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct res_common *r;

	spin_lock_irq(mlx4_tlock(dev));
	for (i = base; i < base + count; ++i) {
		r = res_tracker_lookup(&tracker->res_tree[type], i);
		if (!r) {
			err = -ENOENT;
			goto out;
		}
		if (r->owner != slave) {
			err = -EPERM;
			goto out;
		}
		err = remove_ok(r, type, extra);
		if (err)
			goto out;
	}

	for (i = base; i < base + count; ++i) {
		r = res_tracker_lookup(&tracker->res_tree[type], i);
		rb_erase(&r->node, &tracker->res_tree[type]);
		list_del(&r->list);
		kfree(r);
	}
	err = 0;

out:
	spin_unlock_irq(mlx4_tlock(dev));

	return err;
}
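/*
 * The *_res_start_move_to() helpers below implement a two-phase state
 * transition: the resource is parked in the per-type BUSY state and
 * to_state records the target.  res_end_move() commits the transition
 * once the firmware command succeeds; res_abort_move() rolls it back.
 */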
static int qp_res_start_move_to(struct mlx4_dev *dev, int slave, int qpn,
				enum res_qp_states state, struct res_qp **qp,
				int alloc)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct res_qp *r;
	int err = 0;

	spin_lock_irq(mlx4_tlock(dev));
	r = res_tracker_lookup(&tracker->res_tree[RES_QP], qpn);
	if (!r)
		err = -ENOENT;
	else if (r->com.owner != slave)
		err = -EPERM;
	else {
		switch (state) {
		case RES_QP_BUSY:
			mlx4_dbg(dev, "%s: failed RES_QP, 0x%llx\n",
				 __func__, r->com.res_id);
			err = -EBUSY;
			break;

		case RES_QP_RESERVED:
			if (r->com.state == RES_QP_MAPPED && !alloc)
				break;

			mlx4_dbg(dev, "failed RES_QP, 0x%llx\n", r->com.res_id);
			err = -EINVAL;
			break;

		case RES_QP_MAPPED:
			if ((r->com.state == RES_QP_RESERVED && alloc) ||
			    r->com.state == RES_QP_HW)
				break;
			else {
				mlx4_dbg(dev, "failed RES_QP, 0x%llx\n",
					 r->com.res_id);
				err = -EINVAL;
			}
			break;

		case RES_QP_HW:
			if (r->com.state != RES_QP_MAPPED)
				err = -EINVAL;
			break;
		default:
			err = -EINVAL;
		}

		if (!err) {
			r->com.from_state = r->com.state;
			r->com.to_state = state;
			r->com.state = RES_QP_BUSY;
			if (qp)
				*qp = r;
		}
	}

	spin_unlock_irq(mlx4_tlock(dev));

	return err;
}
static int mr_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
				enum res_mpt_states state, struct res_mpt **mpt)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct res_mpt *r;
	int err = 0;

	spin_lock_irq(mlx4_tlock(dev));
	r = res_tracker_lookup(&tracker->res_tree[RES_MPT], index);
	if (!r)
		err = -ENOENT;
	else if (r->com.owner != slave)
		err = -EPERM;
	else {
		switch (state) {
		case RES_MPT_BUSY:
			err = -EINVAL;
			break;

		case RES_MPT_RESERVED:
			if (r->com.state != RES_MPT_MAPPED)
				err = -EINVAL;
			break;

		case RES_MPT_MAPPED:
			if (r->com.state != RES_MPT_RESERVED &&
			    r->com.state != RES_MPT_HW)
				err = -EINVAL;
			break;

		case RES_MPT_HW:
			if (r->com.state != RES_MPT_MAPPED)
				err = -EINVAL;
			break;
		default:
			err = -EINVAL;
		}

		if (!err) {
			r->com.from_state = r->com.state;
			r->com.to_state = state;
			r->com.state = RES_MPT_BUSY;
			if (mpt)
				*mpt = r;
		}
	}

	spin_unlock_irq(mlx4_tlock(dev));

	return err;
}
static int eq_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
				enum res_eq_states state, struct res_eq **eq)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct res_eq *r;
	int err = 0;

	spin_lock_irq(mlx4_tlock(dev));
	r = res_tracker_lookup(&tracker->res_tree[RES_EQ], index);
	if (!r)
		err = -ENOENT;
	else if (r->com.owner != slave)
		err = -EPERM;
	else {
		switch (state) {
		case RES_EQ_BUSY:
			err = -EINVAL;
			break;

		case RES_EQ_RESERVED:
			if (r->com.state != RES_EQ_HW)
				err = -EINVAL;
			break;

		case RES_EQ_HW:
			if (r->com.state != RES_EQ_RESERVED)
				err = -EINVAL;
			break;

		default:
			err = -EINVAL;
		}

		if (!err) {
			r->com.from_state = r->com.state;
			r->com.to_state = state;
			r->com.state = RES_EQ_BUSY;
			if (eq)
				*eq = r;
		}
	}

	spin_unlock_irq(mlx4_tlock(dev));

	return err;
}
static int cq_res_start_move_to(struct mlx4_dev *dev, int slave, int cqn,
				enum res_cq_states state, struct res_cq **cq)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct res_cq *r;
	int err;

	spin_lock_irq(mlx4_tlock(dev));
	r = res_tracker_lookup(&tracker->res_tree[RES_CQ], cqn);
	if (!r)
		err = -ENOENT;
	else if (r->com.owner != slave)
		err = -EPERM;
	else {
		switch (state) {
		case RES_CQ_BUSY:
			err = -EBUSY;
			break;

		case RES_CQ_ALLOCATED:
			if (r->com.state != RES_CQ_HW)
				err = -EINVAL;
			else if (atomic_read(&r->ref_count))
				err = -EBUSY;
			else
				err = 0;
			break;

		case RES_CQ_HW:
			if (r->com.state != RES_CQ_ALLOCATED)
				err = -EINVAL;
			else
				err = 0;
			break;

		default:
			err = -EINVAL;
		}

		if (!err) {
			r->com.from_state = r->com.state;
			r->com.to_state = state;
			r->com.state = RES_CQ_BUSY;
			if (cq)
				*cq = r;
		}
	}

	spin_unlock_irq(mlx4_tlock(dev));

	return err;
}
static int srq_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
				 enum res_srq_states state, struct res_srq **srq)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct res_srq *r;
	int err = 0;

	spin_lock_irq(mlx4_tlock(dev));
	r = res_tracker_lookup(&tracker->res_tree[RES_SRQ], index);
	if (!r)
		err = -ENOENT;
	else if (r->com.owner != slave)
		err = -EPERM;
	else {
		switch (state) {
		case RES_SRQ_BUSY:
			err = -EINVAL;
			break;

		case RES_SRQ_ALLOCATED:
			if (r->com.state != RES_SRQ_HW)
				err = -EINVAL;
			else if (atomic_read(&r->ref_count))
				err = -EBUSY;
			break;

		case RES_SRQ_HW:
			if (r->com.state != RES_SRQ_ALLOCATED)
				err = -EINVAL;
			break;

		default:
			err = -EINVAL;
		}

		if (!err) {
			r->com.from_state = r->com.state;
			r->com.to_state = state;
			r->com.state = RES_SRQ_BUSY;
			if (srq)
				*srq = r;
		}
	}

	spin_unlock_irq(mlx4_tlock(dev));

	return err;
}
static void res_abort_move(struct mlx4_dev *dev, int slave,
			   enum mlx4_resource type, int id)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct res_common *r;

	spin_lock_irq(mlx4_tlock(dev));
	r = res_tracker_lookup(&tracker->res_tree[type], id);
	if (r && (r->owner == slave))
		r->state = r->from_state;
	spin_unlock_irq(mlx4_tlock(dev));
}

static void res_end_move(struct mlx4_dev *dev, int slave,
			 enum mlx4_resource type, int id)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct res_common *r;

	spin_lock_irq(mlx4_tlock(dev));
	r = res_tracker_lookup(&tracker->res_tree[type], id);
	if (r && (r->owner == slave))
		r->state = r->to_state;
	spin_unlock_irq(mlx4_tlock(dev));
}
static int valid_reserved(struct mlx4_dev *dev, int slave, int qpn)
{
	return mlx4_is_qp_reserved(dev, qpn);
}
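/*
 * The *_alloc_res() helpers below service the ALLOC_RES command issued
 * by a slave.  'op' selects the phase (RES_OP_RESERVE reserves a range,
 * RES_OP_MAP_ICM backs it with ICM memory, RES_OP_RESERVE_AND_MAP does
 * both); arguments and results travel in the low/high 32-bit halves of
 * in_param/out_param via get_param_l()/get_param_h()/set_param_l().
 */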
static int qp_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			u64 in_param, u64 *out_param)
{
	int err;
	int count;
	int align;
	int base;
	int qpn;

	switch (op) {
	case RES_OP_RESERVE:
		count = get_param_l(&in_param);
		align = get_param_h(&in_param);
		err = __mlx4_qp_reserve_range(dev, count, align, &base);
		if (err)
			return err;

		err = add_res_range(dev, slave, base, count, RES_QP, 0);
		if (err) {
			__mlx4_qp_release_range(dev, base, count);
			return err;
		}
		set_param_l(out_param, base);
		break;
	case RES_OP_MAP_ICM:
		qpn = get_param_l(&in_param) & 0x7fffff;
		if (valid_reserved(dev, slave, qpn)) {
			err = add_res_range(dev, slave, qpn, 1, RES_QP, 0);
			if (err)
				return err;
		}

		err = qp_res_start_move_to(dev, slave, qpn, RES_QP_MAPPED,
					   NULL, 1);
		if (err)
			return err;

		if (!valid_reserved(dev, slave, qpn)) {
			err = __mlx4_qp_alloc_icm(dev, qpn);
			if (err) {
				res_abort_move(dev, slave, RES_QP, qpn);
				return err;
			}
		}

		res_end_move(dev, slave, RES_QP, qpn);
		break;

	default:
		err = -EINVAL;
		break;
	}
	return err;
}
static int mtt_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			 u64 in_param, u64 *out_param)
{
	int err = -EINVAL;
	int base;
	int order;

	if (op != RES_OP_RESERVE_AND_MAP)
		return err;

	order = get_param_l(&in_param);
	base = __mlx4_alloc_mtt_range(dev, order);
	if (base == -1)
		return -ENOMEM;

	err = add_res_range(dev, slave, base, 1, RES_MTT, order);
	if (err)
		__mlx4_free_mtt_range(dev, base, order);
	else
		set_param_l(out_param, base);

	return err;
}
static int mpt_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			 u64 in_param, u64 *out_param)
{
	int err = -EINVAL;
	int index;
	int id;
	struct res_mpt *mpt;

	switch (op) {
	case RES_OP_RESERVE:
		index = __mlx4_mr_reserve(dev);
		if (index == -1)
			break;
		id = index & mpt_mask(dev);

		err = add_res_range(dev, slave, id, 1, RES_MPT, index);
		if (err) {
			__mlx4_mr_release(dev, index);
			break;
		}
		set_param_l(out_param, index);
		break;
	case RES_OP_MAP_ICM:
		index = get_param_l(&in_param);
		id = index & mpt_mask(dev);
		err = mr_res_start_move_to(dev, slave, id,
					   RES_MPT_MAPPED, &mpt);
		if (err)
			return err;

		err = __mlx4_mr_alloc_icm(dev, mpt->key);
		if (err) {
			res_abort_move(dev, slave, RES_MPT, id);
			return err;
		}

		res_end_move(dev, slave, RES_MPT, id);
		break;
	}
	return err;
}
static int cq_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			u64 in_param, u64 *out_param)
{
	int cqn;
	int err;

	switch (op) {
	case RES_OP_RESERVE_AND_MAP:
		err = __mlx4_cq_alloc_icm(dev, &cqn);
		if (err)
			break;

		err = add_res_range(dev, slave, cqn, 1, RES_CQ, 0);
		if (err) {
			__mlx4_cq_free_icm(dev, cqn);
			break;
		}

		set_param_l(out_param, cqn);
		break;

	default:
		err = -EINVAL;
	}

	return err;
}

static int srq_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			 u64 in_param, u64 *out_param)
{
	int srqn;
	int err;

	switch (op) {
	case RES_OP_RESERVE_AND_MAP:
		err = __mlx4_srq_alloc_icm(dev, &srqn);
		if (err)
			break;

		err = add_res_range(dev, slave, srqn, 1, RES_SRQ, 0);
		if (err) {
			__mlx4_srq_free_icm(dev, srqn);
			break;
		}

		set_param_l(out_param, srqn);
		break;

	default:
		err = -EINVAL;
	}

	return err;
}
static int mac_add_to_slave(struct mlx4_dev *dev, int slave, u64 mac, int port)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct mac_res *res;

	res = kzalloc(sizeof *res, GFP_KERNEL);
	if (!res)
		return -ENOMEM;
	res->mac = mac;
	res->port = (u8) port;
	list_add_tail(&res->list,
		      &tracker->slave_list[slave].res_list[RES_MAC]);
	return 0;
}

static void mac_del_from_slave(struct mlx4_dev *dev, int slave, u64 mac,
			       int port)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *mac_list =
		&tracker->slave_list[slave].res_list[RES_MAC];
	struct mac_res *res, *tmp;

	list_for_each_entry_safe(res, tmp, mac_list, list) {
		if (res->mac == mac && res->port == (u8) port) {
			list_del(&res->list);
			kfree(res);
			break;
		}
	}
}

static void rem_slave_macs(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *mac_list =
		&tracker->slave_list[slave].res_list[RES_MAC];
	struct mac_res *res, *tmp;

	list_for_each_entry_safe(res, tmp, mac_list, list) {
		list_del(&res->list);
		__mlx4_unregister_mac(dev, res->port, res->mac);
		kfree(res);
	}
}
static int mac_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			 u64 in_param, u64 *out_param)
{
	int err = -EINVAL;
	int port;
	u64 mac;

	if (op != RES_OP_RESERVE_AND_MAP)
		return err;

	port = get_param_l(out_param);
	mac = in_param;

	err = __mlx4_register_mac(dev, port, mac);
	if (err >= 0) {
		set_param_l(out_param, err);
		err = 0;
	}

	if (!err) {
		err = mac_add_to_slave(dev, slave, mac, port);
		if (err)
			__mlx4_unregister_mac(dev, port, mac);
	}
	return err;
}

static int vlan_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			  u64 in_param, u64 *out_param)
{
	/* VLAN resources are not tracked here; nothing to do */
	return 0;
}
static int counter_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			     u64 in_param, u64 *out_param)
{
	u32 index;
	int err;

	if (op != RES_OP_RESERVE)
		return -EINVAL;

	err = __mlx4_counter_alloc(dev, &index);
	if (err)
		return err;

	err = add_res_range(dev, slave, index, 1, RES_COUNTER, 0);
	if (err)
		__mlx4_counter_free(dev, index);
	else
		set_param_l(out_param, index);

	return err;
}

static int xrcdn_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			   u64 in_param, u64 *out_param)
{
	u32 xrcdn;
	int err;

	if (op != RES_OP_RESERVE)
		return -EINVAL;

	err = __mlx4_xrcd_alloc(dev, &xrcdn);
	if (err)
		return err;

	err = add_res_range(dev, slave, xrcdn, 1, RES_XRCD, 0);
	if (err)
		__mlx4_xrcd_free(dev, xrcdn);
	else
		set_param_l(out_param, xrcdn);

	return err;
}
int mlx4_ALLOC_RES_wrapper(struct mlx4_dev *dev, int slave,
			   struct mlx4_vhcr *vhcr,
			   struct mlx4_cmd_mailbox *inbox,
			   struct mlx4_cmd_mailbox *outbox,
			   struct mlx4_cmd_info *cmd)
{
	int err;
	int alop = vhcr->op_modifier;

	switch (vhcr->in_modifier) {
	case RES_QP:
		err = qp_alloc_res(dev, slave, vhcr->op_modifier, alop,
				   vhcr->in_param, &vhcr->out_param);
		break;

	case RES_MTT:
		err = mtt_alloc_res(dev, slave, vhcr->op_modifier, alop,
				    vhcr->in_param, &vhcr->out_param);
		break;

	case RES_MPT:
		err = mpt_alloc_res(dev, slave, vhcr->op_modifier, alop,
				    vhcr->in_param, &vhcr->out_param);
		break;

	case RES_CQ:
		err = cq_alloc_res(dev, slave, vhcr->op_modifier, alop,
				   vhcr->in_param, &vhcr->out_param);
		break;

	case RES_SRQ:
		err = srq_alloc_res(dev, slave, vhcr->op_modifier, alop,
				    vhcr->in_param, &vhcr->out_param);
		break;

	case RES_MAC:
		err = mac_alloc_res(dev, slave, vhcr->op_modifier, alop,
				    vhcr->in_param, &vhcr->out_param);
		break;

	case RES_VLAN:
		err = vlan_alloc_res(dev, slave, vhcr->op_modifier, alop,
				     vhcr->in_param, &vhcr->out_param);
		break;

	case RES_COUNTER:
		err = counter_alloc_res(dev, slave, vhcr->op_modifier, alop,
					vhcr->in_param, &vhcr->out_param);
		break;

	case RES_XRCD:
		err = xrcdn_alloc_res(dev, slave, vhcr->op_modifier, alop,
				      vhcr->in_param, &vhcr->out_param);
		break;

	default:
		err = -EINVAL;
		break;
	}

	return err;
}
static int qp_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
		       u64 in_param)
{
	int err;
	int count;
	int base;
	int qpn;

	switch (op) {
	case RES_OP_RESERVE:
		base = get_param_l(&in_param) & 0x7fffff;
		count = get_param_h(&in_param);
		err = rem_res_range(dev, slave, base, count, RES_QP, 0);
		if (err)
			break;
		__mlx4_qp_release_range(dev, base, count);
		break;
	case RES_OP_MAP_ICM:
		qpn = get_param_l(&in_param) & 0x7fffff;
		err = qp_res_start_move_to(dev, slave, qpn, RES_QP_RESERVED,
					   NULL, 0);
		if (err)
			return err;

		if (!valid_reserved(dev, slave, qpn))
			__mlx4_qp_free_icm(dev, qpn);

		res_end_move(dev, slave, RES_QP, qpn);

		if (valid_reserved(dev, slave, qpn))
			err = rem_res_range(dev, slave, qpn, 1, RES_QP, 0);
		break;
	default:
		err = -EINVAL;
		break;
	}
	return err;
}
static int mtt_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			u64 in_param, u64 *out_param)
{
	int err = -EINVAL;
	int base;
	int order;

	if (op != RES_OP_RESERVE_AND_MAP)
		return err;

	base = get_param_l(&in_param);
	order = get_param_h(&in_param);
	err = rem_res_range(dev, slave, base, 1, RES_MTT, order);
	if (!err)
		__mlx4_free_mtt_range(dev, base, order);
	return err;
}
static int mpt_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			u64 in_param)
{
	int err = -EINVAL;
	int index;
	int id;
	struct res_mpt *mpt;

	switch (op) {
	case RES_OP_RESERVE:
		index = get_param_l(&in_param);
		id = index & mpt_mask(dev);
		err = get_res(dev, slave, id, RES_MPT, &mpt);
		if (err)
			break;
		index = mpt->key;
		put_res(dev, slave, id, RES_MPT);

		err = rem_res_range(dev, slave, id, 1, RES_MPT, 0);
		if (err)
			break;
		__mlx4_mr_release(dev, index);
		break;
	case RES_OP_MAP_ICM:
		index = get_param_l(&in_param);
		id = index & mpt_mask(dev);
		err = mr_res_start_move_to(dev, slave, id,
					   RES_MPT_RESERVED, &mpt);
		if (err)
			return err;

		__mlx4_mr_free_icm(dev, mpt->key);
		res_end_move(dev, slave, RES_MPT, id);
		return err;
	default:
		err = -EINVAL;
		break;
	}
	return err;
}
static int cq_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
		       u64 in_param, u64 *out_param)
{
	int cqn;
	int err;

	switch (op) {
	case RES_OP_RESERVE_AND_MAP:
		cqn = get_param_l(&in_param);
		err = rem_res_range(dev, slave, cqn, 1, RES_CQ, 0);
		if (err)
			break;

		__mlx4_cq_free_icm(dev, cqn);
		break;

	default:
		err = -EINVAL;
		break;
	}

	return err;
}

static int srq_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			u64 in_param, u64 *out_param)
{
	int srqn;
	int err;

	switch (op) {
	case RES_OP_RESERVE_AND_MAP:
		srqn = get_param_l(&in_param);
		err = rem_res_range(dev, slave, srqn, 1, RES_SRQ, 0);
		if (err)
			break;

		__mlx4_srq_free_icm(dev, srqn);
		break;

	default:
		err = -EINVAL;
		break;
	}

	return err;
}
static int mac_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			u64 in_param, u64 *out_param)
{
	int port;
	int err = 0;

	switch (op) {
	case RES_OP_RESERVE_AND_MAP:
		port = get_param_l(out_param);
		mac_del_from_slave(dev, slave, in_param, port);
		__mlx4_unregister_mac(dev, port, in_param);
		break;
	default:
		err = -EINVAL;
		break;
	}

	return err;
}

static int vlan_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			 u64 in_param, u64 *out_param)
{
	/* VLAN resources are not tracked here; nothing to do */
	return 0;
}

static int counter_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			    u64 in_param, u64 *out_param)
{
	int index;
	int err;

	if (op != RES_OP_RESERVE)
		return -EINVAL;

	index = get_param_l(&in_param);
	err = rem_res_range(dev, slave, index, 1, RES_COUNTER, 0);
	if (err)
		return err;

	__mlx4_counter_free(dev, index);

	return err;
}

static int xrcdn_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			  u64 in_param, u64 *out_param)
{
	int xrcdn;
	int err;

	if (op != RES_OP_RESERVE)
		return -EINVAL;

	xrcdn = get_param_l(&in_param);
	err = rem_res_range(dev, slave, xrcdn, 1, RES_XRCD, 0);
	if (err)
		return err;

	__mlx4_xrcd_free(dev, xrcdn);

	return err;
}
int mlx4_FREE_RES_wrapper(struct mlx4_dev *dev, int slave,
			  struct mlx4_vhcr *vhcr,
			  struct mlx4_cmd_mailbox *inbox,
			  struct mlx4_cmd_mailbox *outbox,
			  struct mlx4_cmd_info *cmd)
{
	int err = -EINVAL;
	int alop = vhcr->op_modifier;

	switch (vhcr->in_modifier) {
	case RES_QP:
		err = qp_free_res(dev, slave, vhcr->op_modifier, alop,
				  vhcr->in_param);
		break;

	case RES_MTT:
		err = mtt_free_res(dev, slave, vhcr->op_modifier, alop,
				   vhcr->in_param, &vhcr->out_param);
		break;

	case RES_MPT:
		err = mpt_free_res(dev, slave, vhcr->op_modifier, alop,
				   vhcr->in_param);
		break;

	case RES_CQ:
		err = cq_free_res(dev, slave, vhcr->op_modifier, alop,
				  vhcr->in_param, &vhcr->out_param);
		break;

	case RES_SRQ:
		err = srq_free_res(dev, slave, vhcr->op_modifier, alop,
				   vhcr->in_param, &vhcr->out_param);
		break;

	case RES_MAC:
		err = mac_free_res(dev, slave, vhcr->op_modifier, alop,
				   vhcr->in_param, &vhcr->out_param);
		break;

	case RES_VLAN:
		err = vlan_free_res(dev, slave, vhcr->op_modifier, alop,
				    vhcr->in_param, &vhcr->out_param);
		break;

	case RES_COUNTER:
		err = counter_free_res(dev, slave, vhcr->op_modifier, alop,
				       vhcr->in_param, &vhcr->out_param);
		break;

	case RES_XRCD:
		err = xrcdn_free_res(dev, slave, vhcr->op_modifier, alop,
				     vhcr->in_param, &vhcr->out_param);
		break;

	default:
		break;
	}
	return err;
}
/* ugly but other choices are uglier */
static int mr_phys_mpt(struct mlx4_mpt_entry *mpt)
{
	return (be32_to_cpu(mpt->flags) >> 9) & 1;
}

static int mr_get_mtt_addr(struct mlx4_mpt_entry *mpt)
{
	return (int)be64_to_cpu(mpt->mtt_addr) & 0xfffffff8;
}

static int mr_get_mtt_size(struct mlx4_mpt_entry *mpt)
{
	return be32_to_cpu(mpt->mtt_sz);
}

static int qp_get_mtt_addr(struct mlx4_qp_context *qpc)
{
	return be32_to_cpu(qpc->mtt_base_addr_l) & 0xfffffff8;
}

static int srq_get_mtt_addr(struct mlx4_srq_context *srqc)
{
	return be32_to_cpu(srqc->mtt_base_addr_l) & 0xfffffff8;
}

static int qp_get_mtt_size(struct mlx4_qp_context *qpc)
{
	int page_shift = (qpc->log_page_size & 0x3f) + 12;
	int log_sq_size = (qpc->sq_size_stride >> 3) & 0xf;
	int log_sq_stride = qpc->sq_size_stride & 7;
	int log_rq_size = (qpc->rq_size_stride >> 3) & 0xf;
	int log_rq_stride = qpc->rq_size_stride & 7;
	int srq = (be32_to_cpu(qpc->srqn) >> 24) & 1;
	int rss = (be32_to_cpu(qpc->flags) >> 13) & 1;
	int xrc = (be32_to_cpu(qpc->local_qpn) >> 23) & 1;
	int sq_size;
	int rq_size;
	int total_mem;
	int total_pages;
	int page_offset = (be32_to_cpu(qpc->params2) >> 6) & 0x3f;

	sq_size = 1 << (log_sq_size + log_sq_stride + 4);
	rq_size = (srq | rss | xrc) ? 0 : (1 << (log_rq_size + log_rq_stride + 4));
	total_mem = sq_size + rq_size;
	total_pages =
		roundup_pow_of_two((total_mem + (page_offset << 6)) >>
				   page_shift);

	return total_pages;
}
static int check_mtt_range(struct mlx4_dev *dev, int slave, int start,
			   int size, struct res_mtt *mtt)
{
	int res_start = mtt->com.res_id;
	int res_size = (1 << mtt->order);

	if (start < res_start || start + size > res_start + res_size)
		return -EPERM;
	return 0;
}
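/*
 * Pattern shared by the SW2HW wrappers below: move the tracked resource
 * toward its HW state, look up and range-check the MTT it points at,
 * forward the command to firmware via mlx4_DMA_wrapper(), and only then
 * commit the transition and take a reference on the MTT; any failure
 * unwinds with put_res()/res_abort_move().
 */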
int mlx4_SW2HW_MPT_wrapper(struct mlx4_dev *dev, int slave,
			   struct mlx4_vhcr *vhcr,
			   struct mlx4_cmd_mailbox *inbox,
			   struct mlx4_cmd_mailbox *outbox,
			   struct mlx4_cmd_info *cmd)
{
	int err;
	int index = vhcr->in_modifier;
	struct res_mtt *mtt;
	struct res_mpt *mpt;
	int mtt_base = mr_get_mtt_addr(inbox->buf) / dev->caps.mtt_entry_sz;
	int phys;
	int id;

	id = index & mpt_mask(dev);
	err = mr_res_start_move_to(dev, slave, id, RES_MPT_HW, &mpt);
	if (err)
		return err;

	phys = mr_phys_mpt(inbox->buf);
	if (!phys) {
		err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
		if (err)
			goto ex_abort;

		err = check_mtt_range(dev, slave, mtt_base,
				      mr_get_mtt_size(inbox->buf), mtt);
		if (err)
			goto ex_put;

		mpt->mtt = mtt;
	}

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
	if (err)
		goto ex_put;

	if (!phys) {
		atomic_inc(&mtt->ref_count);
		put_res(dev, slave, mtt->com.res_id, RES_MTT);
	}

	res_end_move(dev, slave, RES_MPT, id);
	return 0;

ex_put:
	if (!phys)
		put_res(dev, slave, mtt->com.res_id, RES_MTT);
ex_abort:
	res_abort_move(dev, slave, RES_MPT, id);

	return err;
}
int mlx4_HW2SW_MPT_wrapper(struct mlx4_dev *dev, int slave,
			   struct mlx4_vhcr *vhcr,
			   struct mlx4_cmd_mailbox *inbox,
			   struct mlx4_cmd_mailbox *outbox,
			   struct mlx4_cmd_info *cmd)
{
	int err;
	int index = vhcr->in_modifier;
	struct res_mpt *mpt;
	int id;

	id = index & mpt_mask(dev);
	err = mr_res_start_move_to(dev, slave, id, RES_MPT_MAPPED, &mpt);
	if (err)
		return err;

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
	if (err)
		goto ex_abort;

	if (mpt->mtt)
		atomic_dec(&mpt->mtt->ref_count);

	res_end_move(dev, slave, RES_MPT, id);
	return 0;

ex_abort:
	res_abort_move(dev, slave, RES_MPT, id);

	return err;
}
int mlx4_QUERY_MPT_wrapper(struct mlx4_dev *dev, int slave,
			   struct mlx4_vhcr *vhcr,
			   struct mlx4_cmd_mailbox *inbox,
			   struct mlx4_cmd_mailbox *outbox,
			   struct mlx4_cmd_info *cmd)
{
	int err;
	int index = vhcr->in_modifier;
	struct res_mpt *mpt;
	int id;

	id = index & mpt_mask(dev);
	err = get_res(dev, slave, id, RES_MPT, &mpt);
	if (err)
		return err;

	if (mpt->com.from_state != RES_MPT_HW) {
		err = -EBUSY;
		goto out;
	}

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);

out:
	put_res(dev, slave, id, RES_MPT);
	return err;
}

static int qp_get_rcqn(struct mlx4_qp_context *qpc)
{
	return be32_to_cpu(qpc->cqn_recv) & 0xffffff;
}

static int qp_get_scqn(struct mlx4_qp_context *qpc)
{
	return be32_to_cpu(qpc->cqn_send) & 0xffffff;
}

static u32 qp_get_srqn(struct mlx4_qp_context *qpc)
{
	return be32_to_cpu(qpc->srqn) & 0x1ffffff;
}
int mlx4_RST2INIT_QP_wrapper(struct mlx4_dev *dev, int slave,
			     struct mlx4_vhcr *vhcr,
			     struct mlx4_cmd_mailbox *inbox,
			     struct mlx4_cmd_mailbox *outbox,
			     struct mlx4_cmd_info *cmd)
{
	int err;
	int qpn = vhcr->in_modifier & 0x7fffff;
	struct res_mtt *mtt;
	struct res_qp *qp;
	struct mlx4_qp_context *qpc = inbox->buf + 8;
	int mtt_base = qp_get_mtt_addr(qpc) / dev->caps.mtt_entry_sz;
	int mtt_size = qp_get_mtt_size(qpc);
	struct res_cq *rcq;
	struct res_cq *scq;
	int rcqn = qp_get_rcqn(qpc);
	int scqn = qp_get_scqn(qpc);
	u32 srqn = qp_get_srqn(qpc) & 0xffffff;
	int use_srq = (qp_get_srqn(qpc) >> 24) & 1;
	struct res_srq *srq;
	int local_qpn = be32_to_cpu(qpc->local_qpn) & 0xffffff;

	err = qp_res_start_move_to(dev, slave, qpn, RES_QP_HW, &qp, 0);
	if (err)
		return err;
	qp->local_qpn = local_qpn;

	err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
	if (err)
		goto ex_abort;

	err = check_mtt_range(dev, slave, mtt_base, mtt_size, mtt);
	if (err)
		goto ex_put_mtt;

	err = get_res(dev, slave, rcqn, RES_CQ, &rcq);
	if (err)
		goto ex_put_mtt;

	if (scqn != rcqn) {
		err = get_res(dev, slave, scqn, RES_CQ, &scq);
		if (err)
			goto ex_put_rcq;
	} else
		scq = rcq;

	if (use_srq) {
		err = get_res(dev, slave, srqn, RES_SRQ, &srq);
		if (err)
			goto ex_put_scq;
	}

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
	if (err)
		goto ex_put_srq;
	atomic_inc(&mtt->ref_count);
	qp->mtt = mtt;
	atomic_inc(&rcq->ref_count);
	qp->rcq = rcq;
	atomic_inc(&scq->ref_count);
	qp->scq = scq;

	if (scqn != rcqn)
		put_res(dev, slave, scqn, RES_CQ);

	if (use_srq) {
		atomic_inc(&srq->ref_count);
		put_res(dev, slave, srqn, RES_SRQ);
		qp->srq = srq;
	}
	put_res(dev, slave, rcqn, RES_CQ);
	put_res(dev, slave, mtt_base, RES_MTT);
	res_end_move(dev, slave, RES_QP, qpn);

	return 0;

ex_put_srq:
	if (use_srq)
		put_res(dev, slave, srqn, RES_SRQ);
ex_put_scq:
	if (scqn != rcqn)
		put_res(dev, slave, scqn, RES_CQ);
ex_put_rcq:
	put_res(dev, slave, rcqn, RES_CQ);
ex_put_mtt:
	put_res(dev, slave, mtt_base, RES_MTT);
ex_abort:
	res_abort_move(dev, slave, RES_QP, qpn);

	return err;
}
static int eq_get_mtt_addr(struct mlx4_eq_context *eqc)
{
	return be32_to_cpu(eqc->mtt_base_addr_l) & 0xfffffff8;
}

static int eq_get_mtt_size(struct mlx4_eq_context *eqc)
{
	int log_eq_size = eqc->log_eq_size & 0x1f;
	int page_shift = (eqc->log_page_size & 0x3f) + 12;

	if (log_eq_size + 5 < page_shift)
		return 1;

	return 1 << (log_eq_size + 5 - page_shift);
}

static int cq_get_mtt_addr(struct mlx4_cq_context *cqc)
{
	return be32_to_cpu(cqc->mtt_base_addr_l) & 0xfffffff8;
}

static int cq_get_mtt_size(struct mlx4_cq_context *cqc)
{
	int log_cq_size = (be32_to_cpu(cqc->logsize_usrpage) >> 24) & 0x1f;
	int page_shift = (cqc->log_page_size & 0x3f) + 12;

	if (log_cq_size + 5 < page_shift)
		return 1;

	return 1 << (log_cq_size + 5 - page_shift);
}
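/*
 * EQ numbers are only unique per slave, so the tracker key for an EQ
 * embeds the slave number in the high bits: res_id = (slave << 8) | eqn.
 */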
int mlx4_SW2HW_EQ_wrapper(struct mlx4_dev *dev, int slave,
			  struct mlx4_vhcr *vhcr,
			  struct mlx4_cmd_mailbox *inbox,
			  struct mlx4_cmd_mailbox *outbox,
			  struct mlx4_cmd_info *cmd)
{
	int err;
	int eqn = vhcr->in_modifier;
	int res_id = (slave << 8) | eqn;
	struct mlx4_eq_context *eqc = inbox->buf;
	int mtt_base = eq_get_mtt_addr(eqc) / dev->caps.mtt_entry_sz;
	int mtt_size = eq_get_mtt_size(eqc);
	struct res_eq *eq;
	struct res_mtt *mtt;

	err = add_res_range(dev, slave, res_id, 1, RES_EQ, 0);
	if (err)
		return err;
	err = eq_res_start_move_to(dev, slave, res_id, RES_EQ_HW, &eq);
	if (err)
		goto out_add;

	err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
	if (err)
		goto out_move;

	err = check_mtt_range(dev, slave, mtt_base, mtt_size, mtt);
	if (err)
		goto out_put;

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
	if (err)
		goto out_put;

	atomic_inc(&mtt->ref_count);
	eq->mtt = mtt;
	put_res(dev, slave, mtt->com.res_id, RES_MTT);
	res_end_move(dev, slave, RES_EQ, res_id);
	return 0;

out_put:
	put_res(dev, slave, mtt->com.res_id, RES_MTT);
out_move:
	res_abort_move(dev, slave, RES_EQ, res_id);
out_add:
	rem_res_range(dev, slave, res_id, 1, RES_EQ, 0);
	return err;
}
static int get_containing_mtt(struct mlx4_dev *dev, int slave, int start,
			      int len, struct res_mtt **res)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct res_mtt *mtt;
	int err = -EINVAL;

	spin_lock_irq(mlx4_tlock(dev));
	list_for_each_entry(mtt, &tracker->slave_list[slave].res_list[RES_MTT],
			    com.list) {
		if (!check_mtt_range(dev, slave, start, len, mtt)) {
			*res = mtt;
			mtt->com.from_state = mtt->com.state;
			mtt->com.state = RES_MTT_BUSY;
			err = 0;
			break;
		}
	}
	spin_unlock_irq(mlx4_tlock(dev));

	return err;
}
int mlx4_WRITE_MTT_wrapper(struct mlx4_dev *dev, int slave,
			   struct mlx4_vhcr *vhcr,
			   struct mlx4_cmd_mailbox *inbox,
			   struct mlx4_cmd_mailbox *outbox,
			   struct mlx4_cmd_info *cmd)
{
	struct mlx4_mtt mtt;
	__be64 *page_list = inbox->buf;
	u64 *pg_list = (u64 *)page_list;
	int i;
	struct res_mtt *rmtt = NULL;
	int start = be64_to_cpu(page_list[0]);
	int npages = vhcr->in_modifier;
	int err;

	err = get_containing_mtt(dev, slave, start, npages, &rmtt);
	if (err)
		return err;

	/* Call the SW implementation of write_mtt:
	 * - Prepare a dummy mtt struct
	 * - Translate inbox contents to simple addresses in host endianness */
	mtt.offset = 0;  /* TBD this is broken but I don't handle it since
			    we don't really use it */
	mtt.order = 0;
	mtt.page_shift = 0;
	for (i = 0; i < npages; ++i)
		pg_list[i + 2] = (be64_to_cpu(page_list[i + 2]) & ~1ULL);

	err = __mlx4_write_mtt(dev, &mtt, be64_to_cpu(page_list[0]), npages,
			       ((u64 *)page_list + 2));

	if (rmtt)
		put_res(dev, slave, rmtt->com.res_id, RES_MTT);

	return err;
}
int mlx4_HW2SW_EQ_wrapper(struct mlx4_dev *dev, int slave,
			  struct mlx4_vhcr *vhcr,
			  struct mlx4_cmd_mailbox *inbox,
			  struct mlx4_cmd_mailbox *outbox,
			  struct mlx4_cmd_info *cmd)
{
	int eqn = vhcr->in_modifier;
	int res_id = eqn | (slave << 8);
	struct res_eq *eq;
	int err;

	err = eq_res_start_move_to(dev, slave, res_id, RES_EQ_RESERVED, &eq);
	if (err)
		return err;

	err = get_res(dev, slave, eq->mtt->com.res_id, RES_MTT, NULL);
	if (err)
		goto ex_abort;

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
	if (err)
		goto ex_put;

	atomic_dec(&eq->mtt->ref_count);
	put_res(dev, slave, eq->mtt->com.res_id, RES_MTT);
	res_end_move(dev, slave, RES_EQ, res_id);
	rem_res_range(dev, slave, res_id, 1, RES_EQ, 0);

	return 0;

ex_put:
	put_res(dev, slave, eq->mtt->com.res_id, RES_MTT);
ex_abort:
	res_abort_move(dev, slave, RES_EQ, res_id);

	return err;
}
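/*
 * mlx4_GEN_EQE() injects an event into a slave's event queue on its
 * behalf: the 28-byte EQE payload is copied into a mailbox and posted
 * with the GEN_EQE firmware command, targeting the EQ the slave
 * registered for this event type.
 */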
int mlx4_GEN_EQE(struct mlx4_dev *dev, int slave, struct mlx4_eqe *eqe)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_slave_event_eq_info *event_eq;
	struct mlx4_cmd_mailbox *mailbox;
	u32 in_modifier = 0;
	int err;
	int res_id;
	struct res_eq *req;

	if (!priv->mfunc.master.slave_state)
		return -EINVAL;

	event_eq = &priv->mfunc.master.slave_state[slave].event_eq[eqe->type];

	/* Create the event only if the slave is registered */
	if (event_eq->eqn < 0)
		return 0;

	mutex_lock(&priv->mfunc.master.gen_eqe_mutex[slave]);
	res_id = (slave << 8) | event_eq->eqn;
	err = get_res(dev, slave, res_id, RES_EQ, &req);
	if (err)
		goto unlock;

	if (req->com.from_state != RES_EQ_HW) {
		err = -EINVAL;
		goto put;
	}

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox)) {
		err = PTR_ERR(mailbox);
		goto put;
	}

	if (eqe->type == MLX4_EVENT_TYPE_CMD) {
		++event_eq->token;
		eqe->event.cmd.token = cpu_to_be16(event_eq->token);
	}

	memcpy(mailbox->buf, (u8 *) eqe, 28);

	in_modifier = (slave & 0xff) | ((event_eq->eqn & 0xff) << 16);

	err = mlx4_cmd(dev, mailbox->dma, in_modifier, 0,
		       MLX4_CMD_GEN_EQE, MLX4_CMD_TIME_CLASS_B,
		       MLX4_CMD_NATIVE);

	put_res(dev, slave, res_id, RES_EQ);
	mutex_unlock(&priv->mfunc.master.gen_eqe_mutex[slave]);
	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;

put:
	put_res(dev, slave, res_id, RES_EQ);

unlock:
	mutex_unlock(&priv->mfunc.master.gen_eqe_mutex[slave]);
	return err;
}
int mlx4_QUERY_EQ_wrapper(struct mlx4_dev *dev, int slave,
			  struct mlx4_vhcr *vhcr,
			  struct mlx4_cmd_mailbox *inbox,
			  struct mlx4_cmd_mailbox *outbox,
			  struct mlx4_cmd_info *cmd)
{
	int eqn = vhcr->in_modifier;
	int res_id = eqn | (slave << 8);
	struct res_eq *eq;
	int err;

	err = get_res(dev, slave, res_id, RES_EQ, &eq);
	if (err)
		return err;

	if (eq->com.from_state != RES_EQ_HW) {
		err = -EINVAL;
		goto ex_put;
	}

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);

ex_put:
	put_res(dev, slave, res_id, RES_EQ);
	return err;
}
int mlx4_SW2HW_CQ_wrapper(struct mlx4_dev *dev, int slave,
			  struct mlx4_vhcr *vhcr,
			  struct mlx4_cmd_mailbox *inbox,
			  struct mlx4_cmd_mailbox *outbox,
			  struct mlx4_cmd_info *cmd)
{
	int err;
	int cqn = vhcr->in_modifier;
	struct mlx4_cq_context *cqc = inbox->buf;
	int mtt_base = cq_get_mtt_addr(cqc) / dev->caps.mtt_entry_sz;
	struct res_cq *cq;
	struct res_mtt *mtt;

	err = cq_res_start_move_to(dev, slave, cqn, RES_CQ_HW, &cq);
	if (err)
		return err;
	err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
	if (err)
		goto out_move;
	err = check_mtt_range(dev, slave, mtt_base, cq_get_mtt_size(cqc), mtt);
	if (err)
		goto out_put;
	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
	if (err)
		goto out_put;
	atomic_inc(&mtt->ref_count);
	cq->mtt = mtt;
	put_res(dev, slave, mtt->com.res_id, RES_MTT);
	res_end_move(dev, slave, RES_CQ, cqn);
	return 0;

out_put:
	put_res(dev, slave, mtt->com.res_id, RES_MTT);
out_move:
	res_abort_move(dev, slave, RES_CQ, cqn);
	return err;
}
int mlx4_HW2SW_CQ_wrapper(struct mlx4_dev *dev, int slave,
			  struct mlx4_vhcr *vhcr,
			  struct mlx4_cmd_mailbox *inbox,
			  struct mlx4_cmd_mailbox *outbox,
			  struct mlx4_cmd_info *cmd)
{
	int err;
	int cqn = vhcr->in_modifier;
	struct res_cq *cq;

	err = cq_res_start_move_to(dev, slave, cqn, RES_CQ_ALLOCATED, &cq);
	if (err)
		return err;
	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
	if (err)
		goto out_move;
	atomic_dec(&cq->mtt->ref_count);
	res_end_move(dev, slave, RES_CQ, cqn);
	return 0;

out_move:
	res_abort_move(dev, slave, RES_CQ, cqn);
	return err;
}

int mlx4_QUERY_CQ_wrapper(struct mlx4_dev *dev, int slave,
			  struct mlx4_vhcr *vhcr,
			  struct mlx4_cmd_mailbox *inbox,
			  struct mlx4_cmd_mailbox *outbox,
			  struct mlx4_cmd_info *cmd)
{
	int cqn = vhcr->in_modifier;
	struct res_cq *cq;
	int err;

	err = get_res(dev, slave, cqn, RES_CQ, &cq);
	if (err)
		return err;

	if (cq->com.from_state != RES_CQ_HW)
		goto ex_put;

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
ex_put:
	put_res(dev, slave, cqn, RES_CQ);

	return err;
}
static int handle_resize(struct mlx4_dev *dev, int slave,
			 struct mlx4_vhcr *vhcr,
			 struct mlx4_cmd_mailbox *inbox,
			 struct mlx4_cmd_mailbox *outbox,
			 struct mlx4_cmd_info *cmd,
			 struct res_cq *cq)
{
	int err;
	struct res_mtt *orig_mtt;
	struct res_mtt *mtt;
	struct mlx4_cq_context *cqc = inbox->buf;
	int mtt_base = cq_get_mtt_addr(cqc) / dev->caps.mtt_entry_sz;

	err = get_res(dev, slave, cq->mtt->com.res_id, RES_MTT, &orig_mtt);
	if (err)
		return err;

	if (orig_mtt != cq->mtt) {
		err = -EINVAL;
		goto ex_put;
	}

	err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
	if (err)
		goto ex_put;

	err = check_mtt_range(dev, slave, mtt_base, cq_get_mtt_size(cqc), mtt);
	if (err)
		goto ex_put1;
	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
	if (err)
		goto ex_put1;
	atomic_dec(&orig_mtt->ref_count);
	put_res(dev, slave, orig_mtt->com.res_id, RES_MTT);
	atomic_inc(&mtt->ref_count);
	cq->mtt = mtt;
	put_res(dev, slave, mtt->com.res_id, RES_MTT);
	return 0;

ex_put1:
	put_res(dev, slave, mtt->com.res_id, RES_MTT);
ex_put:
	put_res(dev, slave, orig_mtt->com.res_id, RES_MTT);

	return err;
}
int mlx4_MODIFY_CQ_wrapper(struct mlx4_dev *dev, int slave,
			   struct mlx4_vhcr *vhcr,
			   struct mlx4_cmd_mailbox *inbox,
			   struct mlx4_cmd_mailbox *outbox,
			   struct mlx4_cmd_info *cmd)
{
	int cqn = vhcr->in_modifier;
	struct res_cq *cq;
	int err;

	err = get_res(dev, slave, cqn, RES_CQ, &cq);
	if (err)
		return err;

	if (cq->com.from_state != RES_CQ_HW)
		goto ex_put;

	if (vhcr->op_modifier == 0) {
		err = handle_resize(dev, slave, vhcr, inbox, outbox, cmd, cq);
		goto ex_put;
	}

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
ex_put:
	put_res(dev, slave, cqn, RES_CQ);

	return err;
}
static int srq_get_mtt_size(struct mlx4_srq_context *srqc)
{
	int log_srq_size = (be32_to_cpu(srqc->state_logsize_srqn) >> 24) & 0xf;
	int log_rq_stride = srqc->logstride & 7;
	int page_shift = (srqc->log_page_size & 0x3f) + 12;

	if (log_srq_size + log_rq_stride + 4 < page_shift)
		return 1;

	return 1 << (log_srq_size + log_rq_stride + 4 - page_shift);
}
int mlx4_SW2HW_SRQ_wrapper(struct mlx4_dev *dev, int slave,
			   struct mlx4_vhcr *vhcr,
			   struct mlx4_cmd_mailbox *inbox,
			   struct mlx4_cmd_mailbox *outbox,
			   struct mlx4_cmd_info *cmd)
{
	int err;
	int srqn = vhcr->in_modifier;
	struct res_mtt *mtt;
	struct res_srq *srq;
	struct mlx4_srq_context *srqc = inbox->buf;
	int mtt_base = srq_get_mtt_addr(srqc) / dev->caps.mtt_entry_sz;

	if (srqn != (be32_to_cpu(srqc->state_logsize_srqn) & 0xffffff))
		return -EINVAL;

	err = srq_res_start_move_to(dev, slave, srqn, RES_SRQ_HW, &srq);
	if (err)
		return err;
	err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
	if (err)
		goto ex_abort;
	err = check_mtt_range(dev, slave, mtt_base, srq_get_mtt_size(srqc),
			      mtt);
	if (err)
		goto ex_put_mtt;

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
	if (err)
		goto ex_put_mtt;

	atomic_inc(&mtt->ref_count);
	srq->mtt = mtt;
	put_res(dev, slave, mtt->com.res_id, RES_MTT);
	res_end_move(dev, slave, RES_SRQ, srqn);
	return 0;

ex_put_mtt:
	put_res(dev, slave, mtt->com.res_id, RES_MTT);
ex_abort:
	res_abort_move(dev, slave, RES_SRQ, srqn);

	return err;
}
int mlx4_HW2SW_SRQ_wrapper(struct mlx4_dev *dev, int slave,
			   struct mlx4_vhcr *vhcr,
			   struct mlx4_cmd_mailbox *inbox,
			   struct mlx4_cmd_mailbox *outbox,
			   struct mlx4_cmd_info *cmd)
{
	int err;
	int srqn = vhcr->in_modifier;
	struct res_srq *srq;

	err = srq_res_start_move_to(dev, slave, srqn, RES_SRQ_ALLOCATED, &srq);
	if (err)
		return err;
	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
	if (err)
		goto ex_abort;
	atomic_dec(&srq->mtt->ref_count);
	if (srq->cq)
		atomic_dec(&srq->cq->ref_count);
	res_end_move(dev, slave, RES_SRQ, srqn);

	return 0;

ex_abort:
	res_abort_move(dev, slave, RES_SRQ, srqn);

	return err;
}

int mlx4_QUERY_SRQ_wrapper(struct mlx4_dev *dev, int slave,
			   struct mlx4_vhcr *vhcr,
			   struct mlx4_cmd_mailbox *inbox,
			   struct mlx4_cmd_mailbox *outbox,
			   struct mlx4_cmd_info *cmd)
{
	int err;
	int srqn = vhcr->in_modifier;
	struct res_srq *srq;

	err = get_res(dev, slave, srqn, RES_SRQ, &srq);
	if (err)
		return err;
	if (srq->com.from_state != RES_SRQ_HW) {
		err = -EBUSY;
		goto out;
	}
	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
out:
	put_res(dev, slave, srqn, RES_SRQ);
	return err;
}

int mlx4_ARM_SRQ_wrapper(struct mlx4_dev *dev, int slave,
			 struct mlx4_vhcr *vhcr,
			 struct mlx4_cmd_mailbox *inbox,
			 struct mlx4_cmd_mailbox *outbox,
			 struct mlx4_cmd_info *cmd)
{
	int err;
	int srqn = vhcr->in_modifier;
	struct res_srq *srq;

	err = get_res(dev, slave, srqn, RES_SRQ, &srq);
	if (err)
		return err;

	if (srq->com.from_state != RES_SRQ_HW) {
		err = -EBUSY;
		goto out;
	}

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
out:
	put_res(dev, slave, srqn, RES_SRQ);
	return err;
}
int mlx4_GEN_QP_wrapper(struct mlx4_dev *dev, int slave,
			struct mlx4_vhcr *vhcr,
			struct mlx4_cmd_mailbox *inbox,
			struct mlx4_cmd_mailbox *outbox,
			struct mlx4_cmd_info *cmd)
{
	int err;
	int qpn = vhcr->in_modifier & 0x7fffff;
	struct res_qp *qp;

	err = get_res(dev, slave, qpn, RES_QP, &qp);
	if (err)
		return err;
	if (qp->com.from_state != RES_QP_HW) {
		err = -EBUSY;
		goto out;
	}

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
out:
	put_res(dev, slave, qpn, RES_QP);
	return err;
}

int mlx4_INIT2RTR_QP_wrapper(struct mlx4_dev *dev, int slave,
			     struct mlx4_vhcr *vhcr,
			     struct mlx4_cmd_mailbox *inbox,
			     struct mlx4_cmd_mailbox *outbox,
			     struct mlx4_cmd_info *cmd)
{
	struct mlx4_qp_context *qpc = inbox->buf + 8;

	update_ud_gid(dev, qpc, (u8)slave);

	return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
}
int mlx4_2RST_QP_wrapper(struct mlx4_dev *dev, int slave,
			 struct mlx4_vhcr *vhcr,
			 struct mlx4_cmd_mailbox *inbox,
			 struct mlx4_cmd_mailbox *outbox,
			 struct mlx4_cmd_info *cmd)
{
	int err;
	int qpn = vhcr->in_modifier & 0x7fffff;
	struct res_qp *qp;

	err = qp_res_start_move_to(dev, slave, qpn, RES_QP_MAPPED, &qp, 0);
	if (err)
		return err;
	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
	if (err)
		goto ex_abort;

	atomic_dec(&qp->mtt->ref_count);
	atomic_dec(&qp->rcq->ref_count);
	atomic_dec(&qp->scq->ref_count);
	if (qp->srq)
		atomic_dec(&qp->srq->ref_count);
	res_end_move(dev, slave, RES_QP, qpn);
	return 0;

ex_abort:
	res_abort_move(dev, slave, RES_QP, qpn);

	return err;
}
static struct res_gid *find_gid(struct mlx4_dev *dev, int slave,
				struct res_qp *rqp, u8 *gid)
{
	struct res_gid *res;

	list_for_each_entry(res, &rqp->mcg_list, list) {
		if (!memcmp(res->gid, gid, 16))
			return res;
	}
	return NULL;
}

static int add_mcg_res(struct mlx4_dev *dev, int slave, struct res_qp *rqp,
		       u8 *gid, enum mlx4_protocol prot,
		       enum mlx4_steer_type steer)
{
	struct res_gid *res;
	int err;

	res = kzalloc(sizeof *res, GFP_KERNEL);
	if (!res)
		return -ENOMEM;

	spin_lock_irq(&rqp->mcg_spl);
	if (find_gid(dev, slave, rqp, gid)) {
		kfree(res);
		err = -EEXIST;
	} else {
		memcpy(res->gid, gid, 16);
		res->prot = prot;
		res->steer = steer;
		list_add_tail(&res->list, &rqp->mcg_list);
		err = 0;
	}
	spin_unlock_irq(&rqp->mcg_spl);

	return err;
}

static int rem_mcg_res(struct mlx4_dev *dev, int slave, struct res_qp *rqp,
		       u8 *gid, enum mlx4_protocol prot,
		       enum mlx4_steer_type steer)
{
	struct res_gid *res;
	int err;

	spin_lock_irq(&rqp->mcg_spl);
	res = find_gid(dev, slave, rqp, gid);
	if (!res || res->prot != prot || res->steer != steer)
		err = -EINVAL;
	else {
		list_del(&res->list);
		kfree(res);
		err = 0;
	}
	spin_unlock_irq(&rqp->mcg_spl);

	return err;
}
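/*
 * Multicast attachments are mirrored in the per-QP mcg_list above so
 * that detach_qp() can undo them during slave cleanup without the
 * guest's cooperation.
 */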
int mlx4_QP_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
			   struct mlx4_vhcr *vhcr,
			   struct mlx4_cmd_mailbox *inbox,
			   struct mlx4_cmd_mailbox *outbox,
			   struct mlx4_cmd_info *cmd)
{
	struct mlx4_qp qp; /* dummy for calling attach/detach */
	u8 *gid = inbox->buf;
	enum mlx4_protocol prot = (vhcr->in_modifier >> 28) & 0x7;
	int err;
	int qpn;
	struct res_qp *rqp;
	int attach = vhcr->op_modifier;
	int block_loopback = vhcr->in_modifier >> 31;
	u8 steer_type_mask = 2;
	enum mlx4_steer_type type = (gid[7] & steer_type_mask) >> 1;

	qpn = vhcr->in_modifier & 0xffffff;
	err = get_res(dev, slave, qpn, RES_QP, &rqp);
	if (err)
		return err;

	qp.qpn = qpn;
	if (attach) {
		err = add_mcg_res(dev, slave, rqp, gid, prot, type);
		if (err)
			goto ex_put;

		err = mlx4_qp_attach_common(dev, &qp, gid,
					    block_loopback, prot, type);
		if (err)
			goto ex_rem;
	} else {
		err = rem_mcg_res(dev, slave, rqp, gid, prot, type);
		if (err)
			goto ex_put;
		err = mlx4_qp_detach_common(dev, &qp, gid, prot, type);
	}

	put_res(dev, slave, qpn, RES_QP);
	return 0;

ex_rem:
	/* ignore error return below, already in error */
	(void) rem_mcg_res(dev, slave, rqp, gid, prot, type);
ex_put:
	put_res(dev, slave, qpn, RES_QP);

	return err;
}
/*
 * MAC validation for Flow Steering rules.
 * A VF can attach rules only with a MAC address that is assigned to it.
 */
static int validate_eth_header_mac(int slave, struct _rule_hw *eth_header,
				   struct list_head *rlist)
{
	struct mac_res *res, *tmp;
	__be64 be_mac;

	/* make sure it isn't a multicast or broadcast mac */
	if (!is_multicast_ether_addr(eth_header->eth.dst_mac) &&
	    !is_broadcast_ether_addr(eth_header->eth.dst_mac)) {
		list_for_each_entry_safe(res, tmp, rlist, list) {
			be_mac = cpu_to_be64(res->mac << 16);
			if (!memcmp(&be_mac, eth_header->eth.dst_mac, ETH_ALEN))
				return 0;
		}
		pr_err("MAC %pM doesn't belong to VF %d, Steering rule rejected\n",
		       eth_header->eth.dst_mac, slave);
		return -EINVAL;
	}
	return 0;
}
/*
 * In case of a missing eth header, append an eth header with a MAC
 * address assigned to the VF.
 */
static int add_eth_header(struct mlx4_dev *dev, int slave,
			  struct mlx4_cmd_mailbox *inbox,
			  struct list_head *rlist, int header_id)
{
	struct mac_res *res, *tmp;
	u8 port;
	struct mlx4_net_trans_rule_hw_ctrl *ctrl;
	struct mlx4_net_trans_rule_hw_eth *eth_header;
	struct mlx4_net_trans_rule_hw_ipv4 *ip_header;
	struct mlx4_net_trans_rule_hw_tcp_udp *l4_header;
	__be64 be_mac = 0;
	__be64 mac_msk = cpu_to_be64(MLX4_MAC_MASK << 16);

	ctrl = (struct mlx4_net_trans_rule_hw_ctrl *)inbox->buf;
	port = be32_to_cpu(ctrl->vf_vep_port) & 0xff;
	eth_header = (struct mlx4_net_trans_rule_hw_eth *)(ctrl + 1);

	/* Clear a space in the inbox for eth header */
	switch (header_id) {
	case MLX4_NET_TRANS_RULE_ID_IPV4:
		ip_header =
			(struct mlx4_net_trans_rule_hw_ipv4 *)(eth_header + 1);
		memmove(ip_header, eth_header,
			sizeof(*ip_header) + sizeof(*l4_header));
		break;
	case MLX4_NET_TRANS_RULE_ID_TCP:
	case MLX4_NET_TRANS_RULE_ID_UDP:
		l4_header = (struct mlx4_net_trans_rule_hw_tcp_udp *)
			    (eth_header + 1);
		memmove(l4_header, eth_header, sizeof(*l4_header));
		break;
	default:
		return -EINVAL;
	}
	list_for_each_entry_safe(res, tmp, rlist, list) {
		if (port == res->port) {
			be_mac = cpu_to_be64(res->mac << 16);
			break;
		}
	}
	if (!be_mac) {
		pr_err("Failed adding eth header to FS rule, can't find matching MAC for port %d\n",
		       port);
		return -EINVAL;
	}

	memset(eth_header, 0, sizeof(*eth_header));
	eth_header->size = sizeof(*eth_header) >> 2;
	eth_header->id = cpu_to_be16(__sw_id_hw[MLX4_NET_TRANS_RULE_ID_ETH]);
	memcpy(eth_header->dst_mac, &be_mac, ETH_ALEN);
	memcpy(eth_header->dst_mac_msk, &mac_msk, ETH_ALEN);

	return 0;
}
int mlx4_QP_FLOW_STEERING_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
					 struct mlx4_vhcr *vhcr,
					 struct mlx4_cmd_mailbox *inbox,
					 struct mlx4_cmd_mailbox *outbox,
					 struct mlx4_cmd_info *cmd)
{
	int err;
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *rlist = &tracker->slave_list[slave].res_list[RES_MAC];
	struct mlx4_net_trans_rule_hw_ctrl *ctrl;
	struct _rule_hw *rule_header;
	int header_id;

	if (dev->caps.steering_mode !=
	    MLX4_STEERING_MODE_DEVICE_MANAGED)
		return -EOPNOTSUPP;

	ctrl = (struct mlx4_net_trans_rule_hw_ctrl *)inbox->buf;
	rule_header = (struct _rule_hw *)(ctrl + 1);
	header_id = map_hw_to_sw_id(be16_to_cpu(rule_header->id));

	switch (header_id) {
	case MLX4_NET_TRANS_RULE_ID_ETH:
		if (validate_eth_header_mac(slave, rule_header, rlist))
			return -EINVAL;
		break;
	case MLX4_NET_TRANS_RULE_ID_IPV4:
	case MLX4_NET_TRANS_RULE_ID_TCP:
	case MLX4_NET_TRANS_RULE_ID_UDP:
		pr_warn("Can't attach FS rule without L2 headers, adding L2 header\n");
		if (add_eth_header(dev, slave, inbox, rlist, header_id))
			return -EINVAL;
		vhcr->in_modifier +=
			sizeof(struct mlx4_net_trans_rule_hw_eth) >> 2;
		break;
	default:
		pr_err("Corrupted mailbox\n");
		return -EINVAL;
	}

	err = mlx4_cmd_imm(dev, inbox->dma, &vhcr->out_param,
			   vhcr->in_modifier, 0,
			   MLX4_QP_FLOW_STEERING_ATTACH, MLX4_CMD_TIME_CLASS_A,
			   MLX4_CMD_NATIVE);
	if (err)
		return err;

	err = add_res_range(dev, slave, vhcr->out_param, 1, RES_FS_RULE, 0);
	if (err) {
		mlx4_err(dev, "Failed to add flow steering resources\n");
		/* detach rule */
		mlx4_cmd(dev, vhcr->out_param, 0, 0,
			 MLX4_QP_FLOW_STEERING_DETACH, MLX4_CMD_TIME_CLASS_A,
			 MLX4_CMD_NATIVE);
	}
	return err;
}
int mlx4_QP_FLOW_STEERING_DETACH_wrapper(struct mlx4_dev *dev, int slave,
					 struct mlx4_vhcr *vhcr,
					 struct mlx4_cmd_mailbox *inbox,
					 struct mlx4_cmd_mailbox *outbox,
					 struct mlx4_cmd_info *cmd)
{
	int err;

	if (dev->caps.steering_mode !=
	    MLX4_STEERING_MODE_DEVICE_MANAGED)
		return -EOPNOTSUPP;

	err = rem_res_range(dev, slave, vhcr->in_param, 1, RES_FS_RULE, 0);
	if (err) {
		mlx4_err(dev, "Failed to remove flow steering resources\n");
		return err;
	}

	err = mlx4_cmd(dev, vhcr->in_param, 0, 0,
		       MLX4_QP_FLOW_STEERING_DETACH, MLX4_CMD_TIME_CLASS_A,
		       MLX4_CMD_NATIVE);
	return err;
}

enum {
	BUSY_MAX_RETRIES = 10
};
int mlx4_QUERY_IF_STAT_wrapper(struct mlx4_dev *dev, int slave,
			       struct mlx4_vhcr *vhcr,
			       struct mlx4_cmd_mailbox *inbox,
			       struct mlx4_cmd_mailbox *outbox,
			       struct mlx4_cmd_info *cmd)
{
	int err;
	int index = vhcr->in_modifier & 0xffff;

	err = get_res(dev, slave, index, RES_COUNTER, NULL);
	if (err)
		return err;

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
	put_res(dev, slave, index, RES_COUNTER);
	return err;
}

static void detach_qp(struct mlx4_dev *dev, int slave, struct res_qp *rqp)
{
	struct res_gid *rgid;
	struct res_gid *tmp;
	struct mlx4_qp qp; /* dummy for calling attach/detach */

	list_for_each_entry_safe(rgid, tmp, &rqp->mcg_list, list) {
		qp.qpn = rqp->local_qpn;
		(void) mlx4_qp_detach_common(dev, &qp, rgid->gid, rgid->prot,
					     rgid->steer);
		list_del(&rgid->list);
		kfree(rgid);
	}
}
static int _move_all_busy(struct mlx4_dev *dev, int slave,
			  enum mlx4_resource type, int print)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker =
		&priv->mfunc.master.res_tracker;
	struct list_head *rlist = &tracker->slave_list[slave].res_list[type];
	struct res_common *r;
	struct res_common *tmp;
	int busy;

	busy = 0;
	spin_lock_irq(mlx4_tlock(dev));
	list_for_each_entry_safe(r, tmp, rlist, list) {
		if (r->owner == slave) {
			if (!r->removing) {
				if (r->state == RES_ANY_BUSY) {
					if (print)
						mlx4_dbg(dev,
							 "%s id 0x%llx is busy\n",
							 ResourceType(type),
							 r->res_id);
					++busy;
				} else {
					r->from_state = r->state;
					r->state = RES_ANY_BUSY;
					r->removing = 1;
				}
			}
		}
	}
	spin_unlock_irq(mlx4_tlock(dev));

	return busy;
}

static int move_all_busy(struct mlx4_dev *dev, int slave,
			 enum mlx4_resource type)
{
	unsigned long begin;
	int busy;

	begin = jiffies;
	do {
		busy = _move_all_busy(dev, slave, type, 0);
		if (time_after(jiffies, begin + 5 * HZ))
			break;
		if (busy)
			cond_resched();
	} while (busy);

	if (busy)
		busy = _move_all_busy(dev, slave, type, 1);

	return busy;
}
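/*
 * The rem_slave_*() functions below reclaim everything a dead or
 * resetting slave still owns.  Each resource is first parked busy
 * (move_all_busy()), then walked down its state ladder -- e.g. a QP
 * goes RES_QP_HW -> RES_QP_MAPPED -> RES_QP_RESERVED -> freed -- with
 * the matching firmware command issued at each step.
 */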
static void rem_slave_qps(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *qp_list =
		&tracker->slave_list[slave].res_list[RES_QP];
	struct res_qp *qp;
	struct res_qp *tmp;
	int state;
	u64 in_param;
	int qpn;
	int err;

	err = move_all_busy(dev, slave, RES_QP);
	if (err)
		mlx4_warn(dev, "rem_slave_qps: Could not move all qps to busy "
			  "for slave %d\n", slave);

	spin_lock_irq(mlx4_tlock(dev));
	list_for_each_entry_safe(qp, tmp, qp_list, com.list) {
		spin_unlock_irq(mlx4_tlock(dev));
		if (qp->com.owner == slave) {
			qpn = qp->com.res_id;
			detach_qp(dev, slave, qp);
			state = qp->com.from_state;
			while (state != 0) {
				switch (state) {
				case RES_QP_RESERVED:
					spin_lock_irq(mlx4_tlock(dev));
					rb_erase(&qp->com.node,
						 &tracker->res_tree[RES_QP]);
					list_del(&qp->com.list);
					spin_unlock_irq(mlx4_tlock(dev));
					kfree(qp);
					state = 0;
					break;
				case RES_QP_MAPPED:
					if (!valid_reserved(dev, slave, qpn))
						__mlx4_qp_free_icm(dev, qpn);
					state = RES_QP_RESERVED;
					break;
				case RES_QP_HW:
					in_param = slave;
					err = mlx4_cmd(dev, in_param,
						       qp->local_qpn, 2,
						       MLX4_CMD_2RST_QP,
						       MLX4_CMD_TIME_CLASS_A,
						       MLX4_CMD_NATIVE);
					if (err)
						mlx4_dbg(dev, "rem_slave_qps: failed"
							 " to move slave %d qpn %d to"
							 " reset\n", slave,
							 qp->local_qpn);
					atomic_dec(&qp->rcq->ref_count);
					atomic_dec(&qp->scq->ref_count);
					atomic_dec(&qp->mtt->ref_count);
					if (qp->srq)
						atomic_dec(&qp->srq->ref_count);
					state = RES_QP_MAPPED;
					break;
				default:
					state = 0;
				}
			}
		}
		spin_lock_irq(mlx4_tlock(dev));
	}
	spin_unlock_irq(mlx4_tlock(dev));
}

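/*
 * Tear down all SRQs owned by @slave: RES_SRQ_HW is returned to SW
 * ownership with MLX4_CMD_HW2SW_SRQ, then RES_SRQ_ALLOCATED frees the
 * ICM backing and removes the tracker entry.
 */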
static void rem_slave_srqs(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *srq_list =
		&tracker->slave_list[slave].res_list[RES_SRQ];
	struct res_srq *srq;
	struct res_srq *tmp;
	int state;
	u64 in_param;
	int srqn;
	int err;

	err = move_all_busy(dev, slave, RES_SRQ);
	if (err)
		mlx4_warn(dev, "rem_slave_srqs: Could not move all srqs to "
			  "busy for slave %d\n", slave);

	spin_lock_irq(mlx4_tlock(dev));
	list_for_each_entry_safe(srq, tmp, srq_list, com.list) {
		spin_unlock_irq(mlx4_tlock(dev));
		if (srq->com.owner == slave) {
			srqn = srq->com.res_id;
			state = srq->com.from_state;
			while (state != 0) {
				switch (state) {
				case RES_SRQ_ALLOCATED:
					__mlx4_srq_free_icm(dev, srqn);
					spin_lock_irq(mlx4_tlock(dev));
					rb_erase(&srq->com.node,
						 &tracker->res_tree[RES_SRQ]);
					list_del(&srq->com.list);
					spin_unlock_irq(mlx4_tlock(dev));
					kfree(srq);
					state = 0;
					break;

				case RES_SRQ_HW:
					in_param = slave;
					err = mlx4_cmd(dev, in_param, srqn, 1,
						       MLX4_CMD_HW2SW_SRQ,
						       MLX4_CMD_TIME_CLASS_A,
						       MLX4_CMD_NATIVE);
					if (err)
						mlx4_dbg(dev, "rem_slave_srqs: failed"
							 " to move slave %d srq %d to"
							 " SW ownership\n",
							 slave, srqn);

					atomic_dec(&srq->mtt->ref_count);
					if (srq->cq)
						atomic_dec(&srq->cq->ref_count);
					state = RES_SRQ_ALLOCATED;
					break;

				default:
					state = 0;
				}
			}
		}
		spin_lock_irq(mlx4_tlock(dev));
	}
	spin_unlock_irq(mlx4_tlock(dev));
}

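/*
 * Tear down all CQs owned by @slave.  A CQ is skipped while its
 * ref_count is non-zero, i.e. while a QP or SRQ still points at it,
 * so the QP and SRQ cleanups must run first.
 */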
static void rem_slave_cqs(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *cq_list =
		&tracker->slave_list[slave].res_list[RES_CQ];
	struct res_cq *cq;
	struct res_cq *tmp;
	int state;
	u64 in_param;
	int cqn;
	int err;

	err = move_all_busy(dev, slave, RES_CQ);
	if (err)
		mlx4_warn(dev, "rem_slave_cqs: Could not move all cqs to "
			  "busy for slave %d\n", slave);

	spin_lock_irq(mlx4_tlock(dev));
	list_for_each_entry_safe(cq, tmp, cq_list, com.list) {
		spin_unlock_irq(mlx4_tlock(dev));
		if (cq->com.owner == slave && !atomic_read(&cq->ref_count)) {
			cqn = cq->com.res_id;
			state = cq->com.from_state;
			while (state != 0) {
				switch (state) {
				case RES_CQ_ALLOCATED:
					__mlx4_cq_free_icm(dev, cqn);
					spin_lock_irq(mlx4_tlock(dev));
					rb_erase(&cq->com.node,
						 &tracker->res_tree[RES_CQ]);
					list_del(&cq->com.list);
					spin_unlock_irq(mlx4_tlock(dev));
					kfree(cq);
					state = 0;
					break;

				case RES_CQ_HW:
					in_param = slave;
					err = mlx4_cmd(dev, in_param, cqn, 1,
						       MLX4_CMD_HW2SW_CQ,
						       MLX4_CMD_TIME_CLASS_A,
						       MLX4_CMD_NATIVE);
					if (err)
						mlx4_dbg(dev, "rem_slave_cqs: failed"
							 " to move slave %d cq %d to"
							 " SW ownership\n",
							 slave, cqn);
					atomic_dec(&cq->mtt->ref_count);
					state = RES_CQ_ALLOCATED;
					break;

				default:
					state = 0;
				}
			}
		}
		spin_lock_irq(mlx4_tlock(dev));
	}
	spin_unlock_irq(mlx4_tlock(dev));
}

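/*
 * Tear down all memory regions (MPTs) owned by @slave: RES_MPT_HW is
 * returned to SW ownership with MLX4_CMD_HW2SW_MPT, RES_MPT_MAPPED
 * frees the ICM backing, and RES_MPT_RESERVED releases the MPT index
 * and removes the tracker entry.
 */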
static void rem_slave_mrs(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *mpt_list =
		&tracker->slave_list[slave].res_list[RES_MPT];
	struct res_mpt *mpt;
	struct res_mpt *tmp;
	int state;
	u64 in_param;
	int mptn;
	int err;

	err = move_all_busy(dev, slave, RES_MPT);
	if (err)
		mlx4_warn(dev, "rem_slave_mrs: Could not move all mpts to "
			  "busy for slave %d\n", slave);

	spin_lock_irq(mlx4_tlock(dev));
	list_for_each_entry_safe(mpt, tmp, mpt_list, com.list) {
		spin_unlock_irq(mlx4_tlock(dev));
		if (mpt->com.owner == slave) {
			mptn = mpt->com.res_id;
			state = mpt->com.from_state;
			while (state != 0) {
				switch (state) {
				case RES_MPT_RESERVED:
					__mlx4_mr_release(dev, mpt->key);
					spin_lock_irq(mlx4_tlock(dev));
					rb_erase(&mpt->com.node,
						 &tracker->res_tree[RES_MPT]);
					list_del(&mpt->com.list);
					spin_unlock_irq(mlx4_tlock(dev));
					kfree(mpt);
					state = 0;
					break;

				case RES_MPT_MAPPED:
					__mlx4_mr_free_icm(dev, mpt->key);
					state = RES_MPT_RESERVED;
					break;

				case RES_MPT_HW:
					in_param = slave;
					err = mlx4_cmd(dev, in_param, mptn, 0,
						       MLX4_CMD_HW2SW_MPT,
						       MLX4_CMD_TIME_CLASS_A,
						       MLX4_CMD_NATIVE);
					if (err)
						mlx4_dbg(dev, "rem_slave_mrs: failed"
							 " to move slave %d mpt %d to"
							 " SW ownership\n",
							 slave, mptn);
					if (mpt->mtt)
						atomic_dec(&mpt->mtt->ref_count);
					state = RES_MPT_MAPPED;
					break;
				default:
					state = 0;
				}
			}
		}
		spin_lock_irq(mlx4_tlock(dev));
	}
	spin_unlock_irq(mlx4_tlock(dev));
}

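/*
 * Free all MTT ranges owned by @slave.  In the teardown order used by
 * mlx4_delete_all_resources_for_slave() this runs after the QP, SRQ,
 * CQ, MR and EQ cleanups, which drop the MTT reference counts.
 */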
static void rem_slave_mtts(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker =
		&priv->mfunc.master.res_tracker;
	struct list_head *mtt_list =
		&tracker->slave_list[slave].res_list[RES_MTT];
	struct res_mtt *mtt;
	struct res_mtt *tmp;
	int state;
	int base;
	int err;

	err = move_all_busy(dev, slave, RES_MTT);
	if (err)
		mlx4_warn(dev, "rem_slave_mtts: Could not move all mtts to "
			  "busy for slave %d\n", slave);

	spin_lock_irq(mlx4_tlock(dev));
	list_for_each_entry_safe(mtt, tmp, mtt_list, com.list) {
		spin_unlock_irq(mlx4_tlock(dev));
		if (mtt->com.owner == slave) {
			base = mtt->com.res_id;
			state = mtt->com.from_state;
			while (state != 0) {
				switch (state) {
				case RES_MTT_ALLOCATED:
					__mlx4_free_mtt_range(dev, base,
							      mtt->order);
					spin_lock_irq(mlx4_tlock(dev));
					rb_erase(&mtt->com.node,
						 &tracker->res_tree[RES_MTT]);
					list_del(&mtt->com.list);
					spin_unlock_irq(mlx4_tlock(dev));
					kfree(mtt);
					state = 0;
					break;

				default:
					state = 0;
				}
			}
		}
		spin_lock_irq(mlx4_tlock(dev));
	}
	spin_unlock_irq(mlx4_tlock(dev));
}

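/*
 * Detach all device-managed flow steering rules owned by @slave and
 * remove their tracker entries.
 */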
static void rem_slave_fs_rule(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker =
		&priv->mfunc.master.res_tracker;
	struct list_head *fs_rule_list =
		&tracker->slave_list[slave].res_list[RES_FS_RULE];
	struct res_fs_rule *fs_rule;
	struct res_fs_rule *tmp;
	int state;
	u64 base;
	int err;

	err = move_all_busy(dev, slave, RES_FS_RULE);
	if (err)
		mlx4_warn(dev, "rem_slave_fs_rule: Could not move all fs rules to busy for slave %d\n",
			  slave);

	spin_lock_irq(mlx4_tlock(dev));
	list_for_each_entry_safe(fs_rule, tmp, fs_rule_list, com.list) {
		spin_unlock_irq(mlx4_tlock(dev));
		if (fs_rule->com.owner == slave) {
			base = fs_rule->com.res_id;
			state = fs_rule->com.from_state;
			while (state != 0) {
				switch (state) {
				case RES_FS_RULE_ALLOCATED:
					/* detach rule */
					err = mlx4_cmd(dev, base, 0, 0,
						       MLX4_QP_FLOW_STEERING_DETACH,
						       MLX4_CMD_TIME_CLASS_A,
						       MLX4_CMD_NATIVE);

					spin_lock_irq(mlx4_tlock(dev));
					rb_erase(&fs_rule->com.node,
						 &tracker->res_tree[RES_FS_RULE]);
					list_del(&fs_rule->com.list);
					spin_unlock_irq(mlx4_tlock(dev));
					kfree(fs_rule);
					state = 0;
					break;

				default:
					state = 0;
				}
			}
		}
		spin_lock_irq(mlx4_tlock(dev));
	}
	spin_unlock_irq(mlx4_tlock(dev));
}

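/*
 * Tear down all EQs owned by @slave: RES_EQ_HW is returned to SW
 * ownership with MLX4_CMD_HW2SW_EQ, then RES_EQ_RESERVED removes the
 * tracker entry.  A command mailbox is allocated around the command
 * even though its contents are not examined here.
 */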
static void rem_slave_eqs(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *eq_list =
		&tracker->slave_list[slave].res_list[RES_EQ];
	struct res_eq *eq;
	struct res_eq *tmp;
	int state;
	int eqn;
	int err;
	struct mlx4_cmd_mailbox *mailbox;

	err = move_all_busy(dev, slave, RES_EQ);
	if (err)
		mlx4_warn(dev, "rem_slave_eqs: Could not move all eqs to "
			  "busy for slave %d\n", slave);

	spin_lock_irq(mlx4_tlock(dev));
	list_for_each_entry_safe(eq, tmp, eq_list, com.list) {
		spin_unlock_irq(mlx4_tlock(dev));
		if (eq->com.owner == slave) {
			eqn = eq->com.res_id;
			state = eq->com.from_state;
			while (state != 0) {
				switch (state) {
				case RES_EQ_RESERVED:
					spin_lock_irq(mlx4_tlock(dev));
					rb_erase(&eq->com.node,
						 &tracker->res_tree[RES_EQ]);
					list_del(&eq->com.list);
					spin_unlock_irq(mlx4_tlock(dev));
					kfree(eq);
					state = 0;
					break;

				case RES_EQ_HW:
					mailbox = mlx4_alloc_cmd_mailbox(dev);
					if (IS_ERR(mailbox)) {
						cond_resched();
						continue;
					}
					err = mlx4_cmd_box(dev, slave, 0,
							   eqn & 0xff, 0,
							   MLX4_CMD_HW2SW_EQ,
							   MLX4_CMD_TIME_CLASS_A,
							   MLX4_CMD_NATIVE);
					if (err)
						mlx4_dbg(dev, "rem_slave_eqs: failed"
							 " to move slave %d eqs %d to"
							 " SW ownership\n", slave, eqn);
					mlx4_free_cmd_mailbox(dev, mailbox);
					atomic_dec(&eq->mtt->ref_count);
					state = RES_EQ_RESERVED;
					break;

				default:
					state = 0;
				}
			}
		}
		spin_lock_irq(mlx4_tlock(dev));
	}
	spin_unlock_irq(mlx4_tlock(dev));
}

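/*
 * Free all counters owned by @slave.  Counters have a single
 * allocated state, so the whole cleanup runs under the tracker lock.
 */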
static void rem_slave_counters(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *counter_list =
		&tracker->slave_list[slave].res_list[RES_COUNTER];
	struct res_counter *counter;
	struct res_counter *tmp;
	int index;
	int err;

	err = move_all_busy(dev, slave, RES_COUNTER);
	if (err)
		mlx4_warn(dev, "rem_slave_counters: Could not move all counters to "
			  "busy for slave %d\n", slave);

	spin_lock_irq(mlx4_tlock(dev));
	list_for_each_entry_safe(counter, tmp, counter_list, com.list) {
		if (counter->com.owner == slave) {
			index = counter->com.res_id;
			rb_erase(&counter->com.node,
				 &tracker->res_tree[RES_COUNTER]);
			list_del(&counter->com.list);
			kfree(counter);
			__mlx4_counter_free(dev, index);
		}
	}
	spin_unlock_irq(mlx4_tlock(dev));
}

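/*
 * Free all XRC domains owned by @slave; like counters, these have a
 * single allocated state.
 */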
static void rem_slave_xrcdns(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *xrcdn_list =
		&tracker->slave_list[slave].res_list[RES_XRCD];
	struct res_xrcdn *xrcd;
	struct res_xrcdn *tmp;
	int xrcdn;
	int err;

	err = move_all_busy(dev, slave, RES_XRCD);
	if (err)
		mlx4_warn(dev, "rem_slave_xrcdns: Could not move all xrcdns to "
			  "busy for slave %d\n", slave);

	spin_lock_irq(mlx4_tlock(dev));
	list_for_each_entry_safe(xrcd, tmp, xrcdn_list, com.list) {
		if (xrcd->com.owner == slave) {
			xrcdn = xrcd->com.res_id;
			rb_erase(&xrcd->com.node, &tracker->res_tree[RES_XRCD]);
			list_del(&xrcd->com.list);
			kfree(xrcd);
			__mlx4_xrcd_free(dev, xrcdn);
		}
	}
	spin_unlock_irq(mlx4_tlock(dev));
}

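/*
 * Reclaim every resource still held by @slave, typically when the
 * slave function goes down (e.g. on reset or shutdown).  QPs are torn
 * down first so that the reference counts on CQs, SRQs and MTTs fall
 * to zero before those resources are freed; the per-slave mutex keeps
 * this teardown atomic with respect to new allocations by the same
 * slave.
 */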
void mlx4_delete_all_resources_for_slave(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	mutex_lock(&priv->mfunc.master.res_tracker.slave_list[slave].mutex);
	rem_slave_macs(dev, slave);
	rem_slave_qps(dev, slave);
	rem_slave_srqs(dev, slave);
	rem_slave_cqs(dev, slave);
	rem_slave_mrs(dev, slave);
	rem_slave_eqs(dev, slave);
	rem_slave_mtts(dev, slave);
	rem_slave_counters(dev, slave);
	rem_slave_xrcdns(dev, slave);
	rem_slave_fs_rule(dev, slave);
	mutex_unlock(&priv->mfunc.master.res_tracker.slave_list[slave].mutex);
}