drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
1 /*
2  * Copyright (c) 2004, 2005 Topspin Communications.  All rights reserved.
3  * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies.
4  * All rights reserved.
5  * Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc.  All rights reserved.
6  *
7  * This software is available to you under a choice of one of two
8  * licenses.  You may choose to be licensed under the terms of the GNU
9  * General Public License (GPL) Version 2, available from the file
10  * COPYING in the main directory of this source tree, or the
11  * OpenIB.org BSD license below:
12  *
13  *     Redistribution and use in source and binary forms, with or
14  *     without modification, are permitted provided that the following
15  *     conditions are met:
16  *
17  *      - Redistributions of source code must retain the above
18  *        copyright notice, this list of conditions and the following
19  *        disclaimer.
20  *
21  *      - Redistributions in binary form must reproduce the above
22  *        copyright notice, this list of conditions and the following
23  *        disclaimer in the documentation and/or other materials
24  *        provided with the distribution.
25  *
26  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
27  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
28  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
29  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
30  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
31  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
32  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
33  * SOFTWARE.
34  */
35
36 #include <linux/sched.h>
37 #include <linux/pci.h>
38 #include <linux/errno.h>
39 #include <linux/kernel.h>
40 #include <linux/io.h>
41 #include <linux/slab.h>
42 #include <linux/mlx4/cmd.h>
43 #include <linux/mlx4/qp.h>
44 #include <linux/if_ether.h>
45 #include <linux/etherdevice.h>
46
47 #include "mlx4.h"
48 #include "fw.h"
49
50 #define MLX4_MAC_VALID          (1ull << 63)
51
52 struct mac_res {
53         struct list_head list;
54         u64 mac;
55         int ref_count;
56         u8 smac_index;
57         u8 port;
58 };
59
60 struct vlan_res {
61         struct list_head list;
62         u16 vlan;
63         int ref_count;
64         int vlan_index;
65         u8 port;
66 };
67
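/* res_common is embedded at the start of every tracked resource: 'list'
 * links the entry into the owning slave's per-type list, 'node' links it
 * into the per-type red-black tree keyed by 'res_id', 'owner' is the
 * slave (function) number, and state/from_state/to_state implement the
 * busy/transition protocol used by the *_res_start_move_to() helpers
 * further down in this file.
 */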
68 struct res_common {
69         struct list_head        list;
70         struct rb_node          node;
71         u64                     res_id;
72         int                     owner;
73         int                     state;
74         int                     from_state;
75         int                     to_state;
76         int                     removing;
77 };
78
79 enum {
80         RES_ANY_BUSY = 1
81 };
82
83 struct res_gid {
84         struct list_head        list;
85         u8                      gid[16];
86         enum mlx4_protocol      prot;
87         enum mlx4_steer_type    steer;
88         u64                     reg_id;
89 };
90
91 enum res_qp_states {
92         RES_QP_BUSY = RES_ANY_BUSY,
93
94         /* QP number was allocated */
95         RES_QP_RESERVED,
96
97         /* ICM memory for QP context was mapped */
98         RES_QP_MAPPED,
99
100         /* QP is in hw ownership */
101         RES_QP_HW
102 };
103
104 struct res_qp {
105         struct res_common       com;
106         struct res_mtt         *mtt;
107         struct res_cq          *rcq;
108         struct res_cq          *scq;
109         struct res_srq         *srq;
110         struct list_head        mcg_list;
111         spinlock_t              mcg_spl;
112         int                     local_qpn;
113         atomic_t                ref_count;
114         u32                     qpc_flags;
115         /* saved qp params before VST enforcement in order to restore on VGT */
116         u8                      sched_queue;
117         __be32                  param3;
118         u8                      vlan_control;
119         u8                      fvl_rx;
120         u8                      pri_path_fl;
121         u8                      vlan_index;
122         u8                      feup;
123 };
124
125 enum res_mtt_states {
126         RES_MTT_BUSY = RES_ANY_BUSY,
127         RES_MTT_ALLOCATED,
128 };
129
130 static inline const char *mtt_states_str(enum res_mtt_states state)
131 {
132         switch (state) {
133         case RES_MTT_BUSY: return "RES_MTT_BUSY";
134         case RES_MTT_ALLOCATED: return "RES_MTT_ALLOCATED";
135         default: return "Unknown";
136         }
137 }
138
139 struct res_mtt {
140         struct res_common       com;
141         int                     order;
142         atomic_t                ref_count;
143 };
144
145 enum res_mpt_states {
146         RES_MPT_BUSY = RES_ANY_BUSY,
147         RES_MPT_RESERVED,
148         RES_MPT_MAPPED,
149         RES_MPT_HW,
150 };
151
152 struct res_mpt {
153         struct res_common       com;
154         struct res_mtt         *mtt;
155         int                     key;
156 };
157
158 enum res_eq_states {
159         RES_EQ_BUSY = RES_ANY_BUSY,
160         RES_EQ_RESERVED,
161         RES_EQ_HW,
162 };
163
164 struct res_eq {
165         struct res_common       com;
166         struct res_mtt         *mtt;
167 };
168
169 enum res_cq_states {
170         RES_CQ_BUSY = RES_ANY_BUSY,
171         RES_CQ_ALLOCATED,
172         RES_CQ_HW,
173 };
174
175 struct res_cq {
176         struct res_common       com;
177         struct res_mtt         *mtt;
178         atomic_t                ref_count;
179 };
180
181 enum res_srq_states {
182         RES_SRQ_BUSY = RES_ANY_BUSY,
183         RES_SRQ_ALLOCATED,
184         RES_SRQ_HW,
185 };
186
187 struct res_srq {
188         struct res_common       com;
189         struct res_mtt         *mtt;
190         struct res_cq          *cq;
191         atomic_t                ref_count;
192 };
193
194 enum res_counter_states {
195         RES_COUNTER_BUSY = RES_ANY_BUSY,
196         RES_COUNTER_ALLOCATED,
197 };
198
199 struct res_counter {
200         struct res_common       com;
201         int                     port;
202 };
203
204 enum res_xrcdn_states {
205         RES_XRCD_BUSY = RES_ANY_BUSY,
206         RES_XRCD_ALLOCATED,
207 };
208
209 struct res_xrcdn {
210         struct res_common       com;
211         int                     port;
212 };
213
214 enum res_fs_rule_states {
215         RES_FS_RULE_BUSY = RES_ANY_BUSY,
216         RES_FS_RULE_ALLOCATED,
217 };
218
219 struct res_fs_rule {
220         struct res_common       com;
221         int                     qpn;
222 };
223
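/* Lookup/insert helpers for the tracker's per-type red-black trees.  The
 * trees are keyed by res_id; the callers below access them with the
 * tracker lock (mlx4_tlock()) held.
 */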
224 static void *res_tracker_lookup(struct rb_root *root, u64 res_id)
225 {
226         struct rb_node *node = root->rb_node;
227
228         while (node) {
229                 struct res_common *res = container_of(node, struct res_common,
230                                                       node);
231
232                 if (res_id < res->res_id)
233                         node = node->rb_left;
234                 else if (res_id > res->res_id)
235                         node = node->rb_right;
236                 else
237                         return res;
238         }
239         return NULL;
240 }
241
242 static int res_tracker_insert(struct rb_root *root, struct res_common *res)
243 {
244         struct rb_node **new = &(root->rb_node), *parent = NULL;
245
246         /* Figure out where to put new node */
247         while (*new) {
248                 struct res_common *this = container_of(*new, struct res_common,
249                                                        node);
250
251                 parent = *new;
252                 if (res->res_id < this->res_id)
253                         new = &((*new)->rb_left);
254                 else if (res->res_id > this->res_id)
255                         new = &((*new)->rb_right);
256                 else
257                         return -EEXIST;
258         }
259
260         /* Add new node and rebalance tree. */
261         rb_link_node(&res->node, parent, new);
262         rb_insert_color(&res->node, root);
263
264         return 0;
265 }
266
267 enum qp_transition {
268         QP_TRANS_INIT2RTR,
269         QP_TRANS_RTR2RTS,
270         QP_TRANS_RTS2RTS,
271         QP_TRANS_SQERR2RTS,
272         QP_TRANS_SQD2SQD,
273         QP_TRANS_SQD2RTS
274 };
275
276 /* For debug purposes */
277 static const char *resource_str(enum mlx4_resource rt)
278 {
279         switch (rt) {
280         case RES_QP: return "RES_QP";
281         case RES_CQ: return "RES_CQ";
282         case RES_SRQ: return "RES_SRQ";
283         case RES_MPT: return "RES_MPT";
284         case RES_MTT: return "RES_MTT";
285         case RES_MAC: return  "RES_MAC";
286         case RES_VLAN: return  "RES_VLAN";
287         case RES_EQ: return "RES_EQ";
288         case RES_COUNTER: return "RES_COUNTER";
289         case RES_FS_RULE: return "RES_FS_RULE";
290         case RES_XRCD: return "RES_XRCD";
291         default: return "Unknown resource type !!!";
292         }
293 }
294
295 static void rem_slave_vlans(struct mlx4_dev *dev, int slave);
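
/* Resource accounting model: every function (PF/VF) has a hard per-type
 * quota and a guaranteed minimum.  The part of a request still covered by
 * the guarantee is drawn from the reserved pool; anything beyond it comes
 * from the shared free pool, and only while that pool stays at or above
 * what is still reserved for the other functions' guarantees.
 * Illustrative numbers only (assuming the quota is not exceeded): with
 * free = 100, reserved = 60, guaranteed = 10 and allocated = 10, a request
 * for 25 is taken entirely from the free pool (100 - 25 = 75 >= 60) and
 * granted, while a request for 45 would drop the pool below the reserved
 * floor (55 < 60) and is refused.
 */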
296 static inline int mlx4_grant_resource(struct mlx4_dev *dev, int slave,
297                                       enum mlx4_resource res_type, int count,
298                                       int port)
299 {
300         struct mlx4_priv *priv = mlx4_priv(dev);
301         struct resource_allocator *res_alloc =
302                 &priv->mfunc.master.res_tracker.res_alloc[res_type];
303         int err = -EINVAL;
304         int allocated, free, reserved, guaranteed, from_free;
305         int from_rsvd;
306
307         if (slave > dev->persist->num_vfs)
308                 return -EINVAL;
309
310         spin_lock(&res_alloc->alloc_lock);
311         allocated = (port > 0) ?
312                 res_alloc->allocated[(port - 1) *
313                 (dev->persist->num_vfs + 1) + slave] :
314                 res_alloc->allocated[slave];
315         free = (port > 0) ? res_alloc->res_port_free[port - 1] :
316                 res_alloc->res_free;
317         reserved = (port > 0) ? res_alloc->res_port_rsvd[port - 1] :
318                 res_alloc->res_reserved;
319         guaranteed = res_alloc->guaranteed[slave];
320
321         if (allocated + count > res_alloc->quota[slave]) {
322                 mlx4_warn(dev, "VF %d port %d res %s: quota exceeded, count %d alloc %d quota %d\n",
323                           slave, port, resource_str(res_type), count,
324                           allocated, res_alloc->quota[slave]);
325                 goto out;
326         }
327
328         if (allocated + count <= guaranteed) {
329                 err = 0;
330                 from_rsvd = count;
331         } else {
332                 /* portion may need to be obtained from free area */
333                 if (guaranteed - allocated > 0)
334                         from_free = count - (guaranteed - allocated);
335                 else
336                         from_free = count;
337
338                 from_rsvd = count - from_free;
339
340                 if (free - from_free >= reserved)
341                         err = 0;
342                 else
343                         mlx4_warn(dev, "VF %d port %d res %s: free pool empty, free %d from_free %d rsvd %d\n",
344                                   slave, port, resource_str(res_type), free,
345                                   from_free, reserved);
346         }
347
348         if (!err) {
349                 /* grant the request */
350                 if (port > 0) {
351                         res_alloc->allocated[(port - 1) *
352                         (dev->persist->num_vfs + 1) + slave] += count;
353                         res_alloc->res_port_free[port - 1] -= count;
354                         res_alloc->res_port_rsvd[port - 1] -= from_rsvd;
355                 } else {
356                         res_alloc->allocated[slave] += count;
357                         res_alloc->res_free -= count;
358                         res_alloc->res_reserved -= from_rsvd;
359                 }
360         }
361
362 out:
363         spin_unlock(&res_alloc->alloc_lock);
364         return err;
365 }
366
367 static inline void mlx4_release_resource(struct mlx4_dev *dev, int slave,
368                                     enum mlx4_resource res_type, int count,
369                                     int port)
370 {
371         struct mlx4_priv *priv = mlx4_priv(dev);
372         struct resource_allocator *res_alloc =
373                 &priv->mfunc.master.res_tracker.res_alloc[res_type];
374         int allocated, guaranteed, from_rsvd;
375
376         if (slave > dev->persist->num_vfs)
377                 return;
378
379         spin_lock(&res_alloc->alloc_lock);
380
381         allocated = (port > 0) ?
382                 res_alloc->allocated[(port - 1) *
383                 (dev->persist->num_vfs + 1) + slave] :
384                 res_alloc->allocated[slave];
385         guaranteed = res_alloc->guaranteed[slave];
386
387         if (allocated - count >= guaranteed) {
388                 from_rsvd = 0;
389         } else {
390                 /* portion may need to be returned to reserved area */
391                 if (allocated - guaranteed > 0)
392                         from_rsvd = count - (allocated - guaranteed);
393                 else
394                         from_rsvd = count;
395         }
396
397         if (port > 0) {
398                 res_alloc->allocated[(port - 1) *
399                 (dev->persist->num_vfs + 1) + slave] -= count;
400                 res_alloc->res_port_free[port - 1] += count;
401                 res_alloc->res_port_rsvd[port - 1] += from_rsvd;
402         } else {
403                 res_alloc->allocated[slave] -= count;
404                 res_alloc->res_free += count;
405                 res_alloc->res_reserved += from_rsvd;
406         }
407
408         spin_unlock(&res_alloc->alloc_lock);
409         return;
410 }
411
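/* Default quota policy: each function is guaranteed
 * num_instances / (2 * (num_vfs + 1)) instances and may allocate up to
 * num_instances / 2 on top of that guarantee.  Illustrative example (not
 * real hardware numbers): with 1000 instances and 3 VFs, guaranteed =
 * 1000 / 8 = 125 and quota = 500 + 125 = 625 per function.  The PF
 * additionally folds the firmware-reserved MTTs into its own free,
 * guaranteed and quota figures.
 */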
412 static inline void initialize_res_quotas(struct mlx4_dev *dev,
413                                          struct resource_allocator *res_alloc,
414                                          enum mlx4_resource res_type,
415                                          int vf, int num_instances)
416 {
417         res_alloc->guaranteed[vf] = num_instances /
418                                     (2 * (dev->persist->num_vfs + 1));
419         res_alloc->quota[vf] = (num_instances / 2) + res_alloc->guaranteed[vf];
420         if (vf == mlx4_master_func_num(dev)) {
421                 res_alloc->res_free = num_instances;
422                 if (res_type == RES_MTT) {
423                         /* reserved mtts will be taken out of the PF allocation */
424                         res_alloc->res_free += dev->caps.reserved_mtts;
425                         res_alloc->guaranteed[vf] += dev->caps.reserved_mtts;
426                         res_alloc->quota[vf] += dev->caps.reserved_mtts;
427                 }
428         }
429 }
430
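/* Publish the PF's own per-type limits in dev->quotas.  Without SRIOV the
 * quotas are simply the device capabilities minus the firmware-reserved
 * resources; with SRIOV they are the PF's entries in the resource
 * allocators set up by mlx4_init_resource_tracker().  VF quotas are
 * initialized in mlx4_slave_cap().
 */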
431 void mlx4_init_quotas(struct mlx4_dev *dev)
432 {
433         struct mlx4_priv *priv = mlx4_priv(dev);
434         int pf;
435
436         /* quotas for VFs are initialized in mlx4_slave_cap */
437         if (mlx4_is_slave(dev))
438                 return;
439
440         if (!mlx4_is_mfunc(dev)) {
441                 dev->quotas.qp = dev->caps.num_qps - dev->caps.reserved_qps -
442                         mlx4_num_reserved_sqps(dev);
443                 dev->quotas.cq = dev->caps.num_cqs - dev->caps.reserved_cqs;
444                 dev->quotas.srq = dev->caps.num_srqs - dev->caps.reserved_srqs;
445                 dev->quotas.mtt = dev->caps.num_mtts - dev->caps.reserved_mtts;
446                 dev->quotas.mpt = dev->caps.num_mpts - dev->caps.reserved_mrws;
447                 return;
448         }
449
450         pf = mlx4_master_func_num(dev);
451         dev->quotas.qp =
452                 priv->mfunc.master.res_tracker.res_alloc[RES_QP].quota[pf];
453         dev->quotas.cq =
454                 priv->mfunc.master.res_tracker.res_alloc[RES_CQ].quota[pf];
455         dev->quotas.srq =
456                 priv->mfunc.master.res_tracker.res_alloc[RES_SRQ].quota[pf];
457         dev->quotas.mtt =
458                 priv->mfunc.master.res_tracker.res_alloc[RES_MTT].quota[pf];
459         dev->quotas.mpt =
460                 priv->mfunc.master.res_tracker.res_alloc[RES_MPT].quota[pf];
461 }
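
/* Set up the tracker state used by the PF: a slave_list (per-type resource
 * lists plus a mutex) for every slave, an empty red-black tree per resource
 * type, and a resource_allocator per type holding the quota, guaranteed and
 * allocated counters.  MAC and VLAN usage is counted per (port, function)
 * pair, all other types per function only.
 */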
462 int mlx4_init_resource_tracker(struct mlx4_dev *dev)
463 {
464         struct mlx4_priv *priv = mlx4_priv(dev);
465         int i, j;
466         int t;
467
468         priv->mfunc.master.res_tracker.slave_list =
469                 kzalloc(dev->num_slaves * sizeof(struct slave_list),
470                         GFP_KERNEL);
471         if (!priv->mfunc.master.res_tracker.slave_list)
472                 return -ENOMEM;
473
474         for (i = 0; i < dev->num_slaves; i++) {
475                 for (t = 0; t < MLX4_NUM_OF_RESOURCE_TYPE; ++t)
476                         INIT_LIST_HEAD(&priv->mfunc.master.res_tracker.
477                                        slave_list[i].res_list[t]);
478                 mutex_init(&priv->mfunc.master.res_tracker.slave_list[i].mutex);
479         }
480
481         mlx4_dbg(dev, "Started init_resource_tracker: %ld slaves\n",
482                  dev->num_slaves);
483         for (i = 0; i < MLX4_NUM_OF_RESOURCE_TYPE; i++)
484                 priv->mfunc.master.res_tracker.res_tree[i] = RB_ROOT;
485
486         for (i = 0; i < MLX4_NUM_OF_RESOURCE_TYPE; i++) {
487                 struct resource_allocator *res_alloc =
488                         &priv->mfunc.master.res_tracker.res_alloc[i];
489                 res_alloc->quota = kmalloc((dev->persist->num_vfs + 1) *
490                                            sizeof(int), GFP_KERNEL);
491                 res_alloc->guaranteed = kmalloc((dev->persist->num_vfs + 1) *
492                                                 sizeof(int), GFP_KERNEL);
493                 if (i == RES_MAC || i == RES_VLAN)
494                         res_alloc->allocated = kzalloc(MLX4_MAX_PORTS *
495                                                        (dev->persist->num_vfs
496                                                        + 1) *
497                                                        sizeof(int), GFP_KERNEL);
498                 else
499                         res_alloc->allocated = kzalloc((dev->persist->
500                                                         num_vfs + 1) *
501                                                        sizeof(int), GFP_KERNEL);
502
503                 if (!res_alloc->quota || !res_alloc->guaranteed ||
504                     !res_alloc->allocated)
505                         goto no_mem_err;
506
507                 spin_lock_init(&res_alloc->alloc_lock);
508                 for (t = 0; t < dev->persist->num_vfs + 1; t++) {
509                         struct mlx4_active_ports actv_ports =
510                                 mlx4_get_active_ports(dev, t);
511                         switch (i) {
512                         case RES_QP:
513                                 initialize_res_quotas(dev, res_alloc, RES_QP,
514                                                       t, dev->caps.num_qps -
515                                                       dev->caps.reserved_qps -
516                                                       mlx4_num_reserved_sqps(dev));
517                                 break;
518                         case RES_CQ:
519                                 initialize_res_quotas(dev, res_alloc, RES_CQ,
520                                                       t, dev->caps.num_cqs -
521                                                       dev->caps.reserved_cqs);
522                                 break;
523                         case RES_SRQ:
524                                 initialize_res_quotas(dev, res_alloc, RES_SRQ,
525                                                       t, dev->caps.num_srqs -
526                                                       dev->caps.reserved_srqs);
527                                 break;
528                         case RES_MPT:
529                                 initialize_res_quotas(dev, res_alloc, RES_MPT,
530                                                       t, dev->caps.num_mpts -
531                                                       dev->caps.reserved_mrws);
532                                 break;
533                         case RES_MTT:
534                                 initialize_res_quotas(dev, res_alloc, RES_MTT,
535                                                       t, dev->caps.num_mtts -
536                                                       dev->caps.reserved_mtts);
537                                 break;
538                         case RES_MAC:
539                                 if (t == mlx4_master_func_num(dev)) {
540                                         int max_vfs_pport = 0;
541                                         /* Calculate the max vfs per port for */
542                                         /* both ports.                        */
543                                         for (j = 0; j < dev->caps.num_ports;
544                                              j++) {
545                                                 struct mlx4_slaves_pport slaves_pport =
546                                                         mlx4_phys_to_slaves_pport(dev, j + 1);
547                                                 unsigned current_slaves =
548                                                         bitmap_weight(slaves_pport.slaves,
549                                                                       dev->caps.num_ports) - 1;
550                                                 if (max_vfs_pport < current_slaves)
551                                                         max_vfs_pport =
552                                                                 current_slaves;
553                                         }
554                                         res_alloc->quota[t] =
555                                                 MLX4_MAX_MAC_NUM -
556                                                 2 * max_vfs_pport;
557                                         res_alloc->guaranteed[t] = 2;
558                                         for (j = 0; j < MLX4_MAX_PORTS; j++)
559                                                 res_alloc->res_port_free[j] =
560                                                         MLX4_MAX_MAC_NUM;
561                                 } else {
562                                         res_alloc->quota[t] = MLX4_MAX_MAC_NUM;
563                                         res_alloc->guaranteed[t] = 2;
564                                 }
565                                 break;
566                         case RES_VLAN:
567                                 if (t == mlx4_master_func_num(dev)) {
568                                         res_alloc->quota[t] = MLX4_MAX_VLAN_NUM;
569                                         res_alloc->guaranteed[t] = MLX4_MAX_VLAN_NUM / 2;
570                                         for (j = 0; j < MLX4_MAX_PORTS; j++)
571                                                 res_alloc->res_port_free[j] =
572                                                         res_alloc->quota[t];
573                                 } else {
574                                         res_alloc->quota[t] = MLX4_MAX_VLAN_NUM / 2;
575                                         res_alloc->guaranteed[t] = 0;
576                                 }
577                                 break;
578                         case RES_COUNTER:
579                                 res_alloc->quota[t] = dev->caps.max_counters;
580                                 res_alloc->guaranteed[t] = 0;
581                                 if (t == mlx4_master_func_num(dev))
582                                         res_alloc->res_free = res_alloc->quota[t];
583                                 break;
584                         default:
585                                 break;
586                         }
587                         if (i == RES_MAC || i == RES_VLAN) {
588                                 for (j = 0; j < dev->caps.num_ports; j++)
589                                         if (test_bit(j, actv_ports.ports))
590                                                 res_alloc->res_port_rsvd[j] +=
591                                                         res_alloc->guaranteed[t];
592                         } else {
593                                 res_alloc->res_reserved += res_alloc->guaranteed[t];
594                         }
595                 }
596         }
597         spin_lock_init(&priv->mfunc.master.res_tracker.lock);
598         return 0;
599
600 no_mem_err:
601         for (i = 0; i < MLX4_NUM_OF_RESOURCE_TYPE; i++) {
602                 kfree(priv->mfunc.master.res_tracker.res_alloc[i].allocated);
603                 priv->mfunc.master.res_tracker.res_alloc[i].allocated = NULL;
604                 kfree(priv->mfunc.master.res_tracker.res_alloc[i].guaranteed);
605                 priv->mfunc.master.res_tracker.res_alloc[i].guaranteed = NULL;
606                 kfree(priv->mfunc.master.res_tracker.res_alloc[i].quota);
607                 priv->mfunc.master.res_tracker.res_alloc[i].quota = NULL;
608         }
609         return -ENOMEM;
610 }
611
612 void mlx4_free_resource_tracker(struct mlx4_dev *dev,
613                                 enum mlx4_res_tracker_free_type type)
614 {
615         struct mlx4_priv *priv = mlx4_priv(dev);
616         int i;
617
618         if (priv->mfunc.master.res_tracker.slave_list) {
619                 if (type != RES_TR_FREE_STRUCTS_ONLY) {
620                         for (i = 0; i < dev->num_slaves; i++) {
621                                 if (type == RES_TR_FREE_ALL ||
622                                     dev->caps.function != i)
623                                         mlx4_delete_all_resources_for_slave(dev, i);
624                         }
625                         /* free master's vlans */
626                         i = dev->caps.function;
627                         mlx4_reset_roce_gids(dev, i);
628                         mutex_lock(&priv->mfunc.master.res_tracker.slave_list[i].mutex);
629                         rem_slave_vlans(dev, i);
630                         mutex_unlock(&priv->mfunc.master.res_tracker.slave_list[i].mutex);
631                 }
632
633                 if (type != RES_TR_FREE_SLAVES_ONLY) {
634                         for (i = 0; i < MLX4_NUM_OF_RESOURCE_TYPE; i++) {
635                                 kfree(priv->mfunc.master.res_tracker.res_alloc[i].allocated);
636                                 priv->mfunc.master.res_tracker.res_alloc[i].allocated = NULL;
637                                 kfree(priv->mfunc.master.res_tracker.res_alloc[i].guaranteed);
638                                 priv->mfunc.master.res_tracker.res_alloc[i].guaranteed = NULL;
639                                 kfree(priv->mfunc.master.res_tracker.res_alloc[i].quota);
640                                 priv->mfunc.master.res_tracker.res_alloc[i].quota = NULL;
641                         }
642                         kfree(priv->mfunc.master.res_tracker.slave_list);
643                         priv->mfunc.master.res_tracker.slave_list = NULL;
644                 }
645         }
646 }
647
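/* The INIT2RTR mailbox carries the QP context after an 8-byte prefix; the
 * raw byte offsets below correspond to fields inside that context: offset
 * 64 is the primary path sched_queue (bit 6 selects the physical port) and
 * offset 35 is the primary path pkey_index.  The slave's virtual pkey
 * index is translated to the physical one through priv->virt2phys_pkey.
 */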
648 static void update_pkey_index(struct mlx4_dev *dev, int slave,
649                               struct mlx4_cmd_mailbox *inbox)
650 {
651         u8 sched = *(u8 *)(inbox->buf + 64);
652         u8 orig_index = *(u8 *)(inbox->buf + 35);
653         u8 new_index;
654         struct mlx4_priv *priv = mlx4_priv(dev);
655         int port;
656
657         port = (sched >> 6 & 1) + 1;
658
659         new_index = priv->virt2phys_pkey[slave][port - 1][orig_index];
660         *(u8 *)(inbox->buf + 35) = new_index;
661 }
662
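/* Remap the mgid_index fields of a slave's QP context onto the physical
 * GID table: on Ethernet ports the index is offset by the slave's base GID
 * index (for both the primary and the alternate path), on IB ports the
 * slave number itself is used.
 */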
663 static void update_gid(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *inbox,
664                        u8 slave)
665 {
666         struct mlx4_qp_context  *qp_ctx = inbox->buf + 8;
667         enum mlx4_qp_optpar     optpar = be32_to_cpu(*(__be32 *) inbox->buf);
668         u32                     ts = (be32_to_cpu(qp_ctx->flags) >> 16) & 0xff;
669         int port;
670
671         if (MLX4_QP_ST_UD == ts) {
672                 port = (qp_ctx->pri_path.sched_queue >> 6 & 1) + 1;
673                 if (mlx4_is_eth(dev, port))
674                         qp_ctx->pri_path.mgid_index =
675                                 mlx4_get_base_gid_ix(dev, slave, port) | 0x80;
676                 else
677                         qp_ctx->pri_path.mgid_index = slave | 0x80;
678
679         } else if (MLX4_QP_ST_RC == ts || MLX4_QP_ST_XRC == ts || MLX4_QP_ST_UC == ts) {
680                 if (optpar & MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH) {
681                         port = (qp_ctx->pri_path.sched_queue >> 6 & 1) + 1;
682                         if (mlx4_is_eth(dev, port)) {
683                                 qp_ctx->pri_path.mgid_index +=
684                                         mlx4_get_base_gid_ix(dev, slave, port);
685                                 qp_ctx->pri_path.mgid_index &= 0x7f;
686                         } else {
687                                 qp_ctx->pri_path.mgid_index = slave & 0x7F;
688                         }
689                 }
690                 if (optpar & MLX4_QP_OPTPAR_ALT_ADDR_PATH) {
691                         port = (qp_ctx->alt_path.sched_queue >> 6 & 1) + 1;
692                         if (mlx4_is_eth(dev, port)) {
693                                 qp_ctx->alt_path.mgid_index +=
694                                         mlx4_get_base_gid_ix(dev, slave, port);
695                                 qp_ctx->alt_path.mgid_index &= 0x7f;
696                         } else {
697                                 qp_ctx->alt_path.mgid_index = slave & 0x7F;
698                         }
699                 }
700         }
701 }
702
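/* VST enforcement: when the admin has set a default VLAN for this VF
 * (default_vlan != MLX4_VGT), the PF rewrites the QP context on INIT2RTR
 * to pin traffic to that VLAN: VLAN stripping is enabled (via the VSD
 * optpar bit or an UPDATE_QP command), vlan_control blocks the tag
 * combinations that would bypass the policy, and the forced VLAN index,
 * QoS value and, if spoof-checking is on, the forced source MAC are
 * written into the primary path.  The pre-VST values saved in struct
 * res_qp are used to restore the QP when the VF returns to VGT mode.
 */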
703 static int update_vport_qp_param(struct mlx4_dev *dev,
704                                  struct mlx4_cmd_mailbox *inbox,
705                                  u8 slave, u32 qpn)
706 {
707         struct mlx4_qp_context  *qpc = inbox->buf + 8;
708         struct mlx4_vport_oper_state *vp_oper;
709         struct mlx4_priv *priv;
710         u32 qp_type;
711         int port, err = 0;
712
713         port = (qpc->pri_path.sched_queue & 0x40) ? 2 : 1;
714         priv = mlx4_priv(dev);
715         vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port];
716         qp_type = (be32_to_cpu(qpc->flags) >> 16) & 0xff;
717
718         if (MLX4_VGT != vp_oper->state.default_vlan) {
719                 /* the reserved QPs (special, proxy, tunnel)
720                  * do not operate over vlans
721                  */
722                 if (mlx4_is_qp_reserved(dev, qpn))
723                         return 0;
724
725                 /* force vlan stripping by clearing VSD; MLX QP here means Raw Ethernet */
726                 if (qp_type == MLX4_QP_ST_UD ||
727                     (qp_type == MLX4_QP_ST_MLX && mlx4_is_eth(dev, port))) {
728                         if (dev->caps.bmme_flags & MLX4_BMME_FLAG_VSD_INIT2RTR) {
729                                 *(__be32 *)inbox->buf =
730                                         cpu_to_be32(be32_to_cpu(*(__be32 *)inbox->buf) |
731                                         MLX4_QP_OPTPAR_VLAN_STRIPPING);
732                                 qpc->param3 &= ~cpu_to_be32(MLX4_STRIP_VLAN);
733                         } else {
734                                 struct mlx4_update_qp_params params = {.flags = 0};
735
736                                 err = mlx4_update_qp(dev, qpn, MLX4_UPDATE_QP_VSD, &params);
737                                 if (err)
738                                         goto out;
739                         }
740                 }
741
742                 if (vp_oper->state.link_state == IFLA_VF_LINK_STATE_DISABLE &&
743                     dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_UPDATE_QP) {
744                         qpc->pri_path.vlan_control =
745                                 MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
746                                 MLX4_VLAN_CTRL_ETH_TX_BLOCK_PRIO_TAGGED |
747                                 MLX4_VLAN_CTRL_ETH_TX_BLOCK_UNTAGGED |
748                                 MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED |
749                                 MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED |
750                                 MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED;
751                 } else if (0 != vp_oper->state.default_vlan) {
752                         qpc->pri_path.vlan_control =
753                                 MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
754                                 MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED |
755                                 MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED;
756                 } else { /* priority tagged */
757                         qpc->pri_path.vlan_control =
758                                 MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
759                                 MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED;
760                 }
761
762                 qpc->pri_path.fvl_rx |= MLX4_FVL_RX_FORCE_ETH_VLAN;
763                 qpc->pri_path.vlan_index = vp_oper->vlan_idx;
764                 qpc->pri_path.fl |= MLX4_FL_CV | MLX4_FL_ETH_HIDE_CQE_VLAN;
765                 qpc->pri_path.feup |= MLX4_FEUP_FORCE_ETH_UP | MLX4_FVL_FORCE_ETH_VLAN;
766                 qpc->pri_path.sched_queue &= 0xC7;
767                 qpc->pri_path.sched_queue |= (vp_oper->state.default_qos) << 3;
768                 qpc->qos_vport = vp_oper->state.qos_vport;
769         }
770         if (vp_oper->state.spoofchk) {
771                 qpc->pri_path.feup |= MLX4_FSM_FORCE_ETH_SRC_MAC;
772                 qpc->pri_path.grh_mylmc = (0x80 & qpc->pri_path.grh_mylmc) + vp_oper->mac_idx;
773         }
774 out:
775         return err;
776 }
777
778 static int mpt_mask(struct mlx4_dev *dev)
779 {
780         return dev->caps.num_mpts - 1;
781 }
782
783 static void *find_res(struct mlx4_dev *dev, u64 res_id,
784                       enum mlx4_resource type)
785 {
786         struct mlx4_priv *priv = mlx4_priv(dev);
787
788         return res_tracker_lookup(&priv->mfunc.master.res_tracker.res_tree[type],
789                                   res_id);
790 }
791
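/* get_res()/put_res() give a command handler short-term exclusive access
 * to a tracked resource: get_res() verifies that 'slave' owns the entry,
 * saves the current state in from_state and marks it RES_ANY_BUSY;
 * put_res() restores the saved state.  An entry found busy here is being
 * manipulated by another flow and the caller must back off.
 */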
792 static int get_res(struct mlx4_dev *dev, int slave, u64 res_id,
793                    enum mlx4_resource type,
794                    void *res)
795 {
796         struct res_common *r;
797         int err = 0;
798
799         spin_lock_irq(mlx4_tlock(dev));
800         r = find_res(dev, res_id, type);
801         if (!r) {
802                 err = -ENONET;
803                 goto exit;
804         }
805
806         if (r->state == RES_ANY_BUSY) {
807                 err = -EBUSY;
808                 goto exit;
809         }
810
811         if (r->owner != slave) {
812                 err = -EPERM;
813                 goto exit;
814         }
815
816         r->from_state = r->state;
817         r->state = RES_ANY_BUSY;
818
819         if (res)
820                 *((struct res_common **)res) = r;
821
822 exit:
823         spin_unlock_irq(mlx4_tlock(dev));
824         return err;
825 }
826
827 int mlx4_get_slave_from_resource_id(struct mlx4_dev *dev,
828                                     enum mlx4_resource type,
829                                     u64 res_id, int *slave)
830 {
831
832         struct res_common *r;
833         int err = -ENOENT;
834         int id = res_id;
835
836         if (type == RES_QP)
837                 id &= 0x7fffff;
838         spin_lock(mlx4_tlock(dev));
839
840         r = find_res(dev, id, type);
841         if (r) {
842                 *slave = r->owner;
843                 err = 0;
844         }
845         spin_unlock(mlx4_tlock(dev));
846
847         return err;
848 }
849
850 static void put_res(struct mlx4_dev *dev, int slave, u64 res_id,
851                     enum mlx4_resource type)
852 {
853         struct res_common *r;
854
855         spin_lock_irq(mlx4_tlock(dev));
856         r = find_res(dev, res_id, type);
857         if (r)
858                 r->state = r->from_state;
859         spin_unlock_irq(mlx4_tlock(dev));
860 }
861
862 static struct res_common *alloc_qp_tr(int id)
863 {
864         struct res_qp *ret;
865
866         ret = kzalloc(sizeof *ret, GFP_KERNEL);
867         if (!ret)
868                 return NULL;
869
870         ret->com.res_id = id;
871         ret->com.state = RES_QP_RESERVED;
872         ret->local_qpn = id;
873         INIT_LIST_HEAD(&ret->mcg_list);
874         spin_lock_init(&ret->mcg_spl);
875         atomic_set(&ret->ref_count, 0);
876
877         return &ret->com;
878 }
879
880 static struct res_common *alloc_mtt_tr(int id, int order)
881 {
882         struct res_mtt *ret;
883
884         ret = kzalloc(sizeof *ret, GFP_KERNEL);
885         if (!ret)
886                 return NULL;
887
888         ret->com.res_id = id;
889         ret->order = order;
890         ret->com.state = RES_MTT_ALLOCATED;
891         atomic_set(&ret->ref_count, 0);
892
893         return &ret->com;
894 }
895
896 static struct res_common *alloc_mpt_tr(int id, int key)
897 {
898         struct res_mpt *ret;
899
900         ret = kzalloc(sizeof *ret, GFP_KERNEL);
901         if (!ret)
902                 return NULL;
903
904         ret->com.res_id = id;
905         ret->com.state = RES_MPT_RESERVED;
906         ret->key = key;
907
908         return &ret->com;
909 }
910
911 static struct res_common *alloc_eq_tr(int id)
912 {
913         struct res_eq *ret;
914
915         ret = kzalloc(sizeof *ret, GFP_KERNEL);
916         if (!ret)
917                 return NULL;
918
919         ret->com.res_id = id;
920         ret->com.state = RES_EQ_RESERVED;
921
922         return &ret->com;
923 }
924
925 static struct res_common *alloc_cq_tr(int id)
926 {
927         struct res_cq *ret;
928
929         ret = kzalloc(sizeof *ret, GFP_KERNEL);
930         if (!ret)
931                 return NULL;
932
933         ret->com.res_id = id;
934         ret->com.state = RES_CQ_ALLOCATED;
935         atomic_set(&ret->ref_count, 0);
936
937         return &ret->com;
938 }
939
940 static struct res_common *alloc_srq_tr(int id)
941 {
942         struct res_srq *ret;
943
944         ret = kzalloc(sizeof *ret, GFP_KERNEL);
945         if (!ret)
946                 return NULL;
947
948         ret->com.res_id = id;
949         ret->com.state = RES_SRQ_ALLOCATED;
950         atomic_set(&ret->ref_count, 0);
951
952         return &ret->com;
953 }
954
955 static struct res_common *alloc_counter_tr(int id)
956 {
957         struct res_counter *ret;
958
959         ret = kzalloc(sizeof *ret, GFP_KERNEL);
960         if (!ret)
961                 return NULL;
962
963         ret->com.res_id = id;
964         ret->com.state = RES_COUNTER_ALLOCATED;
965
966         return &ret->com;
967 }
968
969 static struct res_common *alloc_xrcdn_tr(int id)
970 {
971         struct res_xrcdn *ret;
972
973         ret = kzalloc(sizeof *ret, GFP_KERNEL);
974         if (!ret)
975                 return NULL;
976
977         ret->com.res_id = id;
978         ret->com.state = RES_XRCD_ALLOCATED;
979
980         return &ret->com;
981 }
982
983 static struct res_common *alloc_fs_rule_tr(u64 id, int qpn)
984 {
985         struct res_fs_rule *ret;
986
987         ret = kzalloc(sizeof *ret, GFP_KERNEL);
988         if (!ret)
989                 return NULL;
990
991         ret->com.res_id = id;
992         ret->com.state = RES_FS_RULE_ALLOCATED;
993         ret->qpn = qpn;
994         return &ret->com;
995 }
996
997 static struct res_common *alloc_tr(u64 id, enum mlx4_resource type, int slave,
998                                    int extra)
999 {
1000         struct res_common *ret;
1001
1002         switch (type) {
1003         case RES_QP:
1004                 ret = alloc_qp_tr(id);
1005                 break;
1006         case RES_MPT:
1007                 ret = alloc_mpt_tr(id, extra);
1008                 break;
1009         case RES_MTT:
1010                 ret = alloc_mtt_tr(id, extra);
1011                 break;
1012         case RES_EQ:
1013                 ret = alloc_eq_tr(id);
1014                 break;
1015         case RES_CQ:
1016                 ret = alloc_cq_tr(id);
1017                 break;
1018         case RES_SRQ:
1019                 ret = alloc_srq_tr(id);
1020                 break;
1021         case RES_MAC:
1022                 pr_err("implementation missing\n");
1023                 return NULL;
1024         case RES_COUNTER:
1025                 ret = alloc_counter_tr(id);
1026                 break;
1027         case RES_XRCD:
1028                 ret = alloc_xrcdn_tr(id);
1029                 break;
1030         case RES_FS_RULE:
1031                 ret = alloc_fs_rule_tr(id, extra);
1032                 break;
1033         default:
1034                 return NULL;
1035         }
1036         if (ret)
1037                 ret->owner = slave;
1038
1039         return ret;
1040 }
1041
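/* Register 'count' consecutive resource ids starting at 'base' as owned by
 * 'slave': the tracker entries are allocated outside the lock, then linked
 * into the per-type red-black tree and the slave's list in one pass under
 * mlx4_tlock(); an id that already exists makes the whole range roll back.
 */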
1042 static int add_res_range(struct mlx4_dev *dev, int slave, u64 base, int count,
1043                          enum mlx4_resource type, int extra)
1044 {
1045         int i;
1046         int err;
1047         struct mlx4_priv *priv = mlx4_priv(dev);
1048         struct res_common **res_arr;
1049         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1050         struct rb_root *root = &tracker->res_tree[type];
1051
1052         res_arr = kzalloc(count * sizeof *res_arr, GFP_KERNEL);
1053         if (!res_arr)
1054                 return -ENOMEM;
1055
1056         for (i = 0; i < count; ++i) {
1057                 res_arr[i] = alloc_tr(base + i, type, slave, extra);
1058                 if (!res_arr[i]) {
1059                         for (--i; i >= 0; --i)
1060                                 kfree(res_arr[i]);
1061
1062                         kfree(res_arr);
1063                         return -ENOMEM;
1064                 }
1065         }
1066
1067         spin_lock_irq(mlx4_tlock(dev));
1068         for (i = 0; i < count; ++i) {
1069                 if (find_res(dev, base + i, type)) {
1070                         err = -EEXIST;
1071                         goto undo;
1072                 }
1073                 err = res_tracker_insert(root, res_arr[i]);
1074                 if (err)
1075                         goto undo;
1076                 list_add_tail(&res_arr[i]->list,
1077                               &tracker->slave_list[slave].res_list[type]);
1078         }
1079         spin_unlock_irq(mlx4_tlock(dev));
1080         kfree(res_arr);
1081
1082         return 0;
1083
1084 undo:
1085         for (--i; i >= 0; --i)
1086                 rb_erase(&res_arr[i]->node, root);
1087
1088         spin_unlock_irq(mlx4_tlock(dev));
1089
1090         for (i = 0; i < count; ++i)
1091                 kfree(res_arr[i]);
1092
1093         kfree(res_arr);
1094
1095         return err;
1096 }
1097
1098 static int remove_qp_ok(struct res_qp *res)
1099 {
1100         if (res->com.state == RES_QP_BUSY || atomic_read(&res->ref_count) ||
1101             !list_empty(&res->mcg_list)) {
1102                 pr_err("resource tracker: fail to remove qp, state %d, ref_count %d\n",
1103                        res->com.state, atomic_read(&res->ref_count));
1104                 return -EBUSY;
1105         } else if (res->com.state != RES_QP_RESERVED) {
1106                 return -EPERM;
1107         }
1108
1109         return 0;
1110 }
1111
1112 static int remove_mtt_ok(struct res_mtt *res, int order)
1113 {
1114         if (res->com.state == RES_MTT_BUSY ||
1115             atomic_read(&res->ref_count)) {
1116                 pr_devel("%s-%d: state %s, ref_count %d\n",
1117                          __func__, __LINE__,
1118                          mtt_states_str(res->com.state),
1119                          atomic_read(&res->ref_count));
1120                 return -EBUSY;
1121         } else if (res->com.state != RES_MTT_ALLOCATED)
1122                 return -EPERM;
1123         else if (res->order != order)
1124                 return -EINVAL;
1125
1126         return 0;
1127 }
1128
1129 static int remove_mpt_ok(struct res_mpt *res)
1130 {
1131         if (res->com.state == RES_MPT_BUSY)
1132                 return -EBUSY;
1133         else if (res->com.state != RES_MPT_RESERVED)
1134                 return -EPERM;
1135
1136         return 0;
1137 }
1138
1139 static int remove_eq_ok(struct res_eq *res)
1140 {
1141         if (res->com.state == RES_EQ_BUSY)
1142                 return -EBUSY;
1143         else if (res->com.state != RES_EQ_RESERVED)
1144                 return -EPERM;
1145
1146         return 0;
1147 }
1148
1149 static int remove_counter_ok(struct res_counter *res)
1150 {
1151         if (res->com.state == RES_COUNTER_BUSY)
1152                 return -EBUSY;
1153         else if (res->com.state != RES_COUNTER_ALLOCATED)
1154                 return -EPERM;
1155
1156         return 0;
1157 }
1158
1159 static int remove_xrcdn_ok(struct res_xrcdn *res)
1160 {
1161         if (res->com.state == RES_XRCD_BUSY)
1162                 return -EBUSY;
1163         else if (res->com.state != RES_XRCD_ALLOCATED)
1164                 return -EPERM;
1165
1166         return 0;
1167 }
1168
1169 static int remove_fs_rule_ok(struct res_fs_rule *res)
1170 {
1171         if (res->com.state == RES_FS_RULE_BUSY)
1172                 return -EBUSY;
1173         else if (res->com.state != RES_FS_RULE_ALLOCATED)
1174                 return -EPERM;
1175
1176         return 0;
1177 }
1178
1179 static int remove_cq_ok(struct res_cq *res)
1180 {
1181         if (res->com.state == RES_CQ_BUSY)
1182                 return -EBUSY;
1183         else if (res->com.state != RES_CQ_ALLOCATED)
1184                 return -EPERM;
1185
1186         return 0;
1187 }
1188
1189 static int remove_srq_ok(struct res_srq *res)
1190 {
1191         if (res->com.state == RES_SRQ_BUSY)
1192                 return -EBUSY;
1193         else if (res->com.state != RES_SRQ_ALLOCATED)
1194                 return -EPERM;
1195
1196         return 0;
1197 }
1198
1199 static int remove_ok(struct res_common *res, enum mlx4_resource type, int extra)
1200 {
1201         switch (type) {
1202         case RES_QP:
1203                 return remove_qp_ok((struct res_qp *)res);
1204         case RES_CQ:
1205                 return remove_cq_ok((struct res_cq *)res);
1206         case RES_SRQ:
1207                 return remove_srq_ok((struct res_srq *)res);
1208         case RES_MPT:
1209                 return remove_mpt_ok((struct res_mpt *)res);
1210         case RES_MTT:
1211                 return remove_mtt_ok((struct res_mtt *)res, extra);
1212         case RES_MAC:
1213                 return -ENOSYS;
1214         case RES_EQ:
1215                 return remove_eq_ok((struct res_eq *)res);
1216         case RES_COUNTER:
1217                 return remove_counter_ok((struct res_counter *)res);
1218         case RES_XRCD:
1219                 return remove_xrcdn_ok((struct res_xrcdn *)res);
1220         case RES_FS_RULE:
1221                 return remove_fs_rule_ok((struct res_fs_rule *)res);
1222         default:
1223                 return -EINVAL;
1224         }
1225 }
1226
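/* Remove a range of tracked ids in two passes under the tracker lock:
 * first check that every entry exists, belongs to 'slave' and is in a
 * removable state (remove_ok()), then erase and free them all, so a
 * failure part-way through leaves the tracker unchanged.
 */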
1227 static int rem_res_range(struct mlx4_dev *dev, int slave, u64 base, int count,
1228                          enum mlx4_resource type, int extra)
1229 {
1230         u64 i;
1231         int err;
1232         struct mlx4_priv *priv = mlx4_priv(dev);
1233         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1234         struct res_common *r;
1235
1236         spin_lock_irq(mlx4_tlock(dev));
1237         for (i = base; i < base + count; ++i) {
1238                 r = res_tracker_lookup(&tracker->res_tree[type], i);
1239                 if (!r) {
1240                         err = -ENOENT;
1241                         goto out;
1242                 }
1243                 if (r->owner != slave) {
1244                         err = -EPERM;
1245                         goto out;
1246                 }
1247                 err = remove_ok(r, type, extra);
1248                 if (err)
1249                         goto out;
1250         }
1251
1252         for (i = base; i < base + count; ++i) {
1253                 r = res_tracker_lookup(&tracker->res_tree[type], i);
1254                 rb_erase(&r->node, &tracker->res_tree[type]);
1255                 list_del(&r->list);
1256                 kfree(r);
1257         }
1258         err = 0;
1259
1260 out:
1261         spin_unlock_irq(mlx4_tlock(dev));
1262
1263         return err;
1264 }
1265
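/* The *_res_start_move_to() helpers below implement a per-type state
 * machine: under the tracker lock they check that the requested transition
 * is legal for the current state, then park the resource in the BUSY state
 * and record from_state/to_state.  After issuing the firmware command the
 * caller either commits the move with res_end_move() or rolls it back with
 * res_abort_move().
 */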
1266 static int qp_res_start_move_to(struct mlx4_dev *dev, int slave, int qpn,
1267                                 enum res_qp_states state, struct res_qp **qp,
1268                                 int alloc)
1269 {
1270         struct mlx4_priv *priv = mlx4_priv(dev);
1271         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1272         struct res_qp *r;
1273         int err = 0;
1274
1275         spin_lock_irq(mlx4_tlock(dev));
1276         r = res_tracker_lookup(&tracker->res_tree[RES_QP], qpn);
1277         if (!r)
1278                 err = -ENOENT;
1279         else if (r->com.owner != slave)
1280                 err = -EPERM;
1281         else {
1282                 switch (state) {
1283                 case RES_QP_BUSY:
1284                         mlx4_dbg(dev, "%s: failed RES_QP, 0x%llx\n",
1285                                  __func__, r->com.res_id);
1286                         err = -EBUSY;
1287                         break;
1288
1289                 case RES_QP_RESERVED:
1290                         if (r->com.state == RES_QP_MAPPED && !alloc)
1291                                 break;
1292
1293                         mlx4_dbg(dev, "failed RES_QP, 0x%llx\n", r->com.res_id);
1294                         err = -EINVAL;
1295                         break;
1296
1297                 case RES_QP_MAPPED:
1298                         if ((r->com.state == RES_QP_RESERVED && alloc) ||
1299                             r->com.state == RES_QP_HW)
1300                                 break;
1301                         else {
1302                                 mlx4_dbg(dev, "failed RES_QP, 0x%llx\n",
1303                                           r->com.res_id);
1304                                 err = -EINVAL;
1305                         }
1306
1307                         break;
1308
1309                 case RES_QP_HW:
1310                         if (r->com.state != RES_QP_MAPPED)
1311                                 err = -EINVAL;
1312                         break;
1313                 default:
1314                         err = -EINVAL;
1315                 }
1316
1317                 if (!err) {
1318                         r->com.from_state = r->com.state;
1319                         r->com.to_state = state;
1320                         r->com.state = RES_QP_BUSY;
1321                         if (qp)
1322                                 *qp = r;
1323                 }
1324         }
1325
1326         spin_unlock_irq(mlx4_tlock(dev));
1327
1328         return err;
1329 }
1330
1331 static int mr_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
1332                                 enum res_mpt_states state, struct res_mpt **mpt)
1333 {
1334         struct mlx4_priv *priv = mlx4_priv(dev);
1335         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1336         struct res_mpt *r;
1337         int err = 0;
1338
1339         spin_lock_irq(mlx4_tlock(dev));
1340         r = res_tracker_lookup(&tracker->res_tree[RES_MPT], index);
1341         if (!r)
1342                 err = -ENOENT;
1343         else if (r->com.owner != slave)
1344                 err = -EPERM;
1345         else {
1346                 switch (state) {
1347                 case RES_MPT_BUSY:
1348                         err = -EINVAL;
1349                         break;
1350
1351                 case RES_MPT_RESERVED:
1352                         if (r->com.state != RES_MPT_MAPPED)
1353                                 err = -EINVAL;
1354                         break;
1355
1356                 case RES_MPT_MAPPED:
1357                         if (r->com.state != RES_MPT_RESERVED &&
1358                             r->com.state != RES_MPT_HW)
1359                                 err = -EINVAL;
1360                         break;
1361
1362                 case RES_MPT_HW:
1363                         if (r->com.state != RES_MPT_MAPPED)
1364                                 err = -EINVAL;
1365                         break;
1366                 default:
1367                         err = -EINVAL;
1368                 }
1369
1370                 if (!err) {
1371                         r->com.from_state = r->com.state;
1372                         r->com.to_state = state;
1373                         r->com.state = RES_MPT_BUSY;
1374                         if (mpt)
1375                                 *mpt = r;
1376                 }
1377         }
1378
1379         spin_unlock_irq(mlx4_tlock(dev));
1380
1381         return err;
1382 }
1383
1384 static int eq_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
1385                                 enum res_eq_states state, struct res_eq **eq)
1386 {
1387         struct mlx4_priv *priv = mlx4_priv(dev);
1388         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1389         struct res_eq *r;
1390         int err = 0;
1391
1392         spin_lock_irq(mlx4_tlock(dev));
1393         r = res_tracker_lookup(&tracker->res_tree[RES_EQ], index);
1394         if (!r)
1395                 err = -ENOENT;
1396         else if (r->com.owner != slave)
1397                 err = -EPERM;
1398         else {
1399                 switch (state) {
1400                 case RES_EQ_BUSY:
1401                         err = -EINVAL;
1402                         break;
1403
1404                 case RES_EQ_RESERVED:
1405                         if (r->com.state != RES_EQ_HW)
1406                                 err = -EINVAL;
1407                         break;
1408
1409                 case RES_EQ_HW:
1410                         if (r->com.state != RES_EQ_RESERVED)
1411                                 err = -EINVAL;
1412                         break;
1413
1414                 default:
1415                         err = -EINVAL;
1416                 }
1417
1418                 if (!err) {
1419                         r->com.from_state = r->com.state;
1420                         r->com.to_state = state;
1421                         r->com.state = RES_EQ_BUSY;
1422                         if (eq)
1423                                 *eq = r;
1424                 }
1425         }
1426
1427         spin_unlock_irq(mlx4_tlock(dev));
1428
1429         return err;
1430 }
1431
1432 static int cq_res_start_move_to(struct mlx4_dev *dev, int slave, int cqn,
1433                                 enum res_cq_states state, struct res_cq **cq)
1434 {
1435         struct mlx4_priv *priv = mlx4_priv(dev);
1436         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1437         struct res_cq *r;
1438         int err;
1439
1440         spin_lock_irq(mlx4_tlock(dev));
1441         r = res_tracker_lookup(&tracker->res_tree[RES_CQ], cqn);
1442         if (!r) {
1443                 err = -ENOENT;
1444         } else if (r->com.owner != slave) {
1445                 err = -EPERM;
1446         } else if (state == RES_CQ_ALLOCATED) {
1447                 if (r->com.state != RES_CQ_HW)
1448                         err = -EINVAL;
1449                 else if (atomic_read(&r->ref_count))
1450                         err = -EBUSY;
1451                 else
1452                         err = 0;
1453         } else if (state != RES_CQ_HW || r->com.state != RES_CQ_ALLOCATED) {
1454                 err = -EINVAL;
1455         } else {
1456                 err = 0;
1457         }
1458
1459         if (!err) {
1460                 r->com.from_state = r->com.state;
1461                 r->com.to_state = state;
1462                 r->com.state = RES_CQ_BUSY;
1463                 if (cq)
1464                         *cq = r;
1465         }
1466
1467         spin_unlock_irq(mlx4_tlock(dev));
1468
1469         return err;
1470 }
1471
1472 static int srq_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
1473                                  enum res_srq_states state, struct res_srq **srq)
1474 {
1475         struct mlx4_priv *priv = mlx4_priv(dev);
1476         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1477         struct res_srq *r;
1478         int err = 0;
1479
1480         spin_lock_irq(mlx4_tlock(dev));
1481         r = res_tracker_lookup(&tracker->res_tree[RES_SRQ], index);
1482         if (!r) {
1483                 err = -ENOENT;
1484         } else if (r->com.owner != slave) {
1485                 err = -EPERM;
1486         } else if (state == RES_SRQ_ALLOCATED) {
1487                 if (r->com.state != RES_SRQ_HW)
1488                         err = -EINVAL;
1489                 else if (atomic_read(&r->ref_count))
1490                         err = -EBUSY;
1491         } else if (state != RES_SRQ_HW || r->com.state != RES_SRQ_ALLOCATED) {
1492                 err = -EINVAL;
1493         }
1494
1495         if (!err) {
1496                 r->com.from_state = r->com.state;
1497                 r->com.to_state = state;
1498                 r->com.state = RES_SRQ_BUSY;
1499                 if (srq)
1500                         *srq = r;
1501         }
1502
1503         spin_unlock_irq(mlx4_tlock(dev));
1504
1505         return err;
1506 }
1507
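/*
 * The *_res_start_move_to() helpers above mark a resource BUSY under the
 * tracker lock and record the intended transition in from_state/to_state.
 * Once the accompanying firmware command completes, the caller either
 * commits the transition with res_end_move() (state = to_state) or rolls
 * it back with res_abort_move() (state = from_state), both under the same
 * lock.
 */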
1508 static void res_abort_move(struct mlx4_dev *dev, int slave,
1509                            enum mlx4_resource type, int id)
1510 {
1511         struct mlx4_priv *priv = mlx4_priv(dev);
1512         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1513         struct res_common *r;
1514
1515         spin_lock_irq(mlx4_tlock(dev));
1516         r = res_tracker_lookup(&tracker->res_tree[type], id);
1517         if (r && (r->owner == slave))
1518                 r->state = r->from_state;
1519         spin_unlock_irq(mlx4_tlock(dev));
1520 }
1521
1522 static void res_end_move(struct mlx4_dev *dev, int slave,
1523                          enum mlx4_resource type, int id)
1524 {
1525         struct mlx4_priv *priv = mlx4_priv(dev);
1526         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1527         struct res_common *r;
1528
1529         spin_lock_irq(mlx4_tlock(dev));
1530         r = res_tracker_lookup(&tracker->res_tree[type], id);
1531         if (r && (r->owner == slave))
1532                 r->state = r->to_state;
1533         spin_unlock_irq(mlx4_tlock(dev));
1534 }
1535
1536 static int valid_reserved(struct mlx4_dev *dev, int slave, int qpn)
1537 {
1538         return mlx4_is_qp_reserved(dev, qpn) &&
1539                 (mlx4_is_master(dev) || mlx4_is_guest_proxy(dev, slave, qpn));
1540 }
1541
1542 static int fw_reserved(struct mlx4_dev *dev, int qpn)
1543 {
1544         return qpn < dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW];
1545 }
1546
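/*
 * QP allocation on behalf of a slave happens in two steps that mirror the
 * RES_QP_RESERVED/RES_QP_MAPPED states: RES_OP_RESERVE charges the slave's
 * quota and reserves a QPN range, and RES_OP_MAP_ICM maps ICM for a single
 * QPN and moves it to RES_QP_MAPPED.  For QPNs in the firmware-reserved
 * region (see fw_reserved() above) the explicit __mlx4_qp_alloc_icm()/
 * __mlx4_qp_free_icm() calls are skipped.
 */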
1547 static int qp_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1548                         u64 in_param, u64 *out_param)
1549 {
1550         int err;
1551         int count;
1552         int align;
1553         int base;
1554         int qpn;
1555         u8 flags;
1556
1557         switch (op) {
1558         case RES_OP_RESERVE:
1559                 count = get_param_l(&in_param) & 0xffffff;
1560                 /* Turn off all unsupported QP allocation flags that the
1561                  * slave tries to set.
1562                  */
1563                 flags = (get_param_l(&in_param) >> 24) & dev->caps.alloc_res_qp_mask;
1564                 align = get_param_h(&in_param);
1565                 err = mlx4_grant_resource(dev, slave, RES_QP, count, 0);
1566                 if (err)
1567                         return err;
1568
1569                 err = __mlx4_qp_reserve_range(dev, count, align, &base, flags);
1570                 if (err) {
1571                         mlx4_release_resource(dev, slave, RES_QP, count, 0);
1572                         return err;
1573                 }
1574
1575                 err = add_res_range(dev, slave, base, count, RES_QP, 0);
1576                 if (err) {
1577                         mlx4_release_resource(dev, slave, RES_QP, count, 0);
1578                         __mlx4_qp_release_range(dev, base, count);
1579                         return err;
1580                 }
1581                 set_param_l(out_param, base);
1582                 break;
1583         case RES_OP_MAP_ICM:
1584                 qpn = get_param_l(&in_param) & 0x7fffff;
1585                 if (valid_reserved(dev, slave, qpn)) {
1586                         err = add_res_range(dev, slave, qpn, 1, RES_QP, 0);
1587                         if (err)
1588                                 return err;
1589                 }
1590
1591                 err = qp_res_start_move_to(dev, slave, qpn, RES_QP_MAPPED,
1592                                            NULL, 1);
1593                 if (err)
1594                         return err;
1595
1596                 if (!fw_reserved(dev, qpn)) {
1597                         err = __mlx4_qp_alloc_icm(dev, qpn, GFP_KERNEL);
1598                         if (err) {
1599                                 res_abort_move(dev, slave, RES_QP, qpn);
1600                                 return err;
1601                         }
1602                 }
1603
1604                 res_end_move(dev, slave, RES_QP, qpn);
1605                 break;
1606
1607         default:
1608                 err = -EINVAL;
1609                 break;
1610         }
1611         return err;
1612 }
1613
1614 static int mtt_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1615                          u64 in_param, u64 *out_param)
1616 {
1617         int err = -EINVAL;
1618         int base;
1619         int order;
1620
1621         if (op != RES_OP_RESERVE_AND_MAP)
1622                 return err;
1623
1624         order = get_param_l(&in_param);
1625
1626         err = mlx4_grant_resource(dev, slave, RES_MTT, 1 << order, 0);
1627         if (err)
1628                 return err;
1629
1630         base = __mlx4_alloc_mtt_range(dev, order);
1631         if (base == -1) {
1632                 mlx4_release_resource(dev, slave, RES_MTT, 1 << order, 0);
1633                 return -ENOMEM;
1634         }
1635
1636         err = add_res_range(dev, slave, base, 1, RES_MTT, order);
1637         if (err) {
1638                 mlx4_release_resource(dev, slave, RES_MTT, 1 << order, 0);
1639                 __mlx4_free_mtt_range(dev, base, order);
1640         } else {
1641                 set_param_l(out_param, base);
1642         }
1643
1644         return err;
1645 }
1646
1647 static int mpt_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1648                          u64 in_param, u64 *out_param)
1649 {
1650         int err = -EINVAL;
1651         int index;
1652         int id;
1653         struct res_mpt *mpt;
1654
1655         switch (op) {
1656         case RES_OP_RESERVE:
1657                 err = mlx4_grant_resource(dev, slave, RES_MPT, 1, 0);
1658                 if (err)
1659                         break;
1660
1661                 index = __mlx4_mpt_reserve(dev);
1662                 if (index == -1) {
1663                         mlx4_release_resource(dev, slave, RES_MPT, 1, 0);
1664                         break;
1665                 }
1666                 id = index & mpt_mask(dev);
1667
1668                 err = add_res_range(dev, slave, id, 1, RES_MPT, index);
1669                 if (err) {
1670                         mlx4_release_resource(dev, slave, RES_MPT, 1, 0);
1671                         __mlx4_mpt_release(dev, index);
1672                         break;
1673                 }
1674                 set_param_l(out_param, index);
1675                 break;
1676         case RES_OP_MAP_ICM:
1677                 index = get_param_l(&in_param);
1678                 id = index & mpt_mask(dev);
1679                 err = mr_res_start_move_to(dev, slave, id,
1680                                            RES_MPT_MAPPED, &mpt);
1681                 if (err)
1682                         return err;
1683
1684                 err = __mlx4_mpt_alloc_icm(dev, mpt->key, GFP_KERNEL);
1685                 if (err) {
1686                         res_abort_move(dev, slave, RES_MPT, id);
1687                         return err;
1688                 }
1689
1690                 res_end_move(dev, slave, RES_MPT, id);
1691                 break;
1692         }
1693         return err;
1694 }
1695
1696 static int cq_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1697                         u64 in_param, u64 *out_param)
1698 {
1699         int cqn;
1700         int err;
1701
1702         switch (op) {
1703         case RES_OP_RESERVE_AND_MAP:
1704                 err = mlx4_grant_resource(dev, slave, RES_CQ, 1, 0);
1705                 if (err)
1706                         break;
1707
1708                 err = __mlx4_cq_alloc_icm(dev, &cqn);
1709                 if (err) {
1710                         mlx4_release_resource(dev, slave, RES_CQ, 1, 0);
1711                         break;
1712                 }
1713
1714                 err = add_res_range(dev, slave, cqn, 1, RES_CQ, 0);
1715                 if (err) {
1716                         mlx4_release_resource(dev, slave, RES_CQ, 1, 0);
1717                         __mlx4_cq_free_icm(dev, cqn);
1718                         break;
1719                 }
1720
1721                 set_param_l(out_param, cqn);
1722                 break;
1723
1724         default:
1725                 err = -EINVAL;
1726         }
1727
1728         return err;
1729 }
1730
1731 static int srq_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1732                          u64 in_param, u64 *out_param)
1733 {
1734         int srqn;
1735         int err;
1736
1737         switch (op) {
1738         case RES_OP_RESERVE_AND_MAP:
1739                 err = mlx4_grant_resource(dev, slave, RES_SRQ, 1, 0);
1740                 if (err)
1741                         break;
1742
1743                 err = __mlx4_srq_alloc_icm(dev, &srqn);
1744                 if (err) {
1745                         mlx4_release_resource(dev, slave, RES_SRQ, 1, 0);
1746                         break;
1747                 }
1748
1749                 err = add_res_range(dev, slave, srqn, 1, RES_SRQ, 0);
1750                 if (err) {
1751                         mlx4_release_resource(dev, slave, RES_SRQ, 1, 0);
1752                         __mlx4_srq_free_icm(dev, srqn);
1753                         break;
1754                 }
1755
1756                 set_param_l(out_param, srqn);
1757                 break;
1758
1759         default:
1760                 err = -EINVAL;
1761         }
1762
1763         return err;
1764 }
1765
1766 static int mac_find_smac_ix_in_slave(struct mlx4_dev *dev, int slave, int port,
1767                                      u8 smac_index, u64 *mac)
1768 {
1769         struct mlx4_priv *priv = mlx4_priv(dev);
1770         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1771         struct list_head *mac_list =
1772                 &tracker->slave_list[slave].res_list[RES_MAC];
1773         struct mac_res *res, *tmp;
1774
1775         list_for_each_entry_safe(res, tmp, mac_list, list) {
1776                 if (res->smac_index == smac_index && res->port == (u8) port) {
1777                         *mac = res->mac;
1778                         return 0;
1779                 }
1780         }
1781         return -ENOENT;
1782 }
1783
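/*
 * Per-slave MAC bookkeeping: every registered (mac, port) pair is kept on
 * the slave's RES_MAC list with a reference count.  mac_add_to_slave()
 * either bumps the count of an existing entry or charges one RES_MAC unit
 * against the slave's quota and appends a new entry; mac_del_from_slave()
 * drops a single reference, and rem_slave_macs() releases everything that
 * is left when the slave goes away.
 */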
1784 static int mac_add_to_slave(struct mlx4_dev *dev, int slave, u64 mac, int port, u8 smac_index)
1785 {
1786         struct mlx4_priv *priv = mlx4_priv(dev);
1787         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1788         struct list_head *mac_list =
1789                 &tracker->slave_list[slave].res_list[RES_MAC];
1790         struct mac_res *res, *tmp;
1791
1792         list_for_each_entry_safe(res, tmp, mac_list, list) {
1793                 if (res->mac == mac && res->port == (u8) port) {
1794                         /* mac found. update ref count */
1795                         ++res->ref_count;
1796                         return 0;
1797                 }
1798         }
1799
1800         if (mlx4_grant_resource(dev, slave, RES_MAC, 1, port))
1801                 return -EINVAL;
1802         res = kzalloc(sizeof(*res), GFP_KERNEL);
1803         if (!res) {
1804                 mlx4_release_resource(dev, slave, RES_MAC, 1, port);
1805                 return -ENOMEM;
1806         }
1807         res->mac = mac;
1808         res->port = (u8) port;
1809         res->smac_index = smac_index;
1810         res->ref_count = 1;
1811         list_add_tail(&res->list,
1812                       &tracker->slave_list[slave].res_list[RES_MAC]);
1813         return 0;
1814 }
1815
1816 static void mac_del_from_slave(struct mlx4_dev *dev, int slave, u64 mac,
1817                                int port)
1818 {
1819         struct mlx4_priv *priv = mlx4_priv(dev);
1820         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1821         struct list_head *mac_list =
1822                 &tracker->slave_list[slave].res_list[RES_MAC];
1823         struct mac_res *res, *tmp;
1824
1825         list_for_each_entry_safe(res, tmp, mac_list, list) {
1826                 if (res->mac == mac && res->port == (u8) port) {
1827                         if (!--res->ref_count) {
1828                                 list_del(&res->list);
1829                                 mlx4_release_resource(dev, slave, RES_MAC, 1, port);
1830                                 kfree(res);
1831                         }
1832                         break;
1833                 }
1834         }
1835 }
1836
1837 static void rem_slave_macs(struct mlx4_dev *dev, int slave)
1838 {
1839         struct mlx4_priv *priv = mlx4_priv(dev);
1840         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1841         struct list_head *mac_list =
1842                 &tracker->slave_list[slave].res_list[RES_MAC];
1843         struct mac_res *res, *tmp;
1844         int i;
1845
1846         list_for_each_entry_safe(res, tmp, mac_list, list) {
1847                 list_del(&res->list);
1848                 /* unregister the MAC once for each reference the slave held */
1849                 for (i = 0; i < res->ref_count; i++)
1850                         __mlx4_unregister_mac(dev, res->port, res->mac);
1851                 mlx4_release_resource(dev, slave, RES_MAC, 1, res->port);
1852                 kfree(res);
1853         }
1854 }
1855
1856 static int mac_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1857                          u64 in_param, u64 *out_param, int in_port)
1858 {
1859         int err = -EINVAL;
1860         int port;
1861         u64 mac;
1862         u8 smac_index;
1863
1864         if (op != RES_OP_RESERVE_AND_MAP)
1865                 return err;
1866
1867         port = !in_port ? get_param_l(out_param) : in_port;
1868         port = mlx4_slave_convert_port(dev, slave, port);
1870
1871         if (port < 0)
1872                 return -EINVAL;
1873         mac = in_param;
1874
1875         err = __mlx4_register_mac(dev, port, mac);
1876         if (err >= 0) {
1877                 smac_index = err;
1878                 set_param_l(out_param, err);
1879                 err = 0;
1880         }
1881
1882         if (!err) {
1883                 err = mac_add_to_slave(dev, slave, mac, port, smac_index);
1884                 if (err)
1885                         __mlx4_unregister_mac(dev, port, mac);
1886         }
1887         return err;
1888 }
1889
1890 static int vlan_add_to_slave(struct mlx4_dev *dev, int slave, u16 vlan,
1891                              int port, int vlan_index)
1892 {
1893         struct mlx4_priv *priv = mlx4_priv(dev);
1894         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1895         struct list_head *vlan_list =
1896                 &tracker->slave_list[slave].res_list[RES_VLAN];
1897         struct vlan_res *res, *tmp;
1898
1899         list_for_each_entry_safe(res, tmp, vlan_list, list) {
1900                 if (res->vlan == vlan && res->port == (u8) port) {
1901                         /* vlan found. update ref count */
1902                         ++res->ref_count;
1903                         return 0;
1904                 }
1905         }
1906
1907         if (mlx4_grant_resource(dev, slave, RES_VLAN, 1, port))
1908                 return -EINVAL;
1909         res = kzalloc(sizeof(*res), GFP_KERNEL);
1910         if (!res) {
1911                 mlx4_release_resource(dev, slave, RES_VLAN, 1, port);
1912                 return -ENOMEM;
1913         }
1914         res->vlan = vlan;
1915         res->port = (u8) port;
1916         res->vlan_index = vlan_index;
1917         res->ref_count = 1;
1918         list_add_tail(&res->list,
1919                       &tracker->slave_list[slave].res_list[RES_VLAN]);
1920         return 0;
1921 }
1922
1924 static void vlan_del_from_slave(struct mlx4_dev *dev, int slave, u16 vlan,
1925                                 int port)
1926 {
1927         struct mlx4_priv *priv = mlx4_priv(dev);
1928         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1929         struct list_head *vlan_list =
1930                 &tracker->slave_list[slave].res_list[RES_VLAN];
1931         struct vlan_res *res, *tmp;
1932
1933         list_for_each_entry_safe(res, tmp, vlan_list, list) {
1934                 if (res->vlan == vlan && res->port == (u8) port) {
1935                         if (!--res->ref_count) {
1936                                 list_del(&res->list);
1937                                 mlx4_release_resource(dev, slave, RES_VLAN,
1938                                                       1, port);
1939                                 kfree(res);
1940                         }
1941                         break;
1942                 }
1943         }
1944 }
1945
1946 static void rem_slave_vlans(struct mlx4_dev *dev, int slave)
1947 {
1948         struct mlx4_priv *priv = mlx4_priv(dev);
1949         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1950         struct list_head *vlan_list =
1951                 &tracker->slave_list[slave].res_list[RES_VLAN];
1952         struct vlan_res *res, *tmp;
1953         int i;
1954
1955         list_for_each_entry_safe(res, tmp, vlan_list, list) {
1956                 list_del(&res->list);
1957                 /* unregister the VLAN once for each reference the slave held */
1958                 for (i = 0; i < res->ref_count; i++)
1959                         __mlx4_unregister_vlan(dev, res->port, res->vlan);
1960                 mlx4_release_resource(dev, slave, RES_VLAN, 1, res->port);
1961                 kfree(res);
1962         }
1963 }
1964
1965 static int vlan_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1966                           u64 in_param, u64 *out_param, int in_port)
1967 {
1968         struct mlx4_priv *priv = mlx4_priv(dev);
1969         struct mlx4_slave_state *slave_state = priv->mfunc.master.slave_state;
1970         int err;
1971         u16 vlan;
1972         int vlan_index;
1973         int port;
1974
1975         port = !in_port ? get_param_l(out_param) : in_port;
1976
1977         if (!port || op != RES_OP_RESERVE_AND_MAP)
1978                 return -EINVAL;
1979
1980         port = mlx4_slave_convert_port(dev, slave, port);
1982
1983         if (port < 0)
1984                 return -EINVAL;
1985         /* upstream kernels had NOP for reg/unreg vlan. Continue this. */
1986         if (!in_port && port > 0 && port <= dev->caps.num_ports) {
1987                 slave_state[slave].old_vlan_api = true;
1988                 return 0;
1989         }
1990
1991         vlan = (u16) in_param;
1992
1993         err = __mlx4_register_vlan(dev, port, vlan, &vlan_index);
1994         if (!err) {
1995                 set_param_l(out_param, (u32) vlan_index);
1996                 err = vlan_add_to_slave(dev, slave, vlan, port, vlan_index);
1997                 if (err)
1998                         __mlx4_unregister_vlan(dev, port, vlan);
1999         }
2000         return err;
2001 }
2002
2003 static int counter_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2004                              u64 in_param, u64 *out_param)
2005 {
2006         u32 index;
2007         int err;
2008
2009         if (op != RES_OP_RESERVE)
2010                 return -EINVAL;
2011
2012         err = mlx4_grant_resource(dev, slave, RES_COUNTER, 1, 0);
2013         if (err)
2014                 return err;
2015
2016         err = __mlx4_counter_alloc(dev, &index);
2017         if (err) {
2018                 mlx4_release_resource(dev, slave, RES_COUNTER, 1, 0);
2019                 return err;
2020         }
2021
2022         err = add_res_range(dev, slave, index, 1, RES_COUNTER, 0);
2023         if (err) {
2024                 __mlx4_counter_free(dev, index);
2025                 mlx4_release_resource(dev, slave, RES_COUNTER, 1, 0);
2026         } else {
2027                 set_param_l(out_param, index);
2028         }
2029
2030         return err;
2031 }
2032
2033 static int xrcdn_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2034                            u64 in_param, u64 *out_param)
2035 {
2036         u32 xrcdn;
2037         int err;
2038
2039         if (op != RES_OP_RESERVE)
2040                 return -EINVAL;
2041
2042         err = __mlx4_xrcd_alloc(dev, &xrcdn);
2043         if (err)
2044                 return err;
2045
2046         err = add_res_range(dev, slave, xrcdn, 1, RES_XRCD, 0);
2047         if (err)
2048                 __mlx4_xrcd_free(dev, xrcdn);
2049         else
2050                 set_param_l(out_param, xrcdn);
2051
2052         return err;
2053 }
2054
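/*
 * ALLOC_RES dispatch: the low byte of vhcr->in_modifier selects the
 * resource type, vhcr->op_modifier carries the RES_OP_* sub-operation, and
 * for MAC/VLAN resources bits 8-15 of in_modifier carry the port number.
 * The allocated identifier (QPN base, MPT index, CQN, ...) is handed back
 * to the slave in vhcr->out_param.
 */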
2055 int mlx4_ALLOC_RES_wrapper(struct mlx4_dev *dev, int slave,
2056                            struct mlx4_vhcr *vhcr,
2057                            struct mlx4_cmd_mailbox *inbox,
2058                            struct mlx4_cmd_mailbox *outbox,
2059                            struct mlx4_cmd_info *cmd)
2060 {
2061         int err;
2062         int alop = vhcr->op_modifier;
2063
2064         switch (vhcr->in_modifier & 0xFF) {
2065         case RES_QP:
2066                 err = qp_alloc_res(dev, slave, vhcr->op_modifier, alop,
2067                                    vhcr->in_param, &vhcr->out_param);
2068                 break;
2069
2070         case RES_MTT:
2071                 err = mtt_alloc_res(dev, slave, vhcr->op_modifier, alop,
2072                                     vhcr->in_param, &vhcr->out_param);
2073                 break;
2074
2075         case RES_MPT:
2076                 err = mpt_alloc_res(dev, slave, vhcr->op_modifier, alop,
2077                                     vhcr->in_param, &vhcr->out_param);
2078                 break;
2079
2080         case RES_CQ:
2081                 err = cq_alloc_res(dev, slave, vhcr->op_modifier, alop,
2082                                    vhcr->in_param, &vhcr->out_param);
2083                 break;
2084
2085         case RES_SRQ:
2086                 err = srq_alloc_res(dev, slave, vhcr->op_modifier, alop,
2087                                     vhcr->in_param, &vhcr->out_param);
2088                 break;
2089
2090         case RES_MAC:
2091                 err = mac_alloc_res(dev, slave, vhcr->op_modifier, alop,
2092                                     vhcr->in_param, &vhcr->out_param,
2093                                     (vhcr->in_modifier >> 8) & 0xFF);
2094                 break;
2095
2096         case RES_VLAN:
2097                 err = vlan_alloc_res(dev, slave, vhcr->op_modifier, alop,
2098                                      vhcr->in_param, &vhcr->out_param,
2099                                      (vhcr->in_modifier >> 8) & 0xFF);
2100                 break;
2101
2102         case RES_COUNTER:
2103                 err = counter_alloc_res(dev, slave, vhcr->op_modifier, alop,
2104                                         vhcr->in_param, &vhcr->out_param);
2105                 break;
2106
2107         case RES_XRCD:
2108                 err = xrcdn_alloc_res(dev, slave, vhcr->op_modifier, alop,
2109                                       vhcr->in_param, &vhcr->out_param);
2110                 break;
2111
2112         default:
2113                 err = -EINVAL;
2114                 break;
2115         }
2116
2117         return err;
2118 }
2119
2120 static int qp_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2121                        u64 in_param)
2122 {
2123         int err;
2124         int count;
2125         int base;
2126         int qpn;
2127
2128         switch (op) {
2129         case RES_OP_RESERVE:
2130                 base = get_param_l(&in_param) & 0x7fffff;
2131                 count = get_param_h(&in_param);
2132                 err = rem_res_range(dev, slave, base, count, RES_QP, 0);
2133                 if (err)
2134                         break;
2135                 mlx4_release_resource(dev, slave, RES_QP, count, 0);
2136                 __mlx4_qp_release_range(dev, base, count);
2137                 break;
2138         case RES_OP_MAP_ICM:
2139                 qpn = get_param_l(&in_param) & 0x7fffff;
2140                 err = qp_res_start_move_to(dev, slave, qpn, RES_QP_RESERVED,
2141                                            NULL, 0);
2142                 if (err)
2143                         return err;
2144
2145                 if (!fw_reserved(dev, qpn))
2146                         __mlx4_qp_free_icm(dev, qpn);
2147
2148                 res_end_move(dev, slave, RES_QP, qpn);
2149
2150                 if (valid_reserved(dev, slave, qpn))
2151                         err = rem_res_range(dev, slave, qpn, 1, RES_QP, 0);
2152                 break;
2153         default:
2154                 err = -EINVAL;
2155                 break;
2156         }
2157         return err;
2158 }
2159
2160 static int mtt_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2161                         u64 in_param, u64 *out_param)
2162 {
2163         int err = -EINVAL;
2164         int base;
2165         int order;
2166
2167         if (op != RES_OP_RESERVE_AND_MAP)
2168                 return err;
2169
2170         base = get_param_l(&in_param);
2171         order = get_param_h(&in_param);
2172         err = rem_res_range(dev, slave, base, 1, RES_MTT, order);
2173         if (!err) {
2174                 mlx4_release_resource(dev, slave, RES_MTT, 1 << order, 0);
2175                 __mlx4_free_mtt_range(dev, base, order);
2176         }
2177         return err;
2178 }
2179
2180 static int mpt_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2181                         u64 in_param)
2182 {
2183         int err = -EINVAL;
2184         int index;
2185         int id;
2186         struct res_mpt *mpt;
2187
2188         switch (op) {
2189         case RES_OP_RESERVE:
2190                 index = get_param_l(&in_param);
2191                 id = index & mpt_mask(dev);
2192                 err = get_res(dev, slave, id, RES_MPT, &mpt);
2193                 if (err)
2194                         break;
2195                 index = mpt->key;
2196                 put_res(dev, slave, id, RES_MPT);
2197
2198                 err = rem_res_range(dev, slave, id, 1, RES_MPT, 0);
2199                 if (err)
2200                         break;
2201                 mlx4_release_resource(dev, slave, RES_MPT, 1, 0);
2202                 __mlx4_mpt_release(dev, index);
2203                 break;
2204         case RES_OP_MAP_ICM:
2205                 index = get_param_l(&in_param);
2206                 id = index & mpt_mask(dev);
2207                 err = mr_res_start_move_to(dev, slave, id,
2208                                            RES_MPT_RESERVED, &mpt);
2209                 if (err)
2210                         return err;
2211
2212                 __mlx4_mpt_free_icm(dev, mpt->key);
2213                 res_end_move(dev, slave, RES_MPT, id);
2214                 return err;
2216         default:
2217                 err = -EINVAL;
2218                 break;
2219         }
2220         return err;
2221 }
2222
2223 static int cq_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2224                        u64 in_param, u64 *out_param)
2225 {
2226         int cqn;
2227         int err;
2228
2229         switch (op) {
2230         case RES_OP_RESERVE_AND_MAP:
2231                 cqn = get_param_l(&in_param);
2232                 err = rem_res_range(dev, slave, cqn, 1, RES_CQ, 0);
2233                 if (err)
2234                         break;
2235
2236                 mlx4_release_resource(dev, slave, RES_CQ, 1, 0);
2237                 __mlx4_cq_free_icm(dev, cqn);
2238                 break;
2239
2240         default:
2241                 err = -EINVAL;
2242                 break;
2243         }
2244
2245         return err;
2246 }
2247
2248 static int srq_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2249                         u64 in_param, u64 *out_param)
2250 {
2251         int srqn;
2252         int err;
2253
2254         switch (op) {
2255         case RES_OP_RESERVE_AND_MAP:
2256                 srqn = get_param_l(&in_param);
2257                 err = rem_res_range(dev, slave, srqn, 1, RES_SRQ, 0);
2258                 if (err)
2259                         break;
2260
2261                 mlx4_release_resource(dev, slave, RES_SRQ, 1, 0);
2262                 __mlx4_srq_free_icm(dev, srqn);
2263                 break;
2264
2265         default:
2266                 err = -EINVAL;
2267                 break;
2268         }
2269
2270         return err;
2271 }
2272
2273 static int mac_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2274                             u64 in_param, u64 *out_param, int in_port)
2275 {
2276         int port;
2277         int err = 0;
2278
2279         switch (op) {
2280         case RES_OP_RESERVE_AND_MAP:
2281                 port = !in_port ? get_param_l(out_param) : in_port;
2282                 port = mlx4_slave_convert_port(dev, slave, port);
2284
2285                 if (port < 0)
2286                         return -EINVAL;
2287                 mac_del_from_slave(dev, slave, in_param, port);
2288                 __mlx4_unregister_mac(dev, port, in_param);
2289                 break;
2290         default:
2291                 err = -EINVAL;
2292                 break;
2293         }
2294
2295         return err;
2296
2297 }
2298
2299 static int vlan_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2300                             u64 in_param, u64 *out_param, int port)
2301 {
2302         struct mlx4_priv *priv = mlx4_priv(dev);
2303         struct mlx4_slave_state *slave_state = priv->mfunc.master.slave_state;
2304         int err = 0;
2305
2306         port = mlx4_slave_convert_port(dev, slave, port);
2308
2309         if (port < 0)
2310                 return -EINVAL;
2311         switch (op) {
2312         case RES_OP_RESERVE_AND_MAP:
2313                 if (slave_state[slave].old_vlan_api)
2314                         return 0;
2315                 if (!port)
2316                         return -EINVAL;
2317                 vlan_del_from_slave(dev, slave, in_param, port);
2318                 __mlx4_unregister_vlan(dev, port, in_param);
2319                 break;
2320         default:
2321                 err = -EINVAL;
2322                 break;
2323         }
2324
2325         return err;
2326 }
2327
2328 static int counter_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2329                             u64 in_param, u64 *out_param)
2330 {
2331         int index;
2332         int err;
2333
2334         if (op != RES_OP_RESERVE)
2335                 return -EINVAL;
2336
2337         index = get_param_l(&in_param);
2338         err = rem_res_range(dev, slave, index, 1, RES_COUNTER, 0);
2339         if (err)
2340                 return err;
2341
2342         __mlx4_counter_free(dev, index);
2343         mlx4_release_resource(dev, slave, RES_COUNTER, 1, 0);
2344
2345         return err;
2346 }
2347
2348 static int xrcdn_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2349                           u64 in_param, u64 *out_param)
2350 {
2351         int xrcdn;
2352         int err;
2353
2354         if (op != RES_OP_RESERVE)
2355                 return -EINVAL;
2356
2357         xrcdn = get_param_l(&in_param);
2358         err = rem_res_range(dev, slave, xrcdn, 1, RES_XRCD, 0);
2359         if (err)
2360                 return err;
2361
2362         __mlx4_xrcd_free(dev, xrcdn);
2363
2364         return err;
2365 }
2366
2367 int mlx4_FREE_RES_wrapper(struct mlx4_dev *dev, int slave,
2368                           struct mlx4_vhcr *vhcr,
2369                           struct mlx4_cmd_mailbox *inbox,
2370                           struct mlx4_cmd_mailbox *outbox,
2371                           struct mlx4_cmd_info *cmd)
2372 {
2373         int err = -EINVAL;
2374         int alop = vhcr->op_modifier;
2375
2376         switch (vhcr->in_modifier & 0xFF) {
2377         case RES_QP:
2378                 err = qp_free_res(dev, slave, vhcr->op_modifier, alop,
2379                                   vhcr->in_param);
2380                 break;
2381
2382         case RES_MTT:
2383                 err = mtt_free_res(dev, slave, vhcr->op_modifier, alop,
2384                                    vhcr->in_param, &vhcr->out_param);
2385                 break;
2386
2387         case RES_MPT:
2388                 err = mpt_free_res(dev, slave, vhcr->op_modifier, alop,
2389                                    vhcr->in_param);
2390                 break;
2391
2392         case RES_CQ:
2393                 err = cq_free_res(dev, slave, vhcr->op_modifier, alop,
2394                                   vhcr->in_param, &vhcr->out_param);
2395                 break;
2396
2397         case RES_SRQ:
2398                 err = srq_free_res(dev, slave, vhcr->op_modifier, alop,
2399                                    vhcr->in_param, &vhcr->out_param);
2400                 break;
2401
2402         case RES_MAC:
2403                 err = mac_free_res(dev, slave, vhcr->op_modifier, alop,
2404                                    vhcr->in_param, &vhcr->out_param,
2405                                    (vhcr->in_modifier >> 8) & 0xFF);
2406                 break;
2407
2408         case RES_VLAN:
2409                 err = vlan_free_res(dev, slave, vhcr->op_modifier, alop,
2410                                     vhcr->in_param, &vhcr->out_param,
2411                                     (vhcr->in_modifier >> 8) & 0xFF);
2412                 break;
2413
2414         case RES_COUNTER:
2415                 err = counter_free_res(dev, slave, vhcr->op_modifier, alop,
2416                                        vhcr->in_param, &vhcr->out_param);
2417                 break;
2418
2419         case RES_XRCD:
2420                 err = xrcdn_free_res(dev, slave, vhcr->op_modifier, alop,
2421                                      vhcr->in_param, &vhcr->out_param);
                break;
2422
2423         default:
2424                 break;
2425         }
2426         return err;
2427 }
2428
2429 /* ugly but other choices are uglier */
2430 static int mr_phys_mpt(struct mlx4_mpt_entry *mpt)
2431 {
2432         return (be32_to_cpu(mpt->flags) >> 9) & 1;
2433 }
2434
2435 static int mr_get_mtt_addr(struct mlx4_mpt_entry *mpt)
2436 {
2437         return (int)be64_to_cpu(mpt->mtt_addr) & 0xfffffff8;
2438 }
2439
2440 static int mr_get_mtt_size(struct mlx4_mpt_entry *mpt)
2441 {
2442         return be32_to_cpu(mpt->mtt_sz);
2443 }
2444
2445 static u32 mr_get_pd(struct mlx4_mpt_entry *mpt)
2446 {
2447         return be32_to_cpu(mpt->pd_flags) & 0x00ffffff;
2448 }
2449
2450 static int mr_is_fmr(struct mlx4_mpt_entry *mpt)
2451 {
2452         return be32_to_cpu(mpt->pd_flags) & MLX4_MPT_PD_FLAG_FAST_REG;
2453 }
2454
2455 static int mr_is_bind_enabled(struct mlx4_mpt_entry *mpt)
2456 {
2457         return be32_to_cpu(mpt->flags) & MLX4_MPT_FLAG_BIND_ENABLE;
2458 }
2459
2460 static int mr_is_region(struct mlx4_mpt_entry *mpt)
2461 {
2462         return be32_to_cpu(mpt->flags) & MLX4_MPT_FLAG_REGION;
2463 }
2464
2465 static int qp_get_mtt_addr(struct mlx4_qp_context *qpc)
2466 {
2467         return be32_to_cpu(qpc->mtt_base_addr_l) & 0xfffffff8;
2468 }
2469
2470 static int srq_get_mtt_addr(struct mlx4_srq_context *srqc)
2471 {
2472         return be32_to_cpu(srqc->mtt_base_addr_l) & 0xfffffff8;
2473 }
2474
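/*
 * qp_get_mtt_size() derives how many MTT pages a QP context may reference:
 * SQ bytes = 1 << (log_sq_size + log_sq_stride + 4), RQ bytes likewise
 * unless the QP uses an SRQ, RSS or XRC (then the RQ contributes nothing),
 * and the sum is rounded up to a power-of-two number of pages of
 * 1 << page_shift bytes.  With illustrative numbers log_sq_size = 6,
 * log_sq_stride = 2, no RQ and page_shift = 12, that is 4096 bytes, i.e.
 * a single 4K page.
 */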
2475 static int qp_get_mtt_size(struct mlx4_qp_context *qpc)
2476 {
2477         int page_shift = (qpc->log_page_size & 0x3f) + 12;
2478         int log_sq_size = (qpc->sq_size_stride >> 3) & 0xf;
2479         int log_sq_stride = qpc->sq_size_stride & 7;
2480         int log_rq_size = (qpc->rq_size_stride >> 3) & 0xf;
2481         int log_rq_stride = qpc->rq_size_stride & 7;
2482         int srq = (be32_to_cpu(qpc->srqn) >> 24) & 1;
2483         int rss = (be32_to_cpu(qpc->flags) >> 13) & 1;
2484         u32 ts = (be32_to_cpu(qpc->flags) >> 16) & 0xff;
2485         int xrc = (ts == MLX4_QP_ST_XRC) ? 1 : 0;
2486         int sq_size;
2487         int rq_size;
2488         int total_pages;
2489         int total_mem;
2490         int page_offset = (be32_to_cpu(qpc->params2) >> 6) & 0x3f;
2491
2492         sq_size = 1 << (log_sq_size + log_sq_stride + 4);
2493         rq_size = (srq|rss|xrc) ? 0 : (1 << (log_rq_size + log_rq_stride + 4));
2494         total_mem = sq_size + rq_size;
2495         total_pages =
2496                 roundup_pow_of_two((total_mem + (page_offset << 6)) >>
2497                                    page_shift);
2498
2499         return total_pages;
2500 }
2501
2502 static int check_mtt_range(struct mlx4_dev *dev, int slave, int start,
2503                            int size, struct res_mtt *mtt)
2504 {
2505         int res_start = mtt->com.res_id;
2506         int res_size = (1 << mtt->order);
2507
2508         if (start < res_start || start + size > res_start + res_size)
2509                 return -EPERM;
2510         return 0;
2511 }
2512
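/*
 * SW2HW_MPT on behalf of a slave: the MPT is moved to RES_MPT_HW only
 * after checking that the entry describes a memory region (memory windows
 * are disabled for VFs), that the function bits embedded in the PD are
 * either zero or designate the calling slave, that an FMR does not have
 * bind enabled, and that a non-physical MPT points at an MTT range the
 * slave owns.  On success the MTT's reference count is raised so it cannot
 * be freed while the MR is in hardware ownership.
 */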
2513 int mlx4_SW2HW_MPT_wrapper(struct mlx4_dev *dev, int slave,
2514                            struct mlx4_vhcr *vhcr,
2515                            struct mlx4_cmd_mailbox *inbox,
2516                            struct mlx4_cmd_mailbox *outbox,
2517                            struct mlx4_cmd_info *cmd)
2518 {
2519         int err;
2520         int index = vhcr->in_modifier;
2521         struct res_mtt *mtt;
2522         struct res_mpt *mpt;
2523         int mtt_base = mr_get_mtt_addr(inbox->buf) / dev->caps.mtt_entry_sz;
2524         int phys;
2525         int id;
2526         u32 pd;
2527         int pd_slave;
2528
2529         id = index & mpt_mask(dev);
2530         err = mr_res_start_move_to(dev, slave, id, RES_MPT_HW, &mpt);
2531         if (err)
2532                 return err;
2533
2534         /* Disable memory windows for VFs. */
2535         if (!mr_is_region(inbox->buf)) {
2536                 err = -EPERM;
2537                 goto ex_abort;
2538         }
2539
2540         /* Make sure that the PD bits related to the slave id are zeros. */
2541         pd = mr_get_pd(inbox->buf);
2542         pd_slave = (pd >> 17) & 0x7f;
2543         if (pd_slave != 0 && --pd_slave != slave) {
2544                 err = -EPERM;
2545                 goto ex_abort;
2546         }
2547
2548         if (mr_is_fmr(inbox->buf)) {
2549                 /* FMR and Bind Enable are forbidden in slave devices. */
2550                 if (mr_is_bind_enabled(inbox->buf)) {
2551                         err = -EPERM;
2552                         goto ex_abort;
2553                 }
2554                 /* FMR and Memory Windows are also forbidden. */
2555                 if (!mr_is_region(inbox->buf)) {
2556                         err = -EPERM;
2557                         goto ex_abort;
2558                 }
2559         }
2560
2561         phys = mr_phys_mpt(inbox->buf);
2562         if (!phys) {
2563                 err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
2564                 if (err)
2565                         goto ex_abort;
2566
2567                 err = check_mtt_range(dev, slave, mtt_base,
2568                                       mr_get_mtt_size(inbox->buf), mtt);
2569                 if (err)
2570                         goto ex_put;
2571
2572                 mpt->mtt = mtt;
2573         }
2574
2575         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2576         if (err)
2577                 goto ex_put;
2578
2579         if (!phys) {
2580                 atomic_inc(&mtt->ref_count);
2581                 put_res(dev, slave, mtt->com.res_id, RES_MTT);
2582         }
2583
2584         res_end_move(dev, slave, RES_MPT, id);
2585         return 0;
2586
2587 ex_put:
2588         if (!phys)
2589                 put_res(dev, slave, mtt->com.res_id, RES_MTT);
2590 ex_abort:
2591         res_abort_move(dev, slave, RES_MPT, id);
2592
2593         return err;
2594 }
2595
2596 int mlx4_HW2SW_MPT_wrapper(struct mlx4_dev *dev, int slave,
2597                            struct mlx4_vhcr *vhcr,
2598                            struct mlx4_cmd_mailbox *inbox,
2599                            struct mlx4_cmd_mailbox *outbox,
2600                            struct mlx4_cmd_info *cmd)
2601 {
2602         int err;
2603         int index = vhcr->in_modifier;
2604         struct res_mpt *mpt;
2605         int id;
2606
2607         id = index & mpt_mask(dev);
2608         err = mr_res_start_move_to(dev, slave, id, RES_MPT_MAPPED, &mpt);
2609         if (err)
2610                 return err;
2611
2612         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2613         if (err)
2614                 goto ex_abort;
2615
2616         if (mpt->mtt)
2617                 atomic_dec(&mpt->mtt->ref_count);
2618
2619         res_end_move(dev, slave, RES_MPT, id);
2620         return 0;
2621
2622 ex_abort:
2623         res_abort_move(dev, slave, RES_MPT, id);
2624
2625         return err;
2626 }
2627
2628 int mlx4_QUERY_MPT_wrapper(struct mlx4_dev *dev, int slave,
2629                            struct mlx4_vhcr *vhcr,
2630                            struct mlx4_cmd_mailbox *inbox,
2631                            struct mlx4_cmd_mailbox *outbox,
2632                            struct mlx4_cmd_info *cmd)
2633 {
2634         int err;
2635         int index = vhcr->in_modifier;
2636         struct res_mpt *mpt;
2637         int id;
2638
2639         id = index & mpt_mask(dev);
2640         err = get_res(dev, slave, id, RES_MPT, &mpt);
2641         if (err)
2642                 return err;
2643
2644         if (mpt->com.from_state == RES_MPT_MAPPED) {
2645                 /* In order to allow rereg in SRIOV, we need to alter the MPT entry. To do
2646                  * that, the VF must read the MPT. But since the MPT entry memory is not
2647                  * in the VF's virtual memory space, it must use QUERY_MPT to obtain the
2648                  * entry contents. To guarantee that the MPT cannot be changed, the driver
2649                  * must perform HW2SW_MPT before this query and return the MPT entry to HW
2650                  * ownership fofollowing the change. The change here allows the VF to
2651                  * perform QUERY_MPT also when the entry is in SW ownership.
2652                  */
2653                 struct mlx4_mpt_entry *mpt_entry = mlx4_table_find(
2654                                         &mlx4_priv(dev)->mr_table.dmpt_table,
2655                                         mpt->key, NULL);
2656
2657                 if (!mpt_entry || !outbox->buf) {
2658                         err = -EINVAL;
2659                         goto out;
2660                 }
2661
2662                 memcpy(outbox->buf, mpt_entry, sizeof(*mpt_entry));
2663
2664                 err = 0;
2665         } else if (mpt->com.from_state == RES_MPT_HW) {
2666                 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2667         } else {
2668                 err = -EBUSY;
2669                 goto out;
2670         }
2671
2673 out:
2674         put_res(dev, slave, id, RES_MPT);
2675         return err;
2676 }
2677
2678 static int qp_get_rcqn(struct mlx4_qp_context *qpc)
2679 {
2680         return be32_to_cpu(qpc->cqn_recv) & 0xffffff;
2681 }
2682
2683 static int qp_get_scqn(struct mlx4_qp_context *qpc)
2684 {
2685         return be32_to_cpu(qpc->cqn_send) & 0xffffff;
2686 }
2687
2688 static u32 qp_get_srqn(struct mlx4_qp_context *qpc)
2689 {
2690         return be32_to_cpu(qpc->srqn) & 0x1ffffff;
2691 }
2692
2693 static void adjust_proxy_tun_qkey(struct mlx4_dev *dev, struct mlx4_vhcr *vhcr,
2694                                   struct mlx4_qp_context *context)
2695 {
2696         u32 qpn = vhcr->in_modifier & 0xffffff;
2697         u32 qkey = 0;
2698
2699         if (mlx4_get_parav_qkey(dev, qpn, &qkey))
2700                 return;
2701
2702         /* adjust qkey in qp context */
2703         context->qkey = cpu_to_be32(qkey);
2704 }
2705
2706 static int adjust_qp_sched_queue(struct mlx4_dev *dev, int slave,
2707                                  struct mlx4_qp_context *qpc,
2708                                  struct mlx4_cmd_mailbox *inbox);
2709
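/*
 * RST2INIT moves the QP to RES_QP_HW and pins everything the QP context
 * refers to: the MTT range, the receive and send CQs (which may be the
 * same CQ) and, when used, the SRQ all get their reference counts bumped
 * so they cannot be freed underneath a hardware-owned QP.  Any failure
 * unwinds the lookups in reverse order and aborts the state move.
 */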
2710 int mlx4_RST2INIT_QP_wrapper(struct mlx4_dev *dev, int slave,
2711                              struct mlx4_vhcr *vhcr,
2712                              struct mlx4_cmd_mailbox *inbox,
2713                              struct mlx4_cmd_mailbox *outbox,
2714                              struct mlx4_cmd_info *cmd)
2715 {
2716         int err;
2717         int qpn = vhcr->in_modifier & 0x7fffff;
2718         struct res_mtt *mtt;
2719         struct res_qp *qp;
2720         struct mlx4_qp_context *qpc = inbox->buf + 8;
2721         int mtt_base = qp_get_mtt_addr(qpc) / dev->caps.mtt_entry_sz;
2722         int mtt_size = qp_get_mtt_size(qpc);
2723         struct res_cq *rcq;
2724         struct res_cq *scq;
2725         int rcqn = qp_get_rcqn(qpc);
2726         int scqn = qp_get_scqn(qpc);
2727         u32 srqn = qp_get_srqn(qpc) & 0xffffff;
2728         int use_srq = (qp_get_srqn(qpc) >> 24) & 1;
2729         struct res_srq *srq;
2730         int local_qpn = be32_to_cpu(qpc->local_qpn) & 0xffffff;
2731
2732         err = adjust_qp_sched_queue(dev, slave, qpc, inbox);
2733         if (err)
2734                 return err;
2735
2736         err = qp_res_start_move_to(dev, slave, qpn, RES_QP_HW, &qp, 0);
2737         if (err)
2738                 return err;
2739         qp->local_qpn = local_qpn;
2740         qp->sched_queue = 0;
2741         qp->param3 = 0;
2742         qp->vlan_control = 0;
2743         qp->fvl_rx = 0;
2744         qp->pri_path_fl = 0;
2745         qp->vlan_index = 0;
2746         qp->feup = 0;
2747         qp->qpc_flags = be32_to_cpu(qpc->flags);
2748
2749         err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
2750         if (err)
2751                 goto ex_abort;
2752
2753         err = check_mtt_range(dev, slave, mtt_base, mtt_size, mtt);
2754         if (err)
2755                 goto ex_put_mtt;
2756
2757         err = get_res(dev, slave, rcqn, RES_CQ, &rcq);
2758         if (err)
2759                 goto ex_put_mtt;
2760
2761         if (scqn != rcqn) {
2762                 err = get_res(dev, slave, scqn, RES_CQ, &scq);
2763                 if (err)
2764                         goto ex_put_rcq;
2765         } else
2766                 scq = rcq;
2767
2768         if (use_srq) {
2769                 err = get_res(dev, slave, srqn, RES_SRQ, &srq);
2770                 if (err)
2771                         goto ex_put_scq;
2772         }
2773
2774         adjust_proxy_tun_qkey(dev, vhcr, qpc);
2775         update_pkey_index(dev, slave, inbox);
2776         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2777         if (err)
2778                 goto ex_put_srq;
2779         atomic_inc(&mtt->ref_count);
2780         qp->mtt = mtt;
2781         atomic_inc(&rcq->ref_count);
2782         qp->rcq = rcq;
2783         atomic_inc(&scq->ref_count);
2784         qp->scq = scq;
2785
2786         if (scqn != rcqn)
2787                 put_res(dev, slave, scqn, RES_CQ);
2788
2789         if (use_srq) {
2790                 atomic_inc(&srq->ref_count);
2791                 put_res(dev, slave, srqn, RES_SRQ);
2792                 qp->srq = srq;
2793         }
2794         put_res(dev, slave, rcqn, RES_CQ);
2795         put_res(dev, slave, mtt_base, RES_MTT);
2796         res_end_move(dev, slave, RES_QP, qpn);
2797
2798         return 0;
2799
2800 ex_put_srq:
2801         if (use_srq)
2802                 put_res(dev, slave, srqn, RES_SRQ);
2803 ex_put_scq:
2804         if (scqn != rcqn)
2805                 put_res(dev, slave, scqn, RES_CQ);
2806 ex_put_rcq:
2807         put_res(dev, slave, rcqn, RES_CQ);
2808 ex_put_mtt:
2809         put_res(dev, slave, mtt_base, RES_MTT);
2810 ex_abort:
2811         res_abort_move(dev, slave, RES_QP, qpn);
2812
2813         return err;
2814 }
2815
2816 static int eq_get_mtt_addr(struct mlx4_eq_context *eqc)
2817 {
2818         return be32_to_cpu(eqc->mtt_base_addr_l) & 0xfffffff8;
2819 }
2820
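/*
 * eq_get_mtt_size() and cq_get_mtt_size() below compute the number of MTT
 * entries backing an EQ or CQ: the +5 corresponds to the 32-byte queue
 * entry size, so the queue occupies 1 << (log_size + 5) bytes, i.e.
 * 1 << (log_size + 5 - page_shift) pages, clamped to a minimum of one.
 * With illustrative numbers log_eq_size = 10 and page_shift = 12, that is
 * 1 << 3 = 8 MTT entries.
 */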
2821 static int eq_get_mtt_size(struct mlx4_eq_context *eqc)
2822 {
2823         int log_eq_size = eqc->log_eq_size & 0x1f;
2824         int page_shift = (eqc->log_page_size & 0x3f) + 12;
2825
2826         if (log_eq_size + 5 < page_shift)
2827                 return 1;
2828
2829         return 1 << (log_eq_size + 5 - page_shift);
2830 }
2831
2832 static int cq_get_mtt_addr(struct mlx4_cq_context *cqc)
2833 {
2834         return be32_to_cpu(cqc->mtt_base_addr_l) & 0xfffffff8;
2835 }
2836
2837 static int cq_get_mtt_size(struct mlx4_cq_context *cqc)
2838 {
2839         int log_cq_size = (be32_to_cpu(cqc->logsize_usrpage) >> 24) & 0x1f;
2840         int page_shift = (cqc->log_page_size & 0x3f) + 12;
2841
2842         if (log_cq_size + 5 < page_shift)
2843                 return 1;
2844
2845         return 1 << (log_cq_size + 5 - page_shift);
2846 }
2847
2848 int mlx4_SW2HW_EQ_wrapper(struct mlx4_dev *dev, int slave,
2849                           struct mlx4_vhcr *vhcr,
2850                           struct mlx4_cmd_mailbox *inbox,
2851                           struct mlx4_cmd_mailbox *outbox,
2852                           struct mlx4_cmd_info *cmd)
2853 {
2854         int err;
2855         int eqn = vhcr->in_modifier;
2856         int res_id = (slave << 10) | eqn;
2857         struct mlx4_eq_context *eqc = inbox->buf;
2858         int mtt_base = eq_get_mtt_addr(eqc) / dev->caps.mtt_entry_sz;
2859         int mtt_size = eq_get_mtt_size(eqc);
2860         struct res_eq *eq;
2861         struct res_mtt *mtt;
2862
2863         err = add_res_range(dev, slave, res_id, 1, RES_EQ, 0);
2864         if (err)
2865                 return err;
2866         err = eq_res_start_move_to(dev, slave, res_id, RES_EQ_HW, &eq);
2867         if (err)
2868                 goto out_add;
2869
2870         err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
2871         if (err)
2872                 goto out_move;
2873
2874         err = check_mtt_range(dev, slave, mtt_base, mtt_size, mtt);
2875         if (err)
2876                 goto out_put;
2877
2878         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2879         if (err)
2880                 goto out_put;
2881
2882         atomic_inc(&mtt->ref_count);
2883         eq->mtt = mtt;
2884         put_res(dev, slave, mtt->com.res_id, RES_MTT);
2885         res_end_move(dev, slave, RES_EQ, res_id);
2886         return 0;
2887
2888 out_put:
2889         put_res(dev, slave, mtt->com.res_id, RES_MTT);
2890 out_move:
2891         res_abort_move(dev, slave, RES_EQ, res_id);
2892 out_add:
2893         rem_res_range(dev, slave, res_id, 1, RES_EQ, 0);
2894         return err;
2895 }
2896
2897 int mlx4_CONFIG_DEV_wrapper(struct mlx4_dev *dev, int slave,
2898                             struct mlx4_vhcr *vhcr,
2899                             struct mlx4_cmd_mailbox *inbox,
2900                             struct mlx4_cmd_mailbox *outbox,
2901                             struct mlx4_cmd_info *cmd)
2902 {
2903         int err;
2904         u8 get = vhcr->op_modifier;
2905
2906         if (get != 1)
2907                 return -EPERM;
2908
2909         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2910
2911         return err;
2912 }
2913
2914 static int get_containing_mtt(struct mlx4_dev *dev, int slave, int start,
2915                               int len, struct res_mtt **res)
2916 {
2917         struct mlx4_priv *priv = mlx4_priv(dev);
2918         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
2919         struct res_mtt *mtt;
2920         int err = -EINVAL;
2921
2922         spin_lock_irq(mlx4_tlock(dev));
2923         list_for_each_entry(mtt, &tracker->slave_list[slave].res_list[RES_MTT],
2924                             com.list) {
2925                 if (!check_mtt_range(dev, slave, start, len, mtt)) {
2926                         *res = mtt;
2927                         mtt->com.from_state = mtt->com.state;
2928                         mtt->com.state = RES_MTT_BUSY;
2929                         err = 0;
2930                         break;
2931                 }
2932         }
2933         spin_unlock_irq(mlx4_tlock(dev));
2934
2935         return err;
2936 }
2937
2938 static int verify_qp_parameters(struct mlx4_dev *dev,
2939                                 struct mlx4_vhcr *vhcr,
2940                                 struct mlx4_cmd_mailbox *inbox,
2941                                 enum qp_transition transition, u8 slave)
2942 {
2943         u32                     qp_type;
2944         u32                     qpn;
2945         struct mlx4_qp_context  *qp_ctx;
2946         enum mlx4_qp_optpar     optpar;
2947         int port;
2948         int num_gids;
2949
2950         qp_ctx  = inbox->buf + 8;
2951         qp_type = (be32_to_cpu(qp_ctx->flags) >> 16) & 0xff;
2952         optpar  = be32_to_cpu(*(__be32 *) inbox->buf);
2953
2954         if (slave != mlx4_master_func_num(dev)) {
2955                 qp_ctx->params2 &= ~MLX4_QP_BIT_FPP;
2956                 /* setting QP rate-limit is disallowed for VFs */
2957                 if (qp_ctx->rate_limit_params)
2958                         return -EPERM;
2959         }
2960
2961         switch (qp_type) {
2962         case MLX4_QP_ST_RC:
2963         case MLX4_QP_ST_XRC:
2964         case MLX4_QP_ST_UC:
2965                 switch (transition) {
2966                 case QP_TRANS_INIT2RTR:
2967                 case QP_TRANS_RTR2RTS:
2968                 case QP_TRANS_RTS2RTS:
2969                 case QP_TRANS_SQD2SQD:
2970                 case QP_TRANS_SQD2RTS:
2971                         if (slave != mlx4_master_func_num(dev)) {
2972                                 if (optpar & MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH) {
2973                                         port = (qp_ctx->pri_path.sched_queue >> 6 & 1) + 1;
2974                                         if (dev->caps.port_mask[port] != MLX4_PORT_TYPE_IB)
2975                                                 num_gids = mlx4_get_slave_num_gids(dev, slave, port);
2976                                         else
2977                                                 num_gids = 1;
2978                                         if (qp_ctx->pri_path.mgid_index >= num_gids)
2979                                                 return -EINVAL;
2980                                 }
2981                                 if (optpar & MLX4_QP_OPTPAR_ALT_ADDR_PATH) {
2982                                         port = (qp_ctx->alt_path.sched_queue >> 6 & 1) + 1;
2983                                         if (dev->caps.port_mask[port] != MLX4_PORT_TYPE_IB)
2984                                                 num_gids = mlx4_get_slave_num_gids(dev, slave, port);
2985                                         else
2986                                                 num_gids = 1;
2987                                         if (qp_ctx->alt_path.mgid_index >= num_gids)
2988                                                 return -EINVAL;
2989                                 }
                             }
2990                         break;
2991                 default:
2992                         break;
2993                 }
2994                 break;
2995
2996         case MLX4_QP_ST_MLX:
2997                 qpn = vhcr->in_modifier & 0x7fffff;
2998                 port = (qp_ctx->pri_path.sched_queue >> 6 & 1) + 1;
2999                 if (transition == QP_TRANS_INIT2RTR &&
3000                     slave != mlx4_master_func_num(dev) &&
3001                     mlx4_is_qp_reserved(dev, qpn) &&
3002                     !mlx4_vf_smi_enabled(dev, slave, port)) {
3003                         /* only enabled VFs may create MLX proxy QPs */
3004                         mlx4_err(dev, "%s: unprivileged slave %d attempting to create an MLX proxy special QP on port %d\n",
3005                                  __func__, slave, port);
3006                         return -EPERM;
3007                 }
3008                 break;
3009
3010         default:
3011                 break;
3012         }
3013
3014         return 0;
3015 }
3016
3017 int mlx4_WRITE_MTT_wrapper(struct mlx4_dev *dev, int slave,
3018                            struct mlx4_vhcr *vhcr,
3019                            struct mlx4_cmd_mailbox *inbox,
3020                            struct mlx4_cmd_mailbox *outbox,
3021                            struct mlx4_cmd_info *cmd)
3022 {
3023         struct mlx4_mtt mtt;
3024         __be64 *page_list = inbox->buf;
3025         u64 *pg_list = (u64 *)page_list;
3026         int i;
3027         struct res_mtt *rmtt = NULL;
3028         int start = be64_to_cpu(page_list[0]);
3029         int npages = vhcr->in_modifier;
3030         int err;
3031
3032         err = get_containing_mtt(dev, slave, start, npages, &rmtt);
3033         if (err)
3034                 return err;
3035
3036         /* Call the SW implementation of write_mtt:
3037          * - Prepare a dummy mtt struct
3038          * - Translate inbox contents to simple addresses in host endianness */
3039         mtt.offset = 0;  /* TBD: the real offset is not preserved here,
3040                             but this path does not use it */
3041         mtt.order = 0;
3042         mtt.page_shift = 0;
3043         for (i = 0; i < npages; ++i)
3044                 pg_list[i + 2] = (be64_to_cpu(page_list[i + 2]) & ~1ULL);
3045
3046         err = __mlx4_write_mtt(dev, &mtt, be64_to_cpu(page_list[0]), npages,
3047                                ((u64 *)page_list + 2));
3048
3049         if (rmtt)
3050                 put_res(dev, slave, rmtt->com.res_id, RES_MTT);
3051
3052         return err;
3053 }
3054
3055 int mlx4_HW2SW_EQ_wrapper(struct mlx4_dev *dev, int slave,
3056                           struct mlx4_vhcr *vhcr,
3057                           struct mlx4_cmd_mailbox *inbox,
3058                           struct mlx4_cmd_mailbox *outbox,
3059                           struct mlx4_cmd_info *cmd)
3060 {
3061         int eqn = vhcr->in_modifier;
3062         int res_id = eqn | (slave << 10);
3063         struct res_eq *eq;
3064         int err;
3065
3066         err = eq_res_start_move_to(dev, slave, res_id, RES_EQ_RESERVED, &eq);
3067         if (err)
3068                 return err;
3069
3070         err = get_res(dev, slave, eq->mtt->com.res_id, RES_MTT, NULL);
3071         if (err)
3072                 goto ex_abort;
3073
3074         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3075         if (err)
3076                 goto ex_put;
3077
3078         atomic_dec(&eq->mtt->ref_count);
3079         put_res(dev, slave, eq->mtt->com.res_id, RES_MTT);
3080         res_end_move(dev, slave, RES_EQ, res_id);
3081         rem_res_range(dev, slave, res_id, 1, RES_EQ, 0);
3082
3083         return 0;
3084
3085 ex_put:
3086         put_res(dev, slave, eq->mtt->com.res_id, RES_MTT);
3087 ex_abort:
3088         res_abort_move(dev, slave, RES_EQ, res_id);
3089
3090         return err;
3091 }
3092
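/* Deliver an asynchronous event to a slave.  The event is generated only
 * if the slave is active and has registered an EQ for this event type;
 * the EQE is copied into a mailbox and passed to the GEN_EQE command,
 * which queues it on the slave's event EQ.
 */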
3093 int mlx4_GEN_EQE(struct mlx4_dev *dev, int slave, struct mlx4_eqe *eqe)
3094 {
3095         struct mlx4_priv *priv = mlx4_priv(dev);
3096         struct mlx4_slave_event_eq_info *event_eq;
3097         struct mlx4_cmd_mailbox *mailbox;
3098         u32 in_modifier = 0;
3099         int err;
3100         int res_id;
3101         struct res_eq *req;
3102
3103         if (!priv->mfunc.master.slave_state)
3104                 return -EINVAL;
3105
3106         /* check that the slave is valid, is not the PF, and is active */
3107         if (slave < 0 || slave > dev->persist->num_vfs ||
3108             slave == dev->caps.function ||
3109             !priv->mfunc.master.slave_state[slave].active)
3110                 return 0;
3111
3112         event_eq = &priv->mfunc.master.slave_state[slave].event_eq[eqe->type];
3113
3114         /* Create the event only if the slave is registered */
3115         if (event_eq->eqn < 0)
3116                 return 0;
3117
3118         mutex_lock(&priv->mfunc.master.gen_eqe_mutex[slave]);
3119         res_id = (slave << 10) | event_eq->eqn;
3120         err = get_res(dev, slave, res_id, RES_EQ, &req);
3121         if (err)
3122                 goto unlock;
3123
3124         if (req->com.from_state != RES_EQ_HW) {
3125                 err = -EINVAL;
3126                 goto put;
3127         }
3128
3129         mailbox = mlx4_alloc_cmd_mailbox(dev);
3130         if (IS_ERR(mailbox)) {
3131                 err = PTR_ERR(mailbox);
3132                 goto put;
3133         }
3134
3135         if (eqe->type == MLX4_EVENT_TYPE_CMD) {
3136                 ++event_eq->token;
3137                 eqe->event.cmd.token = cpu_to_be16(event_eq->token);
3138         }
3139
3140         memcpy(mailbox->buf, (u8 *) eqe, 28);
3141
3142         in_modifier = (slave & 0xff) | ((event_eq->eqn & 0x3ff) << 16);
3143
3144         err = mlx4_cmd(dev, mailbox->dma, in_modifier, 0,
3145                        MLX4_CMD_GEN_EQE, MLX4_CMD_TIME_CLASS_B,
3146                        MLX4_CMD_NATIVE);
3147
3148         put_res(dev, slave, res_id, RES_EQ);
3149         mutex_unlock(&priv->mfunc.master.gen_eqe_mutex[slave]);
3150         mlx4_free_cmd_mailbox(dev, mailbox);
3151         return err;
3152
3153 put:
3154         put_res(dev, slave, res_id, RES_EQ);
3155
3156 unlock:
3157         mutex_unlock(&priv->mfunc.master.gen_eqe_mutex[slave]);
3158         return err;
3159 }
3160
3161 int mlx4_QUERY_EQ_wrapper(struct mlx4_dev *dev, int slave,
3162                           struct mlx4_vhcr *vhcr,
3163                           struct mlx4_cmd_mailbox *inbox,
3164                           struct mlx4_cmd_mailbox *outbox,
3165                           struct mlx4_cmd_info *cmd)
3166 {
3167         int eqn = vhcr->in_modifier;
3168         int res_id = eqn | (slave << 10);
3169         struct res_eq *eq;
3170         int err;
3171
3172         err = get_res(dev, slave, res_id, RES_EQ, &eq);
3173         if (err)
3174                 return err;
3175
3176         if (eq->com.from_state != RES_EQ_HW) {
3177                 err = -EINVAL;
3178                 goto ex_put;
3179         }
3180
3181         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3182
3183 ex_put:
3184         put_res(dev, slave, res_id, RES_EQ);
3185         return err;
3186 }
3187
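/* SW2HW_CQ: move the CQ to hardware ownership after verifying that the
 * MTT referenced by the CQ context belongs to the slave and covers the
 * CQ buffer; on success the CQ keeps a reference on that MTT.
 */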
3188 int mlx4_SW2HW_CQ_wrapper(struct mlx4_dev *dev, int slave,
3189                           struct mlx4_vhcr *vhcr,
3190                           struct mlx4_cmd_mailbox *inbox,
3191                           struct mlx4_cmd_mailbox *outbox,
3192                           struct mlx4_cmd_info *cmd)
3193 {
3194         int err;
3195         int cqn = vhcr->in_modifier;
3196         struct mlx4_cq_context *cqc = inbox->buf;
3197         int mtt_base = cq_get_mtt_addr(cqc) / dev->caps.mtt_entry_sz;
3198         struct res_cq *cq = NULL;
3199         struct res_mtt *mtt;
3200
3201         err = cq_res_start_move_to(dev, slave, cqn, RES_CQ_HW, &cq);
3202         if (err)
3203                 return err;
3204         err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
3205         if (err)
3206                 goto out_move;
3207         err = check_mtt_range(dev, slave, mtt_base, cq_get_mtt_size(cqc), mtt);
3208         if (err)
3209                 goto out_put;
3210         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3211         if (err)
3212                 goto out_put;
3213         atomic_inc(&mtt->ref_count);
3214         cq->mtt = mtt;
3215         put_res(dev, slave, mtt->com.res_id, RES_MTT);
3216         res_end_move(dev, slave, RES_CQ, cqn);
3217         return 0;
3218
3219 out_put:
3220         put_res(dev, slave, mtt->com.res_id, RES_MTT);
3221 out_move:
3222         res_abort_move(dev, slave, RES_CQ, cqn);
3223         return err;
3224 }
3225
3226 int mlx4_HW2SW_CQ_wrapper(struct mlx4_dev *dev, int slave,
3227                           struct mlx4_vhcr *vhcr,
3228                           struct mlx4_cmd_mailbox *inbox,
3229                           struct mlx4_cmd_mailbox *outbox,
3230                           struct mlx4_cmd_info *cmd)
3231 {
3232         int err;
3233         int cqn = vhcr->in_modifier;
3234         struct res_cq *cq = NULL;
3235
3236         err = cq_res_start_move_to(dev, slave, cqn, RES_CQ_ALLOCATED, &cq);
3237         if (err)
3238                 return err;
3239         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3240         if (err)
3241                 goto out_move;
3242         atomic_dec(&cq->mtt->ref_count);
3243         res_end_move(dev, slave, RES_CQ, cqn);
3244         return 0;
3245
3246 out_move:
3247         res_abort_move(dev, slave, RES_CQ, cqn);
3248         return err;
3249 }
3250
3251 int mlx4_QUERY_CQ_wrapper(struct mlx4_dev *dev, int slave,
3252                           struct mlx4_vhcr *vhcr,
3253                           struct mlx4_cmd_mailbox *inbox,
3254                           struct mlx4_cmd_mailbox *outbox,
3255                           struct mlx4_cmd_info *cmd)
3256 {
3257         int cqn = vhcr->in_modifier;
3258         struct res_cq *cq;
3259         int err;
3260
3261         err = get_res(dev, slave, cqn, RES_CQ, &cq);
3262         if (err)
3263                 return err;
3264
3265         if (cq->com.from_state != RES_CQ_HW)
3266                 goto ex_put;
3267
3268         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3269 ex_put:
3270         put_res(dev, slave, cqn, RES_CQ);
3271
3272         return err;
3273 }
3274
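/* CQ resize (MODIFY_CQ with op_modifier 0): check that the CQ still owns
 * its original MTT, validate the new MTT described in the mailbox, and
 * move the CQ's reference from the old MTT to the new one once the
 * firmware command succeeds.
 */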
3275 static int handle_resize(struct mlx4_dev *dev, int slave,
3276                          struct mlx4_vhcr *vhcr,
3277                          struct mlx4_cmd_mailbox *inbox,
3278                          struct mlx4_cmd_mailbox *outbox,
3279                          struct mlx4_cmd_info *cmd,
3280                          struct res_cq *cq)
3281 {
3282         int err;
3283         struct res_mtt *orig_mtt;
3284         struct res_mtt *mtt;
3285         struct mlx4_cq_context *cqc = inbox->buf;
3286         int mtt_base = cq_get_mtt_addr(cqc) / dev->caps.mtt_entry_sz;
3287
3288         err = get_res(dev, slave, cq->mtt->com.res_id, RES_MTT, &orig_mtt);
3289         if (err)
3290                 return err;
3291
3292         if (orig_mtt != cq->mtt) {
3293                 err = -EINVAL;
3294                 goto ex_put;
3295         }
3296
3297         err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
3298         if (err)
3299                 goto ex_put;
3300
3301         err = check_mtt_range(dev, slave, mtt_base, cq_get_mtt_size(cqc), mtt);
3302         if (err)
3303                 goto ex_put1;
3304         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3305         if (err)
3306                 goto ex_put1;
3307         atomic_dec(&orig_mtt->ref_count);
3308         put_res(dev, slave, orig_mtt->com.res_id, RES_MTT);
3309         atomic_inc(&mtt->ref_count);
3310         cq->mtt = mtt;
3311         put_res(dev, slave, mtt->com.res_id, RES_MTT);
3312         return 0;
3313
3314 ex_put1:
3315         put_res(dev, slave, mtt->com.res_id, RES_MTT);
3316 ex_put:
3317         put_res(dev, slave, orig_mtt->com.res_id, RES_MTT);
3318
3319         return err;
3320
3321 }
3322
3323 int mlx4_MODIFY_CQ_wrapper(struct mlx4_dev *dev, int slave,
3324                            struct mlx4_vhcr *vhcr,
3325                            struct mlx4_cmd_mailbox *inbox,
3326                            struct mlx4_cmd_mailbox *outbox,
3327                            struct mlx4_cmd_info *cmd)
3328 {
3329         int cqn = vhcr->in_modifier;
3330         struct res_cq *cq;
3331         int err;
3332
3333         err = get_res(dev, slave, cqn, RES_CQ, &cq);
3334         if (err)
3335                 return err;
3336
3337         if (cq->com.from_state != RES_CQ_HW)
3338                 goto ex_put;
3339
3340         if (vhcr->op_modifier == 0) {
3341                 err = handle_resize(dev, slave, vhcr, inbox, outbox, cmd, cq);
3342                 goto ex_put;
3343         }
3344
3345         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3346 ex_put:
3347         put_res(dev, slave, cqn, RES_CQ);
3348
3349         return err;
3350 }
3351
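/* Number of MTT entries needed for the SRQ buffer, derived from the SRQ
 * size, WQE stride and page size encoded in the SRQ context.
 */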
3352 static int srq_get_mtt_size(struct mlx4_srq_context *srqc)
3353 {
3354         int log_srq_size = (be32_to_cpu(srqc->state_logsize_srqn) >> 24) & 0xf;
3355         int log_rq_stride = srqc->logstride & 7;
3356         int page_shift = (srqc->log_page_size & 0x3f) + 12;
3357
3358         if (log_srq_size + log_rq_stride + 4 < page_shift)
3359                 return 1;
3360
3361         return 1 << (log_srq_size + log_rq_stride + 4 - page_shift);
3362 }
3363
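/* SW2HW_SRQ: verify that the SRQ number in the context matches the
 * command modifier and that the referenced MTT belongs to the slave,
 * then hand the SRQ to firmware and take a reference on the MTT.
 */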
3364 int mlx4_SW2HW_SRQ_wrapper(struct mlx4_dev *dev, int slave,
3365                            struct mlx4_vhcr *vhcr,
3366                            struct mlx4_cmd_mailbox *inbox,
3367                            struct mlx4_cmd_mailbox *outbox,
3368                            struct mlx4_cmd_info *cmd)
3369 {
3370         int err;
3371         int srqn = vhcr->in_modifier;
3372         struct res_mtt *mtt;
3373         struct res_srq *srq = NULL;
3374         struct mlx4_srq_context *srqc = inbox->buf;
3375         int mtt_base = srq_get_mtt_addr(srqc) / dev->caps.mtt_entry_sz;
3376
3377         if (srqn != (be32_to_cpu(srqc->state_logsize_srqn) & 0xffffff))
3378                 return -EINVAL;
3379
3380         err = srq_res_start_move_to(dev, slave, srqn, RES_SRQ_HW, &srq);
3381         if (err)
3382                 return err;
3383         err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
3384         if (err)
3385                 goto ex_abort;
3386         err = check_mtt_range(dev, slave, mtt_base, srq_get_mtt_size(srqc),
3387                               mtt);
3388         if (err)
3389                 goto ex_put_mtt;
3390
3391         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3392         if (err)
3393                 goto ex_put_mtt;
3394
3395         atomic_inc(&mtt->ref_count);
3396         srq->mtt = mtt;
3397         put_res(dev, slave, mtt->com.res_id, RES_MTT);
3398         res_end_move(dev, slave, RES_SRQ, srqn);
3399         return 0;
3400
3401 ex_put_mtt:
3402         put_res(dev, slave, mtt->com.res_id, RES_MTT);
3403 ex_abort:
3404         res_abort_move(dev, slave, RES_SRQ, srqn);
3405
3406         return err;
3407 }
3408
3409 int mlx4_HW2SW_SRQ_wrapper(struct mlx4_dev *dev, int slave,
3410                            struct mlx4_vhcr *vhcr,
3411                            struct mlx4_cmd_mailbox *inbox,
3412                            struct mlx4_cmd_mailbox *outbox,
3413                            struct mlx4_cmd_info *cmd)
3414 {
3415         int err;
3416         int srqn = vhcr->in_modifier;
3417         struct res_srq *srq = NULL;
3418
3419         err = srq_res_start_move_to(dev, slave, srqn, RES_SRQ_ALLOCATED, &srq);
3420         if (err)
3421                 return err;
3422         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3423         if (err)
3424                 goto ex_abort;
3425         atomic_dec(&srq->mtt->ref_count);
3426         if (srq->cq)
3427                 atomic_dec(&srq->cq->ref_count);
3428         res_end_move(dev, slave, RES_SRQ, srqn);
3429
3430         return 0;
3431
3432 ex_abort:
3433         res_abort_move(dev, slave, RES_SRQ, srqn);
3434
3435         return err;
3436 }
3437
3438 int mlx4_QUERY_SRQ_wrapper(struct mlx4_dev *dev, int slave,
3439                            struct mlx4_vhcr *vhcr,
3440                            struct mlx4_cmd_mailbox *inbox,
3441                            struct mlx4_cmd_mailbox *outbox,
3442                            struct mlx4_cmd_info *cmd)
3443 {
3444         int err;
3445         int srqn = vhcr->in_modifier;
3446         struct res_srq *srq;
3447
3448         err = get_res(dev, slave, srqn, RES_SRQ, &srq);
3449         if (err)
3450                 return err;
3451         if (srq->com.from_state != RES_SRQ_HW) {
3452                 err = -EBUSY;
3453                 goto out;
3454         }
3455         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3456 out:
3457         put_res(dev, slave, srqn, RES_SRQ);
3458         return err;
3459 }
3460
3461 int mlx4_ARM_SRQ_wrapper(struct mlx4_dev *dev, int slave,
3462                          struct mlx4_vhcr *vhcr,
3463                          struct mlx4_cmd_mailbox *inbox,
3464                          struct mlx4_cmd_mailbox *outbox,
3465                          struct mlx4_cmd_info *cmd)
3466 {
3467         int err;
3468         int srqn = vhcr->in_modifier;
3469         struct res_srq *srq;
3470
3471         err = get_res(dev, slave, srqn, RES_SRQ, &srq);
3472         if (err)
3473                 return err;
3474
3475         if (srq->com.from_state != RES_SRQ_HW) {
3476                 err = -EBUSY;
3477                 goto out;
3478         }
3479
3480         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3481 out:
3482         put_res(dev, slave, srqn, RES_SRQ);
3483         return err;
3484 }
3485
3486 int mlx4_GEN_QP_wrapper(struct mlx4_dev *dev, int slave,
3487                         struct mlx4_vhcr *vhcr,
3488                         struct mlx4_cmd_mailbox *inbox,
3489                         struct mlx4_cmd_mailbox *outbox,
3490                         struct mlx4_cmd_info *cmd)
3491 {
3492         int err;
3493         int qpn = vhcr->in_modifier & 0x7fffff;
3494         struct res_qp *qp;
3495
3496         err = get_res(dev, slave, qpn, RES_QP, &qp);
3497         if (err)
3498                 return err;
3499         if (qp->com.from_state != RES_QP_HW) {
3500                 err = -EBUSY;
3501                 goto out;
3502         }
3503
3504         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3505 out:
3506         put_res(dev, slave, qpn, RES_QP);
3507         return err;
3508 }
3509
3510 int mlx4_INIT2INIT_QP_wrapper(struct mlx4_dev *dev, int slave,
3511                               struct mlx4_vhcr *vhcr,
3512                               struct mlx4_cmd_mailbox *inbox,
3513                               struct mlx4_cmd_mailbox *outbox,
3514                               struct mlx4_cmd_info *cmd)
3515 {
3516         struct mlx4_qp_context *context = inbox->buf + 8;
3517         adjust_proxy_tun_qkey(dev, vhcr, context);
3518         update_pkey_index(dev, slave, inbox);
3519         return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3520 }
3521
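/* The port bit in sched_queue arrives in the slave's virtual port
 * numbering.  Rewrite it to the physical port for the primary path and,
 * when the transition modifies it, for the alternate path as well.
 */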
3522 static int adjust_qp_sched_queue(struct mlx4_dev *dev, int slave,
3523                                   struct mlx4_qp_context *qpc,
3524                                   struct mlx4_cmd_mailbox *inbox)
3525 {
3526         enum mlx4_qp_optpar optpar = be32_to_cpu(*(__be32 *)inbox->buf);
3527         u8 pri_sched_queue;
3528         int port = mlx4_slave_convert_port(
3529                    dev, slave, (qpc->pri_path.sched_queue >> 6 & 1) + 1) - 1;
3530
3531         if (port < 0)
3532                 return -EINVAL;
3533
3534         pri_sched_queue = (qpc->pri_path.sched_queue & ~(1 << 6)) |
3535                           ((port & 1) << 6);
3536
3537         if (optpar & (MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH | MLX4_QP_OPTPAR_SCHED_QUEUE) ||
3538             qpc->pri_path.sched_queue || mlx4_is_eth(dev, port + 1)) {
3539                 qpc->pri_path.sched_queue = pri_sched_queue;
3540         }
3541
3542         if (optpar & MLX4_QP_OPTPAR_ALT_ADDR_PATH) {
3543                 port = mlx4_slave_convert_port(
3544                                 dev, slave, (qpc->alt_path.sched_queue >> 6 & 1)
3545                                 + 1) - 1;
3546                 if (port < 0)
3547                         return -EINVAL;
3548                 qpc->alt_path.sched_queue =
3549                         (qpc->alt_path.sched_queue & ~(1 << 6)) |
3550                         (port & 1) << 6;
3551         }
3552         return 0;
3553 }
3554
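/* For RoCE QPs, make sure the source-MAC index in the QP context refers
 * to a MAC that is actually registered to this slave on the QP's port.
 */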
3555 static int roce_verify_mac(struct mlx4_dev *dev, int slave,
3556                                 struct mlx4_qp_context *qpc,
3557                                 struct mlx4_cmd_mailbox *inbox)
3558 {
3559         u64 mac;
3560         int port;
3561         u32 ts = (be32_to_cpu(qpc->flags) >> 16) & 0xff;
3562         u8 sched = *(u8 *)(inbox->buf + 64);
3563         u8 smac_ix;
3564
3565         port = (sched >> 6 & 1) + 1;
3566         if (mlx4_is_eth(dev, port) && (ts != MLX4_QP_ST_MLX)) {
3567                 smac_ix = qpc->pri_path.grh_mylmc & 0x7f;
3568                 if (mac_find_smac_ix_in_slave(dev, slave, port, smac_ix, &mac))
3569                         return -ENOENT;
3570         }
3571         return 0;
3572 }
3573
3574 int mlx4_INIT2RTR_QP_wrapper(struct mlx4_dev *dev, int slave,
3575                              struct mlx4_vhcr *vhcr,
3576                              struct mlx4_cmd_mailbox *inbox,
3577                              struct mlx4_cmd_mailbox *outbox,
3578                              struct mlx4_cmd_info *cmd)
3579 {
3580         int err;
3581         struct mlx4_qp_context *qpc = inbox->buf + 8;
3582         int qpn = vhcr->in_modifier & 0x7fffff;
3583         struct res_qp *qp;
3584         u8 orig_sched_queue;
3585         __be32  orig_param3 = qpc->param3;
3586         u8 orig_vlan_control = qpc->pri_path.vlan_control;
3587         u8 orig_fvl_rx = qpc->pri_path.fvl_rx;
3588         u8 orig_pri_path_fl = qpc->pri_path.fl;
3589         u8 orig_vlan_index = qpc->pri_path.vlan_index;
3590         u8 orig_feup = qpc->pri_path.feup;
3591
3592         err = adjust_qp_sched_queue(dev, slave, qpc, inbox);
3593         if (err)
3594                 return err;
3595         err = verify_qp_parameters(dev, vhcr, inbox, QP_TRANS_INIT2RTR, slave);
3596         if (err)
3597                 return err;
3598
3599         if (roce_verify_mac(dev, slave, qpc, inbox))
3600                 return -EINVAL;
3601
3602         update_pkey_index(dev, slave, inbox);
3603         update_gid(dev, inbox, (u8)slave);
3604         adjust_proxy_tun_qkey(dev, vhcr, qpc);
3605         orig_sched_queue = qpc->pri_path.sched_queue;
3606         err = update_vport_qp_param(dev, inbox, slave, qpn);
3607         if (err)
3608                 return err;
3609
3610         err = get_res(dev, slave, qpn, RES_QP, &qp);
3611         if (err)
3612                 return err;
3613         if (qp->com.from_state != RES_QP_HW) {
3614                 err = -EBUSY;
3615                 goto out;
3616         }
3617
3618         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3619 out:
3620         /* If there was no error, save the sched_queue value passed in by
3621          * the VF.  This is essentially the QoS value provided by the VF and
3622          * will be needed if we allow dynamic changes from VST back to VGT.
3623          */
3624         if (!err) {
3625                 qp->sched_queue = orig_sched_queue;
3626                 qp->param3      = orig_param3;
3627                 qp->vlan_control = orig_vlan_control;
3628                 qp->fvl_rx      =  orig_fvl_rx;
3629                 qp->pri_path_fl = orig_pri_path_fl;
3630                 qp->vlan_index  = orig_vlan_index;
3631                 qp->feup        = orig_feup;
3632         }
3633         put_res(dev, slave, qpn, RES_QP);
3634         return err;
3635 }
3636
3637 int mlx4_RTR2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
3638                             struct mlx4_vhcr *vhcr,
3639                             struct mlx4_cmd_mailbox *inbox,
3640                             struct mlx4_cmd_mailbox *outbox,
3641                             struct mlx4_cmd_info *cmd)
3642 {
3643         int err;
3644         struct mlx4_qp_context *context = inbox->buf + 8;
3645
3646         err = adjust_qp_sched_queue(dev, slave, context, inbox);
3647         if (err)
3648                 return err;
3649         err = verify_qp_parameters(dev, vhcr, inbox, QP_TRANS_RTR2RTS, slave);
3650         if (err)
3651                 return err;
3652
3653         update_pkey_index(dev, slave, inbox);
3654         update_gid(dev, inbox, (u8)slave);
3655         adjust_proxy_tun_qkey(dev, vhcr, context);
3656         return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3657 }
3658
3659 int mlx4_RTS2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
3660                             struct mlx4_vhcr *vhcr,
3661                             struct mlx4_cmd_mailbox *inbox,
3662                             struct mlx4_cmd_mailbox *outbox,
3663                             struct mlx4_cmd_info *cmd)
3664 {
3665         int err;
3666         struct mlx4_qp_context *context = inbox->buf + 8;
3667
3668         err = adjust_qp_sched_queue(dev, slave, context, inbox);
3669         if (err)
3670                 return err;
3671         err = verify_qp_parameters(dev, vhcr, inbox, QP_TRANS_RTS2RTS, slave);
3672         if (err)
3673                 return err;
3674
3675         update_pkey_index(dev, slave, inbox);
3676         update_gid(dev, inbox, (u8)slave);
3677         adjust_proxy_tun_qkey(dev, vhcr, context);
3678         return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3679 }
3680
3682 int mlx4_SQERR2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
3683                               struct mlx4_vhcr *vhcr,
3684                               struct mlx4_cmd_mailbox *inbox,
3685                               struct mlx4_cmd_mailbox *outbox,
3686                               struct mlx4_cmd_info *cmd)
3687 {
3688         struct mlx4_qp_context *context = inbox->buf + 8;
3689         int err = adjust_qp_sched_queue(dev, slave, context, inbox);
3690         if (err)
3691                 return err;
3692         adjust_proxy_tun_qkey(dev, vhcr, context);
3693         return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3694 }
3695
3696 int mlx4_SQD2SQD_QP_wrapper(struct mlx4_dev *dev, int slave,
3697                             struct mlx4_vhcr *vhcr,
3698                             struct mlx4_cmd_mailbox *inbox,
3699                             struct mlx4_cmd_mailbox *outbox,
3700                             struct mlx4_cmd_info *cmd)
3701 {
3702         int err;
3703         struct mlx4_qp_context *context = inbox->buf + 8;
3704
3705         err = adjust_qp_sched_queue(dev, slave, context, inbox);
3706         if (err)
3707                 return err;
3708         err = verify_qp_parameters(dev, vhcr, inbox, QP_TRANS_SQD2SQD, slave);
3709         if (err)
3710                 return err;
3711
3712         adjust_proxy_tun_qkey(dev, vhcr, context);
3713         update_gid(dev, inbox, (u8)slave);
3714         update_pkey_index(dev, slave, inbox);
3715         return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3716 }
3717
3718 int mlx4_SQD2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
3719                             struct mlx4_vhcr *vhcr,
3720                             struct mlx4_cmd_mailbox *inbox,
3721                             struct mlx4_cmd_mailbox *outbox,
3722                             struct mlx4_cmd_info *cmd)
3723 {
3724         int err;
3725         struct mlx4_qp_context *context = inbox->buf + 8;
3726
3727         err = adjust_qp_sched_queue(dev, slave, context, inbox);
3728         if (err)
3729                 return err;
3730         err = verify_qp_parameters(dev, vhcr, inbox, QP_TRANS_SQD2RTS, slave);
3731         if (err)
3732                 return err;
3733
3734         adjust_proxy_tun_qkey(dev, vhcr, context);
3735         update_gid(dev, inbox, (u8)slave);
3736         update_pkey_index(dev, slave, inbox);
3737         return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3738 }
3739
3740 int mlx4_2RST_QP_wrapper(struct mlx4_dev *dev, int slave,
3741                          struct mlx4_vhcr *vhcr,
3742                          struct mlx4_cmd_mailbox *inbox,
3743                          struct mlx4_cmd_mailbox *outbox,
3744                          struct mlx4_cmd_info *cmd)
3745 {
3746         int err;
3747         int qpn = vhcr->in_modifier & 0x7fffff;
3748         struct res_qp *qp;
3749
3750         err = qp_res_start_move_to(dev, slave, qpn, RES_QP_MAPPED, &qp, 0);
3751         if (err)
3752                 return err;
3753         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3754         if (err)
3755                 goto ex_abort;
3756
3757         atomic_dec(&qp->mtt->ref_count);
3758         atomic_dec(&qp->rcq->ref_count);
3759         atomic_dec(&qp->scq->ref_count);
3760         if (qp->srq)
3761                 atomic_dec(&qp->srq->ref_count);
3762         res_end_move(dev, slave, RES_QP, qpn);
3763         return 0;
3764
3765 ex_abort:
3766         res_abort_move(dev, slave, RES_QP, qpn);
3767
3768         return err;
3769 }
3770
3771 static struct res_gid *find_gid(struct mlx4_dev *dev, int slave,
3772                                 struct res_qp *rqp, u8 *gid)
3773 {
3774         struct res_gid *res;
3775
3776         list_for_each_entry(res, &rqp->mcg_list, list) {
3777                 if (!memcmp(res->gid, gid, 16))
3778                         return res;
3779         }
3780         return NULL;
3781 }
3782
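/* Record a multicast attachment on the QP's mcg_list so that
 * rem_mcg_res() can validate a later detach and detach_qp() can clean
 * up if the slave goes away without detaching.  Returns -EEXIST if the
 * GID is already attached to this QP.
 */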
3783 static int add_mcg_res(struct mlx4_dev *dev, int slave, struct res_qp *rqp,
3784                        u8 *gid, enum mlx4_protocol prot,
3785                        enum mlx4_steer_type steer, u64 reg_id)
3786 {
3787         struct res_gid *res;
3788         int err;
3789
3790         res = kzalloc(sizeof(*res), GFP_KERNEL);
3791         if (!res)
3792                 return -ENOMEM;
3793
3794         spin_lock_irq(&rqp->mcg_spl);
3795         if (find_gid(dev, slave, rqp, gid)) {
3796                 kfree(res);
3797                 err = -EEXIST;
3798         } else {
3799                 memcpy(res->gid, gid, 16);
3800                 res->prot = prot;
3801                 res->steer = steer;
3802                 res->reg_id = reg_id;
3803                 list_add_tail(&res->list, &rqp->mcg_list);
3804                 err = 0;
3805         }
3806         spin_unlock_irq(&rqp->mcg_spl);
3807
3808         return err;
3809 }
3810
3811 static int rem_mcg_res(struct mlx4_dev *dev, int slave, struct res_qp *rqp,
3812                        u8 *gid, enum mlx4_protocol prot,
3813                        enum mlx4_steer_type steer, u64 *reg_id)
3814 {
3815         struct res_gid *res;
3816         int err;
3817
3818         spin_lock_irq(&rqp->mcg_spl);
3819         res = find_gid(dev, slave, rqp, gid);
3820         if (!res || res->prot != prot || res->steer != steer)
3821                 err = -EINVAL;
3822         else {
3823                 *reg_id = res->reg_id;
3824                 list_del(&res->list);
3825                 kfree(res);
3826                 err = 0;
3827         }
3828         spin_unlock_irq(&rqp->mcg_spl);
3829
3830         return err;
3831 }
3832
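/* Attach a QP to a multicast/steering group on behalf of a slave,
 * translating the slave's port number and dispatching to either the
 * device-managed or the B0 steering implementation.
 */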
3833 static int qp_attach(struct mlx4_dev *dev, int slave, struct mlx4_qp *qp,
3834                      u8 gid[16], int block_loopback, enum mlx4_protocol prot,
3835                      enum mlx4_steer_type type, u64 *reg_id)
3836 {
3837         switch (dev->caps.steering_mode) {
3838         case MLX4_STEERING_MODE_DEVICE_MANAGED: {
3839                 int port = mlx4_slave_convert_port(dev, slave, gid[5]);
3840                 if (port < 0)
3841                         return port;
3842                 return mlx4_trans_to_dmfs_attach(dev, qp, gid, port,
3843                                                 block_loopback, prot,
3844                                                 reg_id);
3845         }
3846         case MLX4_STEERING_MODE_B0:
3847                 if (prot == MLX4_PROT_ETH) {
3848                         int port = mlx4_slave_convert_port(dev, slave, gid[5]);
3849                         if (port < 0)
3850                                 return port;
3851                         gid[5] = port;
3852                 }
3853                 return mlx4_qp_attach_common(dev, qp, gid,
3854                                             block_loopback, prot, type);
3855         default:
3856                 return -EINVAL;
3857         }
3858 }
3859
3860 static int qp_detach(struct mlx4_dev *dev, struct mlx4_qp *qp,
3861                      u8 gid[16], enum mlx4_protocol prot,
3862                      enum mlx4_steer_type type, u64 reg_id)
3863 {
3864         switch (dev->caps.steering_mode) {
3865         case MLX4_STEERING_MODE_DEVICE_MANAGED:
3866                 return mlx4_flow_detach(dev, reg_id);
3867         case MLX4_STEERING_MODE_B0:
3868                 return mlx4_qp_detach_common(dev, qp, gid, prot, type);
3869         default:
3870                 return -EINVAL;
3871         }
3872 }
3873
3874 static int mlx4_adjust_port(struct mlx4_dev *dev, int slave,
3875                             u8 *gid, enum mlx4_protocol prot)
3876 {
3877         int real_port;
3878
3879         if (prot != MLX4_PROT_ETH)
3880                 return 0;
3881
3882         if (dev->caps.steering_mode == MLX4_STEERING_MODE_B0 ||
3883             dev->caps.steering_mode == MLX4_STEERING_MODE_DEVICE_MANAGED) {
3884                 real_port = mlx4_slave_convert_port(dev, slave, gid[5]);
3885                 if (real_port < 0)
3886                         return -EINVAL;
3887                 gid[5] = real_port;
3888         }
3889
3890         return 0;
3891 }
3892
3893 int mlx4_QP_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
3894                                struct mlx4_vhcr *vhcr,
3895                                struct mlx4_cmd_mailbox *inbox,
3896                                struct mlx4_cmd_mailbox *outbox,
3897                                struct mlx4_cmd_info *cmd)
3898 {
3899         struct mlx4_qp qp; /* dummy for calling attach/detach */
3900         u8 *gid = inbox->buf;
3901         enum mlx4_protocol prot = (vhcr->in_modifier >> 28) & 0x7;
3902         int err;
3903         int qpn;
3904         struct res_qp *rqp;
3905         u64 reg_id = 0;
3906         int attach = vhcr->op_modifier;
3907         int block_loopback = vhcr->in_modifier >> 31;
3908         u8 steer_type_mask = 2;
3909         enum mlx4_steer_type type = (gid[7] & steer_type_mask) >> 1;
3910
3911         qpn = vhcr->in_modifier & 0xffffff;
3912         err = get_res(dev, slave, qpn, RES_QP, &rqp);
3913         if (err)
3914                 return err;
3915
3916         qp.qpn = qpn;
3917         if (attach) {
3918                 err = qp_attach(dev, slave, &qp, gid, block_loopback, prot,
3919                                 type, &reg_id);
3920                 if (err) {
3921                         pr_err("Failed to attach rule to qp 0x%x\n", qpn);
3922                         goto ex_put;
3923                 }
3924                 err = add_mcg_res(dev, slave, rqp, gid, prot, type, reg_id);
3925                 if (err)
3926                         goto ex_detach;
3927         } else {
3928                 err = mlx4_adjust_port(dev, slave, gid, prot);
3929                 if (err)
3930                         goto ex_put;
3931
3932                 err = rem_mcg_res(dev, slave, rqp, gid, prot, type, &reg_id);
3933                 if (err)
3934                         goto ex_put;
3935
3936                 err = qp_detach(dev, &qp, gid, prot, type, reg_id);
3937                 if (err)
3938                         pr_err("Failed to detach rule from qp 0x%x reg_id = 0x%llx\n",
3939                                qpn, reg_id);
3940         }
3941         put_res(dev, slave, qpn, RES_QP);
3942         return err;
3943
3944 ex_detach:
3945         qp_detach(dev, &qp, gid, prot, type, reg_id);
3946 ex_put:
3947         put_res(dev, slave, qpn, RES_QP);
3948         return err;
3949 }
3950
3951 /*
3952  * MAC validation for Flow Steering rules.
3953  * A VF may attach rules only with a MAC address that is assigned to it.
3954  */
3955 static int validate_eth_header_mac(int slave, struct _rule_hw *eth_header,
3956                                    struct list_head *rlist)
3957 {
3958         struct mac_res *res, *tmp;
3959         __be64 be_mac;
3960
3961         /* make sure it isn't a multicast or broadcast mac */
3962         if (!is_multicast_ether_addr(eth_header->eth.dst_mac) &&
3963             !is_broadcast_ether_addr(eth_header->eth.dst_mac)) {
3964                 list_for_each_entry_safe(res, tmp, rlist, list) {
3965                         be_mac = cpu_to_be64(res->mac << 16);
3966                         if (ether_addr_equal((u8 *)&be_mac, eth_header->eth.dst_mac))
3967                                 return 0;
3968                 }
3969                 pr_err("MAC %pM doesn't belong to VF %d, Steering rule rejected\n",
3970                        eth_header->eth.dst_mac, slave);
3971                 return -EINVAL;
3972         }
3973         return 0;
3974 }
3975
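/* If the rule matches only a multicast or broadcast destination MAC
 * (the eth header is the last header in the rule), override its
 * priority with the NIC domain priority.
 */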
3976 static void handle_eth_header_mcast_prio(struct mlx4_net_trans_rule_hw_ctrl *ctrl,
3977                                          struct _rule_hw *eth_header)
3978 {
3979         if (is_multicast_ether_addr(eth_header->eth.dst_mac) ||
3980             is_broadcast_ether_addr(eth_header->eth.dst_mac)) {
3981                 struct mlx4_net_trans_rule_hw_eth *eth =
3982                         (struct mlx4_net_trans_rule_hw_eth *)eth_header;
3983                 struct _rule_hw *next_rule = (struct _rule_hw *)(eth + 1);
3984                 bool last_rule = next_rule->size == 0 && next_rule->id == 0 &&
3985                         next_rule->rsvd == 0;
3986
3987                 if (last_rule)
3988                         ctrl->prio = cpu_to_be16(MLX4_DOMAIN_NIC);
3989         }
3990 }
3991
3992 /*
3993  * In case of missing eth header, append eth header with a MAC address
3994  * assigned to the VF.
3995  */
3996 static int add_eth_header(struct mlx4_dev *dev, int slave,
3997                           struct mlx4_cmd_mailbox *inbox,
3998                           struct list_head *rlist, int header_id)
3999 {
4000         struct mac_res *res, *tmp;
4001         u8 port;
4002         struct mlx4_net_trans_rule_hw_ctrl *ctrl;
4003         struct mlx4_net_trans_rule_hw_eth *eth_header;
4004         struct mlx4_net_trans_rule_hw_ipv4 *ip_header;
4005         struct mlx4_net_trans_rule_hw_tcp_udp *l4_header;
4006         __be64 be_mac = 0;
4007         __be64 mac_msk = cpu_to_be64(MLX4_MAC_MASK << 16);
4008
4009         ctrl = (struct mlx4_net_trans_rule_hw_ctrl *)inbox->buf;
4010         port = ctrl->port;
4011         eth_header = (struct mlx4_net_trans_rule_hw_eth *)(ctrl + 1);
4012
4013         /* Make room in the inbox for the eth header */
4014         switch (header_id) {
4015         case MLX4_NET_TRANS_RULE_ID_IPV4:
4016                 ip_header =
4017                         (struct mlx4_net_trans_rule_hw_ipv4 *)(eth_header + 1);
4018                 memmove(ip_header, eth_header,
4019                         sizeof(*ip_header) + sizeof(*l4_header));
4020                 break;
4021         case MLX4_NET_TRANS_RULE_ID_TCP:
4022         case MLX4_NET_TRANS_RULE_ID_UDP:
4023                 l4_header = (struct mlx4_net_trans_rule_hw_tcp_udp *)
4024                             (eth_header + 1);
4025                 memmove(l4_header, eth_header, sizeof(*l4_header));
4026                 break;
4027         default:
4028                 return -EINVAL;
4029         }
4030         list_for_each_entry_safe(res, tmp, rlist, list) {
4031                 if (port == res->port) {
4032                         be_mac = cpu_to_be64(res->mac << 16);
4033                         break;
4034                 }
4035         }
4036         if (!be_mac) {
4037                 pr_err("Failed adding eth header to FS rule: can't find matching MAC for port %d\n",
4038                        port);
4039                 return -EINVAL;
4040         }
4041
4042         memset(eth_header, 0, sizeof(*eth_header));
4043         eth_header->size = sizeof(*eth_header) >> 2;
4044         eth_header->id = cpu_to_be16(__sw_id_hw[MLX4_NET_TRANS_RULE_ID_ETH]);
4045         memcpy(eth_header->dst_mac, &be_mac, ETH_ALEN);
4046         memcpy(eth_header->dst_mac_msk, &mac_msk, ETH_ALEN);
4047
4048         return 0;
4049
4050 }
4051
4052 #define MLX4_UPD_QP_PATH_MASK_SUPPORTED (1ULL << MLX4_UPD_QP_PATH_MASK_MAC_INDEX)
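/* UPDATE_QP on behalf of a slave: only a primary-path MAC-index change
 * is allowed, and the new smac index must resolve to a MAC registered
 * to the slave on the QP's port.
 */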
4053 int mlx4_UPDATE_QP_wrapper(struct mlx4_dev *dev, int slave,
4054                            struct mlx4_vhcr *vhcr,
4055                            struct mlx4_cmd_mailbox *inbox,
4056                            struct mlx4_cmd_mailbox *outbox,
4057                            struct mlx4_cmd_info *cmd_info)
4058 {
4059         int err;
4060         u32 qpn = vhcr->in_modifier & 0xffffff;
4061         struct res_qp *rqp;
4062         u64 mac;
4063         unsigned port;
4064         u64 pri_addr_path_mask;
4065         struct mlx4_update_qp_context *cmd;
4066         int smac_index;
4067
4068         cmd = (struct mlx4_update_qp_context *)inbox->buf;
4069
4070         pri_addr_path_mask = be64_to_cpu(cmd->primary_addr_path_mask);
4071         if (cmd->qp_mask || cmd->secondary_addr_path_mask ||
4072             (pri_addr_path_mask & ~MLX4_UPD_QP_PATH_MASK_SUPPORTED))
4073                 return -EPERM;
4074
4075         /* Just change the smac for the QP */
4076         err = get_res(dev, slave, qpn, RES_QP, &rqp);
4077         if (err) {
4078                 mlx4_err(dev, "Updating qpn 0x%x for slave %d rejected\n", qpn, slave);
4079                 return err;
4080         }
4081
4082         port = (rqp->sched_queue >> 6 & 1) + 1;
4083
4084         if (pri_addr_path_mask & (1ULL << MLX4_UPD_QP_PATH_MASK_MAC_INDEX)) {
4085                 smac_index = cmd->qp_context.pri_path.grh_mylmc;
4086                 err = mac_find_smac_ix_in_slave(dev, slave, port,
4087                                                 smac_index, &mac);
4088
4089                 if (err) {
4090                         mlx4_err(dev, "Failed to update qpn 0x%x, MAC is invalid. smac_ix: %d\n",
4091                                  qpn, smac_index);
4092                         goto err_mac;
4093                 }
4094         }
4095
4096         err = mlx4_cmd(dev, inbox->dma,
4097                        vhcr->in_modifier, 0,
4098                        MLX4_CMD_UPDATE_QP, MLX4_CMD_TIME_CLASS_A,
4099                        MLX4_CMD_NATIVE);
4100         if (err) {
4101                 mlx4_err(dev, "Failed to update qpn 0x%x, command failed\n", qpn);
4102                 goto err_mac;
4103         }
4104
4105 err_mac:
4106         put_res(dev, slave, qpn, RES_QP);
4107         return err;
4108 }
4109
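/* Attach a flow steering rule for a slave: translate the port, make sure
 * a VF only steers traffic destined to one of its own MACs (validating
 * or inserting the L2 header as needed), execute the firmware command
 * and register the resulting rule with the resource tracker.
 */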
4110 int mlx4_QP_FLOW_STEERING_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
4111                                          struct mlx4_vhcr *vhcr,
4112                                          struct mlx4_cmd_mailbox *inbox,
4113                                          struct mlx4_cmd_mailbox *outbox,
4114                                          struct mlx4_cmd_info *cmd)
4115 {
4116
4117         struct mlx4_priv *priv = mlx4_priv(dev);
4118         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
4119         struct list_head *rlist = &tracker->slave_list[slave].res_list[RES_MAC];
4120         int err;
4121         int qpn;
4122         struct res_qp *rqp;
4123         struct mlx4_net_trans_rule_hw_ctrl *ctrl;
4124         struct _rule_hw  *rule_header;
4125         int header_id;
4126
4127         if (dev->caps.steering_mode !=
4128             MLX4_STEERING_MODE_DEVICE_MANAGED)
4129                 return -EOPNOTSUPP;
4130
4131         ctrl = (struct mlx4_net_trans_rule_hw_ctrl *)inbox->buf;
4132         err = mlx4_slave_convert_port(dev, slave, ctrl->port);
4133         if (err <= 0)
4134                 return -EINVAL;
             ctrl->port = err;
4135         qpn = be32_to_cpu(ctrl->qpn) & 0xffffff;
4136         err = get_res(dev, slave, qpn, RES_QP, &rqp);
4137         if (err) {
4138                 pr_err("Steering rule with qpn 0x%x rejected\n", qpn);
4139                 return err;
4140         }
4141         rule_header = (struct _rule_hw *)(ctrl + 1);
4142         header_id = map_hw_to_sw_id(be16_to_cpu(rule_header->id));
4143
4144         if (header_id == MLX4_NET_TRANS_RULE_ID_ETH)
4145                 handle_eth_header_mcast_prio(ctrl, rule_header);
4146
4147         if (slave == dev->caps.function)
4148                 goto execute;
4149
4150         switch (header_id) {
4151         case MLX4_NET_TRANS_RULE_ID_ETH:
4152                 if (validate_eth_header_mac(slave, rule_header, rlist)) {
4153                         err = -EINVAL;
4154                         goto err_put;
4155                 }
4156                 break;
4157         case MLX4_NET_TRANS_RULE_ID_IB:
4158                 break;
4159         case MLX4_NET_TRANS_RULE_ID_IPV4:
4160         case MLX4_NET_TRANS_RULE_ID_TCP:
4161         case MLX4_NET_TRANS_RULE_ID_UDP:
4162                 pr_warn("Can't attach FS rule without L2 headers, adding L2 header\n");
4163                 if (add_eth_header(dev, slave, inbox, rlist, header_id)) {
4164                         err = -EINVAL;
4165                         goto err_put;
4166                 }
4167                 vhcr->in_modifier +=
4168                         sizeof(struct mlx4_net_trans_rule_hw_eth) >> 2;
4169                 break;
4170         default:
4171                 pr_err("Corrupted mailbox\n");
4172                 err = -EINVAL;
4173                 goto err_put;
4174         }
4175
4176 execute:
4177         err = mlx4_cmd_imm(dev, inbox->dma, &vhcr->out_param,
4178                            vhcr->in_modifier, 0,
4179                            MLX4_QP_FLOW_STEERING_ATTACH, MLX4_CMD_TIME_CLASS_A,
4180                            MLX4_CMD_NATIVE);
4181         if (err)
4182                 goto err_put;
4183
4184         err = add_res_range(dev, slave, vhcr->out_param, 1, RES_FS_RULE, qpn);
4185         if (err) {
4186                 mlx4_err(dev, "Failed to add flow steering resources\n");
4187                 /* detach rule*/
4188                 mlx4_cmd(dev, vhcr->out_param, 0, 0,
4189                          MLX4_QP_FLOW_STEERING_DETACH, MLX4_CMD_TIME_CLASS_A,
4190                          MLX4_CMD_NATIVE);
4191                 goto err_put;
4192         }
4193         atomic_inc(&rqp->ref_count);
4194 err_put:
4195         put_res(dev, slave, qpn, RES_QP);
4196         return err;
4197 }
4198
4199 int mlx4_QP_FLOW_STEERING_DETACH_wrapper(struct mlx4_dev *dev, int slave,
4200                                          struct mlx4_vhcr *vhcr,
4201                                          struct mlx4_cmd_mailbox *inbox,
4202                                          struct mlx4_cmd_mailbox *outbox,
4203                                          struct mlx4_cmd_info *cmd)
4204 {
4205         int err;
4206         struct res_qp *rqp;
4207         struct res_fs_rule *rrule;
4208
4209         if (dev->caps.steering_mode !=
4210             MLX4_STEERING_MODE_DEVICE_MANAGED)
4211                 return -EOPNOTSUPP;
4212
4213         err = get_res(dev, slave, vhcr->in_param, RES_FS_RULE, &rrule);
4214         if (err)
4215                 return err;
4216         /* Release the rule from busy state before removal */
4217         put_res(dev, slave, vhcr->in_param, RES_FS_RULE);
4218         err = get_res(dev, slave, rrule->qpn, RES_QP, &rqp);
4219         if (err)
4220                 return err;
4221
4222         err = rem_res_range(dev, slave, vhcr->in_param, 1, RES_FS_RULE, 0);
4223         if (err) {
4224                 mlx4_err(dev, "Failed to remove flow steering resources\n");
4225                 goto out;
4226         }
4227
4228         err = mlx4_cmd(dev, vhcr->in_param, 0, 0,
4229                        MLX4_QP_FLOW_STEERING_DETACH, MLX4_CMD_TIME_CLASS_A,
4230                        MLX4_CMD_NATIVE);
4231         if (!err)
4232                 atomic_dec(&rqp->ref_count);
4233 out:
4234         put_res(dev, slave, rrule->qpn, RES_QP);
4235         return err;
4236 }
4237
4238 enum {
4239         BUSY_MAX_RETRIES = 10
4240 };
4241
4242 int mlx4_QUERY_IF_STAT_wrapper(struct mlx4_dev *dev, int slave,
4243                                struct mlx4_vhcr *vhcr,
4244                                struct mlx4_cmd_mailbox *inbox,
4245                                struct mlx4_cmd_mailbox *outbox,
4246                                struct mlx4_cmd_info *cmd)
4247 {
4248         int err;
4249         int index = vhcr->in_modifier & 0xffff;
4250
4251         err = get_res(dev, slave, index, RES_COUNTER, NULL);
4252         if (err)
4253                 return err;
4254
4255         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
4256         put_res(dev, slave, index, RES_COUNTER);
4257         return err;
4258 }
4259
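/* Detach every multicast group still attached to the QP; used when
 * cleaning up a slave that did not detach its groups itself.
 */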
4260 static void detach_qp(struct mlx4_dev *dev, int slave, struct res_qp *rqp)
4261 {
4262         struct res_gid *rgid;
4263         struct res_gid *tmp;
4264         struct mlx4_qp qp; /* dummy for calling attach/detach */
4265
4266         list_for_each_entry_safe(rgid, tmp, &rqp->mcg_list, list) {
4267                 switch (dev->caps.steering_mode) {
4268                 case MLX4_STEERING_MODE_DEVICE_MANAGED:
4269                         mlx4_flow_detach(dev, rgid->reg_id);
4270                         break;
4271                 case MLX4_STEERING_MODE_B0:
4272                         qp.qpn = rqp->local_qpn;
4273                         (void) mlx4_qp_detach_common(dev, &qp, rgid->gid,
4274                                                      rgid->prot, rgid->steer);
4275                         break;
4276                 }
4277                 list_del(&rgid->list);
4278                 kfree(rgid);
4279         }
4280 }
4281
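/* Mark every resource of @type owned by @slave as busy and flag it for
 * removal so no new references can be taken.  Returns the number of
 * resources that were already busy and could not be claimed yet.
 */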
4282 static int _move_all_busy(struct mlx4_dev *dev, int slave,
4283                           enum mlx4_resource type, int print)
4284 {
4285         struct mlx4_priv *priv = mlx4_priv(dev);
4286         struct mlx4_resource_tracker *tracker =
4287                 &priv->mfunc.master.res_tracker;
4288         struct list_head *rlist = &tracker->slave_list[slave].res_list[type];
4289         struct res_common *r;
4290         struct res_common *tmp;
4291         int busy;
4292
4293         busy = 0;
4294         spin_lock_irq(mlx4_tlock(dev));
4295         list_for_each_entry_safe(r, tmp, rlist, list) {
4296                 if (r->owner == slave) {
4297                         if (!r->removing) {
4298                                 if (r->state == RES_ANY_BUSY) {
4299                                         if (print)
4300                                                 mlx4_dbg(dev,
4301                                                          "%s id 0x%llx is busy\n",
4302                                                           resource_str(type),
4303                                                           r->res_id);
4304                                         ++busy;
4305                                 } else {
4306                                         r->from_state = r->state;
4307                                         r->state = RES_ANY_BUSY;
4308                                         r->removing = 1;
4309                                 }
4310                         }
4311                 }
4312         }
4313         spin_unlock_irq(mlx4_tlock(dev));
4314
4315         return busy;
4316 }
4317
4318 static int move_all_busy(struct mlx4_dev *dev, int slave,
4319                          enum mlx4_resource type)
4320 {
4321         unsigned long begin;
4322         int busy;
4323
4324         begin = jiffies;
4325         do {
4326                 busy = _move_all_busy(dev, slave, type, 0);
4327                 if (time_after(jiffies, begin + 5 * HZ))
4328                         break;
4329                 if (busy)
4330                         cond_resched();
4331         } while (busy);
4332
4333         if (busy)
4334                 busy = _move_all_busy(dev, slave, type, 1);
4335
4336         return busy;
4337 }
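
/* Force-release all QPs still held by a slave: detach their multicast
 * groups, move them back through the MAPPED and RESERVED states, and
 * free their ICM and QP-number ranges.
 */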
4338 static void rem_slave_qps(struct mlx4_dev *dev, int slave)
4339 {
4340         struct mlx4_priv *priv = mlx4_priv(dev);
4341         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
4342         struct list_head *qp_list =
4343                 &tracker->slave_list[slave].res_list[RES_QP];
4344         struct res_qp *qp;
4345         struct res_qp *tmp;
4346         int state;
4347         u64 in_param;
4348         int qpn;
4349         int err;
4350
4351         err = move_all_busy(dev, slave, RES_QP);
4352         if (err)
4353                 mlx4_warn(dev, "rem_slave_qps: Could not move all qps to busy for slave %d\n",
4354                           slave);
4355
4356         spin_lock_irq(mlx4_tlock(dev));
4357         list_for_each_entry_safe(qp, tmp, qp_list, com.list) {
4358                 spin_unlock_irq(mlx4_tlock(dev));
4359                 if (qp->com.owner == slave) {
4360                         qpn = qp->com.res_id;
4361                         detach_qp(dev, slave, qp);
4362                         state = qp->com.from_state;
4363                         while (state != 0) {
4364                                 switch (state) {
4365                                 case RES_QP_RESERVED:
4366                                         spin_lock_irq(mlx4_tlock(dev));
4367                                         rb_erase(&qp->com.node,
4368                                                  &tracker->res_tree[RES_QP]);
4369                                         list_del(&qp->com.list);
4370                                         spin_unlock_irq(mlx4_tlock(dev));
4371                                         if (!valid_reserved(dev, slave, qpn)) {
4372                                                 __mlx4_qp_release_range(dev, qpn, 1);
4373                                                 mlx4_release_resource(dev, slave,
4374                                                                       RES_QP, 1, 0);
4375                                         }
4376                                         kfree(qp);
4377                                         state = 0;
4378                                         break;
4379                                 case RES_QP_MAPPED:
4380                                         if (!valid_reserved(dev, slave, qpn))
4381                                                 __mlx4_qp_free_icm(dev, qpn);
4382                                         state = RES_QP_RESERVED;
4383                                         break;
4384                                 case RES_QP_HW:
4385                                         in_param = slave;
4386                                         err = mlx4_cmd(dev, in_param,
4387                                                        qp->local_qpn, 2,
4388                                                        MLX4_CMD_2RST_QP,
4389                                                        MLX4_CMD_TIME_CLASS_A,
4390                                                        MLX4_CMD_NATIVE);
4391                                         if (err)
4392                                                 mlx4_dbg(dev, "rem_slave_qps: failed to move slave %d qpn %d to reset\n",
4393                                                          slave, qp->local_qpn);
4394                                         atomic_dec(&qp->rcq->ref_count);
4395                                         atomic_dec(&qp->scq->ref_count);
4396                                         atomic_dec(&qp->mtt->ref_count);
4397                                         if (qp->srq)
4398                                                 atomic_dec(&qp->srq->ref_count);
4399                                         state = RES_QP_MAPPED;
4400                                         break;
4401                                 default:
4402                                         state = 0;
4403                                 }
4404                         }
4405                 }
4406                 spin_lock_irq(mlx4_tlock(dev));
4407         }
4408         spin_unlock_irq(mlx4_tlock(dev));
4409 }
4410
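/*
 * Tear down every SRQ still owned by @slave: SRQs still in hardware
 * ownership are returned to software with HW2SW_SRQ and their MTT/CQ
 * references dropped, then the ICM, the per-slave SRQ quota and the
 * tracker entry are freed.
 */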
4411 static void rem_slave_srqs(struct mlx4_dev *dev, int slave)
4412 {
4413         struct mlx4_priv *priv = mlx4_priv(dev);
4414         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
4415         struct list_head *srq_list =
4416                 &tracker->slave_list[slave].res_list[RES_SRQ];
4417         struct res_srq *srq;
4418         struct res_srq *tmp;
4419         int state;
4420         u64 in_param;
4422         int srqn;
4423         int err;
4424
4425         err = move_all_busy(dev, slave, RES_SRQ);
4426         if (err)
4427                 mlx4_warn(dev, "rem_slave_srqs: Could not move all srqs to busy for slave %d\n",
4428                           slave);
4429
4430         spin_lock_irq(mlx4_tlock(dev));
4431         list_for_each_entry_safe(srq, tmp, srq_list, com.list) {
4432                 spin_unlock_irq(mlx4_tlock(dev));
4433                 if (srq->com.owner == slave) {
4434                         srqn = srq->com.res_id;
4435                         state = srq->com.from_state;
4436                         while (state != 0) {
4437                                 switch (state) {
4438                                 case RES_SRQ_ALLOCATED:
4439                                         __mlx4_srq_free_icm(dev, srqn);
4440                                         spin_lock_irq(mlx4_tlock(dev));
4441                                         rb_erase(&srq->com.node,
4442                                                  &tracker->res_tree[RES_SRQ]);
4443                                         list_del(&srq->com.list);
4444                                         spin_unlock_irq(mlx4_tlock(dev));
4445                                         mlx4_release_resource(dev, slave,
4446                                                               RES_SRQ, 1, 0);
4447                                         kfree(srq);
4448                                         state = 0;
4449                                         break;
4450
4451                                 case RES_SRQ_HW:
4452                                         in_param = slave;
4453                                         err = mlx4_cmd(dev, in_param, srqn, 1,
4454                                                        MLX4_CMD_HW2SW_SRQ,
4455                                                        MLX4_CMD_TIME_CLASS_A,
4456                                                        MLX4_CMD_NATIVE);
4457                                         if (err)
4458                                                 mlx4_dbg(dev, "rem_slave_srqs: failed to move slave %d srq %d to SW ownership\n",
4459                                                          slave, srqn);
4460
4461                                         atomic_dec(&srq->mtt->ref_count);
4462                                         if (srq->cq)
4463                                                 atomic_dec(&srq->cq->ref_count);
4464                                         state = RES_SRQ_ALLOCATED;
4465                                         break;
4466
4467                                 default:
4468                                         state = 0;
4469                                 }
4470                         }
4471                 }
4472                 spin_lock_irq(mlx4_tlock(dev));
4473         }
4474         spin_unlock_irq(mlx4_tlock(dev));
4475 }
4476
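/*
 * Tear down every CQ still owned by @slave.  CQs with a non-zero
 * ref_count are skipped; when called from
 * mlx4_delete_all_resources_for_slave() the preceding QP and SRQ
 * cleanup has already dropped those references.  HW2SW_CQ returns the
 * CQ to software ownership before its ICM, quota and tracker entry
 * are freed.
 */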
4477 static void rem_slave_cqs(struct mlx4_dev *dev, int slave)
4478 {
4479         struct mlx4_priv *priv = mlx4_priv(dev);
4480         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
4481         struct list_head *cq_list =
4482                 &tracker->slave_list[slave].res_list[RES_CQ];
4483         struct res_cq *cq;
4484         struct res_cq *tmp;
4485         int state;
4486         u64 in_param;
4488         int cqn;
4489         int err;
4490
4491         err = move_all_busy(dev, slave, RES_CQ);
4492         if (err)
4493                 mlx4_warn(dev, "rem_slave_cqs: Could not move all cqs to busy for slave %d\n",
4494                           slave);
4495
4496         spin_lock_irq(mlx4_tlock(dev));
4497         list_for_each_entry_safe(cq, tmp, cq_list, com.list) {
4498                 spin_unlock_irq(mlx4_tlock(dev));
4499                 if (cq->com.owner == slave && !atomic_read(&cq->ref_count)) {
4500                         cqn = cq->com.res_id;
4501                         state = cq->com.from_state;
4502                         while (state != 0) {
4503                                 switch (state) {
4504                                 case RES_CQ_ALLOCATED:
4505                                         __mlx4_cq_free_icm(dev, cqn);
4506                                         spin_lock_irq(mlx4_tlock(dev));
4507                                         rb_erase(&cq->com.node,
4508                                                  &tracker->res_tree[RES_CQ]);
4509                                         list_del(&cq->com.list);
4510                                         spin_unlock_irq(mlx4_tlock(dev));
4511                                         mlx4_release_resource(dev, slave,
4512                                                               RES_CQ, 1, 0);
4513                                         kfree(cq);
4514                                         state = 0;
4515                                         break;
4516
4517                                 case RES_CQ_HW:
4518                                         in_param = slave;
4519                                         err = mlx4_cmd(dev, in_param, cqn, 1,
4520                                                        MLX4_CMD_HW2SW_CQ,
4521                                                        MLX4_CMD_TIME_CLASS_A,
4522                                                        MLX4_CMD_NATIVE);
4523                                         if (err)
4524                                                 mlx4_dbg(dev, "rem_slave_cqs: failed to move slave %d cq %d to SW ownership\n",
4525                                                          slave, cqn);
4526                                         atomic_dec(&cq->mtt->ref_count);
4527                                         state = RES_CQ_ALLOCATED;
4528                                         break;
4529
4530                                 default:
4531                                         state = 0;
4532                                 }
4533                         }
4534                 }
4535                 spin_lock_irq(mlx4_tlock(dev));
4536         }
4537         spin_unlock_irq(mlx4_tlock(dev));
4538 }
4539
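/*
 * Tear down every MPT (memory region/window) still owned by @slave:
 * HW2SW_MPT for MPTs still in hardware ownership, then free the ICM,
 * drop the reference on the underlying MTT range and release the MPT
 * key, quota and tracker entry.
 */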
4540 static void rem_slave_mrs(struct mlx4_dev *dev, int slave)
4541 {
4542         struct mlx4_priv *priv = mlx4_priv(dev);
4543         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
4544         struct list_head *mpt_list =
4545                 &tracker->slave_list[slave].res_list[RES_MPT];
4546         struct res_mpt *mpt;
4547         struct res_mpt *tmp;
4548         int state;
4549         u64 in_param;
4551         int mptn;
4552         int err;
4553
4554         err = move_all_busy(dev, slave, RES_MPT);
4555         if (err)
4556                 mlx4_warn(dev, "rem_slave_mrs: Could not move all mpts to busy for slave %d\n",
4557                           slave);
4558
4559         spin_lock_irq(mlx4_tlock(dev));
4560         list_for_each_entry_safe(mpt, tmp, mpt_list, com.list) {
4561                 spin_unlock_irq(mlx4_tlock(dev));
4562                 if (mpt->com.owner == slave) {
4563                         mptn = mpt->com.res_id;
4564                         state = mpt->com.from_state;
4565                         while (state != 0) {
4566                                 switch (state) {
4567                                 case RES_MPT_RESERVED:
4568                                         __mlx4_mpt_release(dev, mpt->key);
4569                                         spin_lock_irq(mlx4_tlock(dev));
4570                                         rb_erase(&mpt->com.node,
4571                                                  &tracker->res_tree[RES_MPT]);
4572                                         list_del(&mpt->com.list);
4573                                         spin_unlock_irq(mlx4_tlock(dev));
4574                                         mlx4_release_resource(dev, slave,
4575                                                               RES_MPT, 1, 0);
4576                                         kfree(mpt);
4577                                         state = 0;
4578                                         break;
4579
4580                                 case RES_MPT_MAPPED:
4581                                         __mlx4_mpt_free_icm(dev, mpt->key);
4582                                         state = RES_MPT_RESERVED;
4583                                         break;
4584
4585                                 case RES_MPT_HW:
4586                                         in_param = slave;
4587                                         err = mlx4_cmd(dev, in_param, mptn, 0,
4588                                                      MLX4_CMD_HW2SW_MPT,
4589                                                      MLX4_CMD_TIME_CLASS_A,
4590                                                      MLX4_CMD_NATIVE);
4591                                         if (err)
4592                                                 mlx4_dbg(dev, "rem_slave_mrs: failed to move slave %d mpt %d to SW ownership\n",
4593                                                          slave, mptn);
4594                                         if (mpt->mtt)
4595                                                 atomic_dec(&mpt->mtt->ref_count);
4596                                         state = RES_MPT_MAPPED;
4597                                         break;
4598                                 default:
4599                                         state = 0;
4600                                 }
4601                         }
4602                 }
4603                 spin_lock_irq(mlx4_tlock(dev));
4604         }
4605         spin_unlock_irq(mlx4_tlock(dev));
4606 }
4607
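/*
 * Tear down every MTT range still owned by @slave.  This runs after
 * the QP, SRQ, CQ and MPT cleanup, so the references those resources
 * held on their MTT ranges have already been dropped; each range
 * returns 1 << order entries to the slave's quota.
 */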
4608 static void rem_slave_mtts(struct mlx4_dev *dev, int slave)
4609 {
4610         struct mlx4_priv *priv = mlx4_priv(dev);
4611         struct mlx4_resource_tracker *tracker =
4612                 &priv->mfunc.master.res_tracker;
4613         struct list_head *mtt_list =
4614                 &tracker->slave_list[slave].res_list[RES_MTT];
4615         struct res_mtt *mtt;
4616         struct res_mtt *tmp;
4617         int state;
4619         int base;
4620         int err;
4621
4622         err = move_all_busy(dev, slave, RES_MTT);
4623         if (err)
4624                 mlx4_warn(dev, "rem_slave_mtts: Could not move all mtts to busy for slave %d\n",
4625                           slave);
4626
4627         spin_lock_irq(mlx4_tlock(dev));
4628         list_for_each_entry_safe(mtt, tmp, mtt_list, com.list) {
4629                 spin_unlock_irq(mlx4_tlock(dev));
4630                 if (mtt->com.owner == slave) {
4631                         base = mtt->com.res_id;
4632                         state = mtt->com.from_state;
4633                         while (state != 0) {
4634                                 switch (state) {
4635                                 case RES_MTT_ALLOCATED:
4636                                         __mlx4_free_mtt_range(dev, base,
4637                                                               mtt->order);
4638                                         spin_lock_irq(mlx4_tlock(dev));
4639                                         rb_erase(&mtt->com.node,
4640                                                  &tracker->res_tree[RES_MTT]);
4641                                         list_del(&mtt->com.list);
4642                                         spin_unlock_irq(mlx4_tlock(dev));
4643                                         mlx4_release_resource(dev, slave, RES_MTT,
4644                                                               1 << mtt->order, 0);
4645                                         kfree(mtt);
4646                                         state = 0;
4647                                         break;
4648
4649                                 default:
4650                                         state = 0;
4651                                 }
4652                         }
4653                 }
4654                 spin_lock_irq(mlx4_tlock(dev));
4655         }
4656         spin_unlock_irq(mlx4_tlock(dev));
4657 }
4658
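/*
 * Tear down every flow steering rule still owned by @slave: detach it
 * from hardware with QP_FLOW_STEERING_DETACH and free the tracker
 * entry.
 */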
4659 static void rem_slave_fs_rule(struct mlx4_dev *dev, int slave)
4660 {
4661         struct mlx4_priv *priv = mlx4_priv(dev);
4662         struct mlx4_resource_tracker *tracker =
4663                 &priv->mfunc.master.res_tracker;
4664         struct list_head *fs_rule_list =
4665                 &tracker->slave_list[slave].res_list[RES_FS_RULE];
4666         struct res_fs_rule *fs_rule;
4667         struct res_fs_rule *tmp;
4668         int state;
4669         u64 base;
4670         int err;
4671
4672         err = move_all_busy(dev, slave, RES_FS_RULE);
4673         if (err)
4674                 mlx4_warn(dev, "rem_slave_fs_rule: Could not move all fs rules to busy for slave %d\n",
4675                           slave);
4676
4677         spin_lock_irq(mlx4_tlock(dev));
4678         list_for_each_entry_safe(fs_rule, tmp, fs_rule_list, com.list) {
4679                 spin_unlock_irq(mlx4_tlock(dev));
4680                 if (fs_rule->com.owner == slave) {
4681                         base = fs_rule->com.res_id;
4682                         state = fs_rule->com.from_state;
4683                         while (state != 0) {
4684                                 switch (state) {
4685                                 case RES_FS_RULE_ALLOCATED:
4686                                         /* detach rule */
4687                                         err = mlx4_cmd(dev, base, 0, 0,
4688                                                        MLX4_QP_FLOW_STEERING_DETACH,
4689                                                        MLX4_CMD_TIME_CLASS_A,
4690                                                        MLX4_CMD_NATIVE);
4691
4692                                         spin_lock_irq(mlx4_tlock(dev));
4693                                         rb_erase(&fs_rule->com.node,
4694                                                  &tracker->res_tree[RES_FS_RULE]);
4695                                         list_del(&fs_rule->com.list);
4696                                         spin_unlock_irq(mlx4_tlock(dev));
4697                                         kfree(fs_rule);
4698                                         state = 0;
4699                                         break;
4700
4701                                 default:
4702                                         state = 0;
4703                                 }
4704                         }
4705                 }
4706                 spin_lock_irq(mlx4_tlock(dev));
4707         }
4708         spin_unlock_irq(mlx4_tlock(dev));
4709 }
4710
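/*
 * Tear down every EQ still owned by @slave: EQs still in hardware
 * ownership are returned to software with HW2SW_EQ (the low 10 bits
 * of the tracker id are the EQ number passed to firmware), their MTT
 * reference is dropped, and the tracker entry is freed.
 */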
4711 static void rem_slave_eqs(struct mlx4_dev *dev, int slave)
4712 {
4713         struct mlx4_priv *priv = mlx4_priv(dev);
4714         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
4715         struct list_head *eq_list =
4716                 &tracker->slave_list[slave].res_list[RES_EQ];
4717         struct res_eq *eq;
4718         struct res_eq *tmp;
4719         int err;
4720         int state;
4722         int eqn;
4723
4724         err = move_all_busy(dev, slave, RES_EQ);
4725         if (err)
4726                 mlx4_warn(dev, "rem_slave_eqs: Could not move all eqs to busy for slave %d\n",
4727                           slave);
4728
4729         spin_lock_irq(mlx4_tlock(dev));
4730         list_for_each_entry_safe(eq, tmp, eq_list, com.list) {
4731                 spin_unlock_irq(mlx4_tlock(dev));
4732                 if (eq->com.owner == slave) {
4733                         eqn = eq->com.res_id;
4734                         state = eq->com.from_state;
4735                         while (state != 0) {
4736                                 switch (state) {
4737                                 case RES_EQ_RESERVED:
4738                                         spin_lock_irq(mlx4_tlock(dev));
4739                                         rb_erase(&eq->com.node,
4740                                                  &tracker->res_tree[RES_EQ]);
4741                                         list_del(&eq->com.list);
4742                                         spin_unlock_irq(mlx4_tlock(dev));
4743                                         kfree(eq);
4744                                         state = 0;
4745                                         break;
4746
4747                                 case RES_EQ_HW:
4748                                         err = mlx4_cmd(dev, slave, eqn & 0x3ff,
4749                                                        1, MLX4_CMD_HW2SW_EQ,
4750                                                        MLX4_CMD_TIME_CLASS_A,
4751                                                        MLX4_CMD_NATIVE);
4752                                         if (err)
4753                                                 mlx4_dbg(dev, "rem_slave_eqs: failed to move slave %d eqs %d to SW ownership\n",
4754                                                          slave, eqn & 0x3ff);
4755                                         atomic_dec(&eq->mtt->ref_count);
4756                                         state = RES_EQ_RESERVED;
4757                                         break;
4758
4759                                 default:
4760                                         state = 0;
4761                                 }
4762                         }
4763                 }
4764                 spin_lock_irq(mlx4_tlock(dev));
4765         }
4766         spin_unlock_irq(mlx4_tlock(dev));
4767 }
4768
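/*
 * Free every counter still owned by @slave and return it to the
 * slave's counter quota.  Counters need no multi-state unwinding, so
 * the whole walk stays under the tracker lock.
 */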
4769 static void rem_slave_counters(struct mlx4_dev *dev, int slave)
4770 {
4771         struct mlx4_priv *priv = mlx4_priv(dev);
4772         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
4773         struct list_head *counter_list =
4774                 &tracker->slave_list[slave].res_list[RES_COUNTER];
4775         struct res_counter *counter;
4776         struct res_counter *tmp;
4777         int err;
4778         int index;
4779
4780         err = move_all_busy(dev, slave, RES_COUNTER);
4781         if (err)
4782                 mlx4_warn(dev, "rem_slave_counters: Could not move all counters to busy for slave %d\n",
4783                           slave);
4784
4785         spin_lock_irq(mlx4_tlock(dev));
4786         list_for_each_entry_safe(counter, tmp, counter_list, com.list) {
4787                 if (counter->com.owner == slave) {
4788                         index = counter->com.res_id;
4789                         rb_erase(&counter->com.node,
4790                                  &tracker->res_tree[RES_COUNTER]);
4791                         list_del(&counter->com.list);
4792                         kfree(counter);
4793                         __mlx4_counter_free(dev, index);
4794                         mlx4_release_resource(dev, slave, RES_COUNTER, 1, 0);
4795                 }
4796         }
4797         spin_unlock_irq(mlx4_tlock(dev));
4798 }
4799
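/*
 * Free every XRC domain number still owned by @slave and remove its
 * tracker entry.
 */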
4800 static void rem_slave_xrcdns(struct mlx4_dev *dev, int slave)
4801 {
4802         struct mlx4_priv *priv = mlx4_priv(dev);
4803         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
4804         struct list_head *xrcdn_list =
4805                 &tracker->slave_list[slave].res_list[RES_XRCD];
4806         struct res_xrcdn *xrcd;
4807         struct res_xrcdn *tmp;
4808         int err;
4809         int xrcdn;
4810
4811         err = move_all_busy(dev, slave, RES_XRCD);
4812         if (err)
4813                 mlx4_warn(dev, "rem_slave_xrcdns: Could not move all xrcdns to busy for slave %d\n",
4814                           slave);
4815
4816         spin_lock_irq(mlx4_tlock(dev));
4817         list_for_each_entry_safe(xrcd, tmp, xrcdn_list, com.list) {
4818                 if (xrcd->com.owner == slave) {
4819                         xrcdn = xrcd->com.res_id;
4820                         rb_erase(&xrcd->com.node, &tracker->res_tree[RES_XRCD]);
4821                         list_del(&xrcd->com.list);
4822                         kfree(xrcd);
4823                         __mlx4_xrcd_free(dev, xrcdn);
4824                 }
4825         }
4826         spin_unlock_irq(mlx4_tlock(dev));
4827 }
4828
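/*
 * Reclaim everything @slave still owns, typically when the slave
 * function is reset or removed.  The whole sweep runs under the
 * slave's resource-tracker mutex, and the ordering matters: VLANs,
 * MACs, flow rules and QPs first, then the SRQs, CQs and MPTs they
 * referenced, then EQs, and finally the MTT ranges and the simple
 * resources (counters, XRC domains) nothing else depends on.
 */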
4829 void mlx4_delete_all_resources_for_slave(struct mlx4_dev *dev, int slave)
4830 {
4831         struct mlx4_priv *priv = mlx4_priv(dev);
4832         mlx4_reset_roce_gids(dev, slave);
4833         mutex_lock(&priv->mfunc.master.res_tracker.slave_list[slave].mutex);
4834         rem_slave_vlans(dev, slave);
4835         rem_slave_macs(dev, slave);
4836         rem_slave_fs_rule(dev, slave);
4837         rem_slave_qps(dev, slave);
4838         rem_slave_srqs(dev, slave);
4839         rem_slave_cqs(dev, slave);
4840         rem_slave_mrs(dev, slave);
4841         rem_slave_eqs(dev, slave);
4842         rem_slave_mtts(dev, slave);
4843         rem_slave_counters(dev, slave);
4844         rem_slave_xrcdns(dev, slave);
4845         mutex_unlock(&priv->mfunc.master.res_tracker.slave_list[slave].mutex);
4846 }
4847
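/*
 * Deferred work that pushes an administratively requested VLAN/QoS
 * change (e.g. a VST configuration made through the PF) onto all of a
 * VF's existing Ethernet QPs.  Every QP owned by the slave that has
 * reached RES_QP_HW on the affected port (and is neither a reserved
 * nor an RSS QP) gets an UPDATE_QP command rewriting its primary
 * path: either restoring the QP's own saved VLAN settings (VGT) or
 * forcing the new vlan_index, vlan_control policy, QoS and scheduling
 * queue.
 */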
4848 void mlx4_vf_immed_vlan_work_handler(struct work_struct *_work)
4849 {
4850         struct mlx4_vf_immed_vlan_work *work =
4851                 container_of(_work, struct mlx4_vf_immed_vlan_work, work);
4852         struct mlx4_cmd_mailbox *mailbox;
4853         struct mlx4_update_qp_context *upd_context;
4854         struct mlx4_dev *dev = &work->priv->dev;
4855         struct mlx4_resource_tracker *tracker =
4856                 &work->priv->mfunc.master.res_tracker;
4857         struct list_head *qp_list =
4858                 &tracker->slave_list[work->slave].res_list[RES_QP];
4859         struct res_qp *qp;
4860         struct res_qp *tmp;
4861         u64 qp_path_mask_vlan_ctrl =
4862                        ((1ULL << MLX4_UPD_QP_PATH_MASK_ETH_TX_BLOCK_UNTAGGED) |
4863                        (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_TX_BLOCK_1P) |
4864                        (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_TX_BLOCK_TAGGED) |
4865                        (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_RX_BLOCK_UNTAGGED) |
4866                        (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_RX_BLOCK_1P) |
4867                        (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_RX_BLOCK_TAGGED));
4868
4869         u64 qp_path_mask = ((1ULL << MLX4_UPD_QP_PATH_MASK_VLAN_INDEX) |
4870                        (1ULL << MLX4_UPD_QP_PATH_MASK_FVL) |
4871                        (1ULL << MLX4_UPD_QP_PATH_MASK_CV) |
4872                        (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_HIDE_CQE_VLAN) |
4873                        (1ULL << MLX4_UPD_QP_PATH_MASK_FEUP) |
4874                        (1ULL << MLX4_UPD_QP_PATH_MASK_FVL_RX) |
4875                        (1ULL << MLX4_UPD_QP_PATH_MASK_SCHED_QUEUE));
4876
4877         int err;
4878         int port, errors = 0;
4879         u8 vlan_control;
4880
4881         if (mlx4_is_slave(dev)) {
4882                 mlx4_warn(dev, "Trying to update QP on a slave device (slave %d)\n",
4883                           work->slave);
4884                 goto out;
4885         }
4886
4887         mailbox = mlx4_alloc_cmd_mailbox(dev);
4888         if (IS_ERR(mailbox))
4889                 goto out;
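        /*
         * Select the vlan_control policy written into each QP: block
         * all traffic when the VF link is administratively disabled;
         * with no forced VLAN (vlan_id 0), block only tagged frames
         * in both directions; with a forced VLAN, block guest-tagged
         * TX plus untagged and priority-tagged RX.
         */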
4890         if (work->flags & MLX4_VF_IMMED_VLAN_FLAG_LINK_DISABLE) /* block all */
4891                 vlan_control = MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
4892                         MLX4_VLAN_CTRL_ETH_TX_BLOCK_PRIO_TAGGED |
4893                         MLX4_VLAN_CTRL_ETH_TX_BLOCK_UNTAGGED |
4894                         MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED |
4895                         MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED |
4896                         MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED;
4897         else if (!work->vlan_id)
4898                 vlan_control = MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
4899                         MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED;
4900         else
4901                 vlan_control = MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
4902                         MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED |
4903                         MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED;
4904
4905         upd_context = mailbox->buf;
4906         upd_context->qp_mask = cpu_to_be64(1ULL << MLX4_UPD_QP_MASK_VSD);
4907
4908         spin_lock_irq(mlx4_tlock(dev));
4909         list_for_each_entry_safe(qp, tmp, qp_list, com.list) {
4910                 spin_unlock_irq(mlx4_tlock(dev));
4911                 if (qp->com.owner == work->slave) {
4912                         if (qp->com.from_state != RES_QP_HW ||
4913                             !qp->sched_queue ||  /* no INIT2RTR trans yet */
4914                             mlx4_is_qp_reserved(dev, qp->local_qpn) ||
4915                             qp->qpc_flags & (1 << MLX4_RSS_QPC_FLAG_OFFSET)) {
4916                                 spin_lock_irq(mlx4_tlock(dev));
4917                                 continue;
4918                         }
4919                         port = (qp->sched_queue >> 6 & 1) + 1;
4920                         if (port != work->port) {
4921                                 spin_lock_irq(mlx4_tlock(dev));
4922                                 continue;
4923                         }
4924                         if (MLX4_QP_ST_RC == ((qp->qpc_flags >> 16) & 0xff))
4925                                 upd_context->primary_addr_path_mask = cpu_to_be64(qp_path_mask);
4926                         else
4927                                 upd_context->primary_addr_path_mask =
4928                                         cpu_to_be64(qp_path_mask | qp_path_mask_vlan_ctrl);
4929                         if (work->vlan_id == MLX4_VGT) {
4930                                 upd_context->qp_context.param3 = qp->param3;
4931                                 upd_context->qp_context.pri_path.vlan_control = qp->vlan_control;
4932                                 upd_context->qp_context.pri_path.fvl_rx = qp->fvl_rx;
4933                                 upd_context->qp_context.pri_path.vlan_index = qp->vlan_index;
4934                                 upd_context->qp_context.pri_path.fl = qp->pri_path_fl;
4935                                 upd_context->qp_context.pri_path.feup = qp->feup;
4936                                 upd_context->qp_context.pri_path.sched_queue =
4937                                         qp->sched_queue;
4938                         } else {
4939                                 upd_context->qp_context.param3 = qp->param3 & ~cpu_to_be32(MLX4_STRIP_VLAN);
4940                                 upd_context->qp_context.pri_path.vlan_control = vlan_control;
4941                                 upd_context->qp_context.pri_path.vlan_index = work->vlan_ix;
4942                                 upd_context->qp_context.pri_path.fvl_rx =
4943                                         qp->fvl_rx | MLX4_FVL_RX_FORCE_ETH_VLAN;
4944                                 upd_context->qp_context.pri_path.fl =
4945                                         qp->pri_path_fl | MLX4_FL_CV | MLX4_FL_ETH_HIDE_CQE_VLAN;
4946                                 upd_context->qp_context.pri_path.feup =
4947                                         qp->feup | MLX4_FEUP_FORCE_ETH_UP | MLX4_FVL_FORCE_ETH_VLAN;
4948                                 upd_context->qp_context.pri_path.sched_queue =
4949                                         qp->sched_queue & 0xC7;
4950                                 upd_context->qp_context.pri_path.sched_queue |=
4951                                         ((work->qos & 0x7) << 3);
4952                                 upd_context->qp_mask |=
4953                                         cpu_to_be64(1ULL <<
4954                                                     MLX4_UPD_QP_MASK_QOS_VPP);
4955                                 upd_context->qp_context.qos_vport =
4956                                         work->qos_vport;
4957                         }
4958
4959                         err = mlx4_cmd(dev, mailbox->dma,
4960                                        qp->local_qpn & 0xffffff,
4961                                        0, MLX4_CMD_UPDATE_QP,
4962                                        MLX4_CMD_TIME_CLASS_C, MLX4_CMD_NATIVE);
4963                         if (err) {
4964                                 mlx4_info(dev, "UPDATE_QP failed for slave %d, port %d, qpn %d (%d)\n",
4965                                           work->slave, port, qp->local_qpn, err);
4966                                 errors++;
4967                         }
4968                 }
4969                 spin_lock_irq(mlx4_tlock(dev));
4970         }
4971         spin_unlock_irq(mlx4_tlock(dev));
4972         mlx4_free_cmd_mailbox(dev, mailbox);
4973
4974         if (errors)
4975                 mlx4_err(dev, "%d UPDATE_QP failures for slave %d, port %d\n",
4976                          errors, work->slave, work->port);
4977
4978         /* unregister previous vlan_id if needed and we had no errors
4979          * while updating the QPs
4980          */
4981         if (work->flags & MLX4_VF_IMMED_VLAN_FLAG_VLAN && !errors &&
4982             NO_INDX != work->orig_vlan_ix)
4983                 __mlx4_unregister_vlan(&work->priv->dev, work->port,
4984                                        work->orig_vlan_id);
4985 out:
4986         kfree(work);
4987         return;
4988 }