]> git.karo-electronics.de Git - karo-tx-linux.git/blob - drivers/infiniband/hw/usnic/usnic_ib_qp_grp.c
d6667a198d0b5856d38d87ac86c49460b7ec1f06
[karo-tx-linux.git] / drivers / infiniband / hw / usnic / usnic_ib_qp_grp.c
1 /*
2  * Copyright (c) 2013, Cisco Systems, Inc. All rights reserved.
3  *
4  * This program is free software; you may redistribute it and/or modify
5  * it under the terms of the GNU General Public License as published by
6  * the Free Software Foundation; version 2 of the License.
7  *
8  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
9  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
10  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
11  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
12  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
13  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
14  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
15  * SOFTWARE.
16  *
17  */
18 #include <linux/bug.h>
19 #include <linux/errno.h>
20 #include <linux/module.h>
21 #include <linux/spinlock.h>
22
23 #include "usnic_log.h"
24 #include "usnic_vnic.h"
25 #include "usnic_fwd.h"
26 #include "usnic_uiom.h"
27 #include "usnic_ib_qp_grp.h"
28 #include "usnic_ib_sysfs.h"
29 #include "usnic_transport.h"
30
31 #define DFLT_RQ_IDX     0
32
33 const char *usnic_ib_qp_grp_state_to_string(enum ib_qp_state state)
34 {
35         switch (state) {
36         case IB_QPS_RESET:
37                 return "Rst";
38         case IB_QPS_INIT:
39                 return "Init";
40         case IB_QPS_RTR:
41                 return "RTR";
42         case IB_QPS_RTS:
43                 return "RTS";
44         case IB_QPS_SQD:
45                 return "SQD";
46         case IB_QPS_SQE:
47                 return "SQE";
48         case IB_QPS_ERR:
49                 return "ERR";
50         default:
51                 return "UNKOWN STATE";
52
53         }
54 }
55
/*
 * Write the column header row matching usnic_ib_qp_grp_dump_rows() into
 * buf.  Returns the number of characters written (scnprintf semantics,
 * always < buf_sz).
 */
int usnic_ib_qp_grp_dump_hdr(char *buf, int buf_sz)
{
        return scnprintf(buf, buf_sz, "|QPN\t|State\t|PID\t|VF Idx\t|Fil ID");
}
60
61 int usnic_ib_qp_grp_dump_rows(void *obj, char *buf, int buf_sz)
62 {
63         struct usnic_ib_qp_grp *qp_grp = obj;
64         struct usnic_ib_qp_grp_flow *default_flow;
65         if (obj) {
66                 default_flow = list_first_entry(&qp_grp->flows_lst,
67                                         struct usnic_ib_qp_grp_flow, link);
68                 return scnprintf(buf, buf_sz, "|%d\t|%s\t|%d\t|%hu\t|%d",
69                                         qp_grp->ibqp.qp_num,
70                                         usnic_ib_qp_grp_state_to_string(
71                                                         qp_grp->state),
72                                         qp_grp->owner_pid,
73                                         usnic_vnic_get_index(qp_grp->vf->vnic),
74                                         default_flow->flow->flow_id);
75         } else {
76                 return scnprintf(buf, buf_sz, "|N/A\t|N/A\t|N/A\t|N/A\t|N/A");
77         }
78 }
79
/*
 * Return the RQ resource chunk for this QP group.  Caller must hold
 * qp_grp->lock.  May return an ERR_PTR or NULL (propagated from
 * usnic_ib_qp_grp_get_chunk()); callers check with IS_ERR_OR_NULL.
 */
static struct usnic_vnic_res_chunk *
get_qp_res_chunk(struct usnic_ib_qp_grp *qp_grp)
{
        lockdep_assert_held(&qp_grp->lock);
        /*
         * The QP res chunk, used to derive qp indices,
         * are just indices of the RQs
         */
        return usnic_ib_qp_grp_get_chunk(qp_grp, USNIC_VNIC_RES_TYPE_RQ);
}
90
91 static int enable_qp_grp(struct usnic_ib_qp_grp *qp_grp)
92 {
93
94         int status;
95         int i, vnic_idx;
96         struct usnic_vnic_res_chunk *res_chunk;
97         struct usnic_vnic_res *res;
98
99         lockdep_assert_held(&qp_grp->lock);
100
101         vnic_idx = usnic_vnic_get_index(qp_grp->vf->vnic);
102
103         res_chunk = get_qp_res_chunk(qp_grp);
104         if (IS_ERR_OR_NULL(res_chunk)) {
105                 usnic_err("Unable to get qp res with err %ld\n",
106                                 PTR_ERR(res_chunk));
107                 return res_chunk ? PTR_ERR(res_chunk) : -ENOMEM;
108         }
109
110         for (i = 0; i < res_chunk->cnt; i++) {
111                 res = res_chunk->res[i];
112                 status = usnic_fwd_enable_qp(qp_grp->ufdev, vnic_idx,
113                                                 res->vnic_idx);
114                 if (status) {
115                         usnic_err("Failed to enable qp %d of %s:%d\n with err %d\n",
116                                         res->vnic_idx, qp_grp->ufdev->name,
117                                         vnic_idx, status);
118                         goto out_err;
119                 }
120         }
121
122         return 0;
123
124 out_err:
125         for (i--; i >= 0; i--) {
126                 res = res_chunk->res[i];
127                 usnic_fwd_disable_qp(qp_grp->ufdev, vnic_idx,
128                                         res->vnic_idx);
129         }
130
131         return status;
132 }
133
134 static int disable_qp_grp(struct usnic_ib_qp_grp *qp_grp)
135 {
136         int i, vnic_idx;
137         struct usnic_vnic_res_chunk *res_chunk;
138         struct usnic_vnic_res *res;
139         int status = 0;
140
141         lockdep_assert_held(&qp_grp->lock);
142         vnic_idx = usnic_vnic_get_index(qp_grp->vf->vnic);
143
144         res_chunk = get_qp_res_chunk(qp_grp);
145         if (IS_ERR_OR_NULL(res_chunk)) {
146                 usnic_err("Unable to get qp res with err %ld\n",
147                         PTR_ERR(res_chunk));
148                 return res_chunk ? PTR_ERR(res_chunk) : -ENOMEM;
149         }
150
151         for (i = 0; i < res_chunk->cnt; i++) {
152                 res = res_chunk->res[i];
153                 status = usnic_fwd_disable_qp(qp_grp->ufdev, vnic_idx,
154                                                 res->vnic_idx);
155                 if (status) {
156                         usnic_err("Failed to disable rq %d of %s:%d\n with err %d\n",
157                                         res->vnic_idx,
158                                         qp_grp->ufdev->name,
159                                         vnic_idx, status);
160                 }
161         }
162
163         return status;
164
165 }
166
167 static int init_filter_action(struct usnic_ib_qp_grp *qp_grp,
168                                 struct usnic_filter_action *uaction)
169 {
170         struct usnic_vnic_res_chunk *res_chunk;
171
172         res_chunk = usnic_ib_qp_grp_get_chunk(qp_grp, USNIC_VNIC_RES_TYPE_RQ);
173         if (IS_ERR_OR_NULL(res_chunk)) {
174                 usnic_err("Unable to get %s with err %ld\n",
175                         usnic_vnic_res_type_to_str(USNIC_VNIC_RES_TYPE_RQ),
176                         PTR_ERR(res_chunk));
177                 return res_chunk ? PTR_ERR(res_chunk) : -ENOMEM;
178         }
179
180         uaction->vnic_idx = usnic_vnic_get_index(qp_grp->vf->vnic);
181         uaction->action.type = FILTER_ACTION_RQ_STEERING;
182         uaction->action.u.rq_idx = res_chunk->res[DFLT_RQ_IDX]->vnic_idx;
183
184         return 0;
185 }
186
187 static struct usnic_ib_qp_grp_flow*
188 create_roce_custom_flow(struct usnic_ib_qp_grp *qp_grp,
189                         struct usnic_transport_spec *trans_spec)
190 {
191         uint16_t port_num;
192         int err;
193         struct filter filter;
194         struct usnic_filter_action uaction;
195         struct usnic_ib_qp_grp_flow *qp_flow;
196         struct usnic_fwd_flow *flow;
197         enum usnic_transport_type trans_type;
198
199         trans_type = trans_spec->trans_type;
200         port_num = trans_spec->usnic_roce.port_num;
201
202         /* Reserve Port */
203         port_num = usnic_transport_rsrv_port(trans_type, port_num);
204         if (port_num == 0)
205                 return ERR_PTR(-EINVAL);
206
207         /* Create Flow */
208         usnic_fwd_init_usnic_filter(&filter, port_num);
209         err = init_filter_action(qp_grp, &uaction);
210         if (err)
211                 goto out_unreserve_port;
212
213         flow = usnic_fwd_alloc_flow(qp_grp->ufdev, &filter, &uaction);
214         if (IS_ERR_OR_NULL(flow)) {
215                 usnic_err("Unable to alloc flow failed with err %ld\n",
216                                 PTR_ERR(flow));
217                 err = (flow) ? PTR_ERR(flow) : -EFAULT;
218                 goto out_unreserve_port;
219         }
220
221         /* Create Flow Handle */
222         qp_flow = kzalloc(sizeof(*qp_flow), GFP_ATOMIC);
223         if (IS_ERR_OR_NULL(qp_flow)) {
224                 err = (qp_flow) ? PTR_ERR(qp_flow) : -ENOMEM;
225                 goto out_dealloc_flow;
226         }
227         qp_flow->flow = flow;
228         qp_flow->trans_type = trans_type;
229         qp_flow->usnic_roce.port_num = port_num;
230         qp_flow->qp_grp = qp_grp;
231         return qp_flow;
232
233 out_dealloc_flow:
234         usnic_fwd_dealloc_flow(flow);
235 out_unreserve_port:
236         usnic_transport_unrsrv_port(trans_type, port_num);
237         return ERR_PTR(err);
238 }
239
/*
 * Tear down a custom-RoCE flow handle: free the forwarding flow,
 * release the reserved transport port, then free the handle itself.
 * Inverse of create_roce_custom_flow().
 */
static void release_roce_custom_flow(struct usnic_ib_qp_grp_flow *qp_flow)
{
        usnic_fwd_dealloc_flow(qp_flow->flow);
        usnic_transport_unrsrv_port(qp_flow->trans_type,
                                        qp_flow->usnic_roce.port_num);
        kfree(qp_flow);
}
247
248 static struct usnic_ib_qp_grp_flow*
249 create_udp_flow(struct usnic_ib_qp_grp *qp_grp,
250                 struct usnic_transport_spec *trans_spec)
251 {
252         struct socket *sock;
253         int sock_fd;
254         int err;
255         struct filter filter;
256         struct usnic_filter_action uaction;
257         struct usnic_ib_qp_grp_flow *qp_flow;
258         struct usnic_fwd_flow *flow;
259         enum usnic_transport_type trans_type;
260         uint32_t addr;
261         uint16_t port_num;
262         int proto;
263
264         trans_type = trans_spec->trans_type;
265         sock_fd = trans_spec->udp.sock_fd;
266
267         /* Get and check socket */
268         sock = usnic_transport_get_socket(sock_fd);
269         if (IS_ERR_OR_NULL(sock))
270                 return ERR_CAST(sock);
271
272         err = usnic_transport_sock_get_addr(sock, &proto, &addr, &port_num);
273         if (err)
274                 goto out_put_sock;
275
276         if (proto != IPPROTO_UDP) {
277                 usnic_err("Protocol for fd %d is not UDP", sock_fd);
278                 err = -EPERM;
279                 goto out_put_sock;
280         }
281
282         /* Create flow */
283         usnic_fwd_init_udp_filter(&filter, addr, port_num);
284         err = init_filter_action(qp_grp, &uaction);
285         if (err)
286                 goto out_put_sock;
287
288         flow = usnic_fwd_alloc_flow(qp_grp->ufdev, &filter, &uaction);
289         if (IS_ERR_OR_NULL(flow)) {
290                 usnic_err("Unable to alloc flow failed with err %ld\n",
291                                 PTR_ERR(flow));
292                 err = (flow) ? PTR_ERR(flow) : -EFAULT;
293                 goto out_put_sock;
294         }
295
296         /* Create qp_flow */
297         qp_flow = kzalloc(sizeof(*qp_flow), GFP_ATOMIC);
298         if (IS_ERR_OR_NULL(qp_flow)) {
299                 err = (qp_flow) ? PTR_ERR(qp_flow) : -ENOMEM;
300                 goto out_dealloc_flow;
301         }
302         qp_flow->flow = flow;
303         qp_flow->trans_type = trans_type;
304         qp_flow->udp.sock = sock;
305         qp_flow->qp_grp = qp_grp;
306         return qp_flow;
307
308 out_dealloc_flow:
309         usnic_fwd_dealloc_flow(flow);
310 out_put_sock:
311         usnic_transport_put_socket(sock);
312         return ERR_PTR(err);
313 }
314
/*
 * Tear down a UDP flow handle: free the forwarding flow, drop the
 * socket reference taken in create_udp_flow(), then free the handle.
 */
static void release_udp_flow(struct usnic_ib_qp_grp_flow *qp_flow)
{
        usnic_fwd_dealloc_flow(qp_flow->flow);
        usnic_transport_put_socket(qp_flow->udp.sock);
        kfree(qp_flow);
}
321
322 static struct usnic_ib_qp_grp_flow*
323 create_and_add_flow(struct usnic_ib_qp_grp *qp_grp,
324                         struct usnic_transport_spec *trans_spec)
325 {
326         struct usnic_ib_qp_grp_flow *qp_flow;
327         enum usnic_transport_type trans_type;
328
329         trans_type = trans_spec->trans_type;
330         switch (trans_type) {
331         case USNIC_TRANSPORT_ROCE_CUSTOM:
332                 qp_flow = create_roce_custom_flow(qp_grp, trans_spec);
333                 break;
334         case USNIC_TRANSPORT_IPV4_UDP:
335                 qp_flow = create_udp_flow(qp_grp, trans_spec);
336                 break;
337         default:
338                 usnic_err("Unsupported transport %u\n",
339                                 trans_spec->trans_type);
340                 return ERR_PTR(-EINVAL);
341         }
342
343         if (!IS_ERR_OR_NULL(qp_flow))
344                 list_add_tail(&qp_flow->link, &qp_grp->flows_lst);
345
346
347         return qp_flow;
348 }
349
350 static void release_and_remove_flow(struct usnic_ib_qp_grp_flow *qp_flow)
351 {
352         list_del(&qp_flow->link);
353
354         switch (qp_flow->trans_type) {
355         case USNIC_TRANSPORT_ROCE_CUSTOM:
356                 release_roce_custom_flow(qp_flow);
357                 break;
358         case USNIC_TRANSPORT_IPV4_UDP:
359                 release_udp_flow(qp_flow);
360                 break;
361         default:
362                 WARN(1, "Unsupported transport %u\n",
363                                 qp_flow->trans_type);
364                 break;
365         }
366 }
367
368 static void release_and_remove_all_flows(struct usnic_ib_qp_grp *qp_grp)
369 {
370         struct usnic_ib_qp_grp_flow *qp_flow, *tmp;
371         list_for_each_entry_safe(qp_flow, tmp, &qp_grp->flows_lst, link)
372                 release_and_remove_flow(qp_flow);
373 }
374
375 int usnic_ib_qp_grp_modify(struct usnic_ib_qp_grp *qp_grp,
376                                 enum ib_qp_state new_state,
377                                 void *data)
378 {
379         int status = 0;
380         int vnic_idx;
381         struct ib_event ib_event;
382         enum ib_qp_state old_state;
383         struct usnic_transport_spec *trans_spec;
384         struct usnic_ib_qp_grp_flow *qp_flow;
385
386         old_state = qp_grp->state;
387         vnic_idx = usnic_vnic_get_index(qp_grp->vf->vnic);
388         trans_spec = (struct usnic_transport_spec *) data;
389
390         spin_lock(&qp_grp->lock);
391         switch (new_state) {
392         case IB_QPS_RESET:
393                 switch (old_state) {
394                 case IB_QPS_RESET:
395                         /* NO-OP */
396                         break;
397                 case IB_QPS_INIT:
398                         release_and_remove_all_flows(qp_grp);
399                         status = 0;
400                         break;
401                 case IB_QPS_RTR:
402                 case IB_QPS_RTS:
403                 case IB_QPS_ERR:
404                         status = disable_qp_grp(qp_grp);
405                         release_and_remove_all_flows(qp_grp);
406                         break;
407                 default:
408                         status = -EINVAL;
409                 }
410                 break;
411         case IB_QPS_INIT:
412                 switch (old_state) {
413                 case IB_QPS_RESET:
414                         if (trans_spec) {
415                                 qp_flow = create_and_add_flow(qp_grp,
416                                                                 trans_spec);
417                                 if (IS_ERR_OR_NULL(qp_flow)) {
418                                         status = (qp_flow) ? PTR_ERR(qp_flow) : -EFAULT;
419                                         break;
420                                 }
421                         } else {
422                                 /*
423                                  * Optional to specify filters.
424                                  */
425                                 status = 0;
426                         }
427                         break;
428                 case IB_QPS_INIT:
429                         if (trans_spec) {
430                                 qp_flow = create_and_add_flow(qp_grp,
431                                                                 trans_spec);
432                                 if (IS_ERR_OR_NULL(qp_flow)) {
433                                         status = (qp_flow) ? PTR_ERR(qp_flow) : -EFAULT;
434                                         break;
435                                 }
436                         } else {
437                                 /*
438                                  * Doesn't make sense to go into INIT state
439                                  * from INIT state w/o adding filters.
440                                  */
441                                 status = -EINVAL;
442                         }
443                         break;
444                 case IB_QPS_RTR:
445                         status = disable_qp_grp(qp_grp);
446                         break;
447                 case IB_QPS_RTS:
448                         status = disable_qp_grp(qp_grp);
449                         break;
450                 default:
451                         status = -EINVAL;
452                 }
453                 break;
454         case IB_QPS_RTR:
455                 switch (old_state) {
456                 case IB_QPS_INIT:
457                         status = enable_qp_grp(qp_grp);
458                         break;
459                 default:
460                         status = -EINVAL;
461                 }
462                 break;
463         case IB_QPS_RTS:
464                 switch (old_state) {
465                 case IB_QPS_RTR:
466                         /* NO-OP FOR NOW */
467                         break;
468                 default:
469                         status = -EINVAL;
470                 }
471                 break;
472         case IB_QPS_ERR:
473                 ib_event.device = &qp_grp->vf->pf->ib_dev;
474                 ib_event.element.qp = &qp_grp->ibqp;
475                 ib_event.event = IB_EVENT_QP_FATAL;
476
477                 switch (old_state) {
478                 case IB_QPS_RESET:
479                         qp_grp->ibqp.event_handler(&ib_event,
480                                         qp_grp->ibqp.qp_context);
481                         break;
482                 case IB_QPS_INIT:
483                         release_and_remove_all_flows(qp_grp);
484                         qp_grp->ibqp.event_handler(&ib_event,
485                                         qp_grp->ibqp.qp_context);
486                         break;
487                 case IB_QPS_RTR:
488                 case IB_QPS_RTS:
489                         status = disable_qp_grp(qp_grp);
490                         release_and_remove_all_flows(qp_grp);
491                         qp_grp->ibqp.event_handler(&ib_event,
492                                         qp_grp->ibqp.qp_context);
493                         break;
494                 default:
495                         status = -EINVAL;
496                 }
497                 break;
498         default:
499                 status = -EINVAL;
500         }
501         spin_unlock(&qp_grp->lock);
502
503         if (!status) {
504                 qp_grp->state = new_state;
505                 usnic_info("Transistioned %u from %s to %s",
506                 qp_grp->grp_id,
507                 usnic_ib_qp_grp_state_to_string(old_state),
508                 usnic_ib_qp_grp_state_to_string(new_state));
509         } else {
510                 usnic_err("Failed to transistion %u from %s to %s",
511                 qp_grp->grp_id,
512                 usnic_ib_qp_grp_state_to_string(old_state),
513                 usnic_ib_qp_grp_state_to_string(new_state));
514         }
515
516         return status;
517 }
518
519 static struct usnic_vnic_res_chunk**
520 alloc_res_chunk_list(struct usnic_vnic *vnic,
521                         struct usnic_vnic_res_spec *res_spec, void *owner_obj)
522 {
523         enum usnic_vnic_res_type res_type;
524         struct usnic_vnic_res_chunk **res_chunk_list;
525         int err, i, res_cnt, res_lst_sz;
526
527         for (res_lst_sz = 0;
528                 res_spec->resources[res_lst_sz].type != USNIC_VNIC_RES_TYPE_EOL;
529                 res_lst_sz++) {
530                 /* Do Nothing */
531         }
532
533         res_chunk_list = kzalloc(sizeof(*res_chunk_list)*(res_lst_sz+1),
534                                         GFP_ATOMIC);
535         if (!res_chunk_list)
536                 return ERR_PTR(-ENOMEM);
537
538         for (i = 0; res_spec->resources[i].type != USNIC_VNIC_RES_TYPE_EOL;
539                 i++) {
540                 res_type = res_spec->resources[i].type;
541                 res_cnt = res_spec->resources[i].cnt;
542
543                 res_chunk_list[i] = usnic_vnic_get_resources(vnic, res_type,
544                                         res_cnt, owner_obj);
545                 if (IS_ERR_OR_NULL(res_chunk_list[i])) {
546                         err = (res_chunk_list[i] ?
547                                         PTR_ERR(res_chunk_list[i]) : -ENOMEM);
548                         usnic_err("Failed to get %s from %s with err %d\n",
549                                 usnic_vnic_res_type_to_str(res_type),
550                                 usnic_vnic_pci_name(vnic),
551                                 err);
552                         goto out_free_res;
553                 }
554         }
555
556         return res_chunk_list;
557
558 out_free_res:
559         for (i--; i > 0; i--)
560                 usnic_vnic_put_resources(res_chunk_list[i]);
561         kfree(res_chunk_list);
562         return ERR_PTR(err);
563 }
564
/*
 * Return every chunk in a NULL-terminated chunk list to its vnic and
 * free the list itself.  Inverse of alloc_res_chunk_list().
 */
static void free_qp_grp_res(struct usnic_vnic_res_chunk **res_chunk_list)
{
        struct usnic_vnic_res_chunk **chunk = res_chunk_list;

        while (*chunk) {
                usnic_vnic_put_resources(*chunk);
                chunk++;
        }
        kfree(res_chunk_list);
}
572
/*
 * Bind a QP group to a VF and (indirectly) to a PD.  Caller must hold
 * vf->lock.  The first QP group bound to a VF attaches the VF's PCI
 * device to the PD's IOMMU domain; subsequent binds only bump the
 * refcount.  All groups on one VF must share the same PD (WARN_ON
 * otherwise).  Returns 0 on success or the attach error.
 */
static int qp_grp_and_vf_bind(struct usnic_ib_vf *vf,
                                struct usnic_ib_pd *pd,
                                struct usnic_ib_qp_grp *qp_grp)
{
        int err;
        struct pci_dev *pdev;

        lockdep_assert_held(&vf->lock);

        pdev = usnic_vnic_get_pdev(vf->vnic);
        if (vf->qp_grp_ref_cnt == 0) {
                /* First group on this VF: attach device to the PD's domain */
                err = usnic_uiom_attach_dev_to_pd(pd->umem_pd, &pdev->dev);
                if (err) {
                        usnic_err("Failed to attach %s to domain\n",
                                        pci_name(pdev));
                        return err;
                }
                vf->pd = pd;
        }
        vf->qp_grp_ref_cnt++;

        WARN_ON(vf->pd != pd);
        qp_grp->vf = vf;

        return 0;
}
599
600 static void qp_grp_and_vf_unbind(struct usnic_ib_qp_grp *qp_grp)
601 {
602         struct pci_dev *pdev;
603         struct usnic_ib_pd *pd;
604
605         lockdep_assert_held(&qp_grp->vf->lock);
606
607         pd = qp_grp->vf->pd;
608         pdev = usnic_vnic_get_pdev(qp_grp->vf->vnic);
609         if (--qp_grp->vf->qp_grp_ref_cnt == 0) {
610                 qp_grp->vf->pd = NULL;
611                 usnic_uiom_detach_dev_from_pd(pd->umem_pd, &pdev->dev);
612         }
613         qp_grp->vf = NULL;
614 }
615
/*
 * Dump a resource spec to the debug log.  512 bytes is assumed large
 * enough for usnic_vnic_spec_dump()'s output — TODO confirm against
 * that helper's bound.
 */
static void log_spec(struct usnic_vnic_res_spec *res_spec)
{
        char buf[512];
        usnic_vnic_spec_dump(buf, sizeof(buf), res_spec);
        usnic_dbg("%s\n", buf);
}
622
623 static int qp_grp_id_from_flow(struct usnic_ib_qp_grp_flow *qp_flow,
624                                 uint32_t *id)
625 {
626         enum usnic_transport_type trans_type = qp_flow->trans_type;
627         int err;
628
629         switch (trans_type) {
630         case USNIC_TRANSPORT_ROCE_CUSTOM:
631                 *id = qp_flow->usnic_roce.port_num;
632                 break;
633         case USNIC_TRANSPORT_IPV4_UDP:
634                 err = usnic_transport_sock_get_addr(qp_flow->udp.sock,
635                                                         NULL, NULL,
636                                                         (uint16_t *) id);
637                 if (err)
638                         return err;
639                 break;
640         default:
641                 usnic_err("Unsupported transport %u\n", trans_type);
642                 return -EINVAL;
643         }
644
645         return 0;
646 }
647
648 struct usnic_ib_qp_grp *
649 usnic_ib_qp_grp_create(struct usnic_fwd_dev *ufdev, struct usnic_ib_vf *vf,
650                         struct usnic_ib_pd *pd,
651                         struct usnic_vnic_res_spec *res_spec,
652                         struct usnic_transport_spec *transport_spec)
653 {
654         struct usnic_ib_qp_grp *qp_grp;
655         int err;
656         enum usnic_transport_type transport = transport_spec->trans_type;
657         struct usnic_ib_qp_grp_flow *qp_flow;
658
659         lockdep_assert_held(&vf->lock);
660
661         err = usnic_vnic_res_spec_satisfied(&min_transport_spec[transport],
662                                                 res_spec);
663         if (err) {
664                 usnic_err("Spec does not meet miniumum req for transport %d\n",
665                                 transport);
666                 log_spec(res_spec);
667                 return ERR_PTR(err);
668         }
669
670         qp_grp = kzalloc(sizeof(*qp_grp), GFP_ATOMIC);
671         if (!qp_grp) {
672                 usnic_err("Unable to alloc qp_grp - Out of memory\n");
673                 return NULL;
674         }
675
676         qp_grp->res_chunk_list = alloc_res_chunk_list(vf->vnic, res_spec,
677                                                         qp_grp);
678         if (IS_ERR_OR_NULL(qp_grp->res_chunk_list)) {
679                 err = qp_grp->res_chunk_list ?
680                                 PTR_ERR(qp_grp->res_chunk_list) : -ENOMEM;
681                 usnic_err("Unable to alloc res for %d with err %d\n",
682                                 qp_grp->grp_id, err);
683                 goto out_free_qp_grp;
684         }
685
686         err = qp_grp_and_vf_bind(vf, pd, qp_grp);
687         if (err)
688                 goto out_free_res;
689
690         INIT_LIST_HEAD(&qp_grp->flows_lst);
691         spin_lock_init(&qp_grp->lock);
692         qp_grp->ufdev = ufdev;
693         qp_grp->state = IB_QPS_RESET;
694         qp_grp->owner_pid = current->pid;
695
696         qp_flow = create_and_add_flow(qp_grp, transport_spec);
697         if (IS_ERR_OR_NULL(qp_flow)) {
698                 usnic_err("Unable to create and add flow with err %ld\n",
699                                 PTR_ERR(qp_flow));
700                 err = (qp_flow) ? PTR_ERR(qp_flow) : -EFAULT;
701                 goto out_qp_grp_vf_unbind;
702         }
703
704         err = qp_grp_id_from_flow(qp_flow, &qp_grp->grp_id);
705         if (err)
706                 goto out_release_flow;
707         qp_grp->ibqp.qp_num = qp_grp->grp_id;
708
709         usnic_ib_sysfs_qpn_add(qp_grp);
710
711         return qp_grp;
712
713 out_release_flow:
714         release_and_remove_flow(qp_flow);
715 out_qp_grp_vf_unbind:
716         qp_grp_and_vf_unbind(qp_grp);
717 out_free_res:
718         free_qp_grp_res(qp_grp->res_chunk_list);
719 out_free_qp_grp:
720         kfree(qp_grp);
721
722         return ERR_PTR(err);
723 }
724
/*
 * Destroy a QP group.  The group must already be in RESET (WARN_ON
 * otherwise) and the caller must hold the owning VF's lock.  Teardown
 * mirrors creation in reverse: sysfs entry, VF/PD binding, flows,
 * resource chunks, then the group itself.
 */
void usnic_ib_qp_grp_destroy(struct usnic_ib_qp_grp *qp_grp)
{

        WARN_ON(qp_grp->state != IB_QPS_RESET);
        lockdep_assert_held(&qp_grp->vf->lock);

        usnic_ib_sysfs_qpn_remove(qp_grp);
        qp_grp_and_vf_unbind(qp_grp);
        release_and_remove_all_flows(qp_grp);
        free_qp_grp_res(qp_grp->res_chunk_list);
        kfree(qp_grp);
}
737
738 struct usnic_vnic_res_chunk*
739 usnic_ib_qp_grp_get_chunk(struct usnic_ib_qp_grp *qp_grp,
740                                 enum usnic_vnic_res_type res_type)
741 {
742         int i;
743
744         for (i = 0; qp_grp->res_chunk_list[i]; i++) {
745                 if (qp_grp->res_chunk_list[i]->type == res_type)
746                         return qp_grp->res_chunk_list[i];
747         }
748
749         return ERR_PTR(-EINVAL);
750 }