2 * Copyright (c) 2013, Cisco Systems, Inc. All rights reserved.
4 * This program is free software; you may redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; version 2 of the License.
8 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
9 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
10 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
11 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
12 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
13 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
14 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
18 #include <linux/bug.h>
19 #include <linux/errno.h>
20 #include <linux/module.h>
21 #include <linux/spinlock.h>
23 #include "usnic_log.h"
24 #include "usnic_vnic.h"
25 #include "usnic_fwd.h"
26 #include "usnic_uiom.h"
27 #include "usnic_ib_qp_grp.h"
28 #include "usnic_ib_sysfs.h"
29 #include "usnic_transport.h"
33 const char *usnic_ib_qp_grp_state_to_string(enum ib_qp_state state)
51 return "UNKOWN STATE";
/*
 * Emit the column-header row for the QP-group table dump.
 * Returns the number of bytes written into @buf (bounded by @buf_sz).
 */
int usnic_ib_qp_grp_dump_hdr(char *buf, int buf_sz)
{
	int hdr_len;

	hdr_len = scnprintf(buf, buf_sz, "|QPN\t|State\t|PID\t|VF Idx\t|Fil ID");
	return hdr_len;
}
/*
 * Format one table row describing a QP group (QPN, state, owner PID,
 * VF index, filter id) into @buf.
 * NOTE(review): the extracted SOURCE drops interior lines here — the
 * branch that selects the "N/A" row and several scnprintf arguments —
 * so only the visible structure is documented.
 */
61 int usnic_ib_qp_grp_dump_rows(void *obj, char *buf, int buf_sz)
63 struct usnic_ib_qp_grp *qp_grp = obj;
64 struct usnic_ib_qp_grp_flow *default_flow;
/* First flow on the group's list supplies the filter-id column. */
66 default_flow = list_first_entry(&qp_grp->flows_lst,
67 struct usnic_ib_qp_grp_flow, link);
68 return scnprintf(buf, buf_sz, "|%d\t|%s\t|%d\t|%hu\t|%d",
70 usnic_ib_qp_grp_state_to_string(
73 usnic_vnic_get_index(qp_grp->vf->vnic),
74 default_flow->flow->flow_id);
/* Placeholder row — presumably emitted when obj is NULL; confirm. */
76 return scnprintf(buf, buf_sz, "|N/A\t|N/A\t|N/A\t|N/A\t|N/A");
80 static struct usnic_vnic_res_chunk *
81 get_qp_res_chunk(struct usnic_ib_qp_grp *qp_grp)
83 lockdep_assert_held(&qp_grp->lock);
85 * The QP res chunk, used to derive qp indices,
86 * are just indices of the RQs
88 return usnic_ib_qp_grp_get_chunk(qp_grp, USNIC_VNIC_RES_TYPE_RQ);
/*
 * Enable every QP in the group on the forwarding device.  On partial
 * failure, the trailing loop unwinds by disabling the QPs that were
 * already enabled.  Caller must hold qp_grp->lock (asserted below).
 * NOTE(review): the extracted SOURCE elides interior lines (the
 * declarations of status/i/vnic_idx, some call arguments, the goto and
 * return statements); documented from visible lines only.
 */
91 static int enable_qp_grp(struct usnic_ib_qp_grp *qp_grp)
96 struct usnic_vnic_res_chunk *res_chunk;
97 struct usnic_vnic_res *res;
99 lockdep_assert_held(&qp_grp->lock);
101 vnic_idx = usnic_vnic_get_index(qp_grp->vf->vnic);
/* QP indices are derived from the group's RQ chunk. */
103 res_chunk = get_qp_res_chunk(qp_grp);
104 if (IS_ERR_OR_NULL(res_chunk)) {
105 usnic_err("Unable to get qp res with err %ld\n",
/* get_qp_res_chunk() may return NULL as well as an ERR_PTR. */
107 return res_chunk ? PTR_ERR(res_chunk) : -ENOMEM;
110 for (i = 0; i < res_chunk->cnt; i++) {
111 res = res_chunk->res[i];
112 status = usnic_fwd_enable_qp(qp_grp->ufdev, vnic_idx,
115 usnic_err("Failed to enable qp %d of %s:%d\n with err %d\n",
116 res->vnic_idx, qp_grp->ufdev->name,
/* Error unwind: disable everything enabled before the failure. */
125 for (i--; i >= 0; i--) {
126 res = res_chunk->res[i];
127 usnic_fwd_disable_qp(qp_grp->ufdev, vnic_idx,
/*
 * Disable every QP in the group on the forwarding device.  Mirrors
 * enable_qp_grp(); caller must hold qp_grp->lock (asserted below).
 * NOTE(review): interior lines (status/i/vnic_idx declarations, call
 * arguments, the final return) are elided in the extracted SOURCE.
 */
134 static int disable_qp_grp(struct usnic_ib_qp_grp *qp_grp)
137 struct usnic_vnic_res_chunk *res_chunk;
138 struct usnic_vnic_res *res;
141 lockdep_assert_held(&qp_grp->lock);
142 vnic_idx = usnic_vnic_get_index(qp_grp->vf->vnic);
144 res_chunk = get_qp_res_chunk(qp_grp);
145 if (IS_ERR_OR_NULL(res_chunk)) {
146 usnic_err("Unable to get qp res with err %ld\n",
/* NULL and ERR_PTR are both possible from get_qp_res_chunk(). */
148 return res_chunk ? PTR_ERR(res_chunk) : -ENOMEM;
151 for (i = 0; i < res_chunk->cnt; i++) {
152 res = res_chunk->res[i];
153 status = usnic_fwd_disable_qp(qp_grp->ufdev, vnic_idx,
156 usnic_err("Failed to disable rq %d of %s:%d\n with err %d\n",
167 static int init_filter_action(struct usnic_ib_qp_grp *qp_grp,
168 struct usnic_filter_action *uaction)
170 struct usnic_vnic_res_chunk *res_chunk;
172 res_chunk = usnic_ib_qp_grp_get_chunk(qp_grp, USNIC_VNIC_RES_TYPE_RQ);
173 if (IS_ERR_OR_NULL(res_chunk)) {
174 usnic_err("Unable to get %s with err %ld\n",
175 usnic_vnic_res_type_to_str(USNIC_VNIC_RES_TYPE_RQ),
177 return res_chunk ? PTR_ERR(res_chunk) : -ENOMEM;
180 uaction->vnic_idx = usnic_vnic_get_index(qp_grp->vf->vnic);
181 uaction->action.type = FILTER_ACTION_RQ_STEERING;
182 uaction->action.u.rq_idx = res_chunk->res[DFLT_RQ_IDX]->vnic_idx;
/*
 * Build a ROCE_CUSTOM flow for the group: reserve the requested
 * transport port, construct a usnic filter on it, program a steering
 * flow on the forwarding device, and wrap the result in a qp_flow
 * handle.  Visible error labels unwind the flow and the port
 * reservation in reverse order.
 * NOTE(review): the extracted SOURCE elides interior lines (the
 * err/port_num declarations, the port==0 check, the success return,
 * and the error-label lines themselves); documented from what is
 * visible only.
 */
187 static struct usnic_ib_qp_grp_flow*
188 create_roce_custom_flow(struct usnic_ib_qp_grp *qp_grp,
189 struct usnic_transport_spec *trans_spec)
193 struct filter filter;
194 struct usnic_filter_action uaction;
195 struct usnic_ib_qp_grp_flow *qp_flow;
196 struct usnic_fwd_flow *flow;
197 enum usnic_transport_type trans_type;
199 trans_type = trans_spec->trans_type;
200 port_num = trans_spec->usnic_roce.port_num;
/* Reserve the requested port; 0 presumably means "pick one" — confirm. */
203 port_num = usnic_transport_rsrv_port(trans_type, port_num);
205 return ERR_PTR(-EINVAL);
/* Build the filter and its RQ-steering action. */
208 usnic_fwd_init_usnic_filter(&filter, port_num);
209 err = init_filter_action(qp_grp, &uaction);
211 goto out_unreserve_port;
213 flow = usnic_fwd_alloc_flow(qp_grp->ufdev, &filter, &uaction);
214 if (IS_ERR_OR_NULL(flow)) {
215 usnic_err("Unable to alloc flow failed with err %ld\n",
217 err = (flow) ? PTR_ERR(flow) : -EFAULT;
218 goto out_unreserve_port;
221 /* Create Flow Handle */
222 qp_flow = kzalloc(sizeof(*qp_flow), GFP_ATOMIC);
223 if (IS_ERR_OR_NULL(qp_flow)) {
224 err = (qp_flow) ? PTR_ERR(qp_flow) : -ENOMEM;
225 goto out_dealloc_flow;
227 qp_flow->flow = flow;
228 qp_flow->trans_type = trans_type;
229 qp_flow->usnic_roce.port_num = port_num;
230 qp_flow->qp_grp = qp_grp;
/* Error unwind: drop the flow, then release the reserved port. */
234 usnic_fwd_dealloc_flow(flow);
236 usnic_transport_unrsrv_port(trans_type, port_num);
240 static void release_roce_custom_flow(struct usnic_ib_qp_grp_flow *qp_flow)
242 usnic_fwd_dealloc_flow(qp_flow->flow);
243 usnic_transport_unrsrv_port(qp_flow->trans_type,
244 qp_flow->usnic_roce.port_num);
/*
 * Build an IPv4/UDP flow for the group: take a reference on the
 * caller-supplied socket fd, read its bound address/port, construct a
 * UDP filter, program a steering flow, and wrap the result in a
 * qp_flow handle.  The socket reference is held for the flow's
 * lifetime and dropped on the error path.
 * NOTE(review): the extracted SOURCE elides interior lines (the
 * err/sock_fd/proto/addr/port_num declarations, several error checks
 * and goto targets, the success return, and the error labels);
 * documented from visible lines only.
 */
248 static struct usnic_ib_qp_grp_flow*
249 create_udp_flow(struct usnic_ib_qp_grp *qp_grp,
250 struct usnic_transport_spec *trans_spec)
255 struct filter filter;
256 struct usnic_filter_action uaction;
257 struct usnic_ib_qp_grp_flow *qp_flow;
258 struct usnic_fwd_flow *flow;
259 enum usnic_transport_type trans_type;
264 trans_type = trans_spec->trans_type;
265 sock_fd = trans_spec->udp.sock_fd;
267 /* Get and check socket */
268 usnic_transport_get_socket(sock_fd);
269 if (IS_ERR_OR_NULL(sock))
270 return ERR_CAST(sock);
/* Extract the bound protocol/address/port from the socket. */
272 err = usnic_transport_sock_get_addr(sock, &proto, &addr, &port_num);
276 if (proto != IPPROTO_UDP) {
277 usnic_err("Protocol for fd %d is not UDP", sock_fd);
/* Build the UDP filter and its RQ-steering action. */
283 usnic_fwd_init_udp_filter(&filter, addr, port_num);
284 err = init_filter_action(qp_grp, &uaction);
288 flow = usnic_fwd_alloc_flow(qp_grp->ufdev, &filter, &uaction);
289 if (IS_ERR_OR_NULL(flow)) {
290 usnic_err("Unable to alloc flow failed with err %ld\n",
292 err = (flow) ? PTR_ERR(flow) : -EFAULT;
297 qp_flow = kzalloc(sizeof(*qp_flow), GFP_ATOMIC);
298 if (IS_ERR_OR_NULL(qp_flow)) {
299 err = (qp_flow) ? PTR_ERR(qp_flow) : -ENOMEM;
300 goto out_dealloc_flow;
302 qp_flow->flow = flow;
303 qp_flow->trans_type = trans_type;
304 qp_flow->udp.sock = sock;
305 qp_flow->qp_grp = qp_grp;
/* Error unwind: drop the flow, then release the socket reference. */
309 usnic_fwd_dealloc_flow(flow);
311 usnic_transport_put_socket(sock);
315 static void release_udp_flow(struct usnic_ib_qp_grp_flow *qp_flow)
317 usnic_fwd_dealloc_flow(qp_flow->flow);
318 usnic_transport_put_socket(qp_flow->udp.sock);
322 static struct usnic_ib_qp_grp_flow*
323 create_and_add_flow(struct usnic_ib_qp_grp *qp_grp,
324 struct usnic_transport_spec *trans_spec)
326 struct usnic_ib_qp_grp_flow *qp_flow;
327 enum usnic_transport_type trans_type;
329 trans_type = trans_spec->trans_type;
330 switch (trans_type) {
331 case USNIC_TRANSPORT_ROCE_CUSTOM:
332 qp_flow = create_roce_custom_flow(qp_grp, trans_spec);
334 case USNIC_TRANSPORT_IPV4_UDP:
335 qp_flow = create_udp_flow(qp_grp, trans_spec);
338 usnic_err("Unsupported transport %u\n",
339 trans_spec->trans_type);
340 return ERR_PTR(-EINVAL);
343 if (!IS_ERR_OR_NULL(qp_flow))
344 list_add_tail(&qp_flow->link, &qp_grp->flows_lst);
350 static void release_and_remove_flow(struct usnic_ib_qp_grp_flow *qp_flow)
352 list_del(&qp_flow->link);
354 switch (qp_flow->trans_type) {
355 case USNIC_TRANSPORT_ROCE_CUSTOM:
356 release_roce_custom_flow(qp_flow);
358 case USNIC_TRANSPORT_IPV4_UDP:
359 release_udp_flow(qp_flow);
362 WARN(1, "Unsupported transport %u\n",
363 qp_flow->trans_type);
368 static void release_and_remove_all_flows(struct usnic_ib_qp_grp *qp_grp)
370 struct usnic_ib_qp_grp_flow *qp_flow, *tmp;
371 list_for_each_entry_safe(qp_flow, tmp, &qp_grp->flows_lst, link)
372 release_and_remove_flow(qp_flow);
/*
 * Drive the QP group's software state machine under qp_grp->lock:
 * depending on old_state/new_state it adds or removes flows, enables
 * or disables the group's QPs, and on transitions into ERR raises an
 * IB_EVENT_QP_FATAL through the consumer's event handler.  @data
 * carries an optional usnic_transport_spec for transitions that add
 * filters.  Returns 0 on success.
 * NOTE(review): the extracted SOURCE elides the switch/case labels and
 * several returns, so which visible action belongs to which
 * (old_state, new_state) pair cannot be confirmed from here.
 */
375 int usnic_ib_qp_grp_modify(struct usnic_ib_qp_grp *qp_grp,
376 enum ib_qp_state new_state,
381 struct ib_event ib_event;
382 enum ib_qp_state old_state;
383 struct usnic_transport_spec *trans_spec;
384 struct usnic_ib_qp_grp_flow *qp_flow;
386 old_state = qp_grp->state;
387 vnic_idx = usnic_vnic_get_index(qp_grp->vf->vnic);
388 trans_spec = (struct usnic_transport_spec *) data;
390 spin_lock(&qp_grp->lock);
/* Transition to RESET: drop every installed flow. */
398 release_and_remove_all_flows(qp_grp);
404 status = disable_qp_grp(qp_grp);
405 release_and_remove_all_flows(qp_grp);
/* Transition to INIT with a transport spec: install the filter flow. */
415 qp_flow = create_and_add_flow(qp_grp,
417 if (IS_ERR_OR_NULL(qp_flow)) {
418 status = (qp_flow) ? PTR_ERR(qp_flow) : -EFAULT;
423 * Optional to specify filters.
430 qp_flow = create_and_add_flow(qp_grp,
432 if (IS_ERR_OR_NULL(qp_flow)) {
433 status = (qp_flow) ? PTR_ERR(qp_flow) : -EFAULT;
438 * Doesn't make sense to go into INIT state
439 * from INIT state w/o adding filters.
445 status = disable_qp_grp(qp_grp);
448 status = disable_qp_grp(qp_grp);
/* Transition to RTS: turn the QPs on in hardware. */
457 status = enable_qp_grp(qp_grp);
/* Entering ERR: fabricate a fatal QP event for the consumer. */
473 ib_event.device = &qp_grp->vf->pf->ib_dev;
474 ib_event.element.qp = &qp_grp->ibqp;
475 ib_event.event = IB_EVENT_QP_FATAL;
479 qp_grp->ibqp.event_handler(&ib_event,
480 qp_grp->ibqp.qp_context);
483 release_and_remove_all_flows(qp_grp);
484 qp_grp->ibqp.event_handler(&ib_event,
485 qp_grp->ibqp.qp_context);
489 status = disable_qp_grp(qp_grp);
490 release_and_remove_all_flows(qp_grp);
491 qp_grp->ibqp.event_handler(&ib_event,
492 qp_grp->ibqp.qp_context);
501 spin_unlock(&qp_grp->lock);
/* Commit the new state only when the transition succeeded. */
504 qp_grp->state = new_state;
/* NOTE(review): "Transistioned"/"transistion" are typos in the log
 * strings; left untouched here since this edit changes comments only. */
505 usnic_info("Transistioned %u from %s to %s",
507 usnic_ib_qp_grp_state_to_string(old_state),
508 usnic_ib_qp_grp_state_to_string(new_state));
510 usnic_err("Failed to transistion %u from %s to %s",
512 usnic_ib_qp_grp_state_to_string(old_state),
513 usnic_ib_qp_grp_state_to_string(new_state));
519 static struct usnic_vnic_res_chunk**
520 alloc_res_chunk_list(struct usnic_vnic *vnic,
521 struct usnic_vnic_res_spec *res_spec, void *owner_obj)
523 enum usnic_vnic_res_type res_type;
524 struct usnic_vnic_res_chunk **res_chunk_list;
525 int err, i, res_cnt, res_lst_sz;
528 res_spec->resources[res_lst_sz].type != USNIC_VNIC_RES_TYPE_EOL;
533 res_chunk_list = kzalloc(sizeof(*res_chunk_list)*(res_lst_sz+1),
536 return ERR_PTR(-ENOMEM);
538 for (i = 0; res_spec->resources[i].type != USNIC_VNIC_RES_TYPE_EOL;
540 res_type = res_spec->resources[i].type;
541 res_cnt = res_spec->resources[i].cnt;
543 res_chunk_list[i] = usnic_vnic_get_resources(vnic, res_type,
545 if (IS_ERR_OR_NULL(res_chunk_list[i])) {
546 err = (res_chunk_list[i] ?
547 PTR_ERR(res_chunk_list[i]) : -ENOMEM);
548 usnic_err("Failed to get %s from %s with err %d\n",
549 usnic_vnic_res_type_to_str(res_type),
550 usnic_vnic_pci_name(vnic),
556 return res_chunk_list;
559 for (i--; i > 0; i--)
560 usnic_vnic_put_resources(res_chunk_list[i]);
561 kfree(res_chunk_list);
/*
 * Return every chunk in the NULL-terminated list built by
 * alloc_res_chunk_list() to the vNIC, then free the list itself.
 */
static void free_qp_grp_res(struct usnic_vnic_res_chunk **res_chunk_list)
{
	int idx;

	for (idx = 0; res_chunk_list[idx]; idx++)
		usnic_vnic_put_resources(res_chunk_list[idx]);
	kfree(res_chunk_list);
}
/*
 * Bind a QP group to its VF and PD.  The first group bound to a VF
 * attaches the VF's PCI device to the PD's IOMMU domain; subsequent
 * groups just bump the refcount and must use the same PD.  Caller
 * must hold vf->lock (asserted below).
 * NOTE(review): the extracted SOURCE elides interior lines (the err
 * declaration, the error return, the vf->pd assignment and final
 * return); documented from visible lines only.
 */
573 static int qp_grp_and_vf_bind(struct usnic_ib_vf *vf,
574 struct usnic_ib_pd *pd,
575 struct usnic_ib_qp_grp *qp_grp)
578 struct pci_dev *pdev;
580 lockdep_assert_held(&vf->lock);
582 pdev = usnic_vnic_get_pdev(vf->vnic);
/* First binding on this VF: attach its device to the PD's domain. */
583 if (vf->qp_grp_ref_cnt == 0) {
584 err = usnic_uiom_attach_dev_to_pd(pd->umem_pd, &pdev->dev);
586 usnic_err("Failed to attach %s to domain\n",
592 vf->qp_grp_ref_cnt++;
/* All groups on one VF must share the same PD. */
594 WARN_ON(vf->pd != pd);
/*
 * Undo qp_grp_and_vf_bind(): drop the VF's group refcount and, when it
 * reaches zero, detach the VF's PCI device from the PD's IOMMU domain
 * and clear vf->pd.  Caller must hold the VF's lock (asserted below).
 * NOTE(review): the line initializing the local "pd" (presumably
 * pd = qp_grp->vf->pd) is elided in the extracted SOURCE — confirm.
 */
600 static void qp_grp_and_vf_unbind(struct usnic_ib_qp_grp *qp_grp)
602 struct pci_dev *pdev;
603 struct usnic_ib_pd *pd;
605 lockdep_assert_held(&qp_grp->vf->lock);
608 pdev = usnic_vnic_get_pdev(qp_grp->vf->vnic);
/* Last group gone: release the VF's PD binding and IOMMU attachment. */
609 if (--qp_grp->vf->qp_grp_ref_cnt == 0) {
610 qp_grp->vf->pd = NULL;
611 usnic_uiom_detach_dev_from_pd(pd->umem_pd, &pdev->dev);
/* Dump a vNIC resource spec to the driver's debug log. */
static void log_spec(struct usnic_vnic_res_spec *res_spec)
{
	/*
	 * NOTE(review): the buffer declaration was elided in the extracted
	 * SOURCE; 512 bytes is restored here — confirm against the original.
	 */
	char buf[512];

	usnic_vnic_spec_dump(buf, sizeof(buf), res_spec);
	usnic_dbg("%s\n", buf);
}
/*
 * Derive the group id (used as the IB QPN) from a flow's transport
 * identity: the reserved port for ROCE_CUSTOM, or the socket's bound
 * port for IPv4/UDP.  Writes the id through the out-parameter and
 * returns 0 on success.
 * NOTE(review): the extracted SOURCE elides the out-parameter in the
 * signature, case breaks, the sock_get_addr arguments, and the final
 * return; documented from visible lines only.
 */
623 static int qp_grp_id_from_flow(struct usnic_ib_qp_grp_flow *qp_flow,
626 enum usnic_transport_type trans_type = qp_flow->trans_type;
629 switch (trans_type) {
630 case USNIC_TRANSPORT_ROCE_CUSTOM:
631 *id = qp_flow->usnic_roce.port_num;
633 case USNIC_TRANSPORT_IPV4_UDP:
/* Query the bound UDP port from the flow's socket. */
634 err = usnic_transport_sock_get_addr(qp_flow->udp.sock,
641 usnic_err("Unsupported transport %u\n", trans_type);
/*
 * Allocate and initialize a QP group on @vf: validate the resource
 * spec against the transport's minimum, allocate the group and its
 * resource chunks, bind to the VF/PD, install the initial flow from
 * @transport_spec, derive the group id / QPN from that flow, and
 * register the group in sysfs.  Caller must hold vf->lock (asserted
 * below).  On failure the visible labels unwind in reverse order.
 * NOTE(review): the extracted SOURCE elides interior lines (err
 * declaration, several returns and error-label lines, the res-spec
 * log call); documented from visible lines only.
 */
648 struct usnic_ib_qp_grp *
649 usnic_ib_qp_grp_create(struct usnic_fwd_dev *ufdev, struct usnic_ib_vf *vf,
650 struct usnic_ib_pd *pd,
651 struct usnic_vnic_res_spec *res_spec,
652 struct usnic_transport_spec *transport_spec)
654 struct usnic_ib_qp_grp *qp_grp;
656 enum usnic_transport_type transport = transport_spec->trans_type;
657 struct usnic_ib_qp_grp_flow *qp_flow;
659 lockdep_assert_held(&vf->lock);
/* Reject specs below the transport's minimum resource requirements. */
661 err = usnic_vnic_res_spec_satisfied(&min_transport_spec[transport],
664 usnic_err("Spec does not meet miniumum req for transport %d\n",
670 qp_grp = kzalloc(sizeof(*qp_grp), GFP_ATOMIC);
672 usnic_err("Unable to alloc qp_grp - Out of memory\n");
676 qp_grp->res_chunk_list = alloc_res_chunk_list(vf->vnic, res_spec,
678 if (IS_ERR_OR_NULL(qp_grp->res_chunk_list)) {
679 err = qp_grp->res_chunk_list ?
680 PTR_ERR(qp_grp->res_chunk_list) : -ENOMEM;
681 usnic_err("Unable to alloc res for %d with err %d\n",
682 qp_grp->grp_id, err);
683 goto out_free_qp_grp;
/* Attach the VF's device to the PD's IOMMU domain (refcounted). */
686 err = qp_grp_and_vf_bind(vf, pd, qp_grp);
690 INIT_LIST_HEAD(&qp_grp->flows_lst);
691 spin_lock_init(&qp_grp->lock);
692 qp_grp->ufdev = ufdev;
693 qp_grp->state = IB_QPS_RESET;
694 qp_grp->owner_pid = current->pid;
/* Install the initial flow described by the caller's transport spec. */
696 qp_flow = create_and_add_flow(qp_grp, transport_spec);
697 if (IS_ERR_OR_NULL(qp_flow)) {
698 usnic_err("Unable to create and add flow with err %ld\n",
700 err = (qp_flow) ? PTR_ERR(qp_flow) : -EFAULT;
701 goto out_qp_grp_vf_unbind;
/* The flow's transport identity doubles as the group id / IB QPN. */
704 err = qp_grp_id_from_flow(qp_flow, &qp_grp->grp_id);
706 goto out_release_flow;
707 qp_grp->ibqp.qp_num = qp_grp->grp_id;
709 usnic_ib_sysfs_qpn_add(qp_grp);
/* Error unwind, in reverse order of construction. */
714 release_and_remove_flow(qp_flow);
715 out_qp_grp_vf_unbind:
716 qp_grp_and_vf_unbind(qp_grp);
718 free_qp_grp_res(qp_grp->res_chunk_list);
725 void usnic_ib_qp_grp_destroy(struct usnic_ib_qp_grp *qp_grp)
728 WARN_ON(qp_grp->state != IB_QPS_RESET);
729 lockdep_assert_held(&qp_grp->vf->lock);
731 usnic_ib_sysfs_qpn_remove(qp_grp);
732 qp_grp_and_vf_unbind(qp_grp);
733 release_and_remove_all_flows(qp_grp);
734 free_qp_grp_res(qp_grp->res_chunk_list);
738 struct usnic_vnic_res_chunk*
739 usnic_ib_qp_grp_get_chunk(struct usnic_ib_qp_grp *qp_grp,
740 enum usnic_vnic_res_type res_type)
744 for (i = 0; qp_grp->res_chunk_list[i]; i++) {
745 if (qp_grp->res_chunk_list[i]->type == res_type)
746 return qp_grp->res_chunk_list[i];
749 return ERR_PTR(-EINVAL);