/*
 * Copyright (c) 2005-2006 Intel Corporation.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/completion.h>
#include <linux/file.h>
#include <linux/mutex.h>
#include <linux/poll.h>
#include <linux/sched.h>
#include <linux/idr.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/miscdevice.h>
#include <linux/slab.h>
#include <linux/sysctl.h>
#include <linux/module.h>

#include <rdma/rdma_user_cm.h>
#include <rdma/ib_marshall.h>
#include <rdma/rdma_cm.h>
#include <rdma/rdma_cm_ib.h>
MODULE_AUTHOR("Sean Hefty");
MODULE_DESCRIPTION("RDMA Userspace Connection Manager Access");
MODULE_LICENSE("Dual BSD/GPL");
static unsigned int max_backlog = 1024;

static struct ctl_table_header *ucma_ctl_table_hdr;
static struct ctl_table ucma_ctl_table[] = {
	{
		.procname	= "max_backlog",
		.data		= &max_backlog,
		.maxlen		= sizeof max_backlog,
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{ }
};
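/*
 * Usage note: the table above is registered under "net/rdma_ucm" in
 * ucma_init(), so the knob appears as /proc/sys/net/rdma_ucm/max_backlog
 * and caps the listen backlog a user may request in ucma_listen(), e.g.:
 *
 *	echo 2048 > /proc/sys/net/rdma_ucm/max_backlog
 */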
struct ucma_file {
	struct mutex		mut;
	struct file		*filp;
	struct list_head	ctx_list;
	struct list_head	event_list;
	wait_queue_head_t	poll_wait;
};

struct ucma_context {
	int			id;
	struct completion	comp;
	atomic_t		ref;
	int			events_reported;
	int			backlog;
	struct ucma_file	*file;
	struct rdma_cm_id	*cm_id;
	u64			uid;
	struct list_head	list;
	struct list_head	mc_list;
};

struct ucma_multicast {
	struct ucma_context	*ctx;
	int			id;
	int			events_reported;
	u64			uid;
	struct list_head	list;
	struct sockaddr_storage	addr;
};

struct ucma_event {
	struct ucma_context	*ctx;
	struct ucma_multicast	*mc;
	struct list_head	list;
	struct rdma_cm_id	*cm_id;
	struct rdma_ucm_event_resp resp;
};
static DEFINE_MUTEX(mut);
static DEFINE_IDR(ctx_idr);
static DEFINE_IDR(multicast_idr);
static inline struct ucma_context *_ucma_find_context(int id,
						      struct ucma_file *file)
{
	struct ucma_context *ctx;

	ctx = idr_find(&ctx_idr, id);
	if (!ctx)
		ctx = ERR_PTR(-ENOENT);
	else if (ctx->file != file)
		ctx = ERR_PTR(-EINVAL);
	return ctx;
}

static struct ucma_context *ucma_get_ctx(struct ucma_file *file, int id)
{
	struct ucma_context *ctx;

	mutex_lock(&mut);
	ctx = _ucma_find_context(id, file);
	if (!IS_ERR(ctx))
		atomic_inc(&ctx->ref);
	mutex_unlock(&mut);
	return ctx;
}
static void ucma_put_ctx(struct ucma_context *ctx)
{
	if (atomic_dec_and_test(&ctx->ref))
		complete(&ctx->comp);
}
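/*
 * Lifetime sketch: a context starts with one reference held by its creator
 * (ucma_alloc_ctx() sets ref to 1); command handlers take a temporary
 * reference via ucma_get_ctx()/ucma_put_ctx().  Teardown drops the creation
 * reference and then sleeps on ctx->comp, which fires when the last
 * ucma_put_ctx() brings the count to zero:
 *
 *	ucma_put_ctx(ctx);			(drop creation reference)
 *	wait_for_completion(&ctx->comp);	(wait out concurrent users)
 *	ucma_free_ctx(ctx);
 */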
static struct ucma_context *ucma_alloc_ctx(struct ucma_file *file)
{
	struct ucma_context *ctx;
	int ret;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return NULL;

	atomic_set(&ctx->ref, 1);
	init_completion(&ctx->comp);
	INIT_LIST_HEAD(&ctx->mc_list);
	ctx->file = file;

	do {
		ret = idr_pre_get(&ctx_idr, GFP_KERNEL);
		if (!ret)
			goto error;

		mutex_lock(&mut);
		ret = idr_get_new(&ctx_idr, ctx, &ctx->id);
		mutex_unlock(&mut);
	} while (ret == -EAGAIN);

	if (ret)
		goto error;

	list_add_tail(&ctx->list, &file->ctx_list);
	return ctx;

error:
	kfree(ctx);
	return NULL;
}
static struct ucma_multicast* ucma_alloc_multicast(struct ucma_context *ctx)
{
	struct ucma_multicast *mc;
	int ret;

	mc = kzalloc(sizeof(*mc), GFP_KERNEL);
	if (!mc)
		return NULL;

	do {
		ret = idr_pre_get(&multicast_idr, GFP_KERNEL);
		if (!ret)
			goto error;

		mutex_lock(&mut);
		ret = idr_get_new(&multicast_idr, mc, &mc->id);
		mutex_unlock(&mut);
	} while (ret == -EAGAIN);

	if (ret)
		goto error;

	mc->ctx = ctx;
	list_add_tail(&mc->list, &ctx->mc_list);
	return mc;

error:
	kfree(mc);
	return NULL;
}
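/*
 * Both allocators above use the two-step IDR API of this era:
 * idr_pre_get() preloads memory outside the lock, and idr_get_new()
 * assigns the id under "mut".  idr_get_new() returns -EAGAIN if the
 * preloaded memory was consumed by a racing allocation, hence the
 * retry loops.
 */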
static void ucma_copy_conn_event(struct rdma_ucm_conn_param *dst,
				 struct rdma_conn_param *src)
{
	if (src->private_data_len)
		memcpy(dst->private_data, src->private_data,
		       src->private_data_len);
	dst->private_data_len = src->private_data_len;
	dst->responder_resources = src->responder_resources;
	dst->initiator_depth = src->initiator_depth;
	dst->flow_control = src->flow_control;
	dst->retry_count = src->retry_count;
	dst->rnr_retry_count = src->rnr_retry_count;
	dst->srq = src->srq;
	dst->qp_num = src->qp_num;
}
static void ucma_copy_ud_event(struct rdma_ucm_ud_param *dst,
			       struct rdma_ud_param *src)
{
	if (src->private_data_len)
		memcpy(dst->private_data, src->private_data,
		       src->private_data_len);
	dst->private_data_len = src->private_data_len;
	ib_copy_ah_attr_to_user(&dst->ah_attr, &src->ah_attr);
	dst->qp_num = src->qp_num;
	dst->qkey = src->qkey;
}
static void ucma_set_event_context(struct ucma_context *ctx,
				   struct rdma_cm_event *event,
				   struct ucma_event *uevent)
{
	uevent->ctx = ctx;
	switch (event->event) {
	case RDMA_CM_EVENT_MULTICAST_JOIN:
	case RDMA_CM_EVENT_MULTICAST_ERROR:
		uevent->mc = (struct ucma_multicast *)
			     event->param.ud.private_data;
		uevent->resp.uid = uevent->mc->uid;
		uevent->resp.id = uevent->mc->id;
		break;
	default:
		uevent->resp.uid = ctx->uid;
		uevent->resp.id = ctx->id;
		break;
	}
}
static int ucma_event_handler(struct rdma_cm_id *cm_id,
			      struct rdma_cm_event *event)
{
	struct ucma_event *uevent;
	struct ucma_context *ctx = cm_id->context;
	int ret = 0;

	uevent = kzalloc(sizeof(*uevent), GFP_KERNEL);
	if (!uevent)
		return event->event == RDMA_CM_EVENT_CONNECT_REQUEST;

	mutex_lock(&ctx->file->mut);
	uevent->cm_id = cm_id;
	ucma_set_event_context(ctx, event, uevent);
	uevent->resp.event = event->event;
	uevent->resp.status = event->status;
	if (cm_id->qp_type == IB_QPT_UD)
		ucma_copy_ud_event(&uevent->resp.param.ud, &event->param.ud);
	else
		ucma_copy_conn_event(&uevent->resp.param.conn,
				     &event->param.conn);

	if (event->event == RDMA_CM_EVENT_CONNECT_REQUEST) {
		if (!ctx->backlog) {
			ret = -ENOMEM;
			kfree(uevent);
			goto out;
		}
		ctx->backlog--;
	} else if (!ctx->uid) {
		/*
		 * We ignore events for new connections until userspace has set
		 * their context.  This can only happen if an error occurs on a
		 * new connection before the user accepts it.  This is okay,
		 * since the accept will just fail later.
		 */
		kfree(uevent);
		goto out;
	}

	list_add_tail(&uevent->list, &ctx->file->event_list);
	wake_up_interruptible(&ctx->file->poll_wait);
out:
	mutex_unlock(&ctx->file->mut);
	return ret;
}
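/*
 * Note on the return value: a nonzero return from an rdma_cm event handler
 * tells the RDMA CM to destroy the id that generated the event.  That is
 * why an allocation failure above returns nonzero only for CONNECT_REQUEST
 * events (the new, never-reported id is simply destroyed), and why an
 * exhausted listen backlog reports -ENOMEM to the same effect.
 */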
static ssize_t ucma_get_event(struct ucma_file *file, const char __user *inbuf,
			      int in_len, int out_len)
{
	struct ucma_context *ctx;
	struct rdma_ucm_get_event cmd;
	struct ucma_event *uevent;
	int ret = 0;

	if (out_len < sizeof uevent->resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	mutex_lock(&file->mut);
	while (list_empty(&file->event_list)) {
		mutex_unlock(&file->mut);

		if (file->filp->f_flags & O_NONBLOCK)
			return -EAGAIN;

		if (wait_event_interruptible(file->poll_wait,
					     !list_empty(&file->event_list)))
			return -ERESTARTSYS;

		mutex_lock(&file->mut);
	}

	uevent = list_entry(file->event_list.next, struct ucma_event, list);

	if (uevent->resp.event == RDMA_CM_EVENT_CONNECT_REQUEST) {
		ctx = ucma_alloc_ctx(file);
		if (!ctx) {
			ret = -ENOMEM;
			goto done;
		}
		uevent->ctx->backlog++;
		ctx->cm_id = uevent->cm_id;
		ctx->cm_id->context = ctx;
		uevent->resp.id = ctx->id;
	}

	if (copy_to_user((void __user *)(unsigned long)cmd.response,
			 &uevent->resp, sizeof uevent->resp)) {
		ret = -EFAULT;
		goto done;
	}

	list_del(&uevent->list);
	uevent->ctx->events_reported++;
	if (uevent->mc)
		uevent->mc->events_reported++;
	kfree(uevent);
done:
	mutex_unlock(&file->mut);
	return ret;
}
static int ucma_get_qp_type(struct rdma_ucm_create_id *cmd, enum ib_qp_type *qp_type)
{
	switch (cmd->ps) {
	case RDMA_PS_TCP:
		*qp_type = IB_QPT_RC;
		return 0;
	case RDMA_PS_UDP:
	case RDMA_PS_IPOIB:
		*qp_type = IB_QPT_UD;
		return 0;
	case RDMA_PS_IB:
		*qp_type = cmd->qp_type;
		return 0;
	default:
		return -EINVAL;
	}
}
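/*
 * The port space thus selects the QP type: RDMA_PS_TCP ids drive RC QPs,
 * RDMA_PS_UDP and RDMA_PS_IPOIB drive UD QPs, and RDMA_PS_IB leaves the
 * choice to userspace via cmd->qp_type.
 */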
static ssize_t ucma_create_id(struct ucma_file *file, const char __user *inbuf,
			      int in_len, int out_len)
{
	struct rdma_ucm_create_id cmd;
	struct rdma_ucm_create_id_resp resp;
	struct ucma_context *ctx;
	enum ib_qp_type qp_type;
	int ret;

	if (out_len < sizeof(resp))
		return -ENOSPC;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	ret = ucma_get_qp_type(&cmd, &qp_type);
	if (ret)
		return ret;

	mutex_lock(&file->mut);
	ctx = ucma_alloc_ctx(file);
	mutex_unlock(&file->mut);
	if (!ctx)
		return -ENOMEM;

	ctx->uid = cmd.uid;
	ctx->cm_id = rdma_create_id(ucma_event_handler, ctx, cmd.ps, qp_type);
	if (IS_ERR(ctx->cm_id)) {
		ret = PTR_ERR(ctx->cm_id);
		goto err1;
	}

	resp.id = ctx->id;
	if (copy_to_user((void __user *)(unsigned long)cmd.response,
			 &resp, sizeof(resp))) {
		ret = -EFAULT;
		goto err2;
	}
	return 0;

err2:
	rdma_destroy_id(ctx->cm_id);
err1:
	mutex_lock(&mut);
	idr_remove(&ctx_idr, ctx->id);
	mutex_unlock(&mut);
	return ret;
}
static void ucma_cleanup_multicast(struct ucma_context *ctx)
{
	struct ucma_multicast *mc, *tmp;

	mutex_lock(&mut);
	list_for_each_entry_safe(mc, tmp, &ctx->mc_list, list) {
		list_del(&mc->list);
		idr_remove(&multicast_idr, mc->id);
		kfree(mc);
	}
	mutex_unlock(&mut);
}

static void ucma_cleanup_mc_events(struct ucma_multicast *mc)
{
	struct ucma_event *uevent, *tmp;

	list_for_each_entry_safe(uevent, tmp, &mc->ctx->file->event_list, list) {
		if (uevent->mc != mc)
			continue;

		list_del(&uevent->list);
		kfree(uevent);
	}
}

/*
 * We cannot hold file->mut when calling rdma_destroy_id() or we can
 * deadlock.  We also acquire file->mut in ucma_event_handler(), and
 * rdma_destroy_id() will wait until all callbacks have completed.
 */
static int ucma_free_ctx(struct ucma_context *ctx)
{
	int events_reported;
	struct ucma_event *uevent, *tmp;
	LIST_HEAD(list);

	/* No new events will be generated after destroying the id. */
	rdma_destroy_id(ctx->cm_id);

	ucma_cleanup_multicast(ctx);

	/* Cleanup events not yet reported to the user. */
	mutex_lock(&ctx->file->mut);
	list_for_each_entry_safe(uevent, tmp, &ctx->file->event_list, list) {
		if (uevent->ctx == ctx)
			list_move_tail(&uevent->list, &list);
	}
	list_del(&ctx->list);
	mutex_unlock(&ctx->file->mut);

	list_for_each_entry_safe(uevent, tmp, &list, list) {
		list_del(&uevent->list);
		if (uevent->resp.event == RDMA_CM_EVENT_CONNECT_REQUEST)
			rdma_destroy_id(uevent->cm_id);
		kfree(uevent);
	}

	events_reported = ctx->events_reported;
	kfree(ctx);
	return events_reported;
}
static ssize_t ucma_destroy_id(struct ucma_file *file, const char __user *inbuf,
			       int in_len, int out_len)
{
	struct rdma_ucm_destroy_id cmd;
	struct rdma_ucm_destroy_id_resp resp;
	struct ucma_context *ctx;
	int ret = 0;

	if (out_len < sizeof(resp))
		return -ENOSPC;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	mutex_lock(&mut);
	ctx = _ucma_find_context(cmd.id, file);
	if (!IS_ERR(ctx))
		idr_remove(&ctx_idr, ctx->id);
	mutex_unlock(&mut);

	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	ucma_put_ctx(ctx);
	wait_for_completion(&ctx->comp);
	resp.events_reported = ucma_free_ctx(ctx);

	if (copy_to_user((void __user *)(unsigned long)cmd.response,
			 &resp, sizeof(resp)))
		ret = -EFAULT;

	return ret;
}
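/*
 * Teardown order matters here: the id is first unpublished from ctx_idr so
 * no new ucma_get_ctx() lookups can succeed, then the creation reference is
 * dropped and the completion waits out in-flight handlers, and only then
 * does ucma_free_ctx() destroy the cm_id and reap unreported events.
 */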
static ssize_t ucma_bind_addr(struct ucma_file *file, const char __user *inbuf,
			      int in_len, int out_len)
{
	struct rdma_ucm_bind_addr cmd;
	struct ucma_context *ctx;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	ret = rdma_bind_addr(ctx->cm_id, (struct sockaddr *) &cmd.addr);
	ucma_put_ctx(ctx);
	return ret;
}
static ssize_t ucma_resolve_addr(struct ucma_file *file,
				 const char __user *inbuf,
				 int in_len, int out_len)
{
	struct rdma_ucm_resolve_addr cmd;
	struct ucma_context *ctx;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	ret = rdma_resolve_addr(ctx->cm_id, (struct sockaddr *) &cmd.src_addr,
				(struct sockaddr *) &cmd.dst_addr,
				cmd.timeout_ms);
	ucma_put_ctx(ctx);
	return ret;
}
static ssize_t ucma_resolve_route(struct ucma_file *file,
				  const char __user *inbuf,
				  int in_len, int out_len)
{
	struct rdma_ucm_resolve_route cmd;
	struct ucma_context *ctx;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	ret = rdma_resolve_route(ctx->cm_id, cmd.timeout_ms);
	ucma_put_ctx(ctx);
	return ret;
}
static void ucma_copy_ib_route(struct rdma_ucm_query_route_resp *resp,
			       struct rdma_route *route)
{
	struct rdma_dev_addr *dev_addr;

	resp->num_paths = route->num_paths;
	switch (route->num_paths) {
	case 0:
		dev_addr = &route->addr.dev_addr;
		rdma_addr_get_dgid(dev_addr,
				   (union ib_gid *) &resp->ib_route[0].dgid);
		rdma_addr_get_sgid(dev_addr,
				   (union ib_gid *) &resp->ib_route[0].sgid);
		resp->ib_route[0].pkey = cpu_to_be16(ib_addr_get_pkey(dev_addr));
		break;
	case 2:
		ib_copy_path_rec_to_user(&resp->ib_route[1],
					 &route->path_rec[1]);
		/* fall through */
	case 1:
		ib_copy_path_rec_to_user(&resp->ib_route[0],
					 &route->path_rec[0]);
		break;
	default:
		break;
	}
}
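/*
 * num_paths encodes how far route resolution got: 0 means only device
 * addressing is known (the GIDs and pkey are synthesized from dev_addr),
 * 1 means a single resolved path record, and 2 additionally reports an
 * alternate path in ib_route[1].  The IBoE variant below follows the same
 * scheme.
 */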
static void ucma_copy_iboe_route(struct rdma_ucm_query_route_resp *resp,
				 struct rdma_route *route)
{
	struct rdma_dev_addr *dev_addr;
	struct net_device *dev;
	u16 vid = 0;

	resp->num_paths = route->num_paths;
	switch (route->num_paths) {
	case 0:
		dev_addr = &route->addr.dev_addr;
		dev = dev_get_by_index(&init_net, dev_addr->bound_dev_if);
		if (dev) {
			vid = rdma_vlan_dev_vlan_id(dev);
			dev_put(dev);
		}

		iboe_mac_vlan_to_ll((union ib_gid *) &resp->ib_route[0].dgid,
				    dev_addr->dst_dev_addr, vid);
		iboe_addr_get_sgid(dev_addr,
				   (union ib_gid *) &resp->ib_route[0].sgid);
		resp->ib_route[0].pkey = cpu_to_be16(0xffff);
		break;
	case 2:
		ib_copy_path_rec_to_user(&resp->ib_route[1],
					 &route->path_rec[1]);
		/* fall through */
	case 1:
		ib_copy_path_rec_to_user(&resp->ib_route[0],
					 &route->path_rec[0]);
		break;
	default:
		break;
	}
}
static void ucma_copy_iw_route(struct rdma_ucm_query_route_resp *resp,
			       struct rdma_route *route)
{
	struct rdma_dev_addr *dev_addr;

	dev_addr = &route->addr.dev_addr;
	rdma_addr_get_dgid(dev_addr, (union ib_gid *) &resp->ib_route[0].dgid);
	rdma_addr_get_sgid(dev_addr, (union ib_gid *) &resp->ib_route[0].sgid);
}
static ssize_t ucma_query_route(struct ucma_file *file,
				const char __user *inbuf,
				int in_len, int out_len)
{
	struct rdma_ucm_query_route cmd;
	struct rdma_ucm_query_route_resp resp;
	struct ucma_context *ctx;
	struct sockaddr *addr;
	int ret = 0;

	if (out_len < sizeof(resp))
		return -ENOSPC;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	memset(&resp, 0, sizeof resp);
	addr = (struct sockaddr *) &ctx->cm_id->route.addr.src_addr;
	memcpy(&resp.src_addr, addr, addr->sa_family == AF_INET ?
				     sizeof(struct sockaddr_in) :
				     sizeof(struct sockaddr_in6));
	addr = (struct sockaddr *) &ctx->cm_id->route.addr.dst_addr;
	memcpy(&resp.dst_addr, addr, addr->sa_family == AF_INET ?
				     sizeof(struct sockaddr_in) :
				     sizeof(struct sockaddr_in6));
	if (!ctx->cm_id->device)
		goto out;

	resp.node_guid = (__force __u64) ctx->cm_id->device->node_guid;
	resp.port_num = ctx->cm_id->port_num;
	switch (rdma_node_get_transport(ctx->cm_id->device->node_type)) {
	case RDMA_TRANSPORT_IB:
		switch (rdma_port_get_link_layer(ctx->cm_id->device,
			ctx->cm_id->port_num)) {
		case IB_LINK_LAYER_INFINIBAND:
			ucma_copy_ib_route(&resp, &ctx->cm_id->route);
			break;
		case IB_LINK_LAYER_ETHERNET:
			ucma_copy_iboe_route(&resp, &ctx->cm_id->route);
			break;
		default:
			break;
		}
		break;
	case RDMA_TRANSPORT_IWARP:
		ucma_copy_iw_route(&resp, &ctx->cm_id->route);
		break;
	default:
		break;
	}

out:
	if (copy_to_user((void __user *)(unsigned long)cmd.response,
			 &resp, sizeof(resp)))
		ret = -EFAULT;

	ucma_put_ctx(ctx);
	return ret;
}
static void ucma_copy_conn_param(struct rdma_conn_param *dst,
				 struct rdma_ucm_conn_param *src)
{
	dst->private_data = src->private_data;
	dst->private_data_len = src->private_data_len;
	dst->responder_resources = src->responder_resources;
	dst->initiator_depth = src->initiator_depth;
	dst->flow_control = src->flow_control;
	dst->retry_count = src->retry_count;
	dst->rnr_retry_count = src->rnr_retry_count;
	dst->srq = src->srq;
	dst->qp_num = src->qp_num;
}
static ssize_t ucma_connect(struct ucma_file *file, const char __user *inbuf,
			    int in_len, int out_len)
{
	struct rdma_ucm_connect cmd;
	struct rdma_conn_param conn_param;
	struct ucma_context *ctx;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	if (!cmd.conn_param.valid)
		return -EINVAL;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	ucma_copy_conn_param(&conn_param, &cmd.conn_param);
	ret = rdma_connect(ctx->cm_id, &conn_param);
	ucma_put_ctx(ctx);
	return ret;
}
static ssize_t ucma_listen(struct ucma_file *file, const char __user *inbuf,
			   int in_len, int out_len)
{
	struct rdma_ucm_listen cmd;
	struct ucma_context *ctx;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	ctx->backlog = cmd.backlog > 0 && cmd.backlog < max_backlog ?
		       cmd.backlog : max_backlog;
	ret = rdma_listen(ctx->cm_id, ctx->backlog);
	ucma_put_ctx(ctx);
	return ret;
}
static ssize_t ucma_accept(struct ucma_file *file, const char __user *inbuf,
			   int in_len, int out_len)
{
	struct rdma_ucm_accept cmd;
	struct rdma_conn_param conn_param;
	struct ucma_context *ctx;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	if (cmd.conn_param.valid) {
		ucma_copy_conn_param(&conn_param, &cmd.conn_param);
		mutex_lock(&file->mut);
		ret = rdma_accept(ctx->cm_id, &conn_param);
		if (!ret)
			ctx->uid = cmd.uid;
		mutex_unlock(&file->mut);
	} else
		ret = rdma_accept(ctx->cm_id, NULL);

	ucma_put_ctx(ctx);
	return ret;
}
static ssize_t ucma_reject(struct ucma_file *file, const char __user *inbuf,
			   int in_len, int out_len)
{
	struct rdma_ucm_reject cmd;
	struct ucma_context *ctx;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	ret = rdma_reject(ctx->cm_id, cmd.private_data, cmd.private_data_len);
	ucma_put_ctx(ctx);
	return ret;
}
static ssize_t ucma_disconnect(struct ucma_file *file, const char __user *inbuf,
			       int in_len, int out_len)
{
	struct rdma_ucm_disconnect cmd;
	struct ucma_context *ctx;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	ret = rdma_disconnect(ctx->cm_id);
	ucma_put_ctx(ctx);
	return ret;
}
static ssize_t ucma_init_qp_attr(struct ucma_file *file,
				 const char __user *inbuf,
				 int in_len, int out_len)
{
	struct rdma_ucm_init_qp_attr cmd;
	struct ib_uverbs_qp_attr resp;
	struct ucma_context *ctx;
	struct ib_qp_attr qp_attr;
	int ret;

	if (out_len < sizeof(resp))
		return -ENOSPC;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	resp.qp_attr_mask = 0;
	memset(&qp_attr, 0, sizeof qp_attr);
	qp_attr.qp_state = cmd.qp_state;
	ret = rdma_init_qp_attr(ctx->cm_id, &qp_attr, &resp.qp_attr_mask);
	if (ret)
		goto out;

	ib_copy_qp_attr_to_user(&resp, &qp_attr);
	if (copy_to_user((void __user *)(unsigned long)cmd.response,
			 &resp, sizeof(resp)))
		ret = -EFAULT;

out:
	ucma_put_ctx(ctx);
	return ret;
}
static int ucma_set_option_id(struct ucma_context *ctx, int optname,
			      void *optval, size_t optlen)
{
	int ret = 0;

	switch (optname) {
	case RDMA_OPTION_ID_TOS:
		if (optlen != sizeof(u8)) {
			ret = -EINVAL;
			break;
		}
		rdma_set_service_type(ctx->cm_id, *((u8 *) optval));
		break;
	case RDMA_OPTION_ID_REUSEADDR:
		if (optlen != sizeof(int)) {
			ret = -EINVAL;
			break;
		}
		ret = rdma_set_reuseaddr(ctx->cm_id, *((int *) optval) ? 1 : 0);
		break;
	case RDMA_OPTION_ID_AFONLY:
		if (optlen != sizeof(int)) {
			ret = -EINVAL;
			break;
		}
		ret = rdma_set_afonly(ctx->cm_id, *((int *) optval) ? 1 : 0);
		break;
	default:
		ret = -ENOSYS;
	}

	return ret;
}
static int ucma_set_ib_path(struct ucma_context *ctx,
			    struct ib_path_rec_data *path_data, size_t optlen)
{
	struct ib_sa_path_rec sa_path;
	struct rdma_cm_event event;
	int ret;

	if (optlen % sizeof(*path_data))
		return -EINVAL;

	for (; optlen; optlen -= sizeof(*path_data), path_data++) {
		if (path_data->flags == (IB_PATH_GMP | IB_PATH_PRIMARY |
					 IB_PATH_BIDIRECTIONAL))
			break;
	}

	if (!optlen)
		return -EINVAL;

	ib_sa_unpack_path(path_data->path_rec, &sa_path);
	ret = rdma_set_ib_paths(ctx->cm_id, &sa_path, 1);
	if (ret)
		return ret;

	memset(&event, 0, sizeof event);
	event.event = RDMA_CM_EVENT_ROUTE_RESOLVED;
	return ucma_event_handler(ctx->cm_id, &event);
}
static int ucma_set_option_ib(struct ucma_context *ctx, int optname,
			      void *optval, size_t optlen)
{
	int ret;

	switch (optname) {
	case RDMA_OPTION_IB_PATH:
		ret = ucma_set_ib_path(ctx, optval, optlen);
		break;
	default:
		ret = -ENOSYS;
	}

	return ret;
}

static int ucma_set_option_level(struct ucma_context *ctx, int level,
				 int optname, void *optval, size_t optlen)
{
	int ret;

	switch (level) {
	case RDMA_OPTION_ID:
		ret = ucma_set_option_id(ctx, optname, optval, optlen);
		break;
	case RDMA_OPTION_IB:
		ret = ucma_set_option_ib(ctx, optname, optval, optlen);
		break;
	default:
		ret = -ENOSYS;
	}

	return ret;
}
static ssize_t ucma_set_option(struct ucma_file *file, const char __user *inbuf,
			       int in_len, int out_len)
{
	struct rdma_ucm_set_option cmd;
	struct ucma_context *ctx;
	void *optval;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	optval = memdup_user((void __user *) (unsigned long) cmd.optval,
			     cmd.optlen);
	if (IS_ERR(optval)) {
		ret = PTR_ERR(optval);
		goto out;
	}

	ret = ucma_set_option_level(ctx, cmd.level, cmd.optname, optval,
				    cmd.optlen);
	kfree(optval);

out:
	ucma_put_ctx(ctx);
	return ret;
}
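/*
 * Userspace sketch (assuming the ABI structs from <rdma/rdma_user_cm.h>):
 * setting the TOS on an id boils down to a single SET_OPTION write carrying
 * level RDMA_OPTION_ID, optname RDMA_OPTION_ID_TOS and a one-byte payload:
 *
 *	__u8 tos = 0x10;
 *	struct rdma_ucm_set_option cmd = {
 *		.optval  = (__u64) (unsigned long) &tos,
 *		.id      = id,
 *		.level   = RDMA_OPTION_ID,
 *		.optname = RDMA_OPTION_ID_TOS,
 *		.optlen  = sizeof(tos),
 *	};
 *
 * prefixed with a struct rdma_ucm_cmd_hdr and written to the device file
 * (see the write-protocol note before ucma_poll() below).
 */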
static ssize_t ucma_notify(struct ucma_file *file, const char __user *inbuf,
			   int in_len, int out_len)
{
	struct rdma_ucm_notify cmd;
	struct ucma_context *ctx;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	ret = rdma_notify(ctx->cm_id, (enum ib_event_type) cmd.event);
	ucma_put_ctx(ctx);
	return ret;
}
static ssize_t ucma_join_multicast(struct ucma_file *file,
				   const char __user *inbuf,
				   int in_len, int out_len)
{
	struct rdma_ucm_join_mcast cmd;
	struct rdma_ucm_create_id_resp resp;
	struct ucma_context *ctx;
	struct ucma_multicast *mc;
	int ret;

	if (out_len < sizeof(resp))
		return -ENOSPC;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	mutex_lock(&file->mut);
	mc = ucma_alloc_multicast(ctx);
	if (!mc) {
		ret = -ENOMEM;
		goto err1;
	}

	mc->uid = cmd.uid;
	memcpy(&mc->addr, &cmd.addr, sizeof cmd.addr);
	ret = rdma_join_multicast(ctx->cm_id, (struct sockaddr *) &mc->addr, mc);
	if (ret)
		goto err2;

	resp.id = mc->id;
	if (copy_to_user((void __user *)(unsigned long)cmd.response,
			 &resp, sizeof(resp))) {
		ret = -EFAULT;
		goto err3;
	}

	mutex_unlock(&file->mut);
	ucma_put_ctx(ctx);
	return 0;

err3:
	rdma_leave_multicast(ctx->cm_id, (struct sockaddr *) &mc->addr);
	ucma_cleanup_mc_events(mc);
err2:
	mutex_lock(&mut);
	idr_remove(&multicast_idr, mc->id);
	mutex_unlock(&mut);
	list_del(&mc->list);
	kfree(mc);
err1:
	mutex_unlock(&file->mut);
	ucma_put_ctx(ctx);
	return ret;
}
static ssize_t ucma_leave_multicast(struct ucma_file *file,
				    const char __user *inbuf,
				    int in_len, int out_len)
{
	struct rdma_ucm_destroy_id cmd;
	struct rdma_ucm_destroy_id_resp resp;
	struct ucma_multicast *mc;
	int ret = 0;

	if (out_len < sizeof(resp))
		return -ENOSPC;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	mutex_lock(&mut);
	mc = idr_find(&multicast_idr, cmd.id);
	if (!mc)
		mc = ERR_PTR(-ENOENT);
	else if (mc->ctx->file != file)
		mc = ERR_PTR(-EINVAL);
	else {
		idr_remove(&multicast_idr, mc->id);
		atomic_inc(&mc->ctx->ref);
	}
	mutex_unlock(&mut);

	if (IS_ERR(mc)) {
		ret = PTR_ERR(mc);
		goto out;
	}

	rdma_leave_multicast(mc->ctx->cm_id, (struct sockaddr *) &mc->addr);
	mutex_lock(&mc->ctx->file->mut);
	ucma_cleanup_mc_events(mc);
	list_del(&mc->list);
	mutex_unlock(&mc->ctx->file->mut);

	ucma_put_ctx(mc->ctx);
	resp.events_reported = mc->events_reported;
	kfree(mc);

	if (copy_to_user((void __user *)(unsigned long)cmd.response,
			 &resp, sizeof(resp)))
		ret = -EFAULT;
out:
	return ret;
}
static void ucma_lock_files(struct ucma_file *file1, struct ucma_file *file2)
{
	/* Acquire mutexes based on pointer comparison to prevent deadlock. */
	if (file1 < file2) {
		mutex_lock(&file1->mut);
		mutex_lock(&file2->mut);
	} else {
		mutex_lock(&file2->mut);
		mutex_lock(&file1->mut);
	}
}

static void ucma_unlock_files(struct ucma_file *file1, struct ucma_file *file2)
{
	if (file1 < file2) {
		mutex_unlock(&file2->mut);
		mutex_unlock(&file1->mut);
	} else {
		mutex_unlock(&file1->mut);
		mutex_unlock(&file2->mut);
	}
}
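/*
 * Example of the ordering rule above: if one ucma_migrate_id() call works
 * on files A and B while another concurrently works on B and A, both
 * callers lock min(A, B)->mut first, so neither can hold one mutex while
 * waiting on the other, and the classic AB/BA deadlock cannot occur.
 */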
static void ucma_move_events(struct ucma_context *ctx, struct ucma_file *file)
{
	struct ucma_event *uevent, *tmp;

	list_for_each_entry_safe(uevent, tmp, &ctx->file->event_list, list)
		if (uevent->ctx == ctx)
			list_move_tail(&uevent->list, &file->event_list);
}
static ssize_t ucma_migrate_id(struct ucma_file *new_file,
			       const char __user *inbuf,
			       int in_len, int out_len)
{
	struct rdma_ucm_migrate_id cmd;
	struct rdma_ucm_migrate_resp resp;
	struct ucma_context *ctx;
	struct fd f;
	struct ucma_file *cur_file;
	int ret = 0;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	/* Get current fd to protect against it being closed */
	f = fdget(cmd.fd);
	if (!f.file)
		return -ENOENT;

	/* Validate current fd and prevent destruction of id. */
	ctx = ucma_get_ctx(f.file->private_data, cmd.id);
	if (IS_ERR(ctx)) {
		ret = PTR_ERR(ctx);
		goto file_put;
	}

	cur_file = ctx->file;
	if (cur_file == new_file) {
		resp.events_reported = ctx->events_reported;
		goto response;
	}

	/*
	 * Migrate events between fd's, maintaining order, and avoiding new
	 * events being added before existing events.
	 */
	ucma_lock_files(cur_file, new_file);
	mutex_lock(&mut);

	list_move_tail(&ctx->list, &new_file->ctx_list);
	ucma_move_events(ctx, new_file);
	ctx->file = new_file;
	resp.events_reported = ctx->events_reported;

	mutex_unlock(&mut);
	ucma_unlock_files(cur_file, new_file);

response:
	if (copy_to_user((void __user *)(unsigned long)cmd.response,
			 &resp, sizeof(resp)))
		ret = -EFAULT;

	ucma_put_ctx(ctx);
file_put:
	fdput(f);
	return ret;
}
static ssize_t (*ucma_cmd_table[])(struct ucma_file *file,
				   const char __user *inbuf,
				   int in_len, int out_len) = {
	[RDMA_USER_CM_CMD_CREATE_ID]	= ucma_create_id,
	[RDMA_USER_CM_CMD_DESTROY_ID]	= ucma_destroy_id,
	[RDMA_USER_CM_CMD_BIND_ADDR]	= ucma_bind_addr,
	[RDMA_USER_CM_CMD_RESOLVE_ADDR]	= ucma_resolve_addr,
	[RDMA_USER_CM_CMD_RESOLVE_ROUTE]= ucma_resolve_route,
	[RDMA_USER_CM_CMD_QUERY_ROUTE]	= ucma_query_route,
	[RDMA_USER_CM_CMD_CONNECT]	= ucma_connect,
	[RDMA_USER_CM_CMD_LISTEN]	= ucma_listen,
	[RDMA_USER_CM_CMD_ACCEPT]	= ucma_accept,
	[RDMA_USER_CM_CMD_REJECT]	= ucma_reject,
	[RDMA_USER_CM_CMD_DISCONNECT]	= ucma_disconnect,
	[RDMA_USER_CM_CMD_INIT_QP_ATTR]	= ucma_init_qp_attr,
	[RDMA_USER_CM_CMD_GET_EVENT]	= ucma_get_event,
	[RDMA_USER_CM_CMD_GET_OPTION]	= NULL,
	[RDMA_USER_CM_CMD_SET_OPTION]	= ucma_set_option,
	[RDMA_USER_CM_CMD_NOTIFY]	= ucma_notify,
	[RDMA_USER_CM_CMD_JOIN_MCAST]	= ucma_join_multicast,
	[RDMA_USER_CM_CMD_LEAVE_MCAST]	= ucma_leave_multicast,
	[RDMA_USER_CM_CMD_MIGRATE_ID]	= ucma_migrate_id
};
static ssize_t ucma_write(struct file *filp, const char __user *buf,
			  size_t len, loff_t *pos)
{
	struct ucma_file *file = filp->private_data;
	struct rdma_ucm_cmd_hdr hdr;
	ssize_t ret;

	if (len < sizeof(hdr))
		return -EINVAL;

	if (copy_from_user(&hdr, buf, sizeof(hdr)))
		return -EFAULT;

	if (hdr.cmd >= ARRAY_SIZE(ucma_cmd_table))
		return -EINVAL;

	if (hdr.in + sizeof(hdr) > len)
		return -EINVAL;

	if (!ucma_cmd_table[hdr.cmd])
		return -ENOSYS;

	ret = ucma_cmd_table[hdr.cmd](file, buf + sizeof(hdr), hdr.in, hdr.out);
	if (!ret)
		ret = len;

	return ret;
}
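/*
 * Write-protocol sketch (field layout per <rdma/rdma_user_cm.h>): each
 * write() carries a struct rdma_ucm_cmd_hdr { __u32 cmd; __u16 in;
 * __u16 out; } followed by hdr.in bytes of command payload.  Any response
 * of up to hdr.out bytes is copied not into the write buffer but to the
 * user address named in the command's own "response" field, which is why
 * every handler receives out_len and validates it against sizeof(resp).
 */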
static unsigned int ucma_poll(struct file *filp, struct poll_table_struct *wait)
{
	struct ucma_file *file = filp->private_data;
	unsigned int mask = 0;

	poll_wait(filp, &file->poll_wait, wait);

	if (!list_empty(&file->event_list))
		mask = POLLIN | POLLRDNORM;

	return mask;
}
/*
 * ucma_open() does not need the BKL:
 *
 *  - no global state is referred to;
 *  - there is no ioctl method to race against;
 *  - no further module initialization is required for open to work
 *    after the device is registered.
 */
static int ucma_open(struct inode *inode, struct file *filp)
{
	struct ucma_file *file;

	file = kmalloc(sizeof *file, GFP_KERNEL);
	if (!file)
		return -ENOMEM;

	INIT_LIST_HEAD(&file->event_list);
	INIT_LIST_HEAD(&file->ctx_list);
	init_waitqueue_head(&file->poll_wait);
	mutex_init(&file->mut);

	filp->private_data = file;
	file->filp = filp;

	return nonseekable_open(inode, filp);
}
static int ucma_close(struct inode *inode, struct file *filp)
{
	struct ucma_file *file = filp->private_data;
	struct ucma_context *ctx, *tmp;

	mutex_lock(&file->mut);
	list_for_each_entry_safe(ctx, tmp, &file->ctx_list, list) {
		mutex_unlock(&file->mut);

		mutex_lock(&mut);
		idr_remove(&ctx_idr, ctx->id);
		mutex_unlock(&mut);

		ucma_free_ctx(ctx);
		mutex_lock(&file->mut);
	}
	mutex_unlock(&file->mut);
	kfree(file);
	return 0;
}
static const struct file_operations ucma_fops = {
	.owner 	 = THIS_MODULE,
	.open 	 = ucma_open,
	.release = ucma_close,
	.write	 = ucma_write,
	.poll    = ucma_poll,
	.llseek	 = no_llseek,
};

static struct miscdevice ucma_misc = {
	.minor		= MISC_DYNAMIC_MINOR,
	.name		= "rdma_cm",
	.nodename	= "infiniband/rdma_cm",
	.mode		= 0666,
	.fops		= &ucma_fops,
};
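/*
 * With devtmpfs, the .nodename above makes the char device appear as
 * /dev/infiniband/rdma_cm; userspace (typically librdmacm) opens this node
 * and speaks the write()-based protocol implemented in ucma_write().
 */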
static ssize_t show_abi_version(struct device *dev,
				struct device_attribute *attr,
				char *buf)
{
	return sprintf(buf, "%d\n", RDMA_USER_CM_ABI_VERSION);
}
static DEVICE_ATTR(abi_version, S_IRUGO, show_abi_version, NULL);
static int __init ucma_init(void)
{
	int ret;

	ret = misc_register(&ucma_misc);
	if (ret)
		return ret;

	ret = device_create_file(ucma_misc.this_device, &dev_attr_abi_version);
	if (ret) {
		printk(KERN_ERR "rdma_ucm: couldn't create abi_version attr\n");
		goto err1;
	}

	ucma_ctl_table_hdr = register_net_sysctl(&init_net, "net/rdma_ucm", ucma_ctl_table);
	if (!ucma_ctl_table_hdr) {
		printk(KERN_ERR "rdma_ucm: couldn't register sysctl paths\n");
		ret = -ENOMEM;
		goto err2;
	}
	return 0;
err2:
	device_remove_file(ucma_misc.this_device, &dev_attr_abi_version);
err1:
	misc_deregister(&ucma_misc);
	return ret;
}
static void __exit ucma_cleanup(void)
{
	unregister_net_sysctl_table(ucma_ctl_table_hdr);
	device_remove_file(ucma_misc.this_device, &dev_attr_abi_version);
	misc_deregister(&ucma_misc);
	idr_destroy(&ctx_idr);
}

module_init(ucma_init);
module_exit(ucma_cleanup);