/*
 * Copyright (c) 2005 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005, 2006, 2007 Cisco Systems.  All rights reserved.
 * Copyright (c) 2005 PathScale, Inc.  All rights reserved.
 * Copyright (c) 2006 Mellanox Technologies.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/file.h>
#include <linux/fs.h>
#include <linux/slab.h>

#include <asm/uaccess.h>

#include "uverbs.h"

struct uverbs_lock_class {
        struct lock_class_key key;
        char name[16];
};

static struct uverbs_lock_class pd_lock_class   = { .name = "PD-uobj" };
static struct uverbs_lock_class mr_lock_class   = { .name = "MR-uobj" };
static struct uverbs_lock_class mw_lock_class   = { .name = "MW-uobj" };
static struct uverbs_lock_class cq_lock_class   = { .name = "CQ-uobj" };
static struct uverbs_lock_class qp_lock_class   = { .name = "QP-uobj" };
static struct uverbs_lock_class ah_lock_class   = { .name = "AH-uobj" };
static struct uverbs_lock_class srq_lock_class  = { .name = "SRQ-uobj" };
static struct uverbs_lock_class xrcd_lock_class = { .name = "XRCD-uobj" };
static struct uverbs_lock_class rule_lock_class = { .name = "RULE-uobj" };

#define INIT_UDATA(udata, ibuf, obuf, ilen, olen)                       \
        do {                                                            \
                (udata)->inbuf  = (void __user *) (ibuf);               \
                (udata)->outbuf = (void __user *) (obuf);               \
                (udata)->inlen  = (ilen);                               \
                (udata)->outlen = (olen);                               \
        } while (0)
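
/*
 * Typical usage (illustrative sketch, mirroring the handlers below):
 * the fixed-size command struct sits at the start of the user buffer
 * and the fixed-size response struct at the start of the user response
 * buffer, so the driver-private udata regions are whatever follows
 * them:
 *
 *      INIT_UDATA(&udata, buf + sizeof cmd,
 *                 (unsigned long) cmd.response + sizeof resp,
 *                 in_len - sizeof cmd, out_len - sizeof resp);
 */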

/*
 * The ib_uobject locking scheme is as follows:
 *
 * - ib_uverbs_idr_lock protects the uverbs idrs themselves, so it
 *   needs to be held during all idr operations.  When an object is
 *   looked up, a reference must be taken on the object's kref before
 *   dropping this lock.
 *
 * - Each object also has an rwsem.  This rwsem must be held for
 *   reading while an operation that uses the object is performed.
 *   For example, while registering an MR, the associated PD's
 *   uobject.mutex must be held for reading.  The rwsem must be held
 *   for writing while initializing or destroying an object.
 *
 * - In addition, each object has a "live" flag.  If this flag is not
 *   set, then lookups of the object will fail even if it is found in
 *   the idr.  This handles a reader that blocks and does not acquire
 *   the rwsem until after the object is destroyed.  The destroy
 *   operation will set the live flag to 0 and then drop the rwsem;
 *   this will allow the reader to acquire the rwsem, see that the
 *   live flag is 0, and then drop the rwsem and its reference to the
 *   object.  The underlying storage will not be freed until the last
 *   reference to the object is dropped.
 */
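
/*
 * Sketch of the resulting read-side pattern (illustrative only, not a
 * handler in this file): the lookup takes the kref and the rwsem for
 * reading and checks the live flag; the caller uses the object and
 * then releases both.
 *
 *      struct ib_pd *pd = idr_read_pd(cmd.pd_handle, file->ucontext);
 *      if (!pd)
 *              return -EINVAL;    -- absent, wrong context, or not live
 *      ... perform the operation that uses pd ...
 *      put_pd_read(pd);           -- up_read() plus kref_put()
 */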

static void init_uobj(struct ib_uobject *uobj, u64 user_handle,
                      struct ib_ucontext *context, struct uverbs_lock_class *c)
{
        uobj->user_handle = user_handle;
        uobj->context     = context;
        kref_init(&uobj->ref);
        init_rwsem(&uobj->mutex);
        lockdep_set_class_and_name(&uobj->mutex, &c->key, c->name);
        uobj->live = 0;
}

static void release_uobj(struct kref *kref)
{
        kfree(container_of(kref, struct ib_uobject, ref));
}

static void put_uobj(struct ib_uobject *uobj)
{
        kref_put(&uobj->ref, release_uobj);
}

static void put_uobj_read(struct ib_uobject *uobj)
{
        up_read(&uobj->mutex);
        put_uobj(uobj);
}

static void put_uobj_write(struct ib_uobject *uobj)
{
        up_write(&uobj->mutex);
        put_uobj(uobj);
}

static int idr_add_uobj(struct idr *idr, struct ib_uobject *uobj)
{
        int ret;

        idr_preload(GFP_KERNEL);
        spin_lock(&ib_uverbs_idr_lock);

        ret = idr_alloc(idr, uobj, 0, 0, GFP_NOWAIT);
        if (ret >= 0)
                uobj->id = ret;

        spin_unlock(&ib_uverbs_idr_lock);
        idr_preload_end();

        return ret < 0 ? ret : 0;
}
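
/*
 * idr_add_uobj() above follows the standard preload pattern for
 * allocating an idr entry under a spinlock: idr_preload() sets up a
 * per-CPU preallocation (and may sleep), so the idr_alloc() inside the
 * lock can use GFP_NOWAIT, and idr_preload_end() releases the
 * preallocation.  Schematically, with a hypothetical idr and lock:
 *
 *      idr_preload(GFP_KERNEL);
 *      spin_lock(&some_lock);
 *      id = idr_alloc(&some_idr, ptr, 0, 0, GFP_NOWAIT);
 *      spin_unlock(&some_lock);
 *      idr_preload_end();
 */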

void idr_remove_uobj(struct idr *idr, struct ib_uobject *uobj)
{
        spin_lock(&ib_uverbs_idr_lock);
        idr_remove(idr, uobj->id);
        spin_unlock(&ib_uverbs_idr_lock);
}

static struct ib_uobject *__idr_get_uobj(struct idr *idr, int id,
                                         struct ib_ucontext *context)
{
        struct ib_uobject *uobj;

        spin_lock(&ib_uverbs_idr_lock);
        uobj = idr_find(idr, id);
        if (uobj) {
                if (uobj->context == context)
                        kref_get(&uobj->ref);
                else
                        uobj = NULL;
        }
        spin_unlock(&ib_uverbs_idr_lock);

        return uobj;
}

static struct ib_uobject *idr_read_uobj(struct idr *idr, int id,
                                        struct ib_ucontext *context, int nested)
{
        struct ib_uobject *uobj;

        uobj = __idr_get_uobj(idr, id, context);
        if (!uobj)
                return NULL;

        if (nested)
                down_read_nested(&uobj->mutex, SINGLE_DEPTH_NESTING);
        else
                down_read(&uobj->mutex);
        if (!uobj->live) {
                put_uobj_read(uobj);
                return NULL;
        }

        return uobj;
}

static struct ib_uobject *idr_write_uobj(struct idr *idr, int id,
                                         struct ib_ucontext *context)
{
        struct ib_uobject *uobj;

        uobj = __idr_get_uobj(idr, id, context);
        if (!uobj)
                return NULL;

        down_write(&uobj->mutex);
        if (!uobj->live) {
                put_uobj_write(uobj);
                return NULL;
        }

        return uobj;
}

static void *idr_read_obj(struct idr *idr, int id, struct ib_ucontext *context,
                          int nested)
{
        struct ib_uobject *uobj;

        uobj = idr_read_uobj(idr, id, context, nested);
        return uobj ? uobj->object : NULL;
}

static struct ib_pd *idr_read_pd(int pd_handle, struct ib_ucontext *context)
{
        return idr_read_obj(&ib_uverbs_pd_idr, pd_handle, context, 0);
}

static void put_pd_read(struct ib_pd *pd)
{
        put_uobj_read(pd->uobject);
}

static struct ib_cq *idr_read_cq(int cq_handle, struct ib_ucontext *context, int nested)
{
        return idr_read_obj(&ib_uverbs_cq_idr, cq_handle, context, nested);
}

static void put_cq_read(struct ib_cq *cq)
{
        put_uobj_read(cq->uobject);
}

static struct ib_ah *idr_read_ah(int ah_handle, struct ib_ucontext *context)
{
        return idr_read_obj(&ib_uverbs_ah_idr, ah_handle, context, 0);
}

static void put_ah_read(struct ib_ah *ah)
{
        put_uobj_read(ah->uobject);
}

static struct ib_qp *idr_read_qp(int qp_handle, struct ib_ucontext *context)
{
        return idr_read_obj(&ib_uverbs_qp_idr, qp_handle, context, 0);
}

static struct ib_qp *idr_write_qp(int qp_handle, struct ib_ucontext *context)
{
        struct ib_uobject *uobj;

        uobj = idr_write_uobj(&ib_uverbs_qp_idr, qp_handle, context);
        return uobj ? uobj->object : NULL;
}

static void put_qp_read(struct ib_qp *qp)
{
        put_uobj_read(qp->uobject);
}

static void put_qp_write(struct ib_qp *qp)
{
        put_uobj_write(qp->uobject);
}

static struct ib_srq *idr_read_srq(int srq_handle, struct ib_ucontext *context)
{
        return idr_read_obj(&ib_uverbs_srq_idr, srq_handle, context, 0);
}

static void put_srq_read(struct ib_srq *srq)
{
        put_uobj_read(srq->uobject);
}

static struct ib_xrcd *idr_read_xrcd(int xrcd_handle, struct ib_ucontext *context,
                                     struct ib_uobject **uobj)
{
        *uobj = idr_read_uobj(&ib_uverbs_xrcd_idr, xrcd_handle, context, 0);
        return *uobj ? (*uobj)->object : NULL;
}

static void put_xrcd_read(struct ib_uobject *uobj)
{
        put_uobj_read(uobj);
}

ssize_t ib_uverbs_get_context(struct ib_uverbs_file *file,
                              const char __user *buf,
                              int in_len, int out_len)
{
        struct ib_uverbs_get_context cmd;
        struct ib_uverbs_get_context_resp resp;
        struct ib_udata udata;
        struct ib_device *ibdev = file->device->ib_dev;
        struct ib_ucontext *ucontext;
        struct file *filp;
        int ret;

        if (out_len < sizeof resp)
                return -ENOSPC;

        if (copy_from_user(&cmd, buf, sizeof cmd))
                return -EFAULT;

        mutex_lock(&file->mutex);

        if (file->ucontext) {
                ret = -EINVAL;
                goto err;
        }

        INIT_UDATA(&udata, buf + sizeof cmd,
                   (unsigned long) cmd.response + sizeof resp,
                   in_len - sizeof cmd, out_len - sizeof resp);

        ucontext = ibdev->alloc_ucontext(ibdev, &udata);
        if (IS_ERR(ucontext)) {
                ret = PTR_ERR(ucontext);
                goto err;
        }

        ucontext->device = ibdev;
        INIT_LIST_HEAD(&ucontext->pd_list);
        INIT_LIST_HEAD(&ucontext->mr_list);
        INIT_LIST_HEAD(&ucontext->mw_list);
        INIT_LIST_HEAD(&ucontext->cq_list);
        INIT_LIST_HEAD(&ucontext->qp_list);
        INIT_LIST_HEAD(&ucontext->srq_list);
        INIT_LIST_HEAD(&ucontext->ah_list);
        INIT_LIST_HEAD(&ucontext->xrcd_list);
        INIT_LIST_HEAD(&ucontext->rule_list);
        ucontext->closing = 0;

        resp.num_comp_vectors = file->device->num_comp_vectors;

        ret = get_unused_fd_flags(O_CLOEXEC);
        if (ret < 0)
                goto err_free;
        resp.async_fd = ret;

        filp = ib_uverbs_alloc_event_file(file, 1);
        if (IS_ERR(filp)) {
                ret = PTR_ERR(filp);
                goto err_fd;
        }

        if (copy_to_user((void __user *) (unsigned long) cmd.response,
                         &resp, sizeof resp)) {
                ret = -EFAULT;
                goto err_file;
        }

        file->async_file = filp->private_data;

        INIT_IB_EVENT_HANDLER(&file->event_handler, file->device->ib_dev,
                              ib_uverbs_event_handler);
        ret = ib_register_event_handler(&file->event_handler);
        if (ret)
                goto err_file;

        kref_get(&file->async_file->ref);
        kref_get(&file->ref);
        file->ucontext = ucontext;

        fd_install(resp.async_fd, filp);

        mutex_unlock(&file->mutex);

        return in_len;

err_file:
        fput(filp);

err_fd:
        put_unused_fd(resp.async_fd);

err_free:
        ibdev->dealloc_ucontext(ucontext);

err:
        mutex_unlock(&file->mutex);
        return ret;
}

ssize_t ib_uverbs_query_device(struct ib_uverbs_file *file,
                               const char __user *buf,
                               int in_len, int out_len)
{
        struct ib_uverbs_query_device cmd;
        struct ib_uverbs_query_device_resp resp;
        struct ib_device_attr attr;
        int ret;

        if (out_len < sizeof resp)
                return -ENOSPC;

        if (copy_from_user(&cmd, buf, sizeof cmd))
                return -EFAULT;

        ret = ib_query_device(file->device->ib_dev, &attr);
        if (ret)
                return ret;

        memset(&resp, 0, sizeof resp);

        resp.fw_ver                    = attr.fw_ver;
        resp.node_guid                 = file->device->ib_dev->node_guid;
        resp.sys_image_guid            = attr.sys_image_guid;
        resp.max_mr_size               = attr.max_mr_size;
        resp.page_size_cap             = attr.page_size_cap;
        resp.vendor_id                 = attr.vendor_id;
        resp.vendor_part_id            = attr.vendor_part_id;
        resp.hw_ver                    = attr.hw_ver;
        resp.max_qp                    = attr.max_qp;
        resp.max_qp_wr                 = attr.max_qp_wr;
        resp.device_cap_flags          = attr.device_cap_flags;
        resp.max_sge                   = attr.max_sge;
        resp.max_sge_rd                = attr.max_sge_rd;
        resp.max_cq                    = attr.max_cq;
        resp.max_cqe                   = attr.max_cqe;
        resp.max_mr                    = attr.max_mr;
        resp.max_pd                    = attr.max_pd;
        resp.max_qp_rd_atom            = attr.max_qp_rd_atom;
        resp.max_ee_rd_atom            = attr.max_ee_rd_atom;
        resp.max_res_rd_atom           = attr.max_res_rd_atom;
        resp.max_qp_init_rd_atom       = attr.max_qp_init_rd_atom;
        resp.max_ee_init_rd_atom       = attr.max_ee_init_rd_atom;
        resp.atomic_cap                = attr.atomic_cap;
        resp.max_ee                    = attr.max_ee;
        resp.max_rdd                   = attr.max_rdd;
        resp.max_mw                    = attr.max_mw;
        resp.max_raw_ipv6_qp           = attr.max_raw_ipv6_qp;
        resp.max_raw_ethy_qp           = attr.max_raw_ethy_qp;
        resp.max_mcast_grp             = attr.max_mcast_grp;
        resp.max_mcast_qp_attach       = attr.max_mcast_qp_attach;
        resp.max_total_mcast_qp_attach = attr.max_total_mcast_qp_attach;
        resp.max_ah                    = attr.max_ah;
        resp.max_fmr                   = attr.max_fmr;
        resp.max_map_per_fmr           = attr.max_map_per_fmr;
        resp.max_srq                   = attr.max_srq;
        resp.max_srq_wr                = attr.max_srq_wr;
        resp.max_srq_sge               = attr.max_srq_sge;
        resp.max_pkeys                 = attr.max_pkeys;
        resp.local_ca_ack_delay        = attr.local_ca_ack_delay;
        resp.phys_port_cnt             = file->device->ib_dev->phys_port_cnt;

        if (copy_to_user((void __user *) (unsigned long) cmd.response,
                         &resp, sizeof resp))
                return -EFAULT;

        return in_len;
}

ssize_t ib_uverbs_query_port(struct ib_uverbs_file *file,
                             const char __user *buf,
                             int in_len, int out_len)
{
        struct ib_uverbs_query_port cmd;
        struct ib_uverbs_query_port_resp resp;
        struct ib_port_attr attr;
        int ret;

        if (out_len < sizeof resp)
                return -ENOSPC;

        if (copy_from_user(&cmd, buf, sizeof cmd))
                return -EFAULT;

        ret = ib_query_port(file->device->ib_dev, cmd.port_num, &attr);
        if (ret)
                return ret;

        memset(&resp, 0, sizeof resp);

        resp.state           = attr.state;
        resp.max_mtu         = attr.max_mtu;
        resp.active_mtu      = attr.active_mtu;
        resp.gid_tbl_len     = attr.gid_tbl_len;
        resp.port_cap_flags  = attr.port_cap_flags;
        resp.max_msg_sz      = attr.max_msg_sz;
        resp.bad_pkey_cntr   = attr.bad_pkey_cntr;
        resp.qkey_viol_cntr  = attr.qkey_viol_cntr;
        resp.pkey_tbl_len    = attr.pkey_tbl_len;
        resp.lid             = attr.lid;
        resp.sm_lid          = attr.sm_lid;
        resp.lmc             = attr.lmc;
        resp.max_vl_num      = attr.max_vl_num;
        resp.sm_sl           = attr.sm_sl;
        resp.subnet_timeout  = attr.subnet_timeout;
        resp.init_type_reply = attr.init_type_reply;
        resp.active_width    = attr.active_width;
        resp.active_speed    = attr.active_speed;
        resp.phys_state      = attr.phys_state;
        resp.link_layer      = rdma_port_get_link_layer(file->device->ib_dev,
                                                        cmd.port_num);

        if (copy_to_user((void __user *) (unsigned long) cmd.response,
                         &resp, sizeof resp))
                return -EFAULT;

        return in_len;
}

ssize_t ib_uverbs_alloc_pd(struct ib_uverbs_file *file,
                           const char __user *buf,
                           int in_len, int out_len)
{
        struct ib_uverbs_alloc_pd cmd;
        struct ib_uverbs_alloc_pd_resp resp;
        struct ib_udata udata;
        struct ib_uobject *uobj;
        struct ib_pd *pd;
        int ret;

        if (out_len < sizeof resp)
                return -ENOSPC;

        if (copy_from_user(&cmd, buf, sizeof cmd))
                return -EFAULT;

        INIT_UDATA(&udata, buf + sizeof cmd,
                   (unsigned long) cmd.response + sizeof resp,
                   in_len - sizeof cmd, out_len - sizeof resp);

        uobj = kmalloc(sizeof *uobj, GFP_KERNEL);
        if (!uobj)
                return -ENOMEM;

        init_uobj(uobj, 0, file->ucontext, &pd_lock_class);
        down_write(&uobj->mutex);

        pd = file->device->ib_dev->alloc_pd(file->device->ib_dev,
                                            file->ucontext, &udata);
        if (IS_ERR(pd)) {
                ret = PTR_ERR(pd);
                goto err;
        }

        pd->device  = file->device->ib_dev;
        pd->uobject = uobj;
        atomic_set(&pd->usecnt, 0);

        uobj->object = pd;
        ret = idr_add_uobj(&ib_uverbs_pd_idr, uobj);
        if (ret)
                goto err_idr;

        memset(&resp, 0, sizeof resp);
        resp.pd_handle = uobj->id;

        if (copy_to_user((void __user *) (unsigned long) cmd.response,
                         &resp, sizeof resp)) {
                ret = -EFAULT;
                goto err_copy;
        }

        mutex_lock(&file->mutex);
        list_add_tail(&uobj->list, &file->ucontext->pd_list);
        mutex_unlock(&file->mutex);

        uobj->live = 1;

        up_write(&uobj->mutex);

        return in_len;

err_copy:
        idr_remove_uobj(&ib_uverbs_pd_idr, uobj);

err_idr:
        ib_dealloc_pd(pd);

err:
        put_uobj_write(uobj);
        return ret;
}

ssize_t ib_uverbs_dealloc_pd(struct ib_uverbs_file *file,
                             const char __user *buf,
                             int in_len, int out_len)
{
        struct ib_uverbs_dealloc_pd cmd;
        struct ib_uobject *uobj;
        int ret;

        if (copy_from_user(&cmd, buf, sizeof cmd))
                return -EFAULT;

        uobj = idr_write_uobj(&ib_uverbs_pd_idr, cmd.pd_handle, file->ucontext);
        if (!uobj)
                return -EINVAL;

        ret = ib_dealloc_pd(uobj->object);
        if (!ret)
                uobj->live = 0;

        put_uobj_write(uobj);

        if (ret)
                return ret;

        idr_remove_uobj(&ib_uverbs_pd_idr, uobj);

        mutex_lock(&file->mutex);
        list_del(&uobj->list);
        mutex_unlock(&file->mutex);

        put_uobj(uobj);

        return in_len;
}

struct xrcd_table_entry {
        struct rb_node node;
        struct ib_xrcd *xrcd;
        struct inode *inode;
};

static int xrcd_table_insert(struct ib_uverbs_device *dev,
                             struct inode *inode,
                             struct ib_xrcd *xrcd)
{
        struct xrcd_table_entry *entry, *scan;
        struct rb_node **p = &dev->xrcd_tree.rb_node;
        struct rb_node *parent = NULL;

        entry = kmalloc(sizeof *entry, GFP_KERNEL);
        if (!entry)
                return -ENOMEM;

        entry->xrcd  = xrcd;
        entry->inode = inode;

        while (*p) {
                parent = *p;
                scan = rb_entry(parent, struct xrcd_table_entry, node);

                if (inode < scan->inode) {
                        p = &(*p)->rb_left;
                } else if (inode > scan->inode) {
                        p = &(*p)->rb_right;
                } else {
                        kfree(entry);
                        return -EEXIST;
                }
        }

        rb_link_node(&entry->node, parent, p);
        rb_insert_color(&entry->node, &dev->xrcd_tree);
        igrab(inode);
        return 0;
}

static struct xrcd_table_entry *xrcd_table_search(struct ib_uverbs_device *dev,
                                                  struct inode *inode)
{
        struct xrcd_table_entry *entry;
        struct rb_node *p = dev->xrcd_tree.rb_node;

        while (p) {
                entry = rb_entry(p, struct xrcd_table_entry, node);

                if (inode < entry->inode)
                        p = p->rb_left;
                else if (inode > entry->inode)
                        p = p->rb_right;
                else
                        return entry;
        }

        return NULL;
}

static struct ib_xrcd *find_xrcd(struct ib_uverbs_device *dev, struct inode *inode)
{
        struct xrcd_table_entry *entry;

        entry = xrcd_table_search(dev, inode);
        if (!entry)
                return NULL;

        return entry->xrcd;
}

static void xrcd_table_delete(struct ib_uverbs_device *dev,
                              struct inode *inode)
{
        struct xrcd_table_entry *entry;

        entry = xrcd_table_search(dev, inode);
        if (entry) {
                iput(inode);
                rb_erase(&entry->node, &dev->xrcd_tree);
                kfree(entry);
        }
}

ssize_t ib_uverbs_open_xrcd(struct ib_uverbs_file *file,
                            const char __user *buf, int in_len,
                            int out_len)
{
        struct ib_uverbs_open_xrcd cmd;
        struct ib_uverbs_open_xrcd_resp resp;
        struct ib_udata udata;
        struct ib_uxrcd_object *obj;
        struct ib_xrcd *xrcd = NULL;
        struct fd f = {NULL, 0};
        struct inode *inode = NULL;
        int ret = 0;
        int new_xrcd = 0;

        if (out_len < sizeof resp)
                return -ENOSPC;

        if (copy_from_user(&cmd, buf, sizeof cmd))
                return -EFAULT;

        INIT_UDATA(&udata, buf + sizeof cmd,
                   (unsigned long) cmd.response + sizeof resp,
                   in_len - sizeof cmd, out_len - sizeof resp);

        mutex_lock(&file->device->xrcd_tree_mutex);

        if (cmd.fd != -1) {
                /* search for file descriptor */
                f = fdget(cmd.fd);
                if (!f.file) {
                        ret = -EBADF;
                        goto err_tree_mutex_unlock;
                }

                inode = file_inode(f.file);
                xrcd = find_xrcd(file->device, inode);
                if (!xrcd && !(cmd.oflags & O_CREAT)) {
                        /* no file descriptor. Need CREATE flag */
                        ret = -EAGAIN;
                        goto err_tree_mutex_unlock;
                }

                if (xrcd && cmd.oflags & O_EXCL) {
                        ret = -EINVAL;
                        goto err_tree_mutex_unlock;
                }
        }

        obj = kmalloc(sizeof *obj, GFP_KERNEL);
        if (!obj) {
                ret = -ENOMEM;
                goto err_tree_mutex_unlock;
        }

        init_uobj(&obj->uobject, 0, file->ucontext, &xrcd_lock_class);

        down_write(&obj->uobject.mutex);

        if (!xrcd) {
                xrcd = file->device->ib_dev->alloc_xrcd(file->device->ib_dev,
                                                        file->ucontext, &udata);
                if (IS_ERR(xrcd)) {
                        ret = PTR_ERR(xrcd);
                        goto err;
                }

                xrcd->inode  = inode;
                xrcd->device = file->device->ib_dev;
                atomic_set(&xrcd->usecnt, 0);
                mutex_init(&xrcd->tgt_qp_mutex);
                INIT_LIST_HEAD(&xrcd->tgt_qp_list);
                new_xrcd = 1;
        }

        atomic_set(&obj->refcnt, 0);
        obj->uobject.object = xrcd;
        ret = idr_add_uobj(&ib_uverbs_xrcd_idr, &obj->uobject);
        if (ret)
                goto err_idr;

        memset(&resp, 0, sizeof resp);
        resp.xrcd_handle = obj->uobject.id;

        if (inode) {
                if (new_xrcd) {
                        /* create new inode/xrcd table entry */
                        ret = xrcd_table_insert(file->device, inode, xrcd);
                        if (ret)
                                goto err_insert_xrcd;
                }
                atomic_inc(&xrcd->usecnt);
        }

        if (copy_to_user((void __user *) (unsigned long) cmd.response,
                         &resp, sizeof resp)) {
                ret = -EFAULT;
                goto err_copy;
        }

        if (f.file)
                fdput(f);

        mutex_lock(&file->mutex);
        list_add_tail(&obj->uobject.list, &file->ucontext->xrcd_list);
        mutex_unlock(&file->mutex);

        obj->uobject.live = 1;
        up_write(&obj->uobject.mutex);

        mutex_unlock(&file->device->xrcd_tree_mutex);
        return in_len;

err_copy:
        if (inode) {
                if (new_xrcd)
                        xrcd_table_delete(file->device, inode);
                atomic_dec(&xrcd->usecnt);
        }

err_insert_xrcd:
        idr_remove_uobj(&ib_uverbs_xrcd_idr, &obj->uobject);

err_idr:
        ib_dealloc_xrcd(xrcd);

err:
        put_uobj_write(&obj->uobject);

err_tree_mutex_unlock:
        if (f.file)
                fdput(f);

        mutex_unlock(&file->device->xrcd_tree_mutex);

        return ret;
}

ssize_t ib_uverbs_close_xrcd(struct ib_uverbs_file *file,
                             const char __user *buf, int in_len,
                             int out_len)
{
        struct ib_uverbs_close_xrcd cmd;
        struct ib_uobject *uobj;
        struct ib_xrcd *xrcd = NULL;
        struct inode *inode = NULL;
        struct ib_uxrcd_object *obj;
        int live;
        int ret = 0;

        if (copy_from_user(&cmd, buf, sizeof cmd))
                return -EFAULT;

        mutex_lock(&file->device->xrcd_tree_mutex);
        uobj = idr_write_uobj(&ib_uverbs_xrcd_idr, cmd.xrcd_handle, file->ucontext);
        if (!uobj) {
                ret = -EINVAL;
                goto out;
        }

        xrcd  = uobj->object;
        inode = xrcd->inode;
        obj   = container_of(uobj, struct ib_uxrcd_object, uobject);
        if (atomic_read(&obj->refcnt)) {
                put_uobj_write(uobj);
                ret = -EBUSY;
                goto out;
        }

        if (!inode || atomic_dec_and_test(&xrcd->usecnt)) {
                ret = ib_dealloc_xrcd(uobj->object);
                if (!ret)
                        uobj->live = 0;
        }

        live = uobj->live;
        if (inode && ret)
                atomic_inc(&xrcd->usecnt);

        put_uobj_write(uobj);

        if (ret)
                goto out;

        if (inode && !live)
                xrcd_table_delete(file->device, inode);

        idr_remove_uobj(&ib_uverbs_xrcd_idr, uobj);
        mutex_lock(&file->mutex);
        list_del(&uobj->list);
        mutex_unlock(&file->mutex);

        put_uobj(uobj);
        ret = in_len;

out:
        mutex_unlock(&file->device->xrcd_tree_mutex);
        return ret;
}

void ib_uverbs_dealloc_xrcd(struct ib_uverbs_device *dev,
                            struct ib_xrcd *xrcd)
{
        struct inode *inode;

        inode = xrcd->inode;
        if (inode && !atomic_dec_and_test(&xrcd->usecnt))
                return;

        ib_dealloc_xrcd(xrcd);

        if (inode)
                xrcd_table_delete(dev, inode);
}

ssize_t ib_uverbs_reg_mr(struct ib_uverbs_file *file,
                         const char __user *buf, int in_len,
                         int out_len)
{
        struct ib_uverbs_reg_mr cmd;
        struct ib_uverbs_reg_mr_resp resp;
        struct ib_udata udata;
        struct ib_uobject *uobj;
        struct ib_pd *pd;
        struct ib_mr *mr;
        int ret;

        if (out_len < sizeof resp)
                return -ENOSPC;

        if (copy_from_user(&cmd, buf, sizeof cmd))
                return -EFAULT;

        INIT_UDATA(&udata, buf + sizeof cmd,
                   (unsigned long) cmd.response + sizeof resp,
                   in_len - sizeof cmd, out_len - sizeof resp);

        if ((cmd.start & ~PAGE_MASK) != (cmd.hca_va & ~PAGE_MASK))
                return -EINVAL;

        /*
         * Local write permission is required if remote write or
         * remote atomic permission is also requested.
         */
        if (cmd.access_flags & (IB_ACCESS_REMOTE_ATOMIC | IB_ACCESS_REMOTE_WRITE) &&
            !(cmd.access_flags & IB_ACCESS_LOCAL_WRITE))
                return -EINVAL;

        uobj = kmalloc(sizeof *uobj, GFP_KERNEL);
        if (!uobj)
                return -ENOMEM;

        init_uobj(uobj, 0, file->ucontext, &mr_lock_class);
        down_write(&uobj->mutex);

        pd = idr_read_pd(cmd.pd_handle, file->ucontext);
        if (!pd) {
                ret = -EINVAL;
                goto err_free;
        }

        mr = pd->device->reg_user_mr(pd, cmd.start, cmd.length, cmd.hca_va,
                                     cmd.access_flags, &udata);
        if (IS_ERR(mr)) {
                ret = PTR_ERR(mr);
                goto err_put;
        }

        mr->device  = pd->device;
        mr->pd      = pd;
        mr->uobject = uobj;
        atomic_inc(&pd->usecnt);
        atomic_set(&mr->usecnt, 0);

        uobj->object = mr;
        ret = idr_add_uobj(&ib_uverbs_mr_idr, uobj);
        if (ret)
                goto err_unreg;

        memset(&resp, 0, sizeof resp);
        resp.lkey      = mr->lkey;
        resp.rkey      = mr->rkey;
        resp.mr_handle = uobj->id;

        if (copy_to_user((void __user *) (unsigned long) cmd.response,
                         &resp, sizeof resp)) {
                ret = -EFAULT;
                goto err_copy;
        }

        put_pd_read(pd);

        mutex_lock(&file->mutex);
        list_add_tail(&uobj->list, &file->ucontext->mr_list);
        mutex_unlock(&file->mutex);

        uobj->live = 1;

        up_write(&uobj->mutex);

        return in_len;

err_copy:
        idr_remove_uobj(&ib_uverbs_mr_idr, uobj);

err_unreg:
        ib_dereg_mr(mr);

err_put:
        put_pd_read(pd);

err_free:
        put_uobj_write(uobj);
        return ret;
}

ssize_t ib_uverbs_dereg_mr(struct ib_uverbs_file *file,
                           const char __user *buf, int in_len,
                           int out_len)
{
        struct ib_uverbs_dereg_mr cmd;
        struct ib_mr *mr;
        struct ib_uobject *uobj;
        int ret = -EINVAL;

        if (copy_from_user(&cmd, buf, sizeof cmd))
                return -EFAULT;

        uobj = idr_write_uobj(&ib_uverbs_mr_idr, cmd.mr_handle, file->ucontext);
        if (!uobj)
                return -EINVAL;

        mr = uobj->object;

        ret = ib_dereg_mr(mr);
        if (!ret)
                uobj->live = 0;

        put_uobj_write(uobj);

        if (ret)
                return ret;

        idr_remove_uobj(&ib_uverbs_mr_idr, uobj);

        mutex_lock(&file->mutex);
        list_del(&uobj->list);
        mutex_unlock(&file->mutex);

        put_uobj(uobj);

        return in_len;
}

ssize_t ib_uverbs_alloc_mw(struct ib_uverbs_file *file,
                           const char __user *buf, int in_len,
                           int out_len)
{
        struct ib_uverbs_alloc_mw cmd;
        struct ib_uverbs_alloc_mw_resp resp;
        struct ib_uobject *uobj;
        struct ib_pd *pd;
        struct ib_mw *mw;
        int ret;

        if (out_len < sizeof(resp))
                return -ENOSPC;

        if (copy_from_user(&cmd, buf, sizeof(cmd)))
                return -EFAULT;

        uobj = kmalloc(sizeof(*uobj), GFP_KERNEL);
        if (!uobj)
                return -ENOMEM;

        init_uobj(uobj, 0, file->ucontext, &mw_lock_class);
        down_write(&uobj->mutex);

        pd = idr_read_pd(cmd.pd_handle, file->ucontext);
        if (!pd) {
                ret = -EINVAL;
                goto err_free;
        }

        mw = pd->device->alloc_mw(pd, cmd.mw_type);
        if (IS_ERR(mw)) {
                ret = PTR_ERR(mw);
                goto err_put;
        }

        mw->device  = pd->device;
        mw->pd      = pd;
        mw->uobject = uobj;
        atomic_inc(&pd->usecnt);

        uobj->object = mw;
        ret = idr_add_uobj(&ib_uverbs_mw_idr, uobj);
        if (ret)
                goto err_unalloc;

        memset(&resp, 0, sizeof(resp));
        resp.rkey      = mw->rkey;
        resp.mw_handle = uobj->id;

        if (copy_to_user((void __user *)(unsigned long)cmd.response,
                         &resp, sizeof(resp))) {
                ret = -EFAULT;
                goto err_copy;
        }

        put_pd_read(pd);

        mutex_lock(&file->mutex);
        list_add_tail(&uobj->list, &file->ucontext->mw_list);
        mutex_unlock(&file->mutex);

        uobj->live = 1;

        up_write(&uobj->mutex);

        return in_len;

err_copy:
        idr_remove_uobj(&ib_uverbs_mw_idr, uobj);

err_unalloc:
        ib_dealloc_mw(mw);

err_put:
        put_pd_read(pd);

err_free:
        put_uobj_write(uobj);
        return ret;
}

ssize_t ib_uverbs_dealloc_mw(struct ib_uverbs_file *file,
                             const char __user *buf, int in_len,
                             int out_len)
{
        struct ib_uverbs_dealloc_mw cmd;
        struct ib_mw *mw;
        struct ib_uobject *uobj;
        int ret = -EINVAL;

        if (copy_from_user(&cmd, buf, sizeof(cmd)))
                return -EFAULT;

        uobj = idr_write_uobj(&ib_uverbs_mw_idr, cmd.mw_handle, file->ucontext);
        if (!uobj)
                return -EINVAL;

        mw = uobj->object;

        ret = ib_dealloc_mw(mw);
        if (!ret)
                uobj->live = 0;

        put_uobj_write(uobj);

        if (ret)
                return ret;

        idr_remove_uobj(&ib_uverbs_mw_idr, uobj);

        mutex_lock(&file->mutex);
        list_del(&uobj->list);
        mutex_unlock(&file->mutex);

        put_uobj(uobj);

        return in_len;
}

ssize_t ib_uverbs_create_comp_channel(struct ib_uverbs_file *file,
                                      const char __user *buf, int in_len,
                                      int out_len)
{
        struct ib_uverbs_create_comp_channel cmd;
        struct ib_uverbs_create_comp_channel_resp resp;
        struct file *filp;
        int ret;

        if (out_len < sizeof resp)
                return -ENOSPC;

        if (copy_from_user(&cmd, buf, sizeof cmd))
                return -EFAULT;

        ret = get_unused_fd_flags(O_CLOEXEC);
        if (ret < 0)
                return ret;
        resp.fd = ret;

        filp = ib_uverbs_alloc_event_file(file, 0);
        if (IS_ERR(filp)) {
                put_unused_fd(resp.fd);
                return PTR_ERR(filp);
        }

        if (copy_to_user((void __user *) (unsigned long) cmd.response,
                         &resp, sizeof resp)) {
                put_unused_fd(resp.fd);
                fput(filp);
                return -EFAULT;
        }

        fd_install(resp.fd, filp);
        return in_len;
}

ssize_t ib_uverbs_create_cq(struct ib_uverbs_file *file,
                            const char __user *buf, int in_len,
                            int out_len)
{
        struct ib_uverbs_create_cq cmd;
        struct ib_uverbs_create_cq_resp resp;
        struct ib_udata udata;
        struct ib_ucq_object *obj;
        struct ib_uverbs_event_file *ev_file = NULL;
        struct ib_cq *cq;
        int ret;

        if (out_len < sizeof resp)
                return -ENOSPC;

        if (copy_from_user(&cmd, buf, sizeof cmd))
                return -EFAULT;

        INIT_UDATA(&udata, buf + sizeof cmd,
                   (unsigned long) cmd.response + sizeof resp,
                   in_len - sizeof cmd, out_len - sizeof resp);

        if (cmd.comp_vector >= file->device->num_comp_vectors)
                return -EINVAL;

        obj = kmalloc(sizeof *obj, GFP_KERNEL);
        if (!obj)
                return -ENOMEM;

        init_uobj(&obj->uobject, cmd.user_handle, file->ucontext, &cq_lock_class);
        down_write(&obj->uobject.mutex);

        if (cmd.comp_channel >= 0) {
                ev_file = ib_uverbs_lookup_comp_file(cmd.comp_channel);
                if (!ev_file) {
                        ret = -EINVAL;
                        goto err;
                }
        }

        obj->uverbs_file           = file;
        obj->comp_events_reported  = 0;
        obj->async_events_reported = 0;
        INIT_LIST_HEAD(&obj->comp_list);
        INIT_LIST_HEAD(&obj->async_list);

        cq = file->device->ib_dev->create_cq(file->device->ib_dev, cmd.cqe,
                                             cmd.comp_vector,
                                             file->ucontext, &udata);
        if (IS_ERR(cq)) {
                ret = PTR_ERR(cq);
                goto err_file;
        }

        cq->device        = file->device->ib_dev;
        cq->uobject       = &obj->uobject;
        cq->comp_handler  = ib_uverbs_comp_handler;
        cq->event_handler = ib_uverbs_cq_event_handler;
        cq->cq_context    = ev_file;
        atomic_set(&cq->usecnt, 0);

        obj->uobject.object = cq;
        ret = idr_add_uobj(&ib_uverbs_cq_idr, &obj->uobject);
        if (ret)
                goto err_free;

        memset(&resp, 0, sizeof resp);
        resp.cq_handle = obj->uobject.id;
        resp.cqe       = cq->cqe;

        if (copy_to_user((void __user *) (unsigned long) cmd.response,
                         &resp, sizeof resp)) {
                ret = -EFAULT;
                goto err_copy;
        }

        mutex_lock(&file->mutex);
        list_add_tail(&obj->uobject.list, &file->ucontext->cq_list);
        mutex_unlock(&file->mutex);

        obj->uobject.live = 1;

        up_write(&obj->uobject.mutex);

        return in_len;

err_copy:
        idr_remove_uobj(&ib_uverbs_cq_idr, &obj->uobject);

err_free:
        ib_destroy_cq(cq);

err_file:
        if (ev_file)
                ib_uverbs_release_ucq(file, ev_file, obj);

err:
        put_uobj_write(&obj->uobject);
        return ret;
}

ssize_t ib_uverbs_resize_cq(struct ib_uverbs_file *file,
                            const char __user *buf, int in_len,
                            int out_len)
{
        struct ib_uverbs_resize_cq cmd;
        struct ib_uverbs_resize_cq_resp resp;
        struct ib_udata udata;
        struct ib_cq *cq;
        int ret = -EINVAL;

        if (copy_from_user(&cmd, buf, sizeof cmd))
                return -EFAULT;

        INIT_UDATA(&udata, buf + sizeof cmd,
                   (unsigned long) cmd.response + sizeof resp,
                   in_len - sizeof cmd, out_len - sizeof resp);

        cq = idr_read_cq(cmd.cq_handle, file->ucontext, 0);
        if (!cq)
                return -EINVAL;

        ret = cq->device->resize_cq(cq, cmd.cqe, &udata);
        if (ret)
                goto out;

        resp.cqe = cq->cqe;

        if (copy_to_user((void __user *) (unsigned long) cmd.response,
                         &resp, sizeof resp.cqe))
                ret = -EFAULT;

out:
        put_cq_read(cq);

        return ret ? ret : in_len;
}

static int copy_wc_to_user(void __user *dest, struct ib_wc *wc)
{
        struct ib_uverbs_wc tmp;

        tmp.wr_id          = wc->wr_id;
        tmp.status         = wc->status;
        tmp.opcode         = wc->opcode;
        tmp.vendor_err     = wc->vendor_err;
        tmp.byte_len       = wc->byte_len;
        tmp.ex.imm_data    = (__u32 __force) wc->ex.imm_data;
        tmp.qp_num         = wc->qp->qp_num;
        tmp.src_qp         = wc->src_qp;
        tmp.wc_flags       = wc->wc_flags;
        tmp.pkey_index     = wc->pkey_index;
        tmp.slid           = wc->slid;
        tmp.sl             = wc->sl;
        tmp.dlid_path_bits = wc->dlid_path_bits;
        tmp.port_num       = wc->port_num;
        tmp.reserved       = 0;

        if (copy_to_user(dest, &tmp, sizeof tmp))
                return -EFAULT;

        return 0;
}

ssize_t ib_uverbs_poll_cq(struct ib_uverbs_file *file,
                          const char __user *buf, int in_len,
                          int out_len)
{
        struct ib_uverbs_poll_cq cmd;
        struct ib_uverbs_poll_cq_resp resp;
        u8 __user *header_ptr;
        u8 __user *data_ptr;
        struct ib_cq *cq;
        struct ib_wc wc;
        int ret;

        if (copy_from_user(&cmd, buf, sizeof cmd))
                return -EFAULT;

        cq = idr_read_cq(cmd.cq_handle, file->ucontext, 0);
        if (!cq)
                return -EINVAL;

        /* we copy a struct ib_uverbs_poll_cq_resp to user space */
        header_ptr = (void __user *)(unsigned long) cmd.response;
        data_ptr = header_ptr + sizeof resp;

        memset(&resp, 0, sizeof resp);
        while (resp.count < cmd.ne) {
                ret = ib_poll_cq(cq, 1, &wc);
                if (ret < 0)
                        goto out_put;
                if (!ret)
                        break;

                ret = copy_wc_to_user(data_ptr, &wc);
                if (ret)
                        goto out_put;

                data_ptr += sizeof(struct ib_uverbs_wc);
                ++resp.count;
        }

        if (copy_to_user(header_ptr, &resp, sizeof resp)) {
                ret = -EFAULT;
                goto out_put;
        }

        ret = in_len;

out_put:
        put_cq_read(cq);
        return ret;
}

ssize_t ib_uverbs_req_notify_cq(struct ib_uverbs_file *file,
                                const char __user *buf, int in_len,
                                int out_len)
{
        struct ib_uverbs_req_notify_cq cmd;
        struct ib_cq *cq;

        if (copy_from_user(&cmd, buf, sizeof cmd))
                return -EFAULT;

        cq = idr_read_cq(cmd.cq_handle, file->ucontext, 0);
        if (!cq)
                return -EINVAL;

        ib_req_notify_cq(cq, cmd.solicited_only ?
                         IB_CQ_SOLICITED : IB_CQ_NEXT_COMP);

        put_cq_read(cq);

        return in_len;
}

ssize_t ib_uverbs_destroy_cq(struct ib_uverbs_file *file,
                             const char __user *buf, int in_len,
                             int out_len)
{
        struct ib_uverbs_destroy_cq cmd;
        struct ib_uverbs_destroy_cq_resp resp;
        struct ib_uobject *uobj;
        struct ib_cq *cq;
        struct ib_ucq_object *obj;
        struct ib_uverbs_event_file *ev_file;
        int ret = -EINVAL;

        if (copy_from_user(&cmd, buf, sizeof cmd))
                return -EFAULT;

        uobj = idr_write_uobj(&ib_uverbs_cq_idr, cmd.cq_handle, file->ucontext);
        if (!uobj)
                return -EINVAL;
        cq      = uobj->object;
        ev_file = cq->cq_context;
        obj     = container_of(cq->uobject, struct ib_ucq_object, uobject);

        ret = ib_destroy_cq(cq);
        if (!ret)
                uobj->live = 0;

        put_uobj_write(uobj);

        if (ret)
                return ret;

        idr_remove_uobj(&ib_uverbs_cq_idr, uobj);

        mutex_lock(&file->mutex);
        list_del(&uobj->list);
        mutex_unlock(&file->mutex);

        ib_uverbs_release_ucq(file, ev_file, obj);

        memset(&resp, 0, sizeof resp);
        resp.comp_events_reported  = obj->comp_events_reported;
        resp.async_events_reported = obj->async_events_reported;

        put_uobj(uobj);

        if (copy_to_user((void __user *) (unsigned long) cmd.response,
                         &resp, sizeof resp))
                return -EFAULT;

        return in_len;
}

ssize_t ib_uverbs_create_qp(struct ib_uverbs_file *file,
                            const char __user *buf, int in_len,
                            int out_len)
{
        struct ib_uverbs_create_qp cmd;
        struct ib_uverbs_create_qp_resp resp;
        struct ib_udata udata;
        struct ib_uqp_object *obj;
        struct ib_device *device;
        struct ib_pd *pd = NULL;
        struct ib_xrcd *xrcd = NULL;
        struct ib_uobject *uninitialized_var(xrcd_uobj);
        struct ib_cq *scq = NULL, *rcq = NULL;
        struct ib_srq *srq = NULL;
        struct ib_qp *qp;
        struct ib_qp_init_attr attr;
        int ret;

        if (out_len < sizeof resp)
                return -ENOSPC;

        if (copy_from_user(&cmd, buf, sizeof cmd))
                return -EFAULT;

        if (cmd.qp_type == IB_QPT_RAW_PACKET && !capable(CAP_NET_RAW))
                return -EPERM;

        INIT_UDATA(&udata, buf + sizeof cmd,
                   (unsigned long) cmd.response + sizeof resp,
                   in_len - sizeof cmd, out_len - sizeof resp);

        obj = kzalloc(sizeof *obj, GFP_KERNEL);
        if (!obj)
                return -ENOMEM;

        init_uobj(&obj->uevent.uobject, cmd.user_handle, file->ucontext, &qp_lock_class);
        down_write(&obj->uevent.uobject.mutex);

        if (cmd.qp_type == IB_QPT_XRC_TGT) {
                xrcd = idr_read_xrcd(cmd.pd_handle, file->ucontext, &xrcd_uobj);
                if (!xrcd) {
                        ret = -EINVAL;
                        goto err_put;
                }
                device = xrcd->device;
        } else {
                if (cmd.qp_type == IB_QPT_XRC_INI) {
                        cmd.max_recv_wr = cmd.max_recv_sge = 0;
                } else {
                        if (cmd.is_srq) {
                                srq = idr_read_srq(cmd.srq_handle, file->ucontext);
                                if (!srq || srq->srq_type != IB_SRQT_BASIC) {
                                        ret = -EINVAL;
                                        goto err_put;
                                }
                        }

                        if (cmd.recv_cq_handle != cmd.send_cq_handle) {
                                rcq = idr_read_cq(cmd.recv_cq_handle, file->ucontext, 0);
                                if (!rcq) {
                                        ret = -EINVAL;
                                        goto err_put;
                                }
                        }
                }

                scq = idr_read_cq(cmd.send_cq_handle, file->ucontext, !!rcq);
                rcq = rcq ?: scq;
                pd  = idr_read_pd(cmd.pd_handle, file->ucontext);
                if (!pd || !scq) {
                        ret = -EINVAL;
                        goto err_put;
                }

                device = pd->device;
        }

        attr.event_handler = ib_uverbs_qp_event_handler;
        attr.qp_context    = file;
        attr.send_cq       = scq;
        attr.recv_cq       = rcq;
        attr.srq           = srq;
        attr.xrcd          = xrcd;
        attr.sq_sig_type   = cmd.sq_sig_all ? IB_SIGNAL_ALL_WR : IB_SIGNAL_REQ_WR;
        attr.qp_type       = cmd.qp_type;
        attr.create_flags  = 0;

        attr.cap.max_send_wr     = cmd.max_send_wr;
        attr.cap.max_recv_wr     = cmd.max_recv_wr;
        attr.cap.max_send_sge    = cmd.max_send_sge;
        attr.cap.max_recv_sge    = cmd.max_recv_sge;
        attr.cap.max_inline_data = cmd.max_inline_data;

        obj->uevent.events_reported = 0;
        INIT_LIST_HEAD(&obj->uevent.event_list);
        INIT_LIST_HEAD(&obj->mcast_list);

        if (cmd.qp_type == IB_QPT_XRC_TGT)
                qp = ib_create_qp(pd, &attr);
        else
                qp = device->create_qp(pd, &attr, &udata);

        if (IS_ERR(qp)) {
                ret = PTR_ERR(qp);
                goto err_put;
        }

        if (cmd.qp_type != IB_QPT_XRC_TGT) {
                qp->real_qp       = qp;
                qp->device        = device;
                qp->pd            = pd;
                qp->send_cq       = attr.send_cq;
                qp->recv_cq       = attr.recv_cq;
                qp->srq           = attr.srq;
                qp->event_handler = attr.event_handler;
                qp->qp_context    = attr.qp_context;
                qp->qp_type       = attr.qp_type;
                atomic_set(&qp->usecnt, 0);
                atomic_inc(&pd->usecnt);
                atomic_inc(&attr.send_cq->usecnt);
                if (attr.recv_cq)
                        atomic_inc(&attr.recv_cq->usecnt);
                if (attr.srq)
                        atomic_inc(&attr.srq->usecnt);
        }
        qp->uobject = &obj->uevent.uobject;

        obj->uevent.uobject.object = qp;
        ret = idr_add_uobj(&ib_uverbs_qp_idr, &obj->uevent.uobject);
        if (ret)
                goto err_destroy;

        memset(&resp, 0, sizeof resp);
        resp.qpn             = qp->qp_num;
        resp.qp_handle       = obj->uevent.uobject.id;
        resp.max_recv_sge    = attr.cap.max_recv_sge;
        resp.max_send_sge    = attr.cap.max_send_sge;
        resp.max_recv_wr     = attr.cap.max_recv_wr;
        resp.max_send_wr     = attr.cap.max_send_wr;
        resp.max_inline_data = attr.cap.max_inline_data;

        if (copy_to_user((void __user *) (unsigned long) cmd.response,
                         &resp, sizeof resp)) {
                ret = -EFAULT;
                goto err_copy;
        }

        if (xrcd) {
                obj->uxrcd = container_of(xrcd_uobj, struct ib_uxrcd_object,
                                          uobject);
                atomic_inc(&obj->uxrcd->refcnt);
                put_xrcd_read(xrcd_uobj);
        }

        if (pd)
                put_pd_read(pd);
        if (scq)
                put_cq_read(scq);
        if (rcq && rcq != scq)
                put_cq_read(rcq);
        if (srq)
                put_srq_read(srq);

        mutex_lock(&file->mutex);
        list_add_tail(&obj->uevent.uobject.list, &file->ucontext->qp_list);
        mutex_unlock(&file->mutex);

        obj->uevent.uobject.live = 1;

        up_write(&obj->uevent.uobject.mutex);

        return in_len;

err_copy:
        idr_remove_uobj(&ib_uverbs_qp_idr, &obj->uevent.uobject);

err_destroy:
        ib_destroy_qp(qp);

err_put:
        if (xrcd)
                put_xrcd_read(xrcd_uobj);
        if (pd)
                put_pd_read(pd);
        if (scq)
                put_cq_read(scq);
        if (rcq && rcq != scq)
                put_cq_read(rcq);
        if (srq)
                put_srq_read(srq);

        put_uobj_write(&obj->uevent.uobject);
        return ret;
}

ssize_t ib_uverbs_open_qp(struct ib_uverbs_file *file,
                          const char __user *buf, int in_len, int out_len)
{
        struct ib_uverbs_open_qp cmd;
        struct ib_uverbs_create_qp_resp resp;
        struct ib_udata udata;
        struct ib_uqp_object *obj;
        struct ib_xrcd *xrcd;
        struct ib_uobject *uninitialized_var(xrcd_uobj);
        struct ib_qp *qp;
        struct ib_qp_open_attr attr;
        int ret;

        if (out_len < sizeof resp)
                return -ENOSPC;

        if (copy_from_user(&cmd, buf, sizeof cmd))
                return -EFAULT;

        INIT_UDATA(&udata, buf + sizeof cmd,
                   (unsigned long) cmd.response + sizeof resp,
                   in_len - sizeof cmd, out_len - sizeof resp);

        obj = kmalloc(sizeof *obj, GFP_KERNEL);
        if (!obj)
                return -ENOMEM;

        init_uobj(&obj->uevent.uobject, cmd.user_handle, file->ucontext, &qp_lock_class);
        down_write(&obj->uevent.uobject.mutex);

        xrcd = idr_read_xrcd(cmd.pd_handle, file->ucontext, &xrcd_uobj);
        if (!xrcd) {
                ret = -EINVAL;
                goto err_put;
        }

        attr.event_handler = ib_uverbs_qp_event_handler;
        attr.qp_context    = file;
        attr.qp_num        = cmd.qpn;
        attr.qp_type       = cmd.qp_type;

        obj->uevent.events_reported = 0;
        INIT_LIST_HEAD(&obj->uevent.event_list);
        INIT_LIST_HEAD(&obj->mcast_list);

        qp = ib_open_qp(xrcd, &attr);
        if (IS_ERR(qp)) {
                ret = PTR_ERR(qp);
                goto err_put;
        }

        qp->uobject = &obj->uevent.uobject;

        obj->uevent.uobject.object = qp;
        ret = idr_add_uobj(&ib_uverbs_qp_idr, &obj->uevent.uobject);
        if (ret)
                goto err_destroy;

        memset(&resp, 0, sizeof resp);
        resp.qpn       = qp->qp_num;
        resp.qp_handle = obj->uevent.uobject.id;

        if (copy_to_user((void __user *) (unsigned long) cmd.response,
                         &resp, sizeof resp)) {
                ret = -EFAULT;
                goto err_remove;
        }

        obj->uxrcd = container_of(xrcd_uobj, struct ib_uxrcd_object, uobject);
        atomic_inc(&obj->uxrcd->refcnt);
        put_xrcd_read(xrcd_uobj);

        mutex_lock(&file->mutex);
        list_add_tail(&obj->uevent.uobject.list, &file->ucontext->qp_list);
        mutex_unlock(&file->mutex);

        obj->uevent.uobject.live = 1;

        up_write(&obj->uevent.uobject.mutex);

        return in_len;

err_remove:
        idr_remove_uobj(&ib_uverbs_qp_idr, &obj->uevent.uobject);

err_destroy:
        ib_destroy_qp(qp);

err_put:
        put_xrcd_read(xrcd_uobj);
        put_uobj_write(&obj->uevent.uobject);
        return ret;
}

ssize_t ib_uverbs_query_qp(struct ib_uverbs_file *file,
                           const char __user *buf, int in_len,
                           int out_len)
{
        struct ib_uverbs_query_qp cmd;
        struct ib_uverbs_query_qp_resp resp;
        struct ib_qp *qp;
        struct ib_qp_attr *attr;
        struct ib_qp_init_attr *init_attr;
        int ret;

        if (copy_from_user(&cmd, buf, sizeof cmd))
                return -EFAULT;

        attr      = kmalloc(sizeof *attr, GFP_KERNEL);
        init_attr = kmalloc(sizeof *init_attr, GFP_KERNEL);
        if (!attr || !init_attr) {
                ret = -ENOMEM;
                goto out;
        }

        qp = idr_read_qp(cmd.qp_handle, file->ucontext);
        if (!qp) {
                ret = -EINVAL;
                goto out;
        }

        ret = ib_query_qp(qp, attr, cmd.attr_mask, init_attr);

        put_qp_read(qp);

        if (ret)
                goto out;

        memset(&resp, 0, sizeof resp);

        resp.qp_state           = attr->qp_state;
        resp.cur_qp_state       = attr->cur_qp_state;
        resp.path_mtu           = attr->path_mtu;
        resp.path_mig_state     = attr->path_mig_state;
        resp.qkey               = attr->qkey;
        resp.rq_psn             = attr->rq_psn;
        resp.sq_psn             = attr->sq_psn;
        resp.dest_qp_num        = attr->dest_qp_num;
        resp.qp_access_flags    = attr->qp_access_flags;
        resp.pkey_index         = attr->pkey_index;
        resp.alt_pkey_index     = attr->alt_pkey_index;
        resp.sq_draining        = attr->sq_draining;
        resp.max_rd_atomic      = attr->max_rd_atomic;
        resp.max_dest_rd_atomic = attr->max_dest_rd_atomic;
        resp.min_rnr_timer      = attr->min_rnr_timer;
        resp.port_num           = attr->port_num;
        resp.timeout            = attr->timeout;
        resp.retry_cnt          = attr->retry_cnt;
        resp.rnr_retry          = attr->rnr_retry;
        resp.alt_port_num       = attr->alt_port_num;
        resp.alt_timeout        = attr->alt_timeout;

        memcpy(resp.dest.dgid, attr->ah_attr.grh.dgid.raw, 16);
        resp.dest.flow_label    = attr->ah_attr.grh.flow_label;
        resp.dest.sgid_index    = attr->ah_attr.grh.sgid_index;
        resp.dest.hop_limit     = attr->ah_attr.grh.hop_limit;
        resp.dest.traffic_class = attr->ah_attr.grh.traffic_class;
        resp.dest.dlid          = attr->ah_attr.dlid;
        resp.dest.sl            = attr->ah_attr.sl;
        resp.dest.src_path_bits = attr->ah_attr.src_path_bits;
        resp.dest.static_rate   = attr->ah_attr.static_rate;
        resp.dest.is_global     = !!(attr->ah_attr.ah_flags & IB_AH_GRH);
        resp.dest.port_num      = attr->ah_attr.port_num;

        memcpy(resp.alt_dest.dgid, attr->alt_ah_attr.grh.dgid.raw, 16);
        resp.alt_dest.flow_label    = attr->alt_ah_attr.grh.flow_label;
        resp.alt_dest.sgid_index    = attr->alt_ah_attr.grh.sgid_index;
        resp.alt_dest.hop_limit     = attr->alt_ah_attr.grh.hop_limit;
        resp.alt_dest.traffic_class = attr->alt_ah_attr.grh.traffic_class;
        resp.alt_dest.dlid          = attr->alt_ah_attr.dlid;
        resp.alt_dest.sl            = attr->alt_ah_attr.sl;
        resp.alt_dest.src_path_bits = attr->alt_ah_attr.src_path_bits;
        resp.alt_dest.static_rate   = attr->alt_ah_attr.static_rate;
        resp.alt_dest.is_global     = !!(attr->alt_ah_attr.ah_flags & IB_AH_GRH);
        resp.alt_dest.port_num      = attr->alt_ah_attr.port_num;

        resp.max_send_wr     = init_attr->cap.max_send_wr;
        resp.max_recv_wr     = init_attr->cap.max_recv_wr;
        resp.max_send_sge    = init_attr->cap.max_send_sge;
        resp.max_recv_sge    = init_attr->cap.max_recv_sge;
        resp.max_inline_data = init_attr->cap.max_inline_data;
        resp.sq_sig_all      = init_attr->sq_sig_type == IB_SIGNAL_ALL_WR;

        if (copy_to_user((void __user *) (unsigned long) cmd.response,
                         &resp, sizeof resp))
                ret = -EFAULT;

out:
        kfree(attr);
        kfree(init_attr);

        return ret ? ret : in_len;
}

/* Remove ignored fields set in the attribute mask */
static int modify_qp_mask(enum ib_qp_type qp_type, int mask)
{
        switch (qp_type) {
        case IB_QPT_XRC_INI:
                return mask & ~(IB_QP_MAX_DEST_RD_ATOMIC | IB_QP_MIN_RNR_TIMER);
        case IB_QPT_XRC_TGT:
                return mask & ~(IB_QP_MAX_QP_RD_ATOMIC | IB_QP_RETRY_CNT |
                                IB_QP_RNR_RETRY);
        default:
                return mask;
        }
}

ssize_t ib_uverbs_modify_qp(struct ib_uverbs_file *file,
                            const char __user *buf, int in_len,
                            int out_len)
{
        struct ib_uverbs_modify_qp cmd;
        struct ib_udata udata;
        struct ib_qp *qp;
        struct ib_qp_attr *attr;
        int ret;

        if (copy_from_user(&cmd, buf, sizeof cmd))
                return -EFAULT;

        INIT_UDATA(&udata, buf + sizeof cmd, NULL, in_len - sizeof cmd,
                   out_len);

        attr = kmalloc(sizeof *attr, GFP_KERNEL);
        if (!attr)
                return -ENOMEM;

        qp = idr_read_qp(cmd.qp_handle, file->ucontext);
        if (!qp) {
                ret = -EINVAL;
                goto out;
        }

        attr->qp_state            = cmd.qp_state;
        attr->cur_qp_state        = cmd.cur_qp_state;
        attr->path_mtu            = cmd.path_mtu;
        attr->path_mig_state      = cmd.path_mig_state;
        attr->qkey                = cmd.qkey;
        attr->rq_psn              = cmd.rq_psn;
        attr->sq_psn              = cmd.sq_psn;
        attr->dest_qp_num         = cmd.dest_qp_num;
        attr->qp_access_flags     = cmd.qp_access_flags;
        attr->pkey_index          = cmd.pkey_index;
        attr->alt_pkey_index      = cmd.alt_pkey_index;
        attr->en_sqd_async_notify = cmd.en_sqd_async_notify;
        attr->max_rd_atomic       = cmd.max_rd_atomic;
        attr->max_dest_rd_atomic  = cmd.max_dest_rd_atomic;
        attr->min_rnr_timer       = cmd.min_rnr_timer;
        attr->port_num            = cmd.port_num;
        attr->timeout             = cmd.timeout;
        attr->retry_cnt           = cmd.retry_cnt;
        attr->rnr_retry           = cmd.rnr_retry;
        attr->alt_port_num        = cmd.alt_port_num;
        attr->alt_timeout         = cmd.alt_timeout;

        memcpy(attr->ah_attr.grh.dgid.raw, cmd.dest.dgid, 16);
        attr->ah_attr.grh.flow_label    = cmd.dest.flow_label;
        attr->ah_attr.grh.sgid_index    = cmd.dest.sgid_index;
        attr->ah_attr.grh.hop_limit     = cmd.dest.hop_limit;
        attr->ah_attr.grh.traffic_class = cmd.dest.traffic_class;
        attr->ah_attr.dlid              = cmd.dest.dlid;
        attr->ah_attr.sl                = cmd.dest.sl;
        attr->ah_attr.src_path_bits     = cmd.dest.src_path_bits;
        attr->ah_attr.static_rate       = cmd.dest.static_rate;
        attr->ah_attr.ah_flags          = cmd.dest.is_global ? IB_AH_GRH : 0;
        attr->ah_attr.port_num          = cmd.dest.port_num;

        memcpy(attr->alt_ah_attr.grh.dgid.raw, cmd.alt_dest.dgid, 16);
        attr->alt_ah_attr.grh.flow_label    = cmd.alt_dest.flow_label;
        attr->alt_ah_attr.grh.sgid_index    = cmd.alt_dest.sgid_index;
        attr->alt_ah_attr.grh.hop_limit     = cmd.alt_dest.hop_limit;
        attr->alt_ah_attr.grh.traffic_class = cmd.alt_dest.traffic_class;
        attr->alt_ah_attr.dlid              = cmd.alt_dest.dlid;
        attr->alt_ah_attr.sl                = cmd.alt_dest.sl;
        attr->alt_ah_attr.src_path_bits     = cmd.alt_dest.src_path_bits;
        attr->alt_ah_attr.static_rate       = cmd.alt_dest.static_rate;
        attr->alt_ah_attr.ah_flags          = cmd.alt_dest.is_global ? IB_AH_GRH : 0;
        attr->alt_ah_attr.port_num          = cmd.alt_dest.port_num;

        if (qp->real_qp == qp) {
                ret = qp->device->modify_qp(qp, attr,
                        modify_qp_mask(qp->qp_type, cmd.attr_mask), &udata);
        } else {
                ret = ib_modify_qp(qp, attr, modify_qp_mask(qp->qp_type, cmd.attr_mask));
        }

        put_qp_read(qp);

        if (ret)
                goto out;

        ret = in_len;

out:
        kfree(attr);

        return ret;
}

ssize_t ib_uverbs_destroy_qp(struct ib_uverbs_file *file,
                             const char __user *buf, int in_len,
                             int out_len)
{
        struct ib_uverbs_destroy_qp cmd;
        struct ib_uverbs_destroy_qp_resp resp;
        struct ib_uobject *uobj;
        struct ib_qp *qp;
        struct ib_uqp_object *obj;
        int ret = -EINVAL;

        if (copy_from_user(&cmd, buf, sizeof cmd))
                return -EFAULT;

        memset(&resp, 0, sizeof resp);

        uobj = idr_write_uobj(&ib_uverbs_qp_idr, cmd.qp_handle, file->ucontext);
        if (!uobj)
                return -EINVAL;
        qp  = uobj->object;
        obj = container_of(uobj, struct ib_uqp_object, uevent.uobject);

        if (!list_empty(&obj->mcast_list)) {
                put_uobj_write(uobj);
                return -EBUSY;
        }

        ret = ib_destroy_qp(qp);
        if (!ret)
                uobj->live = 0;

        put_uobj_write(uobj);

        if (ret)
                return ret;

        if (obj->uxrcd)
                atomic_dec(&obj->uxrcd->refcnt);

        idr_remove_uobj(&ib_uverbs_qp_idr, uobj);

        mutex_lock(&file->mutex);
        list_del(&uobj->list);
        mutex_unlock(&file->mutex);

        ib_uverbs_release_uevent(file, &obj->uevent);

        resp.events_reported = obj->uevent.events_reported;

        put_uobj(uobj);

        if (copy_to_user((void __user *) (unsigned long) cmd.response,
                         &resp, sizeof resp))
                return -EFAULT;

        return in_len;
}

ssize_t ib_uverbs_post_send(struct ib_uverbs_file *file,
                            const char __user *buf, int in_len,
                            int out_len)
{
        struct ib_uverbs_post_send cmd;
        struct ib_uverbs_post_send_resp resp;
        struct ib_uverbs_send_wr *user_wr;
        struct ib_send_wr *wr = NULL, *last, *next, *bad_wr;
        struct ib_qp *qp;
        int i, sg_ind;
        int is_ud;
        ssize_t ret = -EINVAL;

        if (copy_from_user(&cmd, buf, sizeof cmd))
                return -EFAULT;

        if (in_len < sizeof cmd + cmd.wqe_size * cmd.wr_count +
            cmd.sge_count * sizeof (struct ib_uverbs_sge))
                return -EINVAL;

        if (cmd.wqe_size < sizeof (struct ib_uverbs_send_wr))
                return -EINVAL;

        user_wr = kmalloc(cmd.wqe_size, GFP_KERNEL);
        if (!user_wr)
                return -ENOMEM;

        qp = idr_read_qp(cmd.qp_handle, file->ucontext);
        if (!qp)
                goto out;

        is_ud = qp->qp_type == IB_QPT_UD;
        sg_ind = 0;
        last = NULL;
        for (i = 0; i < cmd.wr_count; ++i) {
                if (copy_from_user(user_wr,
                                   buf + sizeof cmd + i * cmd.wqe_size,
                                   cmd.wqe_size)) {
                        ret = -EFAULT;
                        goto out_put;
                }

                if (user_wr->num_sge + sg_ind > cmd.sge_count) {
                        ret = -EINVAL;
                        goto out_put;
                }

                next = kmalloc(ALIGN(sizeof *next, sizeof (struct ib_sge)) +
                               user_wr->num_sge * sizeof (struct ib_sge),
                               GFP_KERNEL);
                if (!next) {
                        ret = -ENOMEM;
                        goto out_put;
                }

                if (!last)
                        wr = next;
                else
                        last->next = next;
                last = next;

                next->next       = NULL;
                next->wr_id      = user_wr->wr_id;
                next->num_sge    = user_wr->num_sge;
                next->opcode     = user_wr->opcode;
                next->send_flags = user_wr->send_flags;

                if (is_ud) {
                        next->wr.ud.ah = idr_read_ah(user_wr->wr.ud.ah,
                                                     file->ucontext);
                        if (!next->wr.ud.ah) {
                                ret = -EINVAL;
                                goto out_put;
                        }
                        next->wr.ud.remote_qpn  = user_wr->wr.ud.remote_qpn;
                        next->wr.ud.remote_qkey = user_wr->wr.ud.remote_qkey;
                } else {
                        switch (next->opcode) {
                        case IB_WR_RDMA_WRITE_WITH_IMM:
                                next->ex.imm_data =
                                        (__be32 __force) user_wr->ex.imm_data;
                        case IB_WR_RDMA_WRITE:
                        case IB_WR_RDMA_READ:
                                next->wr.rdma.remote_addr =
                                        user_wr->wr.rdma.remote_addr;
                                next->wr.rdma.rkey        =
                                        user_wr->wr.rdma.rkey;
                                break;
                        case IB_WR_SEND_WITH_IMM:
                                next->ex.imm_data =
                                        (__be32 __force) user_wr->ex.imm_data;
                                break;
                        case IB_WR_SEND_WITH_INV:
                                next->ex.invalidate_rkey =
                                        user_wr->ex.invalidate_rkey;
                                break;
                        case IB_WR_ATOMIC_CMP_AND_SWP:
                        case IB_WR_ATOMIC_FETCH_AND_ADD:
                                next->wr.atomic.remote_addr =
                                        user_wr->wr.atomic.remote_addr;
                                next->wr.atomic.compare_add =
                                        user_wr->wr.atomic.compare_add;
                                next->wr.atomic.swap = user_wr->wr.atomic.swap;
                                next->wr.atomic.rkey = user_wr->wr.atomic.rkey;
                                break;
                        default:
                                break;
                        }
                }

                if (next->num_sge) {
                        next->sg_list = (void *) next +
                                ALIGN(sizeof *next, sizeof (struct ib_sge));
                        if (copy_from_user(next->sg_list,
                                           buf + sizeof cmd +
                                           cmd.wr_count * cmd.wqe_size +
                                           sg_ind * sizeof (struct ib_sge),
                                           next->num_sge * sizeof (struct ib_sge))) {
                                ret = -EFAULT;
                                goto out_put;
                        }
                        sg_ind += next->num_sge;
                } else
                        next->sg_list = NULL;
        }

        resp.bad_wr = 0;
        ret = qp->device->post_send(qp->real_qp, wr, &bad_wr);
        if (ret)
                for (next = wr; next; next = next->next) {
                        ++resp.bad_wr;
                        if (next == bad_wr)
                                break;
                }

        if (copy_to_user((void __user *) (unsigned long) cmd.response,
                         &resp, sizeof resp))
                ret = -EFAULT;

out_put:
        put_qp_read(qp);

        while (wr) {
                if (is_ud && wr->wr.ud.ah)
                        put_ah_read(wr->wr.ud.ah);
                next = wr->next;
                kfree(wr);
                wr = next;
        }

out:
        kfree(user_wr);

        return ret ? ret : in_len;
}

static struct ib_recv_wr *ib_uverbs_unmarshall_recv(const char __user *buf,
                                                    int in_len,
                                                    u32 wr_count,
                                                    u32 sge_count,
                                                    u32 wqe_size)
{
        struct ib_uverbs_recv_wr *user_wr;
        struct ib_recv_wr *wr = NULL, *last, *next;
        int sg_ind;
        int i;
        int ret;

        if (in_len < wqe_size * wr_count +
            sge_count * sizeof (struct ib_uverbs_sge))
                return ERR_PTR(-EINVAL);

        if (wqe_size < sizeof (struct ib_uverbs_recv_wr))
                return ERR_PTR(-EINVAL);

        user_wr = kmalloc(wqe_size, GFP_KERNEL);
        if (!user_wr)
                return ERR_PTR(-ENOMEM);

        sg_ind = 0;
        last = NULL;
        for (i = 0; i < wr_count; ++i) {
                if (copy_from_user(user_wr, buf + i * wqe_size,
                                   wqe_size)) {
                        ret = -EFAULT;
                        goto err;
                }

                if (user_wr->num_sge + sg_ind > sge_count) {
                        ret = -EINVAL;
                        goto err;
                }

                next = kmalloc(ALIGN(sizeof *next, sizeof (struct ib_sge)) +
                               user_wr->num_sge * sizeof (struct ib_sge),
                               GFP_KERNEL);
                if (!next) {
                        ret = -ENOMEM;
                        goto err;
                }

                if (!last)
                        wr = next;
                else
                        last->next = next;
                last = next;

                next->next    = NULL;
                next->wr_id   = user_wr->wr_id;
                next->num_sge = user_wr->num_sge;

                if (next->num_sge) {
                        next->sg_list = (void *) next +
                                ALIGN(sizeof *next, sizeof (struct ib_sge));
                        if (copy_from_user(next->sg_list,
                                           buf + wr_count * wqe_size +
                                           sg_ind * sizeof (struct ib_sge),
                                           next->num_sge * sizeof (struct ib_sge))) {
                                ret = -EFAULT;
                                goto err;
                        }
                        sg_ind += next->num_sge;
                } else
                        next->sg_list = NULL;
        }

        kfree(user_wr);
        return wr;

err:
        kfree(user_wr);

        while (wr) {
                next = wr->next;
                kfree(wr);
                wr = next;
        }

        return ERR_PTR(ret);
}

ssize_t ib_uverbs_post_recv(struct ib_uverbs_file *file,
                            const char __user *buf, int in_len,
                            int out_len)
{
        struct ib_uverbs_post_recv cmd;
        struct ib_uverbs_post_recv_resp resp;
        struct ib_recv_wr *wr, *next, *bad_wr;
        struct ib_qp *qp;
        ssize_t ret = -EINVAL;

        if (copy_from_user(&cmd, buf, sizeof cmd))
                return -EFAULT;

        wr = ib_uverbs_unmarshall_recv(buf + sizeof cmd,
                                       in_len - sizeof cmd, cmd.wr_count,
                                       cmd.sge_count, cmd.wqe_size);
        if (IS_ERR(wr))
                return PTR_ERR(wr);

        qp = idr_read_qp(cmd.qp_handle, file->ucontext);
        if (!qp)
                goto out;

        resp.bad_wr = 0;
        ret = qp->device->post_recv(qp->real_qp, wr, &bad_wr);

        put_qp_read(qp);

        if (ret)
                for (next = wr; next; next = next->next) {
                        ++resp.bad_wr;
                        if (next == bad_wr)
                                break;
                }

        if (copy_to_user((void __user *) (unsigned long) cmd.response,
                         &resp, sizeof resp))
                ret = -EFAULT;

out:
        while (wr) {
                next = wr->next;
                kfree(wr);
                wr = next;
        }

        return ret ? ret : in_len;
}

ssize_t ib_uverbs_post_srq_recv(struct ib_uverbs_file *file,
                                const char __user *buf, int in_len,
                                int out_len)
{
        struct ib_uverbs_post_srq_recv cmd;
        struct ib_uverbs_post_srq_recv_resp resp;
        struct ib_recv_wr *wr, *next, *bad_wr;
        struct ib_srq *srq;
        ssize_t ret = -EINVAL;

        if (copy_from_user(&cmd, buf, sizeof cmd))
                return -EFAULT;

        wr = ib_uverbs_unmarshall_recv(buf + sizeof cmd,
                                       in_len - sizeof cmd, cmd.wr_count,
                                       cmd.sge_count, cmd.wqe_size);
        if (IS_ERR(wr))
                return PTR_ERR(wr);

        srq = idr_read_srq(cmd.srq_handle, file->ucontext);
        if (!srq)
                goto out;

        resp.bad_wr = 0;
        ret = srq->device->post_srq_recv(srq, wr, &bad_wr);

        put_srq_read(srq);

        if (ret)
                for (next = wr; next; next = next->next) {
                        ++resp.bad_wr;
                        if (next == bad_wr)
                                break;
                }

        if (copy_to_user((void __user *) (unsigned long) cmd.response,
                         &resp, sizeof resp))
                ret = -EFAULT;

out:
        while (wr) {
                next = wr->next;
                kfree(wr);
                wr = next;
        }

        return ret ? ret : in_len;
}

ssize_t ib_uverbs_create_ah(struct ib_uverbs_file *file,
                            const char __user *buf, int in_len,
                            int out_len)
{
        struct ib_uverbs_create_ah cmd;
        struct ib_uverbs_create_ah_resp resp;
        struct ib_uobject *uobj;
        struct ib_pd *pd;
        struct ib_ah *ah;
        struct ib_ah_attr attr;
        int ret;

        if (out_len < sizeof resp)
                return -ENOSPC;

        if (copy_from_user(&cmd, buf, sizeof cmd))
                return -EFAULT;

        uobj = kmalloc(sizeof *uobj, GFP_KERNEL);
        if (!uobj)
                return -ENOMEM;

        init_uobj(uobj, cmd.user_handle, file->ucontext, &ah_lock_class);
        down_write(&uobj->mutex);

        pd = idr_read_pd(cmd.pd_handle, file->ucontext);
        if (!pd) {
                ret = -EINVAL;
                goto err;
        }

        attr.dlid              = cmd.attr.dlid;
        attr.sl                = cmd.attr.sl;
        attr.src_path_bits     = cmd.attr.src_path_bits;
        attr.static_rate       = cmd.attr.static_rate;
        attr.ah_flags          = cmd.attr.is_global ? IB_AH_GRH : 0;
        attr.port_num          = cmd.attr.port_num;
        attr.grh.flow_label    = cmd.attr.grh.flow_label;
        attr.grh.sgid_index    = cmd.attr.grh.sgid_index;
        attr.grh.hop_limit     = cmd.attr.grh.hop_limit;
        attr.grh.traffic_class = cmd.attr.grh.traffic_class;
        memcpy(attr.grh.dgid.raw, cmd.attr.grh.dgid, 16);

        ah = ib_create_ah(pd, &attr);
        if (IS_ERR(ah)) {
                ret = PTR_ERR(ah);
                goto err_put;
        }

        ah->uobject  = uobj;
        uobj->object = ah;

        ret = idr_add_uobj(&ib_uverbs_ah_idr, uobj);
        if (ret)
                goto err_destroy;

        resp.ah_handle = uobj->id;

        if (copy_to_user((void __user *) (unsigned long) cmd.response,
                         &resp, sizeof resp)) {
                ret = -EFAULT;
                goto err_copy;
        }

        put_pd_read(pd);

        mutex_lock(&file->mutex);
        list_add_tail(&uobj->list, &file->ucontext->ah_list);
        mutex_unlock(&file->mutex);

        uobj->live = 1;

        up_write(&uobj->mutex);

        return in_len;

err_copy:
        idr_remove_uobj(&ib_uverbs_ah_idr, uobj);

err_destroy:
        ib_destroy_ah(ah);

err_put:
        put_pd_read(pd);

err:
        put_uobj_write(uobj);
        return ret;
}

ssize_t ib_uverbs_destroy_ah(struct ib_uverbs_file *file,
                             const char __user *buf, int in_len, int out_len)
{
        struct ib_uverbs_destroy_ah cmd;
        struct ib_ah *ah;
        struct ib_uobject *uobj;
        int ret;

        if (copy_from_user(&cmd, buf, sizeof cmd))
                return -EFAULT;

        uobj = idr_write_uobj(&ib_uverbs_ah_idr, cmd.ah_handle, file->ucontext);
        if (!uobj)
                return -EINVAL;

        ah = uobj->object;

        ret = ib_destroy_ah(ah);
        if (!ret)
                uobj->live = 0;

        put_uobj_write(uobj);

        if (ret)
                return ret;

        idr_remove_uobj(&ib_uverbs_ah_idr, uobj);

        mutex_lock(&file->mutex);
        list_del(&uobj->list);
        mutex_unlock(&file->mutex);

        put_uobj(uobj);

        return in_len;
}

ssize_t ib_uverbs_attach_mcast(struct ib_uverbs_file *file,
                               const char __user *buf, int in_len,
                               int out_len)
{
        struct ib_uverbs_attach_mcast cmd;
        struct ib_qp *qp;
        struct ib_uqp_object *obj;
        struct ib_uverbs_mcast_entry *mcast;
        int ret;

        if (copy_from_user(&cmd, buf, sizeof cmd))
                return -EFAULT;

        qp = idr_write_qp(cmd.qp_handle, file->ucontext);
        if (!qp)
                return -EINVAL;

        obj = container_of(qp->uobject, struct ib_uqp_object, uevent.uobject);

        list_for_each_entry(mcast, &obj->mcast_list, list)
                if (cmd.mlid == mcast->lid &&
                    !memcmp(cmd.gid, mcast->gid.raw, sizeof mcast->gid.raw)) {
                        ret = 0;
                        goto out_put;
                }

        mcast = kmalloc(sizeof *mcast, GFP_KERNEL);
        if (!mcast) {
                ret = -ENOMEM;
                goto out_put;
        }

        mcast->lid = cmd.mlid;
        memcpy(mcast->gid.raw, cmd.gid, sizeof mcast->gid.raw);

        ret = ib_attach_mcast(qp, &mcast->gid, cmd.mlid);
        if (!ret)
                list_add_tail(&mcast->list, &obj->mcast_list);
        else
                kfree(mcast);

out_put:
        put_qp_write(qp);

        return ret ? ret : in_len;
}

ssize_t ib_uverbs_detach_mcast(struct ib_uverbs_file *file,
                               const char __user *buf, int in_len,
                               int out_len)
{
        struct ib_uverbs_detach_mcast cmd;
        struct ib_uqp_object *obj;
        struct ib_qp *qp;
        struct ib_uverbs_mcast_entry *mcast;
        int ret = -EINVAL;

        if (copy_from_user(&cmd, buf, sizeof cmd))
                return -EFAULT;

        qp = idr_write_qp(cmd.qp_handle, file->ucontext);
        if (!qp)
                return -EINVAL;

        ret = ib_detach_mcast(qp, (union ib_gid *) cmd.gid, cmd.mlid);
        if (ret)
                goto out_put;

        obj = container_of(qp->uobject, struct ib_uqp_object, uevent.uobject);

        list_for_each_entry(mcast, &obj->mcast_list, list)
                if (cmd.mlid == mcast->lid &&
                    !memcmp(cmd.gid, mcast->gid.raw, sizeof mcast->gid.raw)) {
                        list_del(&mcast->list);
                        kfree(mcast);
                        break;
                }

out_put:
        put_qp_write(qp);

        return ret ? ret : in_len;
}

static int kern_spec_to_ib_spec(struct ib_kern_spec *kern_spec,
                                union ib_flow_spec *ib_spec)
{
        ib_spec->type = kern_spec->type;

        switch (ib_spec->type) {
        case IB_FLOW_SPEC_ETH:
                ib_spec->eth.size = sizeof(struct ib_flow_spec_eth);
                if (ib_spec->eth.size != kern_spec->eth.size)
                        return -EINVAL;
                memcpy(&ib_spec->eth.val, &kern_spec->eth.val,
                       sizeof(struct ib_flow_eth_filter));
                memcpy(&ib_spec->eth.mask, &kern_spec->eth.mask,
                       sizeof(struct ib_flow_eth_filter));
                break;
        case IB_FLOW_SPEC_IPV4:
                ib_spec->ipv4.size = sizeof(struct ib_flow_spec_ipv4);
                if (ib_spec->ipv4.size != kern_spec->ipv4.size)
                        return -EINVAL;
                memcpy(&ib_spec->ipv4.val, &kern_spec->ipv4.val,
                       sizeof(struct ib_flow_ipv4_filter));
                memcpy(&ib_spec->ipv4.mask, &kern_spec->ipv4.mask,
                       sizeof(struct ib_flow_ipv4_filter));
                break;
        case IB_FLOW_SPEC_TCP:
        case IB_FLOW_SPEC_UDP:
                ib_spec->tcp_udp.size = sizeof(struct ib_flow_spec_tcp_udp);
                if (ib_spec->tcp_udp.size != kern_spec->tcp_udp.size)
                        return -EINVAL;
                memcpy(&ib_spec->tcp_udp.val, &kern_spec->tcp_udp.val,
                       sizeof(struct ib_flow_tcp_udp_filter));
                memcpy(&ib_spec->tcp_udp.mask, &kern_spec->tcp_udp.mask,
                       sizeof(struct ib_flow_tcp_udp_filter));
                break;
        default:
                return -EINVAL;
        }
        return 0;
}

ssize_t ib_uverbs_create_flow(struct ib_uverbs_file *file,
                              const char __user *buf, int in_len,
                              int out_len)
{
        struct ib_uverbs_create_flow cmd;
        struct ib_uverbs_create_flow_resp resp;
        struct ib_uobject *uobj;
        struct ib_flow *flow_id;
        struct ib_kern_flow_attr *kern_flow_attr;
        struct ib_flow_attr *flow_attr;
        struct ib_qp *qp;
        int err = 0;
        void *kern_spec;
        void *ib_spec;
        int i;
        int kern_attr_size;

        if (out_len < sizeof(resp))
                return -ENOSPC;

        if (copy_from_user(&cmd, buf, sizeof(cmd)))
                return -EFAULT;

        if ((cmd.flow_attr.type == IB_FLOW_ATTR_SNIFFER &&
             !capable(CAP_NET_ADMIN)) || !capable(CAP_NET_RAW))
                return -EPERM;

        if (cmd.flow_attr.num_of_specs < 0 ||
            cmd.flow_attr.num_of_specs > IB_FLOW_SPEC_SUPPORT_LAYERS)
                return -EINVAL;

        kern_attr_size = cmd.flow_attr.size - sizeof(cmd) -
                         sizeof(struct ib_uverbs_cmd_hdr_ex);

        if (cmd.flow_attr.size < 0 || cmd.flow_attr.size > in_len ||
            kern_attr_size < 0 || kern_attr_size >
            (cmd.flow_attr.num_of_specs * sizeof(struct ib_kern_spec)))
                return -EINVAL;

        if (cmd.flow_attr.num_of_specs) {
                kern_flow_attr = kmalloc(cmd.flow_attr.size, GFP_KERNEL);
                if (!kern_flow_attr)
                        return -ENOMEM;

                memcpy(kern_flow_attr, &cmd.flow_attr, sizeof(*kern_flow_attr));
                if (copy_from_user(kern_flow_attr + 1, buf + sizeof(cmd),
                                   kern_attr_size)) {
                        err = -EFAULT;
                        goto err_free_attr;
                }
        } else {
                kern_flow_attr = &cmd.flow_attr;
                kern_attr_size = sizeof(cmd.flow_attr);
        }

        uobj = kmalloc(sizeof(*uobj), GFP_KERNEL);
        if (!uobj) {
                err = -ENOMEM;
                goto err_free_attr;
        }
        init_uobj(uobj, 0, file->ucontext, &rule_lock_class);
        down_write(&uobj->mutex);

        qp = idr_read_qp(cmd.qp_handle, file->ucontext);
        if (!qp) {
                err = -EINVAL;
                goto err_uobj;
        }

        flow_attr = kmalloc(cmd.flow_attr.size, GFP_KERNEL);
        if (!flow_attr) {
                err = -ENOMEM;
                goto err_put;
        }

        flow_attr->type = kern_flow_attr->type;
        flow_attr->priority = kern_flow_attr->priority;
        flow_attr->num_of_specs = kern_flow_attr->num_of_specs;
        flow_attr->port = kern_flow_attr->port;
        flow_attr->flags = kern_flow_attr->flags;
        flow_attr->size = sizeof(*flow_attr);

        kern_spec = kern_flow_attr + 1;
        ib_spec = flow_attr + 1;
        for (i = 0; i < flow_attr->num_of_specs && kern_attr_size > 0; i++) {
                err = kern_spec_to_ib_spec(kern_spec, ib_spec);
                if (err)
                        goto err_free;
                flow_attr->size +=
                        ((union ib_flow_spec *) ib_spec)->size;
                kern_attr_size -= ((struct ib_kern_spec *) kern_spec)->size;
                kern_spec += ((struct ib_kern_spec *) kern_spec)->size;
                ib_spec += ((union ib_flow_spec *) ib_spec)->size;
        }
        if (kern_attr_size) {
                pr_warn("create flow failed, %d bytes left from uverb cmd\n",
                        kern_attr_size);
                goto err_free;
        }
        flow_id = ib_create_flow(qp, flow_attr, IB_FLOW_DOMAIN_USER);
        if (IS_ERR(flow_id)) {
                err = PTR_ERR(flow_id);
                goto err_free;
        }
        flow_id->qp = qp;
        flow_id->uobject = uobj;
        uobj->object = flow_id;

        err = idr_add_uobj(&ib_uverbs_rule_idr, uobj);
        if (err)
                goto destroy_flow;

        memset(&resp, 0, sizeof(resp));
        resp.flow_handle = uobj->id;

        if (copy_to_user((void __user *)(unsigned long) cmd.response,
                         &resp, sizeof(resp))) {
                err = -EFAULT;
                goto err_copy;
        }

        put_qp_read(qp);
        mutex_lock(&file->mutex);
        list_add_tail(&uobj->list, &file->ucontext->rule_list);
        mutex_unlock(&file->mutex);

        uobj->live = 1;

        up_write(&uobj->mutex);
        kfree(flow_attr);
        if (cmd.flow_attr.num_of_specs)
                kfree(kern_flow_attr);
        return in_len;

err_copy:
        idr_remove_uobj(&ib_uverbs_rule_idr, uobj);

destroy_flow:
        ib_destroy_flow(flow_id);

err_free:
        kfree(flow_attr);

err_put:
        put_qp_read(qp);

err_uobj:
        put_uobj_write(uobj);

err_free_attr:
        if (cmd.flow_attr.num_of_specs)
                kfree(kern_flow_attr);
        return err;
}

ssize_t ib_uverbs_destroy_flow(struct ib_uverbs_file *file,
                               const char __user *buf, int in_len,
                               int out_len)
{
        struct ib_uverbs_destroy_flow cmd;
        struct ib_flow *flow_id;
        struct ib_uobject *uobj;
        int ret;

        if (copy_from_user(&cmd, buf, sizeof(cmd)))
                return -EFAULT;

        uobj = idr_write_uobj(&ib_uverbs_rule_idr, cmd.flow_handle,
                              file->ucontext);
        if (!uobj)
                return -EINVAL;
        flow_id = uobj->object;

        ret = ib_destroy_flow(flow_id);
        if (!ret)
                uobj->live = 0;

        put_uobj_write(uobj);

        idr_remove_uobj(&ib_uverbs_rule_idr, uobj);

        mutex_lock(&file->mutex);
        list_del(&uobj->list);
        mutex_unlock(&file->mutex);

        put_uobj(uobj);

        return ret ? ret : in_len;
}

static int __uverbs_create_xsrq(struct ib_uverbs_file *file,
                                struct ib_uverbs_create_xsrq *cmd,
                                struct ib_udata *udata)
{
        struct ib_uverbs_create_srq_resp resp;
        struct ib_usrq_object *obj;
        struct ib_pd *pd;
        struct ib_srq *srq;
        struct ib_uobject *uninitialized_var(xrcd_uobj);
        struct ib_srq_init_attr attr;
        int ret;

        obj = kmalloc(sizeof *obj, GFP_KERNEL);
        if (!obj)
                return -ENOMEM;

        init_uobj(&obj->uevent.uobject, cmd->user_handle, file->ucontext, &srq_lock_class);
        down_write(&obj->uevent.uobject.mutex);

        if (cmd->srq_type == IB_SRQT_XRC) {
                attr.ext.xrc.xrcd = idr_read_xrcd(cmd->xrcd_handle, file->ucontext, &xrcd_uobj);
                if (!attr.ext.xrc.xrcd) {
                        ret = -EINVAL;
                        goto err;
                }

                obj->uxrcd = container_of(xrcd_uobj, struct ib_uxrcd_object, uobject);
                atomic_inc(&obj->uxrcd->refcnt);

                attr.ext.xrc.cq = idr_read_cq(cmd->cq_handle, file->ucontext, 0);
                if (!attr.ext.xrc.cq) {
                        ret = -EINVAL;
                        goto err_put_xrcd;
                }
        }

        pd = idr_read_pd(cmd->pd_handle, file->ucontext);
        if (!pd) {
                ret = -EINVAL;
                goto err_put_cq;
        }

        attr.event_handler  = ib_uverbs_srq_event_handler;
        attr.srq_context    = file;
        attr.srq_type       = cmd->srq_type;
        attr.attr.max_wr    = cmd->max_wr;
        attr.attr.max_sge   = cmd->max_sge;
        attr.attr.srq_limit = cmd->srq_limit;

        obj->uevent.events_reported = 0;
        INIT_LIST_HEAD(&obj->uevent.event_list);

        srq = pd->device->create_srq(pd, &attr, udata);
        if (IS_ERR(srq)) {
                ret = PTR_ERR(srq);
                goto err_put;
        }

        srq->device        = pd->device;
        srq->pd            = pd;
        srq->srq_type      = cmd->srq_type;
        srq->uobject       = &obj->uevent.uobject;
        srq->event_handler = attr.event_handler;
        srq->srq_context   = attr.srq_context;

        if (cmd->srq_type == IB_SRQT_XRC) {
                srq->ext.xrc.cq   = attr.ext.xrc.cq;
                srq->ext.xrc.xrcd = attr.ext.xrc.xrcd;
                atomic_inc(&attr.ext.xrc.cq->usecnt);
                atomic_inc(&attr.ext.xrc.xrcd->usecnt);
        }

        atomic_inc(&pd->usecnt);
        atomic_set(&srq->usecnt, 0);

        obj->uevent.uobject.object = srq;
        ret = idr_add_uobj(&ib_uverbs_srq_idr, &obj->uevent.uobject);
        if (ret)
                goto err_destroy;

        memset(&resp, 0, sizeof resp);
        resp.srq_handle = obj->uevent.uobject.id;
        resp.max_wr     = attr.attr.max_wr;
        resp.max_sge    = attr.attr.max_sge;
        if (cmd->srq_type == IB_SRQT_XRC)
                resp.srqn = srq->ext.xrc.srq_num;

        if (copy_to_user((void __user *) (unsigned long) cmd->response,
                         &resp, sizeof resp)) {
                ret = -EFAULT;
                goto err_copy;
        }

        if (cmd->srq_type == IB_SRQT_XRC) {
                put_uobj_read(xrcd_uobj);
                put_cq_read(attr.ext.xrc.cq);
        }
        put_pd_read(pd);

        mutex_lock(&file->mutex);
        list_add_tail(&obj->uevent.uobject.list, &file->ucontext->srq_list);
        mutex_unlock(&file->mutex);

        obj->uevent.uobject.live = 1;

        up_write(&obj->uevent.uobject.mutex);

        return 0;

err_copy:
        idr_remove_uobj(&ib_uverbs_srq_idr, &obj->uevent.uobject);

err_destroy:
        ib_destroy_srq(srq);

err_put:
        put_pd_read(pd);

err_put_cq:
        if (cmd->srq_type == IB_SRQT_XRC)
                put_cq_read(attr.ext.xrc.cq);

err_put_xrcd:
        if (cmd->srq_type == IB_SRQT_XRC) {
                atomic_dec(&obj->uxrcd->refcnt);
                put_uobj_read(xrcd_uobj);
        }

err:
        put_uobj_write(&obj->uevent.uobject);
        return ret;
}

ssize_t ib_uverbs_create_srq(struct ib_uverbs_file *file,
                             const char __user *buf, int in_len,
                             int out_len)
{
        struct ib_uverbs_create_srq cmd;
        struct ib_uverbs_create_xsrq xcmd;
        struct ib_uverbs_create_srq_resp resp;
        struct ib_udata udata;
        int ret;

        if (out_len < sizeof resp)
                return -ENOSPC;

        if (copy_from_user(&cmd, buf, sizeof cmd))
                return -EFAULT;

        xcmd.response    = cmd.response;
        xcmd.user_handle = cmd.user_handle;
        xcmd.srq_type    = IB_SRQT_BASIC;
        xcmd.pd_handle   = cmd.pd_handle;
        xcmd.max_wr      = cmd.max_wr;
        xcmd.max_sge     = cmd.max_sge;
        xcmd.srq_limit   = cmd.srq_limit;

        INIT_UDATA(&udata, buf + sizeof cmd,
                   (unsigned long) cmd.response + sizeof resp,
                   in_len - sizeof cmd, out_len - sizeof resp);

        ret = __uverbs_create_xsrq(file, &xcmd, &udata);
        if (ret)
                return ret;

        return in_len;
}

ssize_t ib_uverbs_create_xsrq(struct ib_uverbs_file *file,
                              const char __user *buf, int in_len, int out_len)
{
        struct ib_uverbs_create_xsrq cmd;
        struct ib_uverbs_create_srq_resp resp;
        struct ib_udata udata;
        int ret;

        if (out_len < sizeof resp)
                return -ENOSPC;

        if (copy_from_user(&cmd, buf, sizeof cmd))
                return -EFAULT;

        INIT_UDATA(&udata, buf + sizeof cmd,
                   (unsigned long) cmd.response + sizeof resp,
                   in_len - sizeof cmd, out_len - sizeof resp);

        ret = __uverbs_create_xsrq(file, &cmd, &udata);
        if (ret)
                return ret;

        return in_len;
}

ssize_t ib_uverbs_modify_srq(struct ib_uverbs_file *file,
                             const char __user *buf, int in_len,
                             int out_len)
{
        struct ib_uverbs_modify_srq cmd;
        struct ib_udata udata;
        struct ib_srq *srq;
        struct ib_srq_attr attr;
        int ret;

        if (copy_from_user(&cmd, buf, sizeof cmd))
                return -EFAULT;

        INIT_UDATA(&udata, buf + sizeof cmd, NULL, in_len - sizeof cmd,
                   out_len);

        srq = idr_read_srq(cmd.srq_handle, file->ucontext);
        if (!srq)
                return -EINVAL;

        attr.max_wr    = cmd.max_wr;
        attr.srq_limit = cmd.srq_limit;

        ret = srq->device->modify_srq(srq, &attr, cmd.attr_mask, &udata);

        put_srq_read(srq);

        return ret ? ret : in_len;
}

ssize_t ib_uverbs_query_srq(struct ib_uverbs_file *file,
                            const char __user *buf,
                            int in_len, int out_len)
{
        struct ib_uverbs_query_srq cmd;
        struct ib_uverbs_query_srq_resp resp;
        struct ib_srq_attr attr;
        struct ib_srq *srq;
        int ret;

        if (out_len < sizeof resp)
                return -ENOSPC;

        if (copy_from_user(&cmd, buf, sizeof cmd))
                return -EFAULT;

        srq = idr_read_srq(cmd.srq_handle, file->ucontext);
        if (!srq)
                return -EINVAL;

        ret = ib_query_srq(srq, &attr);

        put_srq_read(srq);

        if (ret)
                return ret;

        memset(&resp, 0, sizeof resp);

        resp.max_wr    = attr.max_wr;
        resp.max_sge   = attr.max_sge;
        resp.srq_limit = attr.srq_limit;

        if (copy_to_user((void __user *) (unsigned long) cmd.response,
                         &resp, sizeof resp))
                return -EFAULT;

        return in_len;
}

ssize_t ib_uverbs_destroy_srq(struct ib_uverbs_file *file,
                              const char __user *buf, int in_len,
                              int out_len)
{
        struct ib_uverbs_destroy_srq cmd;
        struct ib_uverbs_destroy_srq_resp resp;
        struct ib_uobject *uobj;
        struct ib_srq *srq;
        struct ib_uevent_object *obj;
        int ret = -EINVAL;
        struct ib_usrq_object *us;
        enum ib_srq_type srq_type;

        if (copy_from_user(&cmd, buf, sizeof cmd))
                return -EFAULT;

        uobj = idr_write_uobj(&ib_uverbs_srq_idr, cmd.srq_handle, file->ucontext);
        if (!uobj)
                return -EINVAL;
        srq = uobj->object;
        obj = container_of(uobj, struct ib_uevent_object, uobject);
        srq_type = srq->srq_type;

        ret = ib_destroy_srq(srq);
        if (!ret)
                uobj->live = 0;

        put_uobj_write(uobj);

        if (ret)
                return ret;

        if (srq_type == IB_SRQT_XRC) {
                us = container_of(obj, struct ib_usrq_object, uevent);
                atomic_dec(&us->uxrcd->refcnt);
        }

        idr_remove_uobj(&ib_uverbs_srq_idr, uobj);

        mutex_lock(&file->mutex);
        list_del(&uobj->list);
        mutex_unlock(&file->mutex);

        ib_uverbs_release_uevent(file, obj);

        memset(&resp, 0, sizeof resp);
        resp.events_reported = obj->events_reported;

        put_uobj(uobj);

        if (copy_to_user((void __user *) (unsigned long) cmd.response,
                         &resp, sizeof resp))
                ret = -EFAULT;

        return ret ? ret : in_len;
}