/*
 * Copyright (c) 2014 Open Grid Computing, Inc. All rights reserved.
 * Copyright (c) 2005-2007 Network Appliance, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the BSD-type
 * license below:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *      Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *
 *      Redistributions in binary form must reproduce the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer in the documentation and/or other materials provided
 *      with the distribution.
 *
 *      Neither the name of the Network Appliance, Inc. nor the names of
 *      its contributors may be used to endorse or promote products
 *      derived from this software without specific prior written
 *      permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Author: Tom Tucker <tom@opengridcomputing.com>
 */

#include <linux/sunrpc/svc_xprt.h>
#include <linux/sunrpc/addr.h>
#include <linux/sunrpc/debug.h>
#include <linux/sunrpc/rpc_rdma.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include <rdma/ib_verbs.h>
#include <rdma/rdma_cm.h>
#include <linux/sunrpc/svc_rdma.h>
#include <linux/export.h>
#include "xprt_rdma.h"

#define RPCDBG_FACILITY	RPCDBG_SVCXPRT

static struct svcxprt_rdma *rdma_create_xprt(struct svc_serv *, int);
static struct svc_xprt *svc_rdma_create(struct svc_serv *serv,
                                        struct net *net,
                                        struct sockaddr *sa, int salen,
                                        int flags);
static struct svc_xprt *svc_rdma_accept(struct svc_xprt *xprt);
static void svc_rdma_release_rqst(struct svc_rqst *);
static void svc_rdma_detach(struct svc_xprt *xprt);
static void svc_rdma_free(struct svc_xprt *xprt);
static int svc_rdma_has_wspace(struct svc_xprt *xprt);
static int svc_rdma_secure_port(struct svc_rqst *);
static void svc_rdma_kill_temp_xprt(struct svc_xprt *);

static struct svc_xprt_ops svc_rdma_ops = {
        .xpo_create = svc_rdma_create,
        .xpo_recvfrom = svc_rdma_recvfrom,
        .xpo_sendto = svc_rdma_sendto,
        .xpo_release_rqst = svc_rdma_release_rqst,
        .xpo_detach = svc_rdma_detach,
        .xpo_free = svc_rdma_free,
        .xpo_prep_reply_hdr = svc_rdma_prep_reply_hdr,
        .xpo_has_wspace = svc_rdma_has_wspace,
        .xpo_accept = svc_rdma_accept,
        .xpo_secure_port = svc_rdma_secure_port,
        .xpo_kill_temp_xprt = svc_rdma_kill_temp_xprt,
};

struct svc_xprt_class svc_rdma_class = {
        .xcl_name = "rdma",
        .xcl_owner = THIS_MODULE,
        .xcl_ops = &svc_rdma_ops,
        .xcl_max_payload = RPCSVC_MAXPAYLOAD_RDMA,
        .xcl_ident = XPRT_TRANSPORT_RDMA,
};

#if defined(CONFIG_SUNRPC_BACKCHANNEL)
static struct svc_xprt *svc_rdma_bc_create(struct svc_serv *, struct net *,
                                           struct sockaddr *, int, int);
static void svc_rdma_bc_detach(struct svc_xprt *);
static void svc_rdma_bc_free(struct svc_xprt *);

static struct svc_xprt_ops svc_rdma_bc_ops = {
        .xpo_create = svc_rdma_bc_create,
        .xpo_detach = svc_rdma_bc_detach,
        .xpo_free = svc_rdma_bc_free,
        .xpo_prep_reply_hdr = svc_rdma_prep_reply_hdr,
        .xpo_secure_port = svc_rdma_secure_port,
};

struct svc_xprt_class svc_rdma_bc_class = {
        .xcl_name = "rdma-bc",
        .xcl_owner = THIS_MODULE,
        .xcl_ops = &svc_rdma_bc_ops,
        .xcl_max_payload = (1024 - RPCRDMA_HDRLEN_MIN)
};

static struct svc_xprt *svc_rdma_bc_create(struct svc_serv *serv,
                                           struct net *net,
                                           struct sockaddr *sa, int salen,
                                           int flags)
{
        struct svcxprt_rdma *cma_xprt;
        struct svc_xprt *xprt;

        cma_xprt = rdma_create_xprt(serv, 0);
        if (!cma_xprt)
                return ERR_PTR(-ENOMEM);
        xprt = &cma_xprt->sc_xprt;

        svc_xprt_init(net, &svc_rdma_bc_class, xprt, serv);
        set_bit(XPT_CONG_CTRL, &xprt->xpt_flags);
        serv->sv_bc_xprt = xprt;

        dprintk("svcrdma: %s(%p)\n", __func__, xprt);
        return xprt;
}

static void svc_rdma_bc_detach(struct svc_xprt *xprt)
{
        dprintk("svcrdma: %s(%p)\n", __func__, xprt);
}

static void svc_rdma_bc_free(struct svc_xprt *xprt)
{
        struct svcxprt_rdma *rdma =
                container_of(xprt, struct svcxprt_rdma, sc_xprt);

        dprintk("svcrdma: %s(%p)\n", __func__, xprt);
        if (xprt)
                kfree(rdma);
}
#endif  /* CONFIG_SUNRPC_BACKCHANNEL */

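/* Each Send or Receive Work Request is tracked by a svc_rdma_op_ctxt,
 * which records the SGEs, pages, and DMA mappings that belong to that
 * WR until its completion has been processed. A pool of these contexts
 * is pre-allocated per transport and recycled through sc_ctxts.
 */
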
static struct svc_rdma_op_ctxt *alloc_ctxt(struct svcxprt_rdma *xprt,
                                           gfp_t flags)
{
        struct svc_rdma_op_ctxt *ctxt;

        ctxt = kmalloc(sizeof(*ctxt), flags);
        if (ctxt) {
                ctxt->xprt = xprt;
                INIT_LIST_HEAD(&ctxt->list);
        }
        return ctxt;
}

static bool svc_rdma_prealloc_ctxts(struct svcxprt_rdma *xprt)
{
        unsigned int i;

        /* Each RPC/RDMA credit can consume a number of send
         * and receive WQEs. One ctxt is allocated for each.
         */
        i = xprt->sc_sq_depth + xprt->sc_rq_depth;

        while (i--) {
                struct svc_rdma_op_ctxt *ctxt;

                ctxt = alloc_ctxt(xprt, GFP_KERNEL);
                if (!ctxt) {
                        dprintk("svcrdma: No memory for RDMA ctxt\n");
                        return false;
                }
                list_add(&ctxt->list, &xprt->sc_ctxts);
        }
        return true;
}

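/* Take a context off the per-transport free list. If the list is
 * unexpectedly empty, fall back to a GFP_NOIO allocation so the
 * caller is not forced to fail in the I/O path.
 */
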
struct svc_rdma_op_ctxt *svc_rdma_get_context(struct svcxprt_rdma *xprt)
{
        struct svc_rdma_op_ctxt *ctxt = NULL;

        spin_lock(&xprt->sc_ctxt_lock);
        xprt->sc_ctxt_used++;
        if (list_empty(&xprt->sc_ctxts))
                goto out_empty;

        ctxt = list_first_entry(&xprt->sc_ctxts,
                                struct svc_rdma_op_ctxt, list);
        list_del(&ctxt->list);
        spin_unlock(&xprt->sc_ctxt_lock);

out:
        ctxt->count = 0;
        ctxt->mapped_sges = 0;
        ctxt->frmr = NULL;
        return ctxt;

out_empty:
        /* Either pre-allocation missed the mark, or send
         * queue accounting is broken.
         */
        spin_unlock(&xprt->sc_ctxt_lock);

        ctxt = alloc_ctxt(xprt, GFP_NOIO);
        if (ctxt)
                goto out;

        spin_lock(&xprt->sc_ctxt_lock);
        xprt->sc_ctxt_used--;
        spin_unlock(&xprt->sc_ctxt_lock);
        WARN_ONCE(1, "svcrdma: empty RDMA ctxt list?\n");
        return NULL;
}

void svc_rdma_unmap_dma(struct svc_rdma_op_ctxt *ctxt)
{
        struct svcxprt_rdma *xprt = ctxt->xprt;
        struct ib_device *device = xprt->sc_cm_id->device;
        u32 lkey = xprt->sc_pd->local_dma_lkey;
        unsigned int i;

        for (i = 0; i < ctxt->mapped_sges; i++) {
                /*
                 * Unmap the DMA addr in the SGE if the lkey matches
                 * the local_dma_lkey, otherwise, ignore it since it is
                 * an FRMR lkey and will be unmapped later when the
                 * last WR that uses it completes.
                 */
                if (ctxt->sge[i].lkey == lkey)
                        ib_dma_unmap_page(device,
                                          ctxt->sge[i].addr,
                                          ctxt->sge[i].length,
                                          ctxt->direction);
        }
        ctxt->mapped_sges = 0;
}

void svc_rdma_put_context(struct svc_rdma_op_ctxt *ctxt, int free_pages)
{
        struct svcxprt_rdma *xprt = ctxt->xprt;
        int i;

        if (free_pages)
                for (i = 0; i < ctxt->count; i++)
                        put_page(ctxt->pages[i]);

        spin_lock(&xprt->sc_ctxt_lock);
        xprt->sc_ctxt_used--;
        list_add(&ctxt->list, &xprt->sc_ctxts);
        spin_unlock(&xprt->sc_ctxt_lock);
}

static void svc_rdma_destroy_ctxts(struct svcxprt_rdma *xprt)
{
        while (!list_empty(&xprt->sc_ctxts)) {
                struct svc_rdma_op_ctxt *ctxt;

                ctxt = list_first_entry(&xprt->sc_ctxts,
                                        struct svc_rdma_op_ctxt, list);
                list_del(&ctxt->list);
                kfree(ctxt);
        }
}

/* QP event handler */
static void qp_event_handler(struct ib_event *event, void *context)
{
        struct svc_xprt *xprt = context;

        switch (event->event) {
        /* These are considered benign events */
        case IB_EVENT_PATH_MIG:
        case IB_EVENT_COMM_EST:
        case IB_EVENT_SQ_DRAINED:
        case IB_EVENT_QP_LAST_WQE_REACHED:
                dprintk("svcrdma: QP event %s (%d) received for QP=%p\n",
                        ib_event_msg(event->event), event->event,
                        event->element.qp);
                break;
        /* These are considered fatal events */
        case IB_EVENT_PATH_MIG_ERR:
        case IB_EVENT_QP_FATAL:
        case IB_EVENT_QP_REQ_ERR:
        case IB_EVENT_QP_ACCESS_ERR:
        case IB_EVENT_DEVICE_FATAL:
        default:
                dprintk("svcrdma: QP ERROR event %s (%d) received for QP=%p, "
                        "closing transport\n",
                        ib_event_msg(event->event), event->event,
                        event->element.qp);
                set_bit(XPT_CLOSE, &xprt->xpt_flags);
                break;
        }
}

/**
 * svc_rdma_wc_receive - Invoked by RDMA provider for each polled Receive WC
 * @cq:        completion queue
 * @wc:        completed WR
 *
 */
static void svc_rdma_wc_receive(struct ib_cq *cq, struct ib_wc *wc)
{
        struct svcxprt_rdma *xprt = cq->cq_context;
        struct ib_cqe *cqe = wc->wr_cqe;
        struct svc_rdma_op_ctxt *ctxt;

        /* WARNING: Only wc->wr_cqe and wc->status are reliable */
        ctxt = container_of(cqe, struct svc_rdma_op_ctxt, cqe);
        svc_rdma_unmap_dma(ctxt);

        if (wc->status != IB_WC_SUCCESS)
                goto flushed;

        /* All wc fields are now known to be valid */
        ctxt->byte_len = wc->byte_len;
        spin_lock(&xprt->sc_rq_dto_lock);
        list_add_tail(&ctxt->list, &xprt->sc_rq_dto_q);
        spin_unlock(&xprt->sc_rq_dto_lock);

        set_bit(XPT_DATA, &xprt->sc_xprt.xpt_flags);
        if (test_bit(RDMAXPRT_CONN_PENDING, &xprt->sc_flags))
                goto out;
        svc_xprt_enqueue(&xprt->sc_xprt);
        goto out;

flushed:
        if (wc->status != IB_WC_WR_FLUSH_ERR)
                pr_warn("svcrdma: receive: %s (%u/0x%x)\n",
                        ib_wc_status_msg(wc->status),
                        wc->status, wc->vendor_err);
        set_bit(XPT_CLOSE, &xprt->sc_xprt.xpt_flags);
        svc_rdma_put_context(ctxt, 1);

out:
        svc_xprt_put(&xprt->sc_xprt);
}

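/* Common Send Queue completion handling: return the consumed SQ
 * credit to sc_sq_avail and wake any thread waiting for SQ space.
 * On error, flag the transport for closing as well.
 */
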
static void svc_rdma_send_wc_common(struct svcxprt_rdma *xprt,
                                    struct ib_wc *wc,
                                    const char *opname)
{
        if (wc->status != IB_WC_SUCCESS)
                goto err;

out:
        atomic_inc(&xprt->sc_sq_avail);
        wake_up(&xprt->sc_send_wait);
        return;

err:
        set_bit(XPT_CLOSE, &xprt->sc_xprt.xpt_flags);
        if (wc->status != IB_WC_WR_FLUSH_ERR)
                pr_err("svcrdma: %s: %s (%u/0x%x)\n",
                       opname, ib_wc_status_msg(wc->status),
                       wc->status, wc->vendor_err);
        goto out;
}

static void svc_rdma_send_wc_common_put(struct ib_cq *cq, struct ib_wc *wc,
                                        const char *opname)
{
        struct svcxprt_rdma *xprt = cq->cq_context;

        svc_rdma_send_wc_common(xprt, wc, opname);
        svc_xprt_put(&xprt->sc_xprt);
}

/**
 * svc_rdma_wc_send - Invoked by RDMA provider for each polled Send WC
 * @cq:        completion queue
 * @wc:        completed WR
 *
 */
void svc_rdma_wc_send(struct ib_cq *cq, struct ib_wc *wc)
{
        struct ib_cqe *cqe = wc->wr_cqe;
        struct svc_rdma_op_ctxt *ctxt;

        svc_rdma_send_wc_common_put(cq, wc, "send");

        ctxt = container_of(cqe, struct svc_rdma_op_ctxt, cqe);
        svc_rdma_unmap_dma(ctxt);
        svc_rdma_put_context(ctxt, 1);
}

/**
 * svc_rdma_wc_reg - Invoked by RDMA provider for each polled FASTREG WC
 * @cq:        completion queue
 * @wc:        completed WR
 *
 */
void svc_rdma_wc_reg(struct ib_cq *cq, struct ib_wc *wc)
{
        svc_rdma_send_wc_common_put(cq, wc, "fastreg");
}

/**
 * svc_rdma_wc_read - Invoked by RDMA provider for each polled Read WC
 * @cq:        completion queue
 * @wc:        completed WR
 *
 */
void svc_rdma_wc_read(struct ib_cq *cq, struct ib_wc *wc)
{
        struct svcxprt_rdma *xprt = cq->cq_context;
        struct ib_cqe *cqe = wc->wr_cqe;
        struct svc_rdma_op_ctxt *ctxt;

        svc_rdma_send_wc_common(xprt, wc, "read");

        ctxt = container_of(cqe, struct svc_rdma_op_ctxt, cqe);
        svc_rdma_unmap_dma(ctxt);
        svc_rdma_put_frmr(xprt, ctxt->frmr);

        if (test_bit(RDMACTXT_F_LAST_CTXT, &ctxt->flags)) {
                struct svc_rdma_op_ctxt *read_hdr;

                read_hdr = ctxt->read_hdr;
                spin_lock(&xprt->sc_rq_dto_lock);
                list_add_tail(&read_hdr->list,
                              &xprt->sc_read_complete_q);
                spin_unlock(&xprt->sc_rq_dto_lock);

                set_bit(XPT_DATA, &xprt->sc_xprt.xpt_flags);
                svc_xprt_enqueue(&xprt->sc_xprt);
        }

        svc_rdma_put_context(ctxt, 0);
        svc_xprt_put(&xprt->sc_xprt);
}

/**
 * svc_rdma_wc_inv - Invoked by RDMA provider for each polled LOCAL_INV WC
 * @cq:        completion queue
 * @wc:        completed WR
 *
 */
void svc_rdma_wc_inv(struct ib_cq *cq, struct ib_wc *wc)
{
        svc_rdma_send_wc_common_put(cq, wc, "localInv");
}

static struct svcxprt_rdma *rdma_create_xprt(struct svc_serv *serv,
                                             int listener)
{
        struct svcxprt_rdma *cma_xprt = kzalloc(sizeof *cma_xprt, GFP_KERNEL);

        if (!cma_xprt)
                return NULL;
        svc_xprt_init(&init_net, &svc_rdma_class, &cma_xprt->sc_xprt, serv);
        INIT_LIST_HEAD(&cma_xprt->sc_accept_q);
        INIT_LIST_HEAD(&cma_xprt->sc_rq_dto_q);
        INIT_LIST_HEAD(&cma_xprt->sc_read_complete_q);
        INIT_LIST_HEAD(&cma_xprt->sc_frmr_q);
        INIT_LIST_HEAD(&cma_xprt->sc_ctxts);
        INIT_LIST_HEAD(&cma_xprt->sc_rw_ctxts);
        init_waitqueue_head(&cma_xprt->sc_send_wait);

        spin_lock_init(&cma_xprt->sc_lock);
        spin_lock_init(&cma_xprt->sc_rq_dto_lock);
        spin_lock_init(&cma_xprt->sc_frmr_q_lock);
        spin_lock_init(&cma_xprt->sc_ctxt_lock);
        spin_lock_init(&cma_xprt->sc_rw_ctxt_lock);

        /*
         * Note that this implies that the underlying transport supports
         * some form of congestion control (see RFC 7530 section 3.1
         * paragraph 2). For now, we assume that all supported RDMA
         * transports are suitable here.
         */
        set_bit(XPT_CONG_CTRL, &cma_xprt->sc_xprt.xpt_flags);

        if (listener)
                set_bit(XPT_LISTENER, &cma_xprt->sc_xprt.xpt_flags);

        return cma_xprt;
}

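/* Build and post one Receive WR. The Receive buffer is assembled from
 * individually allocated and DMA-mapped pages until sc_max_req_size
 * bytes are covered, then handed to the provider with ib_post_recv().
 */
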
int svc_rdma_post_recv(struct svcxprt_rdma *xprt, gfp_t flags)
{
        struct ib_recv_wr recv_wr, *bad_recv_wr;
        struct svc_rdma_op_ctxt *ctxt;
        struct page *page;
        dma_addr_t pa;
        int sge_no;
        int buflen;
        int ret;

        ctxt = svc_rdma_get_context(xprt);
        buflen = 0;
        ctxt->direction = DMA_FROM_DEVICE;
        ctxt->cqe.done = svc_rdma_wc_receive;
        for (sge_no = 0; buflen < xprt->sc_max_req_size; sge_no++) {
                if (sge_no >= xprt->sc_max_sge) {
                        pr_err("svcrdma: Too many sges (%d)\n", sge_no);
                        goto err_put_ctxt;
                }
                page = alloc_page(flags);
                if (!page)
                        goto err_put_ctxt;
                ctxt->pages[sge_no] = page;
                pa = ib_dma_map_page(xprt->sc_cm_id->device,
                                     page, 0, PAGE_SIZE,
                                     DMA_FROM_DEVICE);
                if (ib_dma_mapping_error(xprt->sc_cm_id->device, pa))
                        goto err_put_ctxt;
                svc_rdma_count_mappings(xprt, ctxt);
                ctxt->sge[sge_no].addr = pa;
                ctxt->sge[sge_no].length = PAGE_SIZE;
                ctxt->sge[sge_no].lkey = xprt->sc_pd->local_dma_lkey;
                ctxt->count = sge_no + 1;
                buflen += PAGE_SIZE;
        }
        recv_wr.next = NULL;
        recv_wr.sg_list = &ctxt->sge[0];
        recv_wr.num_sge = ctxt->count;
        recv_wr.wr_cqe = &ctxt->cqe;

        svc_xprt_get(&xprt->sc_xprt);
        ret = ib_post_recv(xprt->sc_qp, &recv_wr, &bad_recv_wr);
        if (ret) {
                svc_rdma_unmap_dma(ctxt);
                svc_rdma_put_context(ctxt, 1);
                svc_xprt_put(&xprt->sc_xprt);
        }
        return ret;

 err_put_ctxt:
        svc_rdma_unmap_dma(ctxt);
        svc_rdma_put_context(ctxt, 1);
        return -ENOMEM;
}

int svc_rdma_repost_recv(struct svcxprt_rdma *xprt, gfp_t flags)
{
        int ret = 0;

        ret = svc_rdma_post_recv(xprt, flags);
        if (ret) {
                pr_err("svcrdma: could not post a receive buffer, err=%d.\n",
                       ret);
                pr_err("svcrdma: closing transport %p.\n", xprt);
                set_bit(XPT_CLOSE, &xprt->sc_xprt.xpt_flags);
                ret = -ENOTCONN;
        }
        return ret;
}

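/* Examine the CM private data sent with the connect request. When it
 * carries valid RPC-over-RDMA connection parameters, note whether the
 * client accepts Send With Invalidate from this server.
 */
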
static void
svc_rdma_parse_connect_private(struct svcxprt_rdma *newxprt,
                               struct rdma_conn_param *param)
{
        const struct rpcrdma_connect_private *pmsg = param->private_data;

        if (pmsg &&
            pmsg->cp_magic == rpcrdma_cmp_magic &&
            pmsg->cp_version == RPCRDMA_CMP_VERSION) {
                newxprt->sc_snd_w_inv = pmsg->cp_flags &
                                        RPCRDMA_CMP_F_SND_W_INV_OK;

                dprintk("svcrdma: client send_size %u, recv_size %u "
                        "remote inv %ssupported\n",
                        rpcrdma_decode_buffer_size(pmsg->cp_send_size),
                        rpcrdma_decode_buffer_size(pmsg->cp_recv_size),
                        newxprt->sc_snd_w_inv ? "" : "un");
        }
}

/*
 * This function handles the CONNECT_REQUEST event on a listening
 * endpoint. It is passed the cma_id for the _new_ connection. The context in
 * this cma_id is inherited from the listening cma_id and is the svc_xprt
 * structure for the listening endpoint.
 *
 * This function creates a new xprt for the new connection and enqueues it on
 * the accept queue for the listening xprt. When the listen thread is kicked,
 * it will call the recvfrom method on the listen xprt which will accept the
 * new connection.
 */
static void handle_connect_req(struct rdma_cm_id *new_cma_id,
                               struct rdma_conn_param *param)
{
        struct svcxprt_rdma *listen_xprt = new_cma_id->context;
        struct svcxprt_rdma *newxprt;
        struct sockaddr *sa;

        /* Create a new transport */
        newxprt = rdma_create_xprt(listen_xprt->sc_xprt.xpt_server, 0);
        if (!newxprt) {
                dprintk("svcrdma: failed to create new transport\n");
                return;
        }
        newxprt->sc_cm_id = new_cma_id;
        new_cma_id->context = newxprt;
        dprintk("svcrdma: Creating newxprt=%p, cm_id=%p, listenxprt=%p\n",
                newxprt, newxprt->sc_cm_id, listen_xprt);
        svc_rdma_parse_connect_private(newxprt, param);

        /* Save client advertised inbound read limit for use later in accept. */
        newxprt->sc_ord = param->initiator_depth;

        /* Set the local and remote addresses in the transport */
        sa = (struct sockaddr *)&newxprt->sc_cm_id->route.addr.dst_addr;
        svc_xprt_set_remote(&newxprt->sc_xprt, sa, svc_addr_len(sa));
        sa = (struct sockaddr *)&newxprt->sc_cm_id->route.addr.src_addr;
        svc_xprt_set_local(&newxprt->sc_xprt, sa, svc_addr_len(sa));

        /*
         * Enqueue the new transport on the accept queue of the listening
         * transport
         */
        spin_lock_bh(&listen_xprt->sc_lock);
        list_add_tail(&newxprt->sc_accept_q, &listen_xprt->sc_accept_q);
        spin_unlock_bh(&listen_xprt->sc_lock);

        set_bit(XPT_CONN, &listen_xprt->sc_xprt.xpt_flags);
        svc_xprt_enqueue(&listen_xprt->sc_xprt);
}

/*
 * Handles events generated on the listening endpoint. These events are
 * either incoming connect requests or adapter removal events.
 */
static int rdma_listen_handler(struct rdma_cm_id *cma_id,
                               struct rdma_cm_event *event)
{
        struct svcxprt_rdma *xprt = cma_id->context;
        int ret = 0;

        switch (event->event) {
        case RDMA_CM_EVENT_CONNECT_REQUEST:
                dprintk("svcrdma: Connect request on cma_id=%p, xprt = %p, "
                        "event = %s (%d)\n", cma_id, cma_id->context,
                        rdma_event_msg(event->event), event->event);
                handle_connect_req(cma_id, &event->param.conn);
                break;

        case RDMA_CM_EVENT_ESTABLISHED:
                /* Accept complete */
                dprintk("svcrdma: Connection completed on LISTEN xprt=%p, "
                        "cm_id=%p\n", xprt, cma_id);
                break;

        case RDMA_CM_EVENT_DEVICE_REMOVAL:
                dprintk("svcrdma: Device removal xprt=%p, cm_id=%p\n",
                        xprt, cma_id);
                if (xprt)
                        set_bit(XPT_CLOSE, &xprt->sc_xprt.xpt_flags);
                break;

        default:
                dprintk("svcrdma: Unexpected event on listening endpoint %p, "
                        "event = %s (%d)\n", cma_id,
                        rdma_event_msg(event->event), event->event);
                break;
        }

        return ret;
}

static int rdma_cma_handler(struct rdma_cm_id *cma_id,
                            struct rdma_cm_event *event)
{
        struct svc_xprt *xprt = cma_id->context;
        struct svcxprt_rdma *rdma =
                container_of(xprt, struct svcxprt_rdma, sc_xprt);
        switch (event->event) {
        case RDMA_CM_EVENT_ESTABLISHED:
                /* Accept complete */
                svc_xprt_get(xprt);
                dprintk("svcrdma: Connection completed on DTO xprt=%p, "
                        "cm_id=%p\n", xprt, cma_id);
                clear_bit(RDMAXPRT_CONN_PENDING, &rdma->sc_flags);
                svc_xprt_enqueue(xprt);
                break;
        case RDMA_CM_EVENT_DISCONNECTED:
                dprintk("svcrdma: Disconnect on DTO xprt=%p, cm_id=%p\n",
                        xprt, cma_id);
                if (xprt) {
                        set_bit(XPT_CLOSE, &xprt->xpt_flags);
                        svc_xprt_enqueue(xprt);
                        svc_xprt_put(xprt);
                }
                break;
        case RDMA_CM_EVENT_DEVICE_REMOVAL:
                dprintk("svcrdma: Device removal cma_id=%p, xprt = %p, "
                        "event = %s (%d)\n", cma_id, xprt,
                        rdma_event_msg(event->event), event->event);
                if (xprt) {
                        set_bit(XPT_CLOSE, &xprt->xpt_flags);
                        svc_xprt_enqueue(xprt);
                        svc_xprt_put(xprt);
                }
                break;
        default:
                dprintk("svcrdma: Unexpected event on DTO endpoint %p, "
                        "event = %s (%d)\n", cma_id,
                        rdma_event_msg(event->event), event->event);
                break;
        }
        return 0;
}

/*
 * Create a listening RDMA service endpoint.
 */
static struct svc_xprt *svc_rdma_create(struct svc_serv *serv,
                                        struct net *net,
                                        struct sockaddr *sa, int salen,
                                        int flags)
{
        struct rdma_cm_id *listen_id;
        struct svcxprt_rdma *cma_xprt;
        int ret;

        dprintk("svcrdma: Creating RDMA socket\n");
        if ((sa->sa_family != AF_INET) && (sa->sa_family != AF_INET6)) {
                dprintk("svcrdma: Address family %d is not supported.\n", sa->sa_family);
                return ERR_PTR(-EAFNOSUPPORT);
        }
        cma_xprt = rdma_create_xprt(serv, 1);
        if (!cma_xprt)
                return ERR_PTR(-ENOMEM);

        listen_id = rdma_create_id(&init_net, rdma_listen_handler, cma_xprt,
                                   RDMA_PS_TCP, IB_QPT_RC);
        if (IS_ERR(listen_id)) {
                ret = PTR_ERR(listen_id);
                dprintk("svcrdma: rdma_create_id failed = %d\n", ret);
                goto err0;
        }

        /* Allow both IPv4 and IPv6 sockets to bind a single port
         * at the same time.
         */
#if IS_ENABLED(CONFIG_IPV6)
        ret = rdma_set_afonly(listen_id, 1);
        if (ret) {
                dprintk("svcrdma: rdma_set_afonly failed = %d\n", ret);
                goto err1;
        }
#endif
        ret = rdma_bind_addr(listen_id, sa);
        if (ret) {
                dprintk("svcrdma: rdma_bind_addr failed = %d\n", ret);
                goto err1;
        }
        cma_xprt->sc_cm_id = listen_id;

        ret = rdma_listen(listen_id, RPCRDMA_LISTEN_BACKLOG);
        if (ret) {
                dprintk("svcrdma: rdma_listen failed = %d\n", ret);
                goto err1;
        }

        /*
         * We need to use the address from the cm_id in case the
         * caller specified 0 for the port number.
         */
        sa = (struct sockaddr *)&cma_xprt->sc_cm_id->route.addr.src_addr;
        svc_xprt_set_local(&cma_xprt->sc_xprt, sa, salen);

        return &cma_xprt->sc_xprt;

 err1:
        rdma_destroy_id(listen_id);
 err0:
        svc_xprt_put(&cma_xprt->sc_xprt);
        return ERR_PTR(ret);
}

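/* FRMRs (fast registration MRs) used for RDMA Read are kept on a
 * per-transport free list (sc_frmr_q) so that the Read path does not
 * have to allocate and initialize an MR on every request.
 */
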
static struct svc_rdma_fastreg_mr *rdma_alloc_frmr(struct svcxprt_rdma *xprt)
{
        struct ib_mr *mr;
        struct scatterlist *sg;
        struct svc_rdma_fastreg_mr *frmr;
        u32 num_sg;

        frmr = kmalloc(sizeof(*frmr), GFP_KERNEL);
        if (!frmr)
                goto err;

        num_sg = min_t(u32, RPCSVC_MAXPAGES, xprt->sc_frmr_pg_list_len);
        mr = ib_alloc_mr(xprt->sc_pd, IB_MR_TYPE_MEM_REG, num_sg);
        if (IS_ERR(mr))
                goto err_free_frmr;

        sg = kcalloc(RPCSVC_MAXPAGES, sizeof(*sg), GFP_KERNEL);
        if (!sg)
                goto err_free_mr;

        sg_init_table(sg, RPCSVC_MAXPAGES);

        frmr->mr = mr;
        frmr->sg = sg;
        INIT_LIST_HEAD(&frmr->frmr_list);
        return frmr;

 err_free_mr:
        ib_dereg_mr(mr);
 err_free_frmr:
        kfree(frmr);
 err:
        return ERR_PTR(-ENOMEM);
}

static void rdma_dealloc_frmr_q(struct svcxprt_rdma *xprt)
{
        struct svc_rdma_fastreg_mr *frmr;

        while (!list_empty(&xprt->sc_frmr_q)) {
                frmr = list_entry(xprt->sc_frmr_q.next,
                                  struct svc_rdma_fastreg_mr, frmr_list);
                list_del_init(&frmr->frmr_list);
                kfree(frmr->sg);
                ib_dereg_mr(frmr->mr);
                kfree(frmr);
        }
}

struct svc_rdma_fastreg_mr *svc_rdma_get_frmr(struct svcxprt_rdma *rdma)
{
        struct svc_rdma_fastreg_mr *frmr = NULL;

        spin_lock(&rdma->sc_frmr_q_lock);
        if (!list_empty(&rdma->sc_frmr_q)) {
                frmr = list_entry(rdma->sc_frmr_q.next,
                                  struct svc_rdma_fastreg_mr, frmr_list);
                list_del_init(&frmr->frmr_list);
        }
        spin_unlock(&rdma->sc_frmr_q_lock);
        if (frmr)
                return frmr;

        return rdma_alloc_frmr(rdma);
}

void svc_rdma_put_frmr(struct svcxprt_rdma *rdma,
                       struct svc_rdma_fastreg_mr *frmr)
{
        if (frmr) {
                ib_dma_unmap_sg(rdma->sc_cm_id->device,
                                frmr->sg, frmr->sg_nents, frmr->direction);
                spin_lock(&rdma->sc_frmr_q_lock);
                WARN_ON_ONCE(!list_empty(&frmr->frmr_list));
                list_add(&frmr->frmr_list, &rdma->sc_frmr_q);
                spin_unlock(&rdma->sc_frmr_q_lock);
        }
}

/*
 * This is the xpo_recvfrom function for listening endpoints. Its
 * purpose is to accept incoming connections. The CMA callback handler
 * has already created a new transport and attached it to the new CMA
 * ID.
 *
 * There is a queue of pending connections hung on the listening
 * transport. This queue contains the new svc_xprt structure. This
 * function takes svc_xprt structures off the accept_q and completes
 * the connection.
 */
static struct svc_xprt *svc_rdma_accept(struct svc_xprt *xprt)
{
        struct svcxprt_rdma *listen_rdma;
        struct svcxprt_rdma *newxprt = NULL;
        struct rdma_conn_param conn_param;
        struct rpcrdma_connect_private pmsg;
        struct ib_qp_init_attr qp_attr;
        struct ib_device *dev;
        struct sockaddr *sap;
        unsigned int i;
        int ret = 0;

        listen_rdma = container_of(xprt, struct svcxprt_rdma, sc_xprt);
        clear_bit(XPT_CONN, &xprt->xpt_flags);
        /* Get the next entry off the accept list */
        spin_lock_bh(&listen_rdma->sc_lock);
        if (!list_empty(&listen_rdma->sc_accept_q)) {
                newxprt = list_entry(listen_rdma->sc_accept_q.next,
                                     struct svcxprt_rdma, sc_accept_q);
                list_del_init(&newxprt->sc_accept_q);
        }
        if (!list_empty(&listen_rdma->sc_accept_q))
                set_bit(XPT_CONN, &listen_rdma->sc_xprt.xpt_flags);
        spin_unlock_bh(&listen_rdma->sc_lock);
        if (!newxprt)
                return NULL;

        dprintk("svcrdma: newxprt from accept queue = %p, cm_id=%p\n",
                newxprt, newxprt->sc_cm_id);

        dev = newxprt->sc_cm_id->device;
        newxprt->sc_port_num = newxprt->sc_cm_id->port_num;

        /* Qualify the transport resource defaults with the
         * capabilities of this particular device */
        newxprt->sc_max_sge = min((size_t)dev->attrs.max_sge,
                                  (size_t)RPCSVC_MAXPAGES);
        newxprt->sc_max_sge_rd = min_t(size_t, dev->attrs.max_sge_rd,
                                       RPCSVC_MAXPAGES);
        newxprt->sc_max_req_size = svcrdma_max_req_size;
        newxprt->sc_max_requests = min_t(u32, dev->attrs.max_qp_wr,
                                         svcrdma_max_requests);
        newxprt->sc_fc_credits = cpu_to_be32(newxprt->sc_max_requests);
        newxprt->sc_max_bc_requests = min_t(u32, dev->attrs.max_qp_wr,
                                            svcrdma_max_bc_requests);
        newxprt->sc_rq_depth = newxprt->sc_max_requests +
                               newxprt->sc_max_bc_requests;
        newxprt->sc_sq_depth = newxprt->sc_rq_depth;
        atomic_set(&newxprt->sc_sq_avail, newxprt->sc_sq_depth);

        if (!svc_rdma_prealloc_ctxts(newxprt))
                goto errout;

        /*
         * Limit ORD based on client limit, local device limit, and
         * configured svcrdma limit.
         */
        newxprt->sc_ord = min_t(size_t, dev->attrs.max_qp_rd_atom, newxprt->sc_ord);
        newxprt->sc_ord = min_t(size_t, svcrdma_ord, newxprt->sc_ord);

        newxprt->sc_pd = ib_alloc_pd(dev, 0);
        if (IS_ERR(newxprt->sc_pd)) {
                dprintk("svcrdma: error creating PD for connect request\n");
                goto errout;
        }
        newxprt->sc_sq_cq = ib_alloc_cq(dev, newxprt, newxprt->sc_sq_depth,
                                        0, IB_POLL_WORKQUEUE);
        if (IS_ERR(newxprt->sc_sq_cq)) {
                dprintk("svcrdma: error creating SQ CQ for connect request\n");
                goto errout;
        }
        newxprt->sc_rq_cq = ib_alloc_cq(dev, newxprt, newxprt->sc_rq_depth,
                                        0, IB_POLL_WORKQUEUE);
        if (IS_ERR(newxprt->sc_rq_cq)) {
                dprintk("svcrdma: error creating RQ CQ for connect request\n");
                goto errout;
        }

        memset(&qp_attr, 0, sizeof qp_attr);
        qp_attr.event_handler = qp_event_handler;
        qp_attr.qp_context = &newxprt->sc_xprt;
        qp_attr.port_num = newxprt->sc_cm_id->port_num;
        qp_attr.cap.max_rdma_ctxs = newxprt->sc_max_requests;
        qp_attr.cap.max_send_wr = newxprt->sc_sq_depth;
        qp_attr.cap.max_recv_wr = newxprt->sc_rq_depth;
        qp_attr.cap.max_send_sge = newxprt->sc_max_sge;
        qp_attr.cap.max_recv_sge = newxprt->sc_max_sge;
        qp_attr.sq_sig_type = IB_SIGNAL_REQ_WR;
        qp_attr.qp_type = IB_QPT_RC;
        qp_attr.send_cq = newxprt->sc_sq_cq;
        qp_attr.recv_cq = newxprt->sc_rq_cq;
        dprintk("svcrdma: newxprt->sc_cm_id=%p, newxprt->sc_pd=%p\n",
                newxprt->sc_cm_id, newxprt->sc_pd);
        dprintk("    cap.max_send_wr = %d, cap.max_recv_wr = %d\n",
                qp_attr.cap.max_send_wr, qp_attr.cap.max_recv_wr);
        dprintk("    cap.max_send_sge = %d, cap.max_recv_sge = %d\n",
                qp_attr.cap.max_send_sge, qp_attr.cap.max_recv_sge);

        ret = rdma_create_qp(newxprt->sc_cm_id, newxprt->sc_pd, &qp_attr);
        if (ret) {
                dprintk("svcrdma: failed to create QP, ret=%d\n", ret);
                goto errout;
        }
        newxprt->sc_qp = newxprt->sc_cm_id->qp;

        /*
         * Use the most secure set of MR resources based on the
         * transport type and available memory management features in
         * the device. Here's the table implemented below:
         *
         *              Fast    Global  DMA     Remote WR
         *              Reg     LKEY    MR      Access
         *              Sup'd   Sup'd   Needed  Needed
         *
         * IWARP        N       N       Y       Y
         *              N       Y       Y       Y
         *              Y       N       Y       N
         *              Y       Y       N       -
         *
         * IB           N       N       Y       N
         *              N       Y       N       -
         *              Y       N       Y       N
         *              Y       Y       N       -
         *
         * NB:  iWARP requires remote write access for the data sink
         *      of an RDMA_READ. IB does not.
         */
        newxprt->sc_reader = rdma_read_chunk_lcl;
        if (dev->attrs.device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS) {
                newxprt->sc_frmr_pg_list_len =
                        dev->attrs.max_fast_reg_page_list_len;
                newxprt->sc_dev_caps |= SVCRDMA_DEVCAP_FAST_REG;
                newxprt->sc_reader = rdma_read_chunk_frmr;
        } else
                newxprt->sc_snd_w_inv = false;

        /*
         * Determine if a DMA MR is required and if so, what privs are required
         */
        if (!rdma_protocol_iwarp(dev, newxprt->sc_cm_id->port_num) &&
            !rdma_ib_or_roce(dev, newxprt->sc_cm_id->port_num))
                goto errout;

        if (rdma_protocol_iwarp(dev, newxprt->sc_cm_id->port_num))
                newxprt->sc_dev_caps |= SVCRDMA_DEVCAP_READ_W_INV;

        /* Post receive buffers */
        for (i = 0; i < newxprt->sc_max_requests; i++) {
                ret = svc_rdma_post_recv(newxprt, GFP_KERNEL);
                if (ret) {
                        dprintk("svcrdma: failure posting receive buffers\n");
                        goto errout;
                }
        }

        /* Swap out the handler */
        newxprt->sc_cm_id->event_handler = rdma_cma_handler;

        /* Construct RDMA-CM private message */
        pmsg.cp_magic = rpcrdma_cmp_magic;
        pmsg.cp_version = RPCRDMA_CMP_VERSION;
        pmsg.cp_flags = 0;
        pmsg.cp_send_size = pmsg.cp_recv_size =
                rpcrdma_encode_buffer_size(newxprt->sc_max_req_size);

        /* Accept Connection */
        set_bit(RDMAXPRT_CONN_PENDING, &newxprt->sc_flags);
        memset(&conn_param, 0, sizeof conn_param);
        conn_param.responder_resources = 0;
        conn_param.initiator_depth = newxprt->sc_ord;
        conn_param.private_data = &pmsg;
        conn_param.private_data_len = sizeof(pmsg);
        ret = rdma_accept(newxprt->sc_cm_id, &conn_param);
        if (ret) {
                dprintk("svcrdma: failed to accept new connection, ret=%d\n",
                        ret);
                goto errout;
        }

        dprintk("svcrdma: new connection %p accepted:\n", newxprt);
        sap = (struct sockaddr *)&newxprt->sc_cm_id->route.addr.src_addr;
        dprintk("    local address   : %pIS:%u\n", sap, rpc_get_port(sap));
        sap = (struct sockaddr *)&newxprt->sc_cm_id->route.addr.dst_addr;
        dprintk("    remote address  : %pIS:%u\n", sap, rpc_get_port(sap));
        dprintk("    max_sge         : %d\n", newxprt->sc_max_sge);
        dprintk("    max_sge_rd      : %d\n", newxprt->sc_max_sge_rd);
        dprintk("    sq_depth        : %d\n", newxprt->sc_sq_depth);
        dprintk("    max_requests    : %d\n", newxprt->sc_max_requests);
        dprintk("    ord             : %d\n", newxprt->sc_ord);

        return &newxprt->sc_xprt;

1067 dprintk("svcrdma: failure accepting new connection rc=%d.\n", ret);
1068 /* Take a reference in case the DTO handler runs */
1069 svc_xprt_get(&newxprt->sc_xprt);
1070 if (newxprt->sc_qp && !IS_ERR(newxprt->sc_qp))
1071 ib_destroy_qp(newxprt->sc_qp);
1072 rdma_destroy_id(newxprt->sc_cm_id);
1073 /* This call to put will destroy the transport */
1074 svc_xprt_put(&newxprt->sc_xprt);
static void svc_rdma_release_rqst(struct svc_rqst *rqstp)
{
}

/*
 * When connected, an svc_xprt has at least two references:
 *
 * - A reference held by the cm_id between the ESTABLISHED and
 *   DISCONNECTED events. If the remote peer disconnected first, this
 *   reference could be gone.
 *
 * - A reference held by the svc_recv code that called this function
 *   as part of close processing.
 *
 * At a minimum, one reference should still be held.
 */
static void svc_rdma_detach(struct svc_xprt *xprt)
{
        struct svcxprt_rdma *rdma =
                container_of(xprt, struct svcxprt_rdma, sc_xprt);
        dprintk("svc: svc_rdma_detach(%p)\n", xprt);

        /* Disconnect and flush posted WQE */
        rdma_disconnect(rdma->sc_cm_id);
}

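/* Final transport teardown runs from a workqueue item so that it may
 * sleep while draining the QP and releasing verbs resources.
 */
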
static void __svc_rdma_free(struct work_struct *work)
{
        struct svcxprt_rdma *rdma =
                container_of(work, struct svcxprt_rdma, sc_work);
        struct svc_xprt *xprt = &rdma->sc_xprt;

        dprintk("svcrdma: %s(%p)\n", __func__, rdma);

        if (rdma->sc_qp && !IS_ERR(rdma->sc_qp))
                ib_drain_qp(rdma->sc_qp);

        /* We should only be called from kref_put */
        if (kref_read(&xprt->xpt_ref) != 0)
                pr_err("svcrdma: sc_xprt still in use? (%d)\n",
                       kref_read(&xprt->xpt_ref));

        /*
         * Destroy queued, but not processed read completions. Note
         * that this cleanup has to be done before destroying the
         * cm_id because the device ptr is needed to unmap the dma in
         * svc_rdma_put_context.
         */
        while (!list_empty(&rdma->sc_read_complete_q)) {
                struct svc_rdma_op_ctxt *ctxt;
                ctxt = list_first_entry(&rdma->sc_read_complete_q,
                                        struct svc_rdma_op_ctxt, list);
                list_del(&ctxt->list);
                svc_rdma_put_context(ctxt, 1);
        }

        /* Destroy queued, but not processed recv completions */
        while (!list_empty(&rdma->sc_rq_dto_q)) {
                struct svc_rdma_op_ctxt *ctxt;
                ctxt = list_first_entry(&rdma->sc_rq_dto_q,
                                        struct svc_rdma_op_ctxt, list);
                list_del(&ctxt->list);
                svc_rdma_put_context(ctxt, 1);
        }

        /* Warn if we leaked a resource or under-referenced */
        if (rdma->sc_ctxt_used != 0)
                pr_err("svcrdma: ctxt still in use? (%d)\n",
                       rdma->sc_ctxt_used);

        /* Final put of backchannel client transport */
        if (xprt->xpt_bc_xprt) {
                xprt_put(xprt->xpt_bc_xprt);
                xprt->xpt_bc_xprt = NULL;
        }

        rdma_dealloc_frmr_q(rdma);
        svc_rdma_destroy_rw_ctxts(rdma);
        svc_rdma_destroy_ctxts(rdma);

        /* Destroy the QP if present (not a listener) */
        if (rdma->sc_qp && !IS_ERR(rdma->sc_qp))
                ib_destroy_qp(rdma->sc_qp);

        if (rdma->sc_sq_cq && !IS_ERR(rdma->sc_sq_cq))
                ib_free_cq(rdma->sc_sq_cq);

        if (rdma->sc_rq_cq && !IS_ERR(rdma->sc_rq_cq))
                ib_free_cq(rdma->sc_rq_cq);

        if (rdma->sc_pd && !IS_ERR(rdma->sc_pd))
                ib_dealloc_pd(rdma->sc_pd);

        /* Destroy the CM ID */
        rdma_destroy_id(rdma->sc_cm_id);

        kfree(rdma);
}

static void svc_rdma_free(struct svc_xprt *xprt)
{
        struct svcxprt_rdma *rdma =
                container_of(xprt, struct svcxprt_rdma, sc_xprt);
        INIT_WORK(&rdma->sc_work, __svc_rdma_free);
        queue_work(svc_rdma_wq, &rdma->sc_work);
}

static int svc_rdma_has_wspace(struct svc_xprt *xprt)
{
        struct svcxprt_rdma *rdma =
                container_of(xprt, struct svcxprt_rdma, sc_xprt);

        /*
         * If there are already waiters on the SQ,
         * return false.
         */
        if (waitqueue_active(&rdma->sc_send_wait))
                return 0;

        /* Otherwise return true. */
        return 1;
}

static int svc_rdma_secure_port(struct svc_rqst *rqstp)
{
        return 1;
}

static void svc_rdma_kill_temp_xprt(struct svc_xprt *xprt)
{
}

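/* Post a chain of Send WRs. Available Send Queue space is tracked in
 * sc_sq_avail; if the chain does not fit, the caller sleeps on
 * sc_send_wait until completions return enough credits.
 */
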
int svc_rdma_send(struct svcxprt_rdma *xprt, struct ib_send_wr *wr)
{
        struct ib_send_wr *bad_wr, *n_wr;
        int wr_count;
        int i;
        int ret;

        if (test_bit(XPT_CLOSE, &xprt->sc_xprt.xpt_flags))
                return -ENOTCONN;

        wr_count = 1;
        for (n_wr = wr->next; n_wr; n_wr = n_wr->next)
                wr_count++;

        /* If the SQ is full, wait until an SQ entry is available */
        while (1) {
                if ((atomic_sub_return(wr_count, &xprt->sc_sq_avail) < 0)) {
                        atomic_inc(&rdma_stat_sq_starve);

                        /* Wait until SQ WR available if SQ still full */
                        atomic_add(wr_count, &xprt->sc_sq_avail);
                        wait_event(xprt->sc_send_wait,
                                   atomic_read(&xprt->sc_sq_avail) > wr_count);
                        if (test_bit(XPT_CLOSE, &xprt->sc_xprt.xpt_flags))
                                return -ENOTCONN;
                        continue;
                }
                /* Take a transport ref for each WR posted */
                for (i = 0; i < wr_count; i++)
                        svc_xprt_get(&xprt->sc_xprt);

                /* Bump used SQ WR count and post */
                ret = ib_post_send(xprt->sc_qp, wr, &bad_wr);
                if (ret) {
                        set_bit(XPT_CLOSE, &xprt->sc_xprt.xpt_flags);
                        for (i = 0; i < wr_count; i++)
                                svc_xprt_put(&xprt->sc_xprt);
                        dprintk("svcrdma: failed to post SQ WR rc=%d\n", ret);
                        dprintk("    sc_sq_avail=%d, sc_sq_depth=%d\n",
                                atomic_read(&xprt->sc_sq_avail),
                                xprt->sc_sq_depth);
                        wake_up(&xprt->sc_send_wait);
                }
                break;
        }
        return ret;
}