/*
 * Copyright (c) 2003-2007 Network Appliance, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the BSD-type
 * license below:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *      Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *
 *      Redistributions in binary form must reproduce the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer in the documentation and/or other materials provided
 *      with the distribution.
 *
 *      Neither the name of the Network Appliance, Inc. nor the names of
 *      its contributors may be used to endorse or promote products
 *      derived from this software without specific prior written
 *      permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * transport.c
 *
 * This file contains the top-level implementation of an RPC RDMA
 * transport.
 *
 * Naming convention: functions beginning with xprt_ are part of the
 * transport switch. All others are RPC RDMA internal.
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/sunrpc/addr.h>

#include "xprt_rdma.h"

#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
# define RPCDBG_FACILITY	RPCDBG_TRANS
#endif

MODULE_LICENSE("Dual BSD/GPL");

MODULE_DESCRIPTION("RPC/RDMA Transport for Linux kernel NFS");
MODULE_AUTHOR("Network Appliance, Inc.");

/*
 * tunables
 */

static unsigned int xprt_rdma_slot_table_entries = RPCRDMA_DEF_SLOT_TABLE;
static unsigned int xprt_rdma_max_inline_read = RPCRDMA_DEF_INLINE;
static unsigned int xprt_rdma_max_inline_write = RPCRDMA_DEF_INLINE;
static unsigned int xprt_rdma_inline_write_padding;
static unsigned int xprt_rdma_memreg_strategy = RPCRDMA_FRMR;
int xprt_rdma_pad_optimize = 1;

#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)

static unsigned int min_slot_table_size = RPCRDMA_MIN_SLOT_TABLE;
static unsigned int max_slot_table_size = RPCRDMA_MAX_SLOT_TABLE;
static unsigned int zero;
static unsigned int max_padding = PAGE_SIZE;
static unsigned int min_memreg = RPCRDMA_BOUNCEBUFFERS;
static unsigned int max_memreg = RPCRDMA_LAST - 1;

static struct ctl_table_header *sunrpc_table_header;
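
/* When CONFIG_SUNRPC_DEBUG is enabled, the table below is registered
 * under the "sunrpc" sysctl directory, so these knobs appear as, for
 * example, /proc/sys/sunrpc/rdma_slot_table_entries. Entries wired to
 * proc_dointvec_minmax() clamp writes to the min/max limits declared
 * above; plain proc_dointvec() entries accept any integer.
 */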

static struct ctl_table xr_tunables_table[] = {
	{
		.procname	= "rdma_slot_table_entries",
		.data		= &xprt_rdma_slot_table_entries,
		.maxlen		= sizeof(unsigned int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= &min_slot_table_size,
		.extra2		= &max_slot_table_size
	},
	{
		.procname	= "rdma_max_inline_read",
		.data		= &xprt_rdma_max_inline_read,
		.maxlen		= sizeof(unsigned int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "rdma_max_inline_write",
		.data		= &xprt_rdma_max_inline_write,
		.maxlen		= sizeof(unsigned int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "rdma_inline_write_padding",
		.data		= &xprt_rdma_inline_write_padding,
		.maxlen		= sizeof(unsigned int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= &zero,
		.extra2		= &max_padding,
	},
	{
		.procname	= "rdma_memreg_strategy",
		.data		= &xprt_rdma_memreg_strategy,
		.maxlen		= sizeof(unsigned int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= &min_memreg,
		.extra2		= &max_memreg,
	},
	{
		.procname	= "rdma_pad_optimize",
		.data		= &xprt_rdma_pad_optimize,
		.maxlen		= sizeof(unsigned int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{ },
};

static struct ctl_table sunrpc_table[] = {
	{
		.procname	= "sunrpc",
		.mode		= 0555,
		.child		= xr_tunables_table
	},
	{ },
};

#endif

#define RPCRDMA_BIND_TO		(60U * HZ)
#define RPCRDMA_INIT_REEST_TO	(5U * HZ)
#define RPCRDMA_MAX_REEST_TO	(30U * HZ)
#define RPCRDMA_IDLE_DISC_TO	(5U * 60 * HZ)
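
/* All of the above are expressed in jiffies: HZ is one second's worth
 * of clock ticks, so for example RPCRDMA_IDLE_DISC_TO is five minutes.
 */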

static struct rpc_xprt_ops xprt_rdma_procs;	/* forward reference */

static void
xprt_rdma_format_addresses4(struct rpc_xprt *xprt, struct sockaddr *sap)
{
	struct sockaddr_in *sin = (struct sockaddr_in *)sap;
	char buf[20];

	snprintf(buf, sizeof(buf), "%08x", ntohl(sin->sin_addr.s_addr));
	xprt->address_strings[RPC_DISPLAY_HEX_ADDR] = kstrdup(buf, GFP_KERNEL);

	xprt->address_strings[RPC_DISPLAY_NETID] = RPCBIND_NETID_RDMA;
}

static void
xprt_rdma_format_addresses6(struct rpc_xprt *xprt, struct sockaddr *sap)
{
	struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)sap;
	char buf[40];

	snprintf(buf, sizeof(buf), "%pi6", &sin6->sin6_addr);
	xprt->address_strings[RPC_DISPLAY_HEX_ADDR] = kstrdup(buf, GFP_KERNEL);

	xprt->address_strings[RPC_DISPLAY_NETID] = RPCBIND_NETID_RDMA6;
}

static void
xprt_rdma_format_addresses(struct rpc_xprt *xprt)
{
	struct sockaddr *sap = (struct sockaddr *)
					&rpcx_to_rdmad(xprt).addr;
	char buf[128];

	switch (sap->sa_family) {
	case AF_INET:
		xprt_rdma_format_addresses4(xprt, sap);
		break;
	case AF_INET6:
		xprt_rdma_format_addresses6(xprt, sap);
		break;
	default:
		pr_err("rpcrdma: Unrecognized address family\n");
		return;
	}

	(void)rpc_ntop(sap, buf, sizeof(buf));
	xprt->address_strings[RPC_DISPLAY_ADDR] = kstrdup(buf, GFP_KERNEL);

	snprintf(buf, sizeof(buf), "%u", rpc_get_port(sap));
	xprt->address_strings[RPC_DISPLAY_PORT] = kstrdup(buf, GFP_KERNEL);

	snprintf(buf, sizeof(buf), "%4hx", rpc_get_port(sap));
	xprt->address_strings[RPC_DISPLAY_HEX_PORT] = kstrdup(buf, GFP_KERNEL);

	xprt->address_strings[RPC_DISPLAY_PROTO] = "rdma";
}

static void
xprt_rdma_free_addresses(struct rpc_xprt *xprt)
{
	unsigned int i;

	for (i = 0; i < RPC_DISPLAY_MAX; i++)
		switch (i) {
		case RPC_DISPLAY_PROTO:
		case RPC_DISPLAY_NETID:
			continue;
		default:
			kfree(xprt->address_strings[i]);
		}
}

static void
xprt_rdma_connect_worker(struct work_struct *work)
{
	struct rpcrdma_xprt *r_xprt = container_of(work, struct rpcrdma_xprt,
						   rx_connect_worker.work);
	struct rpc_xprt *xprt = &r_xprt->rx_xprt;
	int rc = 0;

	xprt_clear_connected(xprt);

	dprintk("RPC:       %s: %sconnect\n", __func__,
		r_xprt->rx_ep.rep_connected != 0 ? "re" : "");
	rc = rpcrdma_ep_connect(&r_xprt->rx_ep, &r_xprt->rx_ia);
	if (rc)
		xprt_wake_pending_tasks(xprt, rc);

	dprintk("RPC:       %s: exit\n", __func__);
	xprt_clear_connecting(xprt);
}
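
/* A test hook: the generic sunrpc layer can invoke the transport's
 * ->inject_disconnect method to simulate connection loss. Tearing the
 * connection down through the CM with rdma_disconnect() exercises the
 * same recovery path as a real fabric disconnect would.
 */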

static void
xprt_rdma_inject_disconnect(struct rpc_xprt *xprt)
{
	struct rpcrdma_xprt *r_xprt = container_of(xprt, struct rpcrdma_xprt,
						   rx_xprt);

	pr_info("rpcrdma: injecting transport disconnect on xprt=%p\n", xprt);
	rdma_disconnect(r_xprt->rx_ia.ri_id);
}

/*
 * xprt_rdma_destroy
 *
 * Destroy the xprt.
 * Free all memory associated with the object, including its own.
 * NOTE: none of the *destroy methods free memory for their top-level
 * objects, even though they may have allocated it (they do free
 * private memory). It's up to the caller to handle it. In this
 * case (RDMA transport), all structure memory is inlined with the
 * struct rpcrdma_xprt.
 */
static void
xprt_rdma_destroy(struct rpc_xprt *xprt)
{
	struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);

	dprintk("RPC:       %s: called\n", __func__);

	cancel_delayed_work_sync(&r_xprt->rx_connect_worker);

	xprt_clear_connected(xprt);

	rpcrdma_buffer_destroy(&r_xprt->rx_buf);
	rpcrdma_ep_destroy(&r_xprt->rx_ep, &r_xprt->rx_ia);
	rpcrdma_ia_close(&r_xprt->rx_ia);

	xprt_rdma_free_addresses(xprt);

	xprt_free(xprt);

	dprintk("RPC:       %s: returning\n", __func__);

	module_put(THIS_MODULE);
}
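
/* Note: xprt_rdma_destroy()'s module_put() pairs with the
 * try_module_get() taken in xprt_setup_rdma(). The module reference is
 * held for the lifetime of each transport instance, so the module
 * cannot be unloaded while a transport still exists.
 */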

static const struct rpc_timeout xprt_rdma_default_timeout = {
	.to_initval = 60 * HZ,
	.to_maxval = 60 * HZ,
};

/**
 * xprt_setup_rdma - Set up transport to use RDMA
 *
 * @args: rpc transport arguments
 */
static struct rpc_xprt *
xprt_setup_rdma(struct xprt_create *args)
{
	struct rpcrdma_create_data_internal cdata;
	struct rpc_xprt *xprt;
	struct rpcrdma_xprt *new_xprt;
	struct rpcrdma_ep *new_ep;
	struct sockaddr_in *sin;
	int rc;

	if (args->addrlen > sizeof(xprt->addr)) {
		dprintk("RPC:       %s: address too large\n", __func__);
		return ERR_PTR(-EBADF);
	}

	xprt = xprt_alloc(args->net, sizeof(struct rpcrdma_xprt),
			xprt_rdma_slot_table_entries,
			xprt_rdma_slot_table_entries);
	if (xprt == NULL) {
		dprintk("RPC:       %s: couldn't allocate rpcrdma_xprt\n",
			__func__);
		return ERR_PTR(-ENOMEM);
	}

	/* 60 second timeout, no retries */
	xprt->timeout = &xprt_rdma_default_timeout;
	xprt->bind_timeout = RPCRDMA_BIND_TO;
	xprt->reestablish_timeout = RPCRDMA_INIT_REEST_TO;
	xprt->idle_timeout = RPCRDMA_IDLE_DISC_TO;

	xprt->resvport = 0;		/* privileged port not needed */
	xprt->tsh_size = 0;		/* RPC-RDMA handles framing */
	xprt->ops = &xprt_rdma_procs;

	/*
	 * Set up RDMA-specific connect data.
	 */

	/* Put server RDMA address in local cdata */
	memcpy(&cdata.addr, args->dstaddr, args->addrlen);

	/* Ensure xprt->addr holds valid server TCP (not RDMA)
	 * address, for any side protocols which peek at it */
	xprt->prot = IPPROTO_TCP;
	xprt->addrlen = args->addrlen;
	memcpy(&xprt->addr, &cdata.addr, xprt->addrlen);

	sin = (struct sockaddr_in *)&cdata.addr;
	if (ntohs(sin->sin_port) != 0)
		xprt_set_bound(xprt);

	dprintk("RPC:       %s: %pI4:%u\n",
		__func__, &sin->sin_addr.s_addr, ntohs(sin->sin_port));

	/* Set max requests */
	cdata.max_requests = xprt->max_reqs;

	/* Set some length limits */
	cdata.rsize = RPCRDMA_MAX_SEGS * PAGE_SIZE; /* RDMA write max */
	cdata.wsize = RPCRDMA_MAX_SEGS * PAGE_SIZE; /* RDMA read max */

	cdata.inline_wsize = xprt_rdma_max_inline_write;
	if (cdata.inline_wsize > cdata.wsize)
		cdata.inline_wsize = cdata.wsize;

	cdata.inline_rsize = xprt_rdma_max_inline_read;
	if (cdata.inline_rsize > cdata.rsize)
		cdata.inline_rsize = cdata.rsize;

	cdata.padding = xprt_rdma_inline_write_padding;
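
	/* Note on sizing: the inline thresholds govern how much data can
	 * travel inside the RPC-over-RDMA message itself. Payloads that
	 * do not fit are moved by explicit RDMA via read, write, or
	 * reply chunks instead, bounded by rsize/wsize above.
	 */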

	/*
	 * Create new transport instance, which includes initialized
	 *  o ia
	 *  o endpoint
	 *  o buffers
	 */
	new_xprt = rpcx_to_rdmax(xprt);

	rc = rpcrdma_ia_open(new_xprt, (struct sockaddr *) &cdata.addr,
				xprt_rdma_memreg_strategy);
	if (rc)
		goto out1;

	/*
	 * initialize and create ep
	 */
	new_xprt->rx_data = cdata;
	new_ep = &new_xprt->rx_ep;
	new_ep->rep_remote_addr = cdata.addr;

	rc = rpcrdma_ep_create(&new_xprt->rx_ep,
				&new_xprt->rx_ia, &new_xprt->rx_data);
	if (rc)
		goto out2;

	/*
	 * Allocate pre-registered send and receive buffers for headers and
	 * any inline data. Also specify any padding which will be provided
	 * from a preregistered zero buffer.
	 */
	rc = rpcrdma_buffer_create(new_xprt);
	if (rc)
		goto out3;

	/*
	 * Register a callback for connection events. This is necessary because
	 * connection loss notification is async. We also catch connection loss
	 * when reaping receives.
	 */
	INIT_DELAYED_WORK(&new_xprt->rx_connect_worker,
			  xprt_rdma_connect_worker);

	xprt_rdma_format_addresses(xprt);
	xprt->max_payload = new_xprt->rx_ia.ri_ops->ro_maxpages(new_xprt);
	if (xprt->max_payload == 0)
		goto out4;
	xprt->max_payload <<= PAGE_SHIFT;
	dprintk("RPC:       %s: transport data payload maximum: %zu bytes\n",
		__func__, xprt->max_payload);

	if (!try_module_get(THIS_MODULE))
		goto out4;

	return xprt;

out4:
	xprt_rdma_free_addresses(xprt);
	rc = -EINVAL;
out3:
	rpcrdma_ep_destroy(new_ep, &new_xprt->rx_ia);
out2:
	rpcrdma_ia_close(&new_xprt->rx_ia);
out1:
	xprt_free(xprt);
	return ERR_PTR(rc);
}

/*
 * Close a connection, during shutdown or timeout/reconnect
 */
static void
xprt_rdma_close(struct rpc_xprt *xprt)
{
	struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);

	dprintk("RPC:       %s: closing\n", __func__);
	if (r_xprt->rx_ep.rep_connected > 0)
		xprt->reestablish_timeout = 0;
	xprt_disconnect_done(xprt);
	rpcrdma_ep_disconnect(&r_xprt->rx_ep, &r_xprt->rx_ia);
}

static void
xprt_rdma_set_port(struct rpc_xprt *xprt, u16 port)
{
	struct sockaddr_in *sap;

	sap = (struct sockaddr_in *)&xprt->addr;
	sap->sin_port = htons(port);
	sap = (struct sockaddr_in *)&rpcx_to_rdmad(xprt).addr;
	sap->sin_port = htons(port);
	dprintk("RPC:       %s: %u\n", __func__, port);
}

static void
xprt_rdma_connect(struct rpc_xprt *xprt, struct rpc_task *task)
{
	struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);

	if (r_xprt->rx_ep.rep_connected != 0) {
		/* Reconnect */
		schedule_delayed_work(&r_xprt->rx_connect_worker,
				      xprt->reestablish_timeout);
		xprt->reestablish_timeout <<= 1;
		if (xprt->reestablish_timeout > RPCRDMA_MAX_REEST_TO)
			xprt->reestablish_timeout = RPCRDMA_MAX_REEST_TO;
		else if (xprt->reestablish_timeout < RPCRDMA_INIT_REEST_TO)
			xprt->reestablish_timeout = RPCRDMA_INIT_REEST_TO;
	} else {
		schedule_delayed_work(&r_xprt->rx_connect_worker, 0);
		if (!RPC_IS_ASYNC(task))
			flush_delayed_work(&r_xprt->rx_connect_worker);
	}
}
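
/* The reconnect delay in xprt_rdma_connect() backs off exponentially:
 * with the macros defined earlier it runs 5s, 10s, 20s, then clamps at
 * the 30s RPCRDMA_MAX_REEST_TO ceiling. A fresh connect (the else
 * branch) is scheduled immediately, and synchronous tasks block until
 * the connect worker has run.
 */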

/*
 * The RDMA allocate/free functions need the task structure as a place
 * to hide the struct rpcrdma_req, which is necessary for the actual send/recv
 * sequence.
 *
 * The RPC layer allocates both send and receive buffers in the same call
 * (rq_send_buf and rq_rcv_buf are both part of a single contiguous buffer).
 * We may register rq_rcv_buf when using reply chunks.
 */
static void *
xprt_rdma_allocate(struct rpc_task *task, size_t size)
{
	struct rpc_xprt *xprt = task->tk_rqstp->rq_xprt;
	struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);
	struct rpcrdma_regbuf *rb;
	struct rpcrdma_req *req;
	size_t min_size;
	gfp_t flags;

	req = rpcrdma_buffer_get(&r_xprt->rx_buf);
	if (req == NULL)
		return NULL;

	flags = GFP_NOIO | __GFP_NOWARN;
	if (RPC_IS_SWAPPER(task))
		flags = __GFP_MEMALLOC | GFP_NOWAIT | __GFP_NOWARN;
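
	/* GFP_NOIO keeps the allocator from recursing into the I/O
	 * path while this transport is itself servicing I/O. For a
	 * task that is swapping memory out, __GFP_MEMALLOC | GFP_NOWAIT
	 * dips into the emergency reserves without blocking, since
	 * waiting on reclaim here could deadlock.
	 */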

	if (req->rl_rdmabuf == NULL)
		goto out_rdmabuf;
	if (req->rl_sendbuf == NULL)
		goto out_sendbuf;
	if (size > req->rl_sendbuf->rg_size)
		goto out_sendbuf;

out:
	dprintk("RPC:       %s: size %zd, request 0x%p\n", __func__, size, req);
	req->rl_connect_cookie = 0;	/* our reserved value */
	return req->rl_sendbuf->rg_base;

out_rdmabuf:
	min_size = RPCRDMA_INLINE_WRITE_THRESHOLD(task->tk_rqstp);
	rb = rpcrdma_alloc_regbuf(&r_xprt->rx_ia, min_size, flags);
	if (IS_ERR(rb))
		goto out_fail;
	req->rl_rdmabuf = rb;

out_sendbuf:
	/* XDR encoding and RPC/RDMA marshaling of this request has not
	 * yet occurred. Thus a lower bound is needed to prevent buffer
	 * overrun during marshaling.
	 *
	 * RPC/RDMA marshaling may choose to send payload bearing ops
	 * inline, if the result is smaller than the inline threshold.
	 * The value of the "size" argument accounts for header
	 * requirements but not for the payload in these cases.
	 *
	 * Likewise, allocate enough space to receive a reply up to the
	 * size of the inline threshold.
	 *
	 * It's unlikely that both the send header and the received
	 * reply will be large, but slush is provided here to allow
	 * flexibility when marshaling.
	 */
	min_size = RPCRDMA_INLINE_READ_THRESHOLD(task->tk_rqstp);
	min_size += RPCRDMA_INLINE_WRITE_THRESHOLD(task->tk_rqstp);
	if (size < min_size)
		size = min_size;

	rb = rpcrdma_alloc_regbuf(&r_xprt->rx_ia, size, flags);
	if (IS_ERR(rb))
		goto out_fail;
	rb->rg_owner = req;

	r_xprt->rx_stats.hardway_register_count += size;
	rpcrdma_free_regbuf(&r_xprt->rx_ia, req->rl_sendbuf);
	req->rl_sendbuf = rb;
	goto out;

out_fail:
	rpcrdma_buffer_put(req);
	r_xprt->rx_stats.failed_marshal_count++;
	return NULL;
}

/*
 * This function returns all RDMA resources to the pool.
 */
static void
xprt_rdma_free(void *buffer)
{
	struct rpcrdma_req *req;
	struct rpcrdma_xprt *r_xprt;
	struct rpcrdma_regbuf *rb;
	int i;

	if (buffer == NULL)
		return;

	rb = container_of(buffer, struct rpcrdma_regbuf, rg_base[0]);
	req = rb->rg_owner;
	r_xprt = container_of(req->rl_buffer, struct rpcrdma_xprt, rx_buf);

	dprintk("RPC:       %s: called on 0x%p\n", __func__, req->rl_reply);
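
	/* Before the rpcrdma_req can be recycled, every memory region
	 * registered for this request must be unmapped or invalidated
	 * so the remote peer can no longer touch it. ro_unmap() returns
	 * the number of segments it just handled, so the loop advances
	 * chunk by chunk.
	 */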
	for (i = 0; req->rl_nchunks;) {
		--req->rl_nchunks;
		i += r_xprt->rx_ia.ri_ops->ro_unmap(r_xprt,
						    &req->rl_segments[i]);
	}

	rpcrdma_buffer_put(req);
}

/*
 * send_request invokes the meat of RPC RDMA. It must do the following:
 *  1.  Marshal the RPC request into an RPC RDMA request, which means
 *	putting a header in front of data, and creating IOVs for RDMA
 *	from those in the request.
 *  2.  In marshaling, detect opportunities for RDMA, and use them.
 *  3.  Post a recv message to set up asynch completion, then send
 *	the request (rpcrdma_ep_post).
 *  4.  No partial sends are possible in the RPC-RDMA protocol (as in UDP).
 */
static int
xprt_rdma_send_request(struct rpc_task *task)
{
	struct rpc_rqst *rqst = task->tk_rqstp;
	struct rpc_xprt *xprt = rqst->rq_xprt;
	struct rpcrdma_req *req = rpcr_to_rdmar(rqst);
	struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);
	int rc = 0;

	rc = rpcrdma_marshal_req(rqst);
	if (rc < 0)
		goto failed_marshal;

	if (req->rl_reply == NULL)		/* e.g. reconnection */
		rpcrdma_recv_buffer_get(req);

	/* Must suppress retransmit to maintain credits */
	if (req->rl_connect_cookie == xprt->connect_cookie)
		goto drop_connection;
	req->rl_connect_cookie = xprt->connect_cookie;
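
	/* The connect cookie changes each time the transport
	 * reconnects. If it still matches, this is a retransmit on the
	 * same connection; re-posting it would consume a flow-control
	 * credit the server is not offering, so force a disconnect and
	 * let the request be re-driven on a fresh connection instead.
	 */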

	if (rpcrdma_ep_post(&r_xprt->rx_ia, &r_xprt->rx_ep, req))
		goto drop_connection;

	rqst->rq_xmit_bytes_sent += rqst->rq_snd_buf.len;
	rqst->rq_bytes_sent = 0;
	return 0;

failed_marshal:
	r_xprt->rx_stats.failed_marshal_count++;
	dprintk("RPC:       %s: rpcrdma_marshal_req failed, status %i\n",
		__func__, rc);
	if (rc == -EIO)
		return -EIO;
drop_connection:
	xprt_disconnect_done(xprt);
	return -ENOTCONN;	/* implies disconnect */
}

static void xprt_rdma_print_stats(struct rpc_xprt *xprt, struct seq_file *seq)
{
	struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);
	long idle_time = 0;

	if (xprt_connected(xprt))
		idle_time = (long)(jiffies - xprt->last_used) / HZ;

	seq_printf(seq,
	  "\txprt:\trdma %u %lu %lu %lu %ld %lu %lu %lu %Lu %Lu "
	  "%lu %lu %lu %Lu %Lu %Lu %Lu %lu %lu %lu\n",

	   0,	/* need a local port? */
	   xprt->stat.bind_count,
	   xprt->stat.connect_count,
	   xprt->stat.connect_time,
	   idle_time,
	   xprt->stat.sends,
	   xprt->stat.recvs,
	   xprt->stat.bad_xids,
	   xprt->stat.req_u,
	   xprt->stat.bklog_u,

	   r_xprt->rx_stats.read_chunk_count,
	   r_xprt->rx_stats.write_chunk_count,
	   r_xprt->rx_stats.reply_chunk_count,
	   r_xprt->rx_stats.total_rdma_request,
	   r_xprt->rx_stats.total_rdma_reply,
	   r_xprt->rx_stats.pullup_copy_count,
	   r_xprt->rx_stats.fixup_copy_count,
	   r_xprt->rx_stats.hardway_register_count,
	   r_xprt->rx_stats.failed_marshal_count,
	   r_xprt->rx_stats.bad_reply_count);
}
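
/* The line emitted above is the "xprt: rdma ..." entry that shows up
 * in /proc/self/mountstats for an NFS/RDMA mount. Userspace tools such
 * as mountstats parse it positionally, so the field order is
 * effectively a stable interface and should not be changed.
 */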

static int
xprt_rdma_enable_swap(struct rpc_xprt *xprt)
{
	return 0;
}

static void
xprt_rdma_disable_swap(struct rpc_xprt *xprt)
{
}

/*
 * Plumbing for rpc transport switch and kernel module
 */

static struct rpc_xprt_ops xprt_rdma_procs = {
	.reserve_xprt		= xprt_reserve_xprt_cong,
	.release_xprt		= xprt_release_xprt_cong, /* sunrpc/xprt.c */
	.alloc_slot		= xprt_alloc_slot,
	.release_request	= xprt_release_rqst_cong,	/* ditto */
	.set_retrans_timeout	= xprt_set_retrans_timeout_def, /* ditto */
	.rpcbind		= rpcb_getport_async,	/* sunrpc/rpcb_clnt.c */
	.set_port		= xprt_rdma_set_port,
	.connect		= xprt_rdma_connect,
	.buf_alloc		= xprt_rdma_allocate,
	.buf_free		= xprt_rdma_free,
	.send_request		= xprt_rdma_send_request,
	.close			= xprt_rdma_close,
	.destroy		= xprt_rdma_destroy,
	.print_stats		= xprt_rdma_print_stats,
	.enable_swap		= xprt_rdma_enable_swap,
	.disable_swap		= xprt_rdma_disable_swap,
	.inject_disconnect	= xprt_rdma_inject_disconnect
};

static struct xprt_class xprt_rdma = {
	.list			= LIST_HEAD_INIT(xprt_rdma.list),
	.name			= "rdma",
	.owner			= THIS_MODULE,
	.ident			= XPRT_TRANSPORT_RDMA,
	.setup			= xprt_setup_rdma,
};

static void __exit xprt_rdma_cleanup(void)
{
	int rc;

	dprintk("RPCRDMA Module Removed, deregister RPC RDMA transport\n");
#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
	if (sunrpc_table_header) {
		unregister_sysctl_table(sunrpc_table_header);
		sunrpc_table_header = NULL;
	}
#endif
	rc = xprt_unregister_transport(&xprt_rdma);
	if (rc)
		dprintk("RPC:       %s: xprt_unregister returned %i\n",
			__func__, rc);

	frwr_destroy_recovery_wq();
}

static int __init xprt_rdma_init(void)
{
	int rc;

	rc = frwr_alloc_recovery_wq();
	if (rc)
		return rc;

	rc = xprt_register_transport(&xprt_rdma);
	if (rc) {
		frwr_destroy_recovery_wq();
		return rc;
	}

	dprintk("RPCRDMA Module Init, register RPC RDMA transport\n");

	dprintk("Defaults:\n");
	dprintk("\tSlots %d\n"
		"\tMaxInlineRead %d\n\tMaxInlineWrite %d\n",
		xprt_rdma_slot_table_entries,
		xprt_rdma_max_inline_read, xprt_rdma_max_inline_write);
	dprintk("\tPadding %d\n\tMemreg %d\n",
		xprt_rdma_inline_write_padding, xprt_rdma_memreg_strategy);

#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
	if (!sunrpc_table_header)
		sunrpc_table_header = register_sysctl_table(sunrpc_table);
#endif
	return 0;
}

module_init(xprt_rdma_init);
module_exit(xprt_rdma_cleanup);