2 * Copyright (c) 2003-2007 Network Appliance, Inc. All rights reserved.
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the BSD-type license below:
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions are met:
14 * Redistributions of source code must retain the above copyright
15 * notice, this list of conditions and the following disclaimer.
17 * Redistributions in binary form must reproduce the above
18 * copyright notice, this list of conditions and the following
19 * disclaimer in the documentation and/or other materials provided
20 * with the distribution.
22 * Neither the name of the Network Appliance, Inc. nor the names of
23 * its contributors may be used to endorse or promote products
24 * derived from this software without specific prior written permission.
27 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
28 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
29 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
30 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
31 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
32 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
33 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
34 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
35 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
36 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
37 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
43 * This file contains the guts of the RPC RDMA protocol, and
44 * does marshaling/unmarshaling, etc. It is also where the interface
45 * to the Linux RPC framework lives.
48 #include "xprt_rdma.h"
50 #include <linux/highmem.h>
52 #if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
53 # define RPCDBG_FACILITY RPCDBG_TRANS
56 static const char transfertypes[][12] = {
57 "inline", /* no chunks */
58 "read list", /* some argument via rdma read */
59 "*read list", /* entire request via rdma read */
60 "write list", /* some result via rdma write */
61 "reply chunk" /* entire reply via rdma write */
64 /* Returns size of largest RPC-over-RDMA header in a Call message
66 * The largest Call header contains a full-size Read list and a
67 * minimal Reply chunk.
69 static unsigned int rpcrdma_max_call_header_size(unsigned int maxsegs)
73 /* Fixed header fields and list discriminators */
74 size = RPCRDMA_HDRLEN_MIN;
76 /* Maximum Read list size */
77 maxsegs += 2; /* segment for head and tail buffers */
78 size = maxsegs * sizeof(struct rpcrdma_read_chunk);
80 /* Minimal Read chunk size */
81 size += sizeof(__be32); /* segment count */
82 size += sizeof(struct rpcrdma_segment);
83 size += sizeof(__be32); /* list discriminator */
85 dprintk("RPC: %s: max call header size = %u\n",
90 /* Returns size of largest RPC-over-RDMA header in a Reply message
92 * There is only one Write list or one Reply chunk per Reply
93 * message. The larger list is the Write list.
95 static unsigned int rpcrdma_max_reply_header_size(unsigned int maxsegs)
99 /* Fixed header fields and list discriminators */
100 size = RPCRDMA_HDRLEN_MIN;
102 /* Maximum Write list size */
103 maxsegs += 2; /* segment for head and tail buffers */
104 size = sizeof(__be32); /* segment count */
105 size += maxsegs * sizeof(struct rpcrdma_segment);
106 size += sizeof(__be32); /* list discriminator */
108 dprintk("RPC: %s: max reply header size = %u\n",
113 void rpcrdma_set_max_header_sizes(struct rpcrdma_xprt *r_xprt)
115 struct rpcrdma_create_data_internal *cdata = &r_xprt->rx_data;
116 struct rpcrdma_ia *ia = &r_xprt->rx_ia;
117 unsigned int maxsegs = ia->ri_max_segs;
119 ia->ri_max_inline_write = cdata->inline_wsize -
120 rpcrdma_max_call_header_size(maxsegs);
121 ia->ri_max_inline_read = cdata->inline_rsize -
122 rpcrdma_max_reply_header_size(maxsegs);
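/* Illustrative example (assumed values, not taken from this file): with a
 * 1024-byte inline threshold and a maximum call header size of 128 bytes,
 * ri_max_inline_write would be 896 bytes; an RPC whose rq_snd_buf.len fits
 * in that budget can be sent without a Read chunk.
 */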
125 /* The client can send a request inline as long as the RPCRDMA header
126 * plus the RPC call fit under the transport's inline limit. If the
127 * combined call message size exceeds that limit, the client must use
128 * the read chunk list for this operation.
130 static bool rpcrdma_args_inline(struct rpcrdma_xprt *r_xprt,
131 struct rpc_rqst *rqst)
133 struct rpcrdma_ia *ia = &r_xprt->rx_ia;
135 return rqst->rq_snd_buf.len <= ia->ri_max_inline_write;
138 /* The client can't know how large the actual reply will be. Thus it
139 * plans for the largest possible reply for that particular ULP
140 * operation. If the maximum combined reply message size exceeds that
141 * limit, the client must provide a write list or a reply chunk for this operation.
144 static bool rpcrdma_results_inline(struct rpcrdma_xprt *r_xprt,
145 struct rpc_rqst *rqst)
147 struct rpcrdma_ia *ia = &r_xprt->rx_ia;
149 return rqst->rq_rcv_buf.buflen <= ia->ri_max_inline_read;
152 /* Split "vec" on page boundaries into segments. FMR registers pages,
153 * not a byte range. Other modes coalesce these segments into a single MR when they can.
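 * Illustrative example (assumed values): with 4 KiB pages, a 9000-byte
 * kvec starting at page offset 3000 splits into three segments of 1096,
 * 4096, and 3808 bytes; only the first segment has a non-zero page offset.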
157 rpcrdma_convert_kvec(struct kvec *vec, struct rpcrdma_mr_seg *seg, int n)
163 base = vec->iov_base;
164 page_offset = offset_in_page(base);
165 remaining = vec->iov_len;
166 while (remaining && n < RPCRDMA_MAX_SEGS) {
167 seg[n].mr_page = NULL;
168 seg[n].mr_offset = base;
169 seg[n].mr_len = min_t(u32, PAGE_SIZE - page_offset, remaining);
170 remaining -= seg[n].mr_len;
171 base += seg[n].mr_len;
179 * Chunk assembly from upper layer xdr_buf.
181 * Convert the passed-in xdr_buf into RPC/RDMA chunk
182 * elements. Segments are then coalesced when registered, if possible
183 * within the selected memreg mode.
185 * Returns positive number of segments converted, or a negative errno.
189 rpcrdma_convert_iovs(struct xdr_buf *xdrbuf, unsigned int pos,
190 enum rpcrdma_chunktype type, struct rpcrdma_mr_seg *seg,
191 bool reminv_expected)
193 int len, n, p, page_base;
194 struct page **ppages;
198 n = rpcrdma_convert_kvec(&xdrbuf->head[0], seg, n);
199 if (n == RPCRDMA_MAX_SEGS)
203 len = xdrbuf->page_len;
204 ppages = xdrbuf->pages + (xdrbuf->page_base >> PAGE_SHIFT);
205 page_base = xdrbuf->page_base & ~PAGE_MASK;
207 while (len && n < RPCRDMA_MAX_SEGS) {
209 /* allocate a page for the receive buffer's page list */
210 ppages[p] = alloc_page(GFP_ATOMIC);
214 seg[n].mr_page = ppages[p];
215 seg[n].mr_offset = (void *)(unsigned long) page_base;
216 seg[n].mr_len = min_t(u32, PAGE_SIZE - page_base, len);
217 if (seg[n].mr_len > PAGE_SIZE)
219 len -= seg[n].mr_len;
222 page_base = 0; /* page offset only applies to first page */
225 /* Message overflows the seg array */
226 if (len && n == RPCRDMA_MAX_SEGS)
229 /* When encoding a Read chunk, the tail iovec contains an
230 * XDR pad and may be omitted.
232 if (type == rpcrdma_readch && xprt_rdma_pad_optimize)
235 /* When encoding the Write list, some servers need to see an extra
236 * segment for odd-length Write chunks. The upper layer provides
237 * space in the tail iovec for this purpose.
239 if (type == rpcrdma_writech && reminv_expected)
242 if (xdrbuf->tail[0].iov_len) {
243 n = rpcrdma_convert_kvec(&xdrbuf->tail[0], seg, n);
244 if (n == RPCRDMA_MAX_SEGS)
251 pr_err("rpcrdma: segment array overflow\n");
255 static inline __be32 *
256 xdr_encode_rdma_segment(__be32 *iptr, struct rpcrdma_mw *mw)
258 *iptr++ = cpu_to_be32(mw->mw_handle);
259 *iptr++ = cpu_to_be32(mw->mw_length);
260 return xdr_encode_hyper(iptr, mw->mw_offset);
263 /* XDR-encode the Read list. Supports encoding a list of read
264 * segments that belong to a single read chunk.
266 * Encoding key for single-list chunks (HLOO = Handle32 Length32 Offset64):
268 * Read chunklist (a linked list):
269 * N elements, position P (same P for all chunks of same arg!):
270 * 1 - PHLOO - 1 - PHLOO - ... - 1 - PHLOO - 0
272 * Returns a pointer to the XDR word in the RDMA header following
273 * the end of the Read list, or an error pointer.
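 * Illustrative encoding (assumed values): a Read chunk with two segments
 * at XDR position 20 is emitted as
 *   1, 20, H1, L1, O1 O1,  1, 20, H2, L2, O2 O2,  0
 * where each handle and length is one XDR word and each offset is two.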
276 rpcrdma_encode_read_list(struct rpcrdma_xprt *r_xprt,
277 struct rpcrdma_req *req, struct rpc_rqst *rqst,
278 __be32 *iptr, enum rpcrdma_chunktype rtype)
280 struct rpcrdma_mr_seg *seg;
281 struct rpcrdma_mw *mw;
285 if (rtype == rpcrdma_noch) {
286 *iptr++ = xdr_zero; /* item not present */
290 pos = rqst->rq_snd_buf.head[0].iov_len;
291 if (rtype == rpcrdma_areadch)
293 seg = req->rl_segments;
294 nsegs = rpcrdma_convert_iovs(&rqst->rq_snd_buf, pos, rtype, seg, false);
296 return ERR_PTR(nsegs);
299 n = r_xprt->rx_ia.ri_ops->ro_map(r_xprt, seg, nsegs,
303 list_add(&mw->mw_list, &req->rl_registered);
305 *iptr++ = xdr_one; /* item present */
307 /* All read segments in this chunk
308 * have the same "position".
310 *iptr++ = cpu_to_be32(pos);
311 iptr = xdr_encode_rdma_segment(iptr, mw);
313 dprintk("RPC: %5u %s: pos %u %u@0x%016llx:0x%08x (%s)\n",
314 rqst->rq_task->tk_pid, __func__, pos,
315 mw->mw_length, (unsigned long long)mw->mw_offset,
316 mw->mw_handle, n < nsegs ? "more" : "last");
318 r_xprt->rx_stats.read_chunk_count++;
323 /* Finish Read list */
324 *iptr++ = xdr_zero; /* Next item not present */
328 /* XDR-encode the Write list. Supports encoding a list containing
329 * one array of plain segments that belong to a single write chunk.
331 * Encoding key for single-list chunks (HLOO = Handle32 Length32 Offset64):
333 * Write chunklist (a list of (one) counted array):
335 * 1 - N - HLOO - HLOO - ... - HLOO - 0
337 * Returns a pointer to the XDR word in the RDMA header following
338 * the end of the Write list, or an error pointer.
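 * Illustrative encoding (assumed values): one Write chunk with two
 * segments is emitted as
 *   1, 2, H1, L1, O1 O1, H2, L2, O2 O2,  0
 * (list present, segment count, two HLOO items, then "no further chunk").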
341 rpcrdma_encode_write_list(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req,
342 struct rpc_rqst *rqst, __be32 *iptr,
343 enum rpcrdma_chunktype wtype)
345 struct rpcrdma_mr_seg *seg;
346 struct rpcrdma_mw *mw;
347 int n, nsegs, nchunks;
350 if (wtype != rpcrdma_writech) {
351 *iptr++ = xdr_zero; /* no Write list present */
355 seg = req->rl_segments;
356 nsegs = rpcrdma_convert_iovs(&rqst->rq_rcv_buf,
357 rqst->rq_rcv_buf.head[0].iov_len,
359 r_xprt->rx_ia.ri_reminv_expected);
361 return ERR_PTR(nsegs);
363 *iptr++ = xdr_one; /* Write list present */
364 segcount = iptr++; /* save location of segment count */
368 n = r_xprt->rx_ia.ri_ops->ro_map(r_xprt, seg, nsegs,
372 list_add(&mw->mw_list, &req->rl_registered);
374 iptr = xdr_encode_rdma_segment(iptr, mw);
376 dprintk("RPC: %5u %s: %u@0x016%llx:0x%08x (%s)\n",
377 rqst->rq_task->tk_pid, __func__,
378 mw->mw_length, (unsigned long long)mw->mw_offset,
379 mw->mw_handle, n < nsegs ? "more" : "last");
381 r_xprt->rx_stats.write_chunk_count++;
382 r_xprt->rx_stats.total_rdma_request += seg->mr_len;
388 /* Update count of segments in this Write chunk */
389 *segcount = cpu_to_be32(nchunks);
391 /* Finish Write list */
392 *iptr++ = xdr_zero; /* Next item not present */
396 /* XDR-encode the Reply chunk. Supports encoding an array of plain
397 * segments that belong to a single write (reply) chunk.
399 * Encoding key for single-list chunks (HLOO = Handle32 Length32 Offset64):
401 * Reply chunk (a counted array):
403 * 1 - N - HLOO - HLOO - ... - HLOO
405 * Returns a pointer to the XDR word in the RDMA header following
406 * the end of the Reply chunk, or an error pointer.
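 * Illustrative encoding (assumed values): a Reply chunk with two segments
 * is emitted as
 *   1, 2, H1, L1, O1 O1, H2, L2, O2 O2
 * (a counted array; no list terminator follows, unlike the Write list).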
409 rpcrdma_encode_reply_chunk(struct rpcrdma_xprt *r_xprt,
410 struct rpcrdma_req *req, struct rpc_rqst *rqst,
411 __be32 *iptr, enum rpcrdma_chunktype wtype)
413 struct rpcrdma_mr_seg *seg;
414 struct rpcrdma_mw *mw;
415 int n, nsegs, nchunks;
418 if (wtype != rpcrdma_replych) {
419 *iptr++ = xdr_zero; /* no Reply chunk present */
423 seg = req->rl_segments;
424 nsegs = rpcrdma_convert_iovs(&rqst->rq_rcv_buf, 0, wtype, seg,
425 r_xprt->rx_ia.ri_reminv_expected);
427 return ERR_PTR(nsegs);
429 *iptr++ = xdr_one; /* Reply chunk present */
430 segcount = iptr++; /* save location of segment count */
434 n = r_xprt->rx_ia.ri_ops->ro_map(r_xprt, seg, nsegs,
438 list_add(&mw->mw_list, &req->rl_registered);
440 iptr = xdr_encode_rdma_segment(iptr, mw);
442 dprintk("RPC: %5u %s: %u@0x%016llx:0x%08x (%s)\n",
443 rqst->rq_task->tk_pid, __func__,
444 mw->mw_length, (unsigned long long)mw->mw_offset,
445 mw->mw_handle, n < nsegs ? "more" : "last");
447 r_xprt->rx_stats.reply_chunk_count++;
448 r_xprt->rx_stats.total_rdma_request += seg->mr_len;
454 /* Update count of segments in the Reply chunk */
455 *segcount = cpu_to_be32(nchunks);
460 /* Prepare the RPC-over-RDMA header SGE.
463 rpcrdma_prepare_hdr_sge(struct rpcrdma_ia *ia, struct rpcrdma_req *req,
466 struct rpcrdma_regbuf *rb = req->rl_rdmabuf;
467 struct ib_sge *sge = &req->rl_send_sge[0];
469 if (unlikely(!rpcrdma_regbuf_is_mapped(rb))) {
470 if (!__rpcrdma_dma_map_regbuf(ia, rb))
472 sge->addr = rdmab_addr(rb);
473 sge->lkey = rdmab_lkey(rb);
477 ib_dma_sync_single_for_device(ia->ri_device, sge->addr,
478 sge->length, DMA_TO_DEVICE);
479 req->rl_send_wr.num_sge++;
483 /* Prepare the Send SGEs. The head and tail iovec, and each entry
484 * in the page list, gets its own SGE.
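 * Illustrative SGE layout (assumed values): for a Call with a 104-byte
 * RPC-over-RDMA header, a 148-byte head iovec, two payload pages, and a
 * 4-byte tail, the Send WR carries five SGEs: sge[0] = header,
 * sge[1] = head, sge[2..3] = pages, sge[4] = tail.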
487 rpcrdma_prepare_msg_sges(struct rpcrdma_ia *ia, struct rpcrdma_req *req,
488 struct xdr_buf *xdr, enum rpcrdma_chunktype rtype)
490 unsigned int sge_no, page_base, len, remaining;
491 struct rpcrdma_regbuf *rb = req->rl_sendbuf;
492 struct ib_device *device = ia->ri_device;
493 struct ib_sge *sge = req->rl_send_sge;
494 u32 lkey = ia->ri_pd->local_dma_lkey;
495 struct page *page, **ppages;
497 /* The head iovec is straightforward, as it is already
498 * DMA-mapped. Sync the content that has changed.
500 if (!rpcrdma_dma_map_regbuf(ia, rb))
503 sge[sge_no].addr = rdmab_addr(rb);
504 sge[sge_no].length = xdr->head[0].iov_len;
505 sge[sge_no].lkey = rdmab_lkey(rb);
506 ib_dma_sync_single_for_device(device, sge[sge_no].addr,
507 sge[sge_no].length, DMA_TO_DEVICE);
509 /* If there is a Read chunk, the page list is being handled
510 * via explicit RDMA, and thus is skipped here. However, the
511 * tail iovec may include an XDR pad for the page list, as
512 * well as additional content, and may not reside in the
513 * same page as the head iovec.
515 if (rtype == rpcrdma_readch) {
516 len = xdr->tail[0].iov_len;
518 /* Do not include the tail if it is only an XDR pad */
522 page = virt_to_page(xdr->tail[0].iov_base);
523 page_base = (unsigned long)xdr->tail[0].iov_base & ~PAGE_MASK;
525 /* If the content in the page list is an odd length,
526 * xdr_write_pages() has added a pad at the beginning
527 * of the tail iovec. Force the tail's non-pad content
528 * to land at the next XDR position in the Send message.
530 page_base += len & 3;
535 /* If there is a page list present, temporarily DMA map
536 * and prepare an SGE for each page to be sent.
539 ppages = xdr->pages + (xdr->page_base >> PAGE_SHIFT);
540 page_base = xdr->page_base & ~PAGE_MASK;
541 remaining = xdr->page_len;
544 if (sge_no > RPCRDMA_MAX_SEND_SGES - 2)
545 goto out_mapping_overflow;
547 len = min_t(u32, PAGE_SIZE - page_base, remaining);
548 sge[sge_no].addr = ib_dma_map_page(device, *ppages,
551 if (ib_dma_mapping_error(device, sge[sge_no].addr))
552 goto out_mapping_err;
553 sge[sge_no].length = len;
554 sge[sge_no].lkey = lkey;
556 req->rl_mapped_sges++;
563 /* The tail iovec is not always constructed in the same
564 * page where the head iovec resides (see, for example,
565 * gss_wrap_req_priv). To neatly accommodate that case,
566 * DMA map it separately.
568 if (xdr->tail[0].iov_len) {
569 page = virt_to_page(xdr->tail[0].iov_base);
570 page_base = (unsigned long)xdr->tail[0].iov_base & ~PAGE_MASK;
571 len = xdr->tail[0].iov_len;
575 sge[sge_no].addr = ib_dma_map_page(device, page,
578 if (ib_dma_mapping_error(device, sge[sge_no].addr))
579 goto out_mapping_err;
580 sge[sge_no].length = len;
581 sge[sge_no].lkey = lkey;
582 req->rl_mapped_sges++;
586 req->rl_send_wr.num_sge = sge_no + 1;
589 out_mapping_overflow:
590 pr_err("rpcrdma: too many Send SGEs (%u)\n", sge_no);
594 pr_err("rpcrdma: Send mapping error\n");
599 rpcrdma_prepare_send_sges(struct rpcrdma_ia *ia, struct rpcrdma_req *req,
600 u32 hdrlen, struct xdr_buf *xdr,
601 enum rpcrdma_chunktype rtype)
603 req->rl_send_wr.num_sge = 0;
604 req->rl_mapped_sges = 0;
606 if (!rpcrdma_prepare_hdr_sge(ia, req, hdrlen))
609 if (rtype != rpcrdma_areadch)
610 if (!rpcrdma_prepare_msg_sges(ia, req, xdr, rtype))
616 pr_err("rpcrdma: failed to DMA map a Send buffer\n");
621 rpcrdma_unmap_sges(struct rpcrdma_ia *ia, struct rpcrdma_req *req)
623 struct ib_device *device = ia->ri_device;
627 sge = &req->rl_send_sge[2];
628 for (count = req->rl_mapped_sges; count--; sge++)
629 ib_dma_unmap_page(device, sge->addr, sge->length,
631 req->rl_mapped_sges = 0;
635 * Marshal a request: the primary job of this routine is to choose
636 * the transfer modes. See comments below.
638 * Returns zero on success, otherwise a negative errno.
642 rpcrdma_marshal_req(struct rpc_rqst *rqst)
644 struct rpc_xprt *xprt = rqst->rq_xprt;
645 struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);
646 struct rpcrdma_req *req = rpcr_to_rdmar(rqst);
647 enum rpcrdma_chunktype rtype, wtype;
648 struct rpcrdma_msg *headerp;
654 #if defined(CONFIG_SUNRPC_BACKCHANNEL)
655 if (test_bit(RPC_BC_PA_IN_USE, &rqst->rq_bc_pa_state))
656 return rpcrdma_bc_marshal_reply(rqst);
659 headerp = rdmab_to_msg(req->rl_rdmabuf);
660 /* don't byte-swap XID, it's already in network byte order in the request */
661 headerp->rm_xid = rqst->rq_xid;
662 headerp->rm_vers = rpcrdma_version;
663 headerp->rm_credit = cpu_to_be32(r_xprt->rx_buf.rb_max_requests);
664 headerp->rm_type = rdma_msg;
666 /* When the ULP employs a GSS flavor that guarantees integrity
667 * or privacy, direct data placement of individual data items is not allowed.
670 ddp_allowed = !(rqst->rq_cred->cr_auth->au_flags &
671 RPCAUTH_AUTH_DATATOUCH);
674 * Chunks needed for results?
676 * o If the expected result is under the inline threshold, all ops return as inline.
678 * o Large read ops return data as write chunk(s), header as inline.
680 * o Large non-read ops return as a single reply chunk.
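 * For example (illustrative): a large NFS READ reply returns its data
 * payload via a Write chunk while the RPC reply header stays inline; a
 * large reply with no data payload comes back in a single Reply chunk.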
682 if (rpcrdma_results_inline(r_xprt, rqst))
683 wtype = rpcrdma_noch;
684 else if (ddp_allowed && rqst->rq_rcv_buf.flags & XDRBUF_READ)
685 wtype = rpcrdma_writech;
687 wtype = rpcrdma_replych;
690 * Chunks needed for arguments?
692 * o If the total request is under the inline threshold, all ops
693 * are sent as inline.
694 * o Large write ops transmit data as read chunk(s), header as inline.
696 * o Large non-write ops are sent with the entire message as a
697 * single read chunk (protocol 0-position special case).
699 * This assumes that the upper layer does not present a request
700 * that both has a data payload, and whose non-data arguments
701 * by themselves are larger than the inline threshold.
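 * For example (illustrative): a large NFS WRITE sends its data payload
 * via a Read chunk at the payload's XDR position; a large Call with no
 * data payload is sent as a Position Zero Read chunk (RDMA_NOMSG).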
703 if (rpcrdma_args_inline(r_xprt, rqst)) {
704 rtype = rpcrdma_noch;
705 rpclen = rqst->rq_snd_buf.len;
706 } else if (ddp_allowed && rqst->rq_snd_buf.flags & XDRBUF_WRITE) {
707 rtype = rpcrdma_readch;
708 rpclen = rqst->rq_snd_buf.head[0].iov_len +
709 rqst->rq_snd_buf.tail[0].iov_len;
711 r_xprt->rx_stats.nomsg_call_count++;
712 headerp->rm_type = rdma_nomsg;
713 rtype = rpcrdma_areadch;
717 /* This implementation supports the following combinations
718 * of chunk lists in one RPC-over-RDMA Call message:
723 * - Read list + Reply chunk
725 * It might not yet support the following combinations:
727 * - Read list + Write list
729 * It does not support the following combinations:
731 * - Write list + Reply chunk
732 * - Read list + Write list + Reply chunk
734 * This implementation supports only a single chunk in each
735 * Read or Write list. Thus for example the client cannot
736 * send a Call message with a Position Zero Read chunk and a
737 * regular Read chunk at the same time.
739 iptr = headerp->rm_body.rm_chunks;
740 iptr = rpcrdma_encode_read_list(r_xprt, req, rqst, iptr, rtype);
743 iptr = rpcrdma_encode_write_list(r_xprt, req, rqst, iptr, wtype);
746 iptr = rpcrdma_encode_reply_chunk(r_xprt, req, rqst, iptr, wtype);
749 hdrlen = (unsigned char *)iptr - (unsigned char *)headerp;
751 dprintk("RPC: %5u %s: %s/%s: hdrlen %zd rpclen %zd\n",
752 rqst->rq_task->tk_pid, __func__,
753 transfertypes[rtype], transfertypes[wtype],
756 if (!rpcrdma_prepare_send_sges(&r_xprt->rx_ia, req, hdrlen,
757 &rqst->rq_snd_buf, rtype)) {
758 iptr = ERR_PTR(-EIO);
764 r_xprt->rx_ia.ri_ops->ro_unmap_safe(r_xprt, req, false);
765 return PTR_ERR(iptr);
769 * Chase down a received write or reply chunklist to get length
770 * RDMA'd by the server. See the encoding done by rpcrdma_encode_write_list() and rpcrdma_encode_reply_chunk().
773 rpcrdma_count_chunks(struct rpcrdma_rep *rep, int wrchunk, __be32 **iptrp)
775 unsigned int i, total_len;
776 struct rpcrdma_write_chunk *cur_wchunk;
777 char *base = (char *)rdmab_to_msg(rep->rr_rdmabuf);
779 i = be32_to_cpu(**iptrp);
780 cur_wchunk = (struct rpcrdma_write_chunk *) (*iptrp + 1);
783 struct rpcrdma_segment *seg = &cur_wchunk->wc_target;
786 xdr_decode_hyper((__be32 *)&seg->rs_offset, &off);
787 dprintk("RPC: %s: chunk %d@0x%016llx:0x%08x\n",
789 be32_to_cpu(seg->rs_length),
790 (unsigned long long)off,
791 be32_to_cpu(seg->rs_handle));
793 total_len += be32_to_cpu(seg->rs_length);
796 /* check and adjust for properly terminated write chunk */
798 __be32 *w = (__be32 *) cur_wchunk;
799 if (*w++ != xdr_zero)
801 cur_wchunk = (struct rpcrdma_write_chunk *) w;
803 if ((char *)cur_wchunk > base + rep->rr_len)
806 *iptrp = (__be32 *) cur_wchunk;
811 * rpcrdma_inline_fixup - Scatter inline received data into rqst's iovecs
812 * @rqst: controlling RPC request
813 * @srcp: points to RPC message payload in receive buffer
814 * @copy_len: remaining length of receive buffer content
815 * @pad: Write chunk pad bytes needed (zero for pure inline)
817 * The upper layer has set the maximum number of bytes it can
818 * receive in each component of rq_rcv_buf. These values are set in
819 * the head.iov_len, page_len, tail.iov_len, and buflen fields.
821 * Unlike the TCP equivalent (xdr_partial_copy_from_skb), in
822 * many cases this function simply updates iov_base pointers in
823 * rq_rcv_buf to point directly to the received reply data, to
824 * avoid copying reply data.
826 * Returns the count of bytes which had to be memcopied.
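 * Illustrative example (assumed values): for a 1200-byte inline reply
 * with head.iov_len of 148 and page_len of 1000, the first 148 bytes are
 * used in place, the next 1000 bytes are memcopied into the page list
 * (so 1000 is returned), and the final 52 bytes land in the tail iovec
 * by pointer redirection.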
829 rpcrdma_inline_fixup(struct rpc_rqst *rqst, char *srcp, int copy_len, int pad)
831 unsigned long fixup_copy_count;
832 int i, npages, curlen;
834 struct page **ppages;
837 /* The head iovec is redirected to the RPC reply message
838 * in the receive buffer, to avoid a memcopy.
840 rqst->rq_rcv_buf.head[0].iov_base = srcp;
841 rqst->rq_private_buf.head[0].iov_base = srcp;
843 /* The contents of the receive buffer that follow
844 * head.iov_len bytes are copied into the page list.
846 curlen = rqst->rq_rcv_buf.head[0].iov_len;
847 if (curlen > copy_len)
849 dprintk("RPC: %s: srcp 0x%p len %d hdrlen %d\n",
850 __func__, srcp, copy_len, curlen);
854 page_base = rqst->rq_rcv_buf.page_base;
855 ppages = rqst->rq_rcv_buf.pages + (page_base >> PAGE_SHIFT);
856 page_base &= ~PAGE_MASK;
857 fixup_copy_count = 0;
858 if (copy_len && rqst->rq_rcv_buf.page_len) {
861 pagelist_len = rqst->rq_rcv_buf.page_len;
862 if (pagelist_len > copy_len)
863 pagelist_len = copy_len;
864 npages = PAGE_ALIGN(page_base + pagelist_len) >> PAGE_SHIFT;
865 for (i = 0; i < npages; i++) {
866 curlen = PAGE_SIZE - page_base;
867 if (curlen > pagelist_len)
868 curlen = pagelist_len;
870 dprintk("RPC: %s: page %d"
871 " srcp 0x%p len %d curlen %d\n",
872 __func__, i, srcp, copy_len, curlen);
873 destp = kmap_atomic(ppages[i]);
874 memcpy(destp + page_base, srcp, curlen);
875 flush_dcache_page(ppages[i]);
876 kunmap_atomic(destp);
879 fixup_copy_count += curlen;
880 pagelist_len -= curlen;
886 /* Implicit padding for the last segment in a Write
887 * chunk is inserted inline at the front of the tail
888 * iovec. The upper layer ignores the content of
889 * the pad. Simply ensure inline content in the tail
890 * that follows the Write chunk is properly aligned.
896 /* The tail iovec is redirected to the remaining data
897 * in the receive buffer, to avoid a memcopy.
899 if (copy_len || pad) {
900 rqst->rq_rcv_buf.tail[0].iov_base = srcp;
901 rqst->rq_private_buf.tail[0].iov_base = srcp;
904 return fixup_copy_count;
907 #if defined(CONFIG_SUNRPC_BACKCHANNEL)
908 /* By convention, backchannel calls arrive via rdma_msg type
909 * messages, and never populate the chunk lists. This makes
910 * the RPC/RDMA header small and fixed in size, so it is
911 * straightforward to check the RPC header's direction field.
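 * The fixed offsets checked below follow from that layout: the
 * RPC-over-RDMA header occupies XDR words p[0]..p[6], so the embedded
 * RPC message begins at p[7] (its XID) and p[8] holds the call
 * direction.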
914 rpcrdma_is_bcall(struct rpcrdma_msg *headerp)
916 __be32 *p = (__be32 *)headerp;
918 if (headerp->rm_type != rdma_msg)
920 if (headerp->rm_body.rm_chunks[0] != xdr_zero)
922 if (headerp->rm_body.rm_chunks[1] != xdr_zero)
924 if (headerp->rm_body.rm_chunks[2] != xdr_zero)
928 if (p[7] != headerp->rm_xid)
931 if (p[8] != cpu_to_be32(RPC_CALL))
936 #endif /* CONFIG_SUNRPC_BACKCHANNEL */
938 /* Process received RPC/RDMA messages.
940 * Errors must result in the RPC task either being awakened, or
941 * allowed to time out, to discover the error at that time.
944 rpcrdma_reply_handler(struct work_struct *work)
946 struct rpcrdma_rep *rep =
947 container_of(work, struct rpcrdma_rep, rr_work);
948 struct rpcrdma_msg *headerp;
949 struct rpcrdma_req *req;
950 struct rpc_rqst *rqst;
951 struct rpcrdma_xprt *r_xprt = rep->rr_rxprt;
952 struct rpc_xprt *xprt = &r_xprt->rx_xprt;
954 int rdmalen, status, rmerr;
957 dprintk("RPC: %s: incoming rep %p\n", __func__, rep);
959 if (rep->rr_len == RPCRDMA_BAD_LEN)
961 if (rep->rr_len < RPCRDMA_HDRLEN_ERR)
964 headerp = rdmab_to_msg(rep->rr_rdmabuf);
965 #if defined(CONFIG_SUNRPC_BACKCHANNEL)
966 if (rpcrdma_is_bcall(headerp))
970 /* Match incoming rpcrdma_rep to an rpcrdma_req to
971 * get context for handling any incoming chunks.
973 spin_lock_bh(&xprt->transport_lock);
974 rqst = xprt_lookup_rqst(xprt, headerp->rm_xid);
978 req = rpcr_to_rdmar(rqst);
982 /* Sanity checking has passed. We are now committed
983 * to complete this transaction.
985 list_del_init(&rqst->rq_list);
986 spin_unlock_bh(&xprt->transport_lock);
987 dprintk("RPC: %s: reply %p completes request %p (xid 0x%08x)\n",
988 __func__, rep, req, be32_to_cpu(headerp->rm_xid));
990 /* from here on, the reply is no longer an orphan */
992 xprt->reestablish_timeout = 0;
994 if (headerp->rm_vers != rpcrdma_version)
997 /* check for expected message types */
998 /* The order of some of these tests is important. */
999 switch (headerp->rm_type) {
1001 /* never expect read chunks */
1002 /* never expect reply chunks (two ways to check) */
1003 /* never expect write chunks without having offered RDMA */
1004 if (headerp->rm_body.rm_chunks[0] != xdr_zero ||
1005 (headerp->rm_body.rm_chunks[1] == xdr_zero &&
1006 headerp->rm_body.rm_chunks[2] != xdr_zero) ||
1007 (headerp->rm_body.rm_chunks[1] != xdr_zero &&
1008 list_empty(&req->rl_registered)))
1010 if (headerp->rm_body.rm_chunks[1] != xdr_zero) {
1011 /* count any expected write chunks in read reply */
1012 /* start at write chunk array count */
1013 iptr = &headerp->rm_body.rm_chunks[2];
1014 rdmalen = rpcrdma_count_chunks(rep, 1, &iptr);
1015 /* check for validity, and no reply chunk after */
1016 if (rdmalen < 0 || *iptr++ != xdr_zero)
1019 ((unsigned char *)iptr - (unsigned char *)headerp);
1020 status = rep->rr_len + rdmalen;
1021 r_xprt->rx_stats.total_rdma_reply += rdmalen;
1022 /* special case - last chunk may omit padding */
1024 rdmalen = 4 - rdmalen;
1028 /* else ordinary inline */
1030 iptr = (__be32 *)((unsigned char *)headerp +
1031 RPCRDMA_HDRLEN_MIN);
1032 rep->rr_len -= RPCRDMA_HDRLEN_MIN;
1033 status = rep->rr_len;
1036 r_xprt->rx_stats.fixup_copy_count +=
1037 rpcrdma_inline_fixup(rqst, (char *)iptr, rep->rr_len,
1042 /* never expect read or write chunks, always reply chunks */
1043 if (headerp->rm_body.rm_chunks[0] != xdr_zero ||
1044 headerp->rm_body.rm_chunks[1] != xdr_zero ||
1045 headerp->rm_body.rm_chunks[2] != xdr_one ||
1046 list_empty(&req->rl_registered))
1048 iptr = (__be32 *)((unsigned char *)headerp +
1049 RPCRDMA_HDRLEN_MIN);
1050 rdmalen = rpcrdma_count_chunks(rep, 0, &iptr);
1053 r_xprt->rx_stats.total_rdma_reply += rdmalen;
1054 /* Reply chunk buffer already is the reply vector - no fixup. */
1063 dprintk("RPC: %5u %s: invalid rpcrdma reply (type %u)\n",
1064 rqst->rq_task->tk_pid, __func__,
1065 be32_to_cpu(headerp->rm_type));
1067 r_xprt->rx_stats.bad_reply_count++;
1072 /* Invalidate and flush the data payloads before waking the
1073 * waiting application. This guarantees the memory region is
1074 * properly fenced from the server before the application
1075 * accesses the data. It also ensures proper send flow
1076 * control: waking the next RPC waits until this RPC has
1077 * relinquished all its Send Queue entries.
1079 if (!list_empty(&req->rl_registered))
1080 r_xprt->rx_ia.ri_ops->ro_unmap_sync(r_xprt, req);
1082 spin_lock_bh(&xprt->transport_lock);
1084 xprt->cwnd = atomic_read(&r_xprt->rx_buf.rb_credits) << RPC_CWNDSHIFT;
1085 if (xprt->cwnd > cwnd)
1086 xprt_release_rqst_cong(rqst->rq_task);
1088 xprt_complete_rqst(rqst->rq_task, status);
1089 spin_unlock_bh(&xprt->transport_lock);
1090 dprintk("RPC: %s: xprt_complete_rqst(0x%p, 0x%p, %d)\n",
1091 __func__, xprt, rqst, status);
1095 rpcrdma_recv_buffer_put(rep);
1096 if (r_xprt->rx_ep.rep_connected == 1) {
1097 r_xprt->rx_ep.rep_connected = -EIO;
1098 rpcrdma_conn_func(&r_xprt->rx_ep);
1102 #if defined(CONFIG_SUNRPC_BACKCHANNEL)
1104 rpcrdma_bc_receive_call(r_xprt, rep);
1108 /* If the incoming reply terminated a pending RPC, the next
1109 * RPC call will post a replacement receive buffer as it is being marshaled.
1113 dprintk("RPC: %s: invalid version %d\n",
1114 __func__, be32_to_cpu(headerp->rm_vers));
1116 r_xprt->rx_stats.bad_reply_count++;
1120 rmerr = be32_to_cpu(headerp->rm_body.rm_error.rm_err);
1123 pr_err("%s: server reports header version error (%u-%u)\n",
1125 be32_to_cpu(headerp->rm_body.rm_error.rm_vers_low),
1126 be32_to_cpu(headerp->rm_body.rm_error.rm_vers_high));
1129 pr_err("%s: server reports header decoding error\n",
1133 pr_err("%s: server reports unknown error %d\n",
1136 status = -EREMOTEIO;
1137 r_xprt->rx_stats.bad_reply_count++;
1140 /* If no pending RPC transaction was matched, post a replacement
1141 * receive buffer before returning.
1144 dprintk("RPC: %s: short/invalid reply\n", __func__);
1148 spin_unlock_bh(&xprt->transport_lock);
1149 dprintk("RPC: %s: no match for incoming xid 0x%08x len %d\n",
1150 __func__, be32_to_cpu(headerp->rm_xid),
1155 spin_unlock_bh(&xprt->transport_lock);
1157 "duplicate reply %p to RPC request %p: xid 0x%08x\n",
1158 __func__, rep, req, be32_to_cpu(headerp->rm_xid));
1161 r_xprt->rx_stats.bad_reply_count++;
1162 if (rpcrdma_ep_post_recv(&r_xprt->rx_ia, rep))
1163 rpcrdma_recv_buffer_put(rep);