/*
 * Copyright (c) 2016, 2017 Oracle. All rights reserved.
 * Copyright (c) 2014 Open Grid Computing, Inc. All rights reserved.
 * Copyright (c) 2005-2006 Network Appliance, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the BSD-type
 * license below:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *      Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *
 *      Redistributions in binary form must reproduce the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer in the documentation and/or other materials provided
 *      with the distribution.
 *
 *      Neither the name of the Network Appliance, Inc. nor the names of
 *      its contributors may be used to endorse or promote products
 *      derived from this software without specific prior written
 *      permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Author: Tom Tucker <tom@opengridcomputing.com>
 */

#include <linux/sunrpc/xdr.h>
#include <linux/sunrpc/debug.h>
#include <linux/sunrpc/rpc_rdma.h>
#include <linux/spinlock.h>
#include <asm/unaligned.h>
#include <rdma/ib_verbs.h>
#include <rdma/rdma_cm.h>
#include <linux/sunrpc/svc_rdma.h>

#define RPCDBG_FACILITY RPCDBG_SVCXPRT

/*
 * Replace the pages in the rq_pages array with the pages from the SGE in
 * the RDMA_RECV completion. The SGL should contain full pages up until the
 * last one.
 */
static void rdma_build_arg_xdr(struct svc_rqst *rqstp,
                               struct svc_rdma_op_ctxt *ctxt,
                               u32 byte_count)
{
        struct rpcrdma_msg *rmsgp;
        struct page *page;
        u32 bc;
        int sge_no;

        /* Swap the page in the SGE with the first page in rq_pages */
        page = ctxt->pages[0];
        put_page(rqstp->rq_pages[0]);
        rqstp->rq_pages[0] = page;

        /* Set up the XDR head */
        rqstp->rq_arg.head[0].iov_base = page_address(page);
        rqstp->rq_arg.head[0].iov_len =
                min_t(size_t, byte_count, ctxt->sge[0].length);
        rqstp->rq_arg.len = byte_count;
        rqstp->rq_arg.buflen = byte_count;

        /* Compute bytes past head in the SGL */
        bc = byte_count - rqstp->rq_arg.head[0].iov_len;

        /* If data remains, store it in the pagelist */
        rqstp->rq_arg.page_len = bc;
        rqstp->rq_arg.page_base = 0;

        /* RDMA_NOMSG: RDMA READ data should land just after RDMA RECV data */
        rmsgp = (struct rpcrdma_msg *)rqstp->rq_arg.head[0].iov_base;
        if (rmsgp->rm_type == rdma_nomsg)
                rqstp->rq_arg.pages = &rqstp->rq_pages[0];
        else
                rqstp->rq_arg.pages = &rqstp->rq_pages[1];

        sge_no = 1;
        while (bc && sge_no < ctxt->count) {
                page = ctxt->pages[sge_no];
                put_page(rqstp->rq_pages[sge_no]);
                rqstp->rq_pages[sge_no] = page;
                bc -= min_t(u32, bc, ctxt->sge[sge_no].length);
                rqstp->rq_arg.buflen += ctxt->sge[sge_no].length;
                sge_no++;
        }
        rqstp->rq_respages = &rqstp->rq_pages[sge_no];
        rqstp->rq_next_page = rqstp->rq_respages + 1;

        /* If not all pages were used from the SGL, free the remaining ones */
        bc = sge_no;
        while (sge_no < ctxt->count) {
                page = ctxt->pages[sge_no++];
                put_page(page);
        }
        ctxt->count = bc;

        /* Set up tail */
        rqstp->rq_arg.tail[0].iov_base = NULL;
        rqstp->rq_arg.tail[0].iov_len = 0;
}
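
/* Illustrative arithmetic (an assumption for this example: 4KB pages):
 * with byte_count = 5000 and sge[0].length = 4096, the code above sets
 * head[0].iov_len = 4096 and bc = 904, so one receive page is swapped
 * into the pagelist and rq_arg.page_len = 904. The actual split
 * depends on the SGE lengths posted with the receive.
 */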

/* This accommodates the largest possible Position-Zero
 * Read chunk or Reply chunk, in one segment.
 */
#define MAX_BYTES_SPECIAL_SEG   ((u32)((RPCSVC_MAXPAGES + 2) << PAGE_SHIFT))

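/* For reference, the Read list layout that xdr_check_read_list() below
 * walks (inferred from its pointer arithmetic; the authoritative
 * definition is the RPC-over-RDMA protocol, RFC 5666):
 *
 *      1 (discriminator: another entry follows)
 *      position (offset of this chunk's data in the RPC message)
 *      handle   (the client memory region's R_key)
 *      length   (checked against MAX_BYTES_SPECIAL_SEG above)
 *      offset   (64-bit remote address: two XDR words)
 *      ...
 *      0 (discriminator: end of Read list)
 */
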
/* Sanity check the Read list.
 *
 * Implementation limits:
 * - This implementation supports only one Read chunk.
 *
 * Sanity checks:
 * - Read list does not overflow buffer.
 * - Segment size limited by largest NFS data payload.
 *
 * The segment count is limited to how many segments can
 * fit in the transport header without overflowing the
 * buffer. That's about 40 Read segments for a 1KB inline
 * threshold.
 *
 * Returns pointer to the following Write list.
 */
static __be32 *xdr_check_read_list(__be32 *p, const __be32 *end)
{
        u32 position;
        bool first;

        first = true;
        while (*p++ != xdr_zero) {
                if (first) {
                        position = be32_to_cpup(p++);
                        first = false;
                } else if (be32_to_cpup(p++) != position) {
                        return NULL;
                }
                p++;    /* handle */
                if (be32_to_cpup(p++) > MAX_BYTES_SPECIAL_SEG)
                        return NULL;
                p += 2; /* offset */

                if (p > end)
                        return NULL;
        }
        return p;
}

static __be32 *xdr_check_write_list(__be32 *p, __be32 *end)
{
        __be32 *next;

        while (*p++ != xdr_zero) {
                next = p + 1 + be32_to_cpup(p) * rpcrdma_segment_maxsz;
                if (next > end)
                        return NULL;
                p = next;
        }
        return p;
}

static __be32 *xdr_check_reply_chunk(__be32 *p, __be32 *end)
{
        __be32 *next;

        if (*p++ != xdr_zero) {
                next = p + 1 + be32_to_cpup(p) * rpcrdma_segment_maxsz;
                if (next > end)
                        return NULL;
                p = next;
        }
        return p;
}
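
/* For reference (inferred from the arithmetic above): a Write list
 * entry and the Reply chunk are each a discriminator word followed by
 * a segment count and that many segments of rpcrdma_segment_maxsz
 * words (handle, length, and a 64-bit offset). The Write list may
 * carry several chunks; the Reply chunk is at most one.
 */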

/* On entry, xdr->head[0].iov_base points to first byte in the
 * RPC-over-RDMA header.
 *
 * On successful exit, head[0] points to first byte past the
 * RPC-over-RDMA header. For RDMA_MSG, this is the RPC message.
 * The length of the RPC-over-RDMA header is returned.
 *
 * Assumptions:
 * - The transport header is entirely contained in the head iovec.
 */
static int svc_rdma_xdr_decode_req(struct xdr_buf *rq_arg)
{
        __be32 *p, *end, *rdma_argp;
        unsigned int hdr_len;
        char *proc;

        /* Verify that there are enough bytes for header + something */
        if (rq_arg->len <= RPCRDMA_HDRLEN_ERR)
                goto out_short;

        rdma_argp = rq_arg->head[0].iov_base;
        if (*(rdma_argp + 1) != rpcrdma_version)
                goto out_version;

        switch (*(rdma_argp + 3)) {
        case rdma_msg:
                proc = "RDMA_MSG";
                break;
        case rdma_nomsg:
                proc = "RDMA_NOMSG";
                break;

        case rdma_done:
                goto out_drop;

        case rdma_error:
                goto out_drop;

        default:
                goto out_proc;
        }

        end = (__be32 *)((unsigned long)rdma_argp + rq_arg->len);
        p = xdr_check_read_list(rdma_argp + 4, end);
        if (!p)
                goto out_inval;
        p = xdr_check_write_list(p, end);
        if (!p)
                goto out_inval;
        p = xdr_check_reply_chunk(p, end);
        if (!p)
                goto out_inval;
        if (p > end)
                goto out_inval;

        rq_arg->head[0].iov_base = p;
        hdr_len = (unsigned long)p - (unsigned long)rdma_argp;
        rq_arg->head[0].iov_len -= hdr_len;
        dprintk("svcrdma: received %s request for XID 0x%08x, hdr_len=%u\n",
                proc, be32_to_cpup(rdma_argp), hdr_len);
        return hdr_len;

out_short:
        dprintk("svcrdma: header too short = %d\n", rq_arg->len);
        return -EINVAL;

out_version:
        dprintk("svcrdma: bad xprt version: %u\n",
                be32_to_cpup(rdma_argp + 1));
        return -EPROTONOSUPPORT;

out_drop:
        dprintk("svcrdma: dropping RDMA_DONE/ERROR message\n");
        return 0;

out_proc:
        dprintk("svcrdma: bad rdma procedure (%u)\n",
                be32_to_cpup(rdma_argp + 3));
        return -EINVAL;

out_inval:
        dprintk("svcrdma: failed to parse transport header\n");
        return -EINVAL;
}
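
/* For reference: svc_rdma_xdr_decode_req() above relies on the fixed
 * part of the RPC-over-RDMA header being four XDR words -- XID,
 * version, credit value, procedure -- so rdma_argp + 1 is the version
 * field, rdma_argp + 3 is the procedure, and the three chunk lists
 * begin at rdma_argp + 4.
 */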

/* Issue an RDMA_READ using the local lkey to map the data sink */
int rdma_read_chunk_lcl(struct svcxprt_rdma *xprt,
                        struct svc_rqst *rqstp,
                        struct svc_rdma_op_ctxt *head,
                        int *page_no,
                        u32 *page_offset,
                        u32 rs_handle,
                        u32 rs_length,
                        u64 rs_offset,
                        bool last)
{
        struct ib_rdma_wr read_wr;
        int pages_needed = PAGE_ALIGN(*page_offset + rs_length) >> PAGE_SHIFT;
        struct svc_rdma_op_ctxt *ctxt = svc_rdma_get_context(xprt);
        int ret, read, pno;
        u32 pg_off = *page_offset;
        u32 pg_no = *page_no;

        ctxt->direction = DMA_FROM_DEVICE;
        ctxt->read_hdr = head;
        pages_needed = min_t(int, pages_needed, xprt->sc_max_sge_rd);
        read = min_t(int, (pages_needed << PAGE_SHIFT) - *page_offset,
                     rs_length);

        for (pno = 0; pno < pages_needed; pno++) {
                int len = min_t(int, rs_length, PAGE_SIZE - pg_off);

                head->arg.pages[pg_no] = rqstp->rq_arg.pages[pg_no];
                head->arg.page_len += len;

                head->arg.len += len;
                if (!pg_off)
                        head->count++;
                rqstp->rq_respages = &rqstp->rq_arg.pages[pg_no+1];
                rqstp->rq_next_page = rqstp->rq_respages + 1;
                ctxt->sge[pno].addr =
                        ib_dma_map_page(xprt->sc_cm_id->device,
                                        head->arg.pages[pg_no], pg_off,
                                        PAGE_SIZE - pg_off,
                                        DMA_FROM_DEVICE);
                ret = ib_dma_mapping_error(xprt->sc_cm_id->device,
                                           ctxt->sge[pno].addr);
                if (ret)
                        goto err;
                svc_rdma_count_mappings(xprt, ctxt);

                ctxt->sge[pno].lkey = xprt->sc_pd->local_dma_lkey;
                ctxt->sge[pno].length = len;
                ctxt->count++;

                /* adjust offset and wrap to next page if needed */
                pg_off += len;
                if (pg_off == PAGE_SIZE) {
                        pg_off = 0;
                        pg_no++;
                }
                rs_length -= len;
        }

        if (last && rs_length == 0)
                set_bit(RDMACTXT_F_LAST_CTXT, &ctxt->flags);
        else
                clear_bit(RDMACTXT_F_LAST_CTXT, &ctxt->flags);

        memset(&read_wr, 0, sizeof(read_wr));
        ctxt->cqe.done = svc_rdma_wc_read;
        read_wr.wr.wr_cqe = &ctxt->cqe;
        read_wr.wr.opcode = IB_WR_RDMA_READ;
        read_wr.wr.send_flags = IB_SEND_SIGNALED;
        read_wr.rkey = rs_handle;
        read_wr.remote_addr = rs_offset;
        read_wr.wr.sg_list = ctxt->sge;
        read_wr.wr.num_sge = pages_needed;

        ret = svc_rdma_send(xprt, &read_wr.wr);
        if (ret) {
                pr_err("svcrdma: Error %d posting RDMA_READ\n", ret);
                set_bit(XPT_CLOSE, &xprt->sc_xprt.xpt_flags);
                goto err;
        }

        /* return current location in page array */
        *page_no = pg_no;
        *page_offset = pg_off;
        ret = read;
        atomic_inc(&rdma_stat_read);
        return ret;
 err:
        svc_rdma_unmap_dma(ctxt);
        svc_rdma_put_context(ctxt, 0);
        return ret;
}
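
/* Note on the reader contract shared by both chunk readers: a single
 * call maps at most sc_max_sge_rd pages (or the FRMR page-list limit),
 * posts one RDMA Read, and returns how many payload bytes it covered,
 * updating *page_no and *page_offset to the new position. The caller
 * loops, advancing rs_offset by the return value, until the segment's
 * rs_length is exhausted; see the loop in rdma_read_chunks().
 */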

/* Issue an RDMA_READ using an FRMR to map the data sink */
int rdma_read_chunk_frmr(struct svcxprt_rdma *xprt,
                         struct svc_rqst *rqstp,
                         struct svc_rdma_op_ctxt *head,
                         int *page_no,
                         u32 *page_offset,
                         u32 rs_handle,
                         u32 rs_length,
                         u64 rs_offset,
                         bool last)
{
        struct ib_rdma_wr read_wr;
        struct ib_send_wr inv_wr;
        struct ib_reg_wr reg_wr;
        u8 key;
        int nents = PAGE_ALIGN(*page_offset + rs_length) >> PAGE_SHIFT;
        struct svc_rdma_op_ctxt *ctxt = svc_rdma_get_context(xprt);
        struct svc_rdma_fastreg_mr *frmr = svc_rdma_get_frmr(xprt);
        int ret, read, pno, dma_nents, n;
        u32 pg_off = *page_offset;
        u32 pg_no = *page_no;

        if (IS_ERR(frmr))
                return -ENOMEM;

        ctxt->direction = DMA_FROM_DEVICE;
        ctxt->frmr = frmr;
        nents = min_t(unsigned int, nents, xprt->sc_frmr_pg_list_len);
        read = min_t(int, (nents << PAGE_SHIFT) - *page_offset, rs_length);

        frmr->direction = DMA_FROM_DEVICE;
        frmr->access_flags = (IB_ACCESS_LOCAL_WRITE|IB_ACCESS_REMOTE_WRITE);
        frmr->sg_nents = nents;

        for (pno = 0; pno < nents; pno++) {
                int len = min_t(int, rs_length, PAGE_SIZE - pg_off);

                head->arg.pages[pg_no] = rqstp->rq_arg.pages[pg_no];
                head->arg.page_len += len;
                head->arg.len += len;
                if (!pg_off)
                        head->count++;

                sg_set_page(&frmr->sg[pno], rqstp->rq_arg.pages[pg_no],
                            len, pg_off);

                rqstp->rq_respages = &rqstp->rq_arg.pages[pg_no+1];
                rqstp->rq_next_page = rqstp->rq_respages + 1;

                /* adjust offset and wrap to next page if needed */
                pg_off += len;
                if (pg_off == PAGE_SIZE) {
                        pg_off = 0;
                        pg_no++;
                }
                rs_length -= len;
        }

        if (last && rs_length == 0)
                set_bit(RDMACTXT_F_LAST_CTXT, &ctxt->flags);
        else
                clear_bit(RDMACTXT_F_LAST_CTXT, &ctxt->flags);

        dma_nents = ib_dma_map_sg(xprt->sc_cm_id->device,
                                  frmr->sg, frmr->sg_nents,
                                  frmr->direction);
        if (!dma_nents) {
                pr_err("svcrdma: failed to dma map sg %p\n",
                       frmr->sg);
                return -ENOMEM;
        }

        n = ib_map_mr_sg(frmr->mr, frmr->sg, frmr->sg_nents, NULL, PAGE_SIZE);
        if (unlikely(n != frmr->sg_nents)) {
                pr_err("svcrdma: failed to map mr %p (%d/%d elements)\n",
                       frmr->mr, n, frmr->sg_nents);
                return n < 0 ? n : -EINVAL;
        }

        /* Bump the key */
        key = (u8)(frmr->mr->lkey & 0x000000FF);
        ib_update_fast_reg_key(frmr->mr, ++key);

        ctxt->sge[0].addr = frmr->mr->iova;
        ctxt->sge[0].lkey = frmr->mr->lkey;
        ctxt->sge[0].length = frmr->mr->length;
        ctxt->count = 1;
        ctxt->read_hdr = head;

        /* Prepare REG WR */
        ctxt->reg_cqe.done = svc_rdma_wc_reg;
        reg_wr.wr.wr_cqe = &ctxt->reg_cqe;
        reg_wr.wr.opcode = IB_WR_REG_MR;
        reg_wr.wr.send_flags = IB_SEND_SIGNALED;
        reg_wr.wr.num_sge = 0;
        reg_wr.mr = frmr->mr;
        reg_wr.key = frmr->mr->lkey;
        reg_wr.access = frmr->access_flags;
        reg_wr.wr.next = &read_wr.wr;

        /* Prepare RDMA_READ */
        memset(&read_wr, 0, sizeof(read_wr));
        ctxt->cqe.done = svc_rdma_wc_read;
        read_wr.wr.wr_cqe = &ctxt->cqe;
        read_wr.wr.send_flags = IB_SEND_SIGNALED;
        read_wr.rkey = rs_handle;
        read_wr.remote_addr = rs_offset;
        read_wr.wr.sg_list = ctxt->sge;
        read_wr.wr.num_sge = 1;
        if (xprt->sc_dev_caps & SVCRDMA_DEVCAP_READ_W_INV) {
                read_wr.wr.opcode = IB_WR_RDMA_READ_WITH_INV;
                read_wr.wr.ex.invalidate_rkey = ctxt->frmr->mr->lkey;
        } else {
                read_wr.wr.opcode = IB_WR_RDMA_READ;
                read_wr.wr.next = &inv_wr;
                /* Prepare invalidate */
                memset(&inv_wr, 0, sizeof(inv_wr));
                ctxt->inv_cqe.done = svc_rdma_wc_inv;
                inv_wr.wr_cqe = &ctxt->inv_cqe;
                inv_wr.opcode = IB_WR_LOCAL_INV;
                inv_wr.send_flags = IB_SEND_SIGNALED | IB_SEND_FENCE;
                inv_wr.ex.invalidate_rkey = frmr->mr->lkey;
        }

        /* Post the chain */
        ret = svc_rdma_send(xprt, &reg_wr.wr);
        if (ret) {
                pr_err("svcrdma: Error %d posting RDMA_READ\n", ret);
                set_bit(XPT_CLOSE, &xprt->sc_xprt.xpt_flags);
                goto err;
        }

        /* return current location in page array */
        *page_no = pg_no;
        *page_offset = pg_off;
        ret = read;
        atomic_inc(&rdma_stat_read);
        return ret;
 err:
        svc_rdma_put_context(ctxt, 0);
        svc_rdma_put_frmr(xprt, frmr);
        return ret;
}
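
/* Note: the FRMR path above posts a chain of work requests with a
 * single svc_rdma_send() call: REG_MR to register the data sink, then
 * the RDMA Read, and, when the device lacks Read-with-invalidate
 * support, a trailing fenced LOCAL_INV that invalidates the MR once
 * the Read completes.
 */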

/* If there was additional inline content, append it to the end of arg.pages.
 * Tail copy has to be done after the reader function has determined how many
 * pages are needed for RDMA READ.
 */
static int
rdma_copy_tail(struct svc_rqst *rqstp, struct svc_rdma_op_ctxt *head,
               u32 position, u32 byte_count, u32 page_offset, int page_no)
{
        char *srcp, *destp;

        srcp = head->arg.head[0].iov_base + position;
        byte_count = head->arg.head[0].iov_len - position;
        if (byte_count > PAGE_SIZE) {
                dprintk("svcrdma: large tail unsupported\n");
                return 0;
        }

        /* Fit as much of the tail on the current page as possible */
        if (page_offset != PAGE_SIZE) {
                destp = page_address(rqstp->rq_arg.pages[page_no]);
                destp += page_offset;
                while (byte_count--) {
                        *destp++ = *srcp++;
                        page_offset++;
                        if (page_offset == PAGE_SIZE && byte_count)
                                goto more;
                }
                goto done;
        }

more:
        /* Fit the rest on the next page */
        page_no++;
        destp = page_address(rqstp->rq_arg.pages[page_no]);
        while (byte_count--)
                *destp++ = *srcp++;

        rqstp->rq_respages = &rqstp->rq_arg.pages[page_no+1];
        rqstp->rq_next_page = rqstp->rq_respages + 1;

done:
        byte_count = head->arg.head[0].iov_len - position;
        head->arg.page_len += byte_count;
        head->arg.len += byte_count;
        head->arg.buflen += byte_count;
        return 1;
}
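
/* Illustrative example (assuming 4KB pages): if head[0].iov_len is 300
 * and the Read chunk position is 100, the 200 bytes of inline content
 * that followed the chunk position are copied here so they land after
 * the RDMA Read payload, spilling onto the next page when page_offset
 * reaches PAGE_SIZE.
 */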

/* Returns the address of the first read chunk or NULL if no read chunk
 * is present
 */
static struct rpcrdma_read_chunk *
svc_rdma_get_read_chunk(struct rpcrdma_msg *rmsgp)
{
        struct rpcrdma_read_chunk *ch =
                (struct rpcrdma_read_chunk *)&rmsgp->rm_body.rm_chunks[0];

        if (ch->rc_discrim == xdr_zero)
                return NULL;
        return ch;
}

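/* Pull Read chunk payloads from the client using the transport's
 * configured reader (xprt->sc_reader). Returns 0 when the message has
 * no Read list, a positive value once the RDMA Reads have been posted
 * (the request completes later, via the read completion handler), or
 * a negative errno when posting fails.
 */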
static int rdma_read_chunks(struct svcxprt_rdma *xprt,
                            struct rpcrdma_msg *rmsgp,
                            struct svc_rqst *rqstp,
                            struct svc_rdma_op_ctxt *head)
{
        int page_no, ret;
        struct rpcrdma_read_chunk *ch;
        u32 handle, page_offset, byte_count;
        u32 position;
        u64 rs_offset;
        bool last;

        /* If no read list is present, return 0 */
        ch = svc_rdma_get_read_chunk(rmsgp);
        if (!ch)
                return 0;

        /* The request is completed when the RDMA_READs complete. The
         * head context keeps all the pages that comprise the
         * request.
         */
        head->arg.head[0] = rqstp->rq_arg.head[0];
        head->arg.tail[0] = rqstp->rq_arg.tail[0];
        head->hdr_count = head->count;
        head->arg.page_base = 0;
        head->arg.page_len = 0;
        head->arg.len = rqstp->rq_arg.len;
        head->arg.buflen = rqstp->rq_arg.buflen;

        /* RDMA_NOMSG: RDMA READ data should land just after RDMA RECV data */
        position = be32_to_cpu(ch->rc_position);
        if (position == 0) {
                head->arg.pages = &head->pages[0];
                page_offset = head->byte_len;
        } else {
                head->arg.pages = &head->pages[head->count];
                page_offset = 0;
        }

        ret = 0;
        page_no = 0;
        for (; ch->rc_discrim != xdr_zero; ch++) {
                if (be32_to_cpu(ch->rc_position) != position)
                        goto err;

                handle = be32_to_cpu(ch->rc_target.rs_handle);
                byte_count = be32_to_cpu(ch->rc_target.rs_length);
                xdr_decode_hyper((__be32 *)&ch->rc_target.rs_offset,
                                 &rs_offset);

                while (byte_count > 0) {
                        last = (ch + 1)->rc_discrim == xdr_zero;
                        ret = xprt->sc_reader(xprt, rqstp, head,
                                              &page_no, &page_offset,
                                              handle, byte_count,
                                              rs_offset, last);
                        if (ret < 0)
                                goto err;
                        byte_count -= ret;
                        rs_offset += ret;
                        head->arg.buflen += ret;
                }
        }

        /* Read list may need XDR round-up (see RFC 5666, s. 3.7) */
        if (page_offset & 3) {
                u32 pad = 4 - (page_offset & 3);

                head->arg.tail[0].iov_len += pad;
                head->arg.len += pad;
                head->arg.buflen += pad;
                page_offset += pad;
        }

        ret = 1;
        if (position && position < head->arg.head[0].iov_len)
                ret = rdma_copy_tail(rqstp, head, position,
                                     byte_count, page_offset, page_no);
        head->arg.head[0].iov_len = position;
        head->position = position;

 err:
        /* Detach arg pages. svc_recv will replenish them */
        for (page_no = 0;
             &rqstp->rq_pages[page_no] < rqstp->rq_respages; page_no++)
                rqstp->rq_pages[page_no] = NULL;

        return ret;
}

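/* Finish constructing the RPC Call in rqstp::rq_arg once all of the
 * RDMA Reads for a deferred request have completed: swap the pages
 * held by the read context into rq_pages and rebuild the xdr_buf
 * around them.
 */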
static void rdma_read_complete(struct svc_rqst *rqstp,
                               struct svc_rdma_op_ctxt *head)
{
        int page_no;

        /* Copy RPC pages */
        for (page_no = 0; page_no < head->count; page_no++) {
                put_page(rqstp->rq_pages[page_no]);
                rqstp->rq_pages[page_no] = head->pages[page_no];
        }

        /* Adjustments made for RDMA_NOMSG type requests */
        if (head->position == 0) {
                if (head->arg.len <= head->sge[0].length) {
                        head->arg.head[0].iov_len = head->arg.len -
                                                        head->byte_len;
                        head->arg.page_len = 0;
                } else {
                        head->arg.head[0].iov_len = head->sge[0].length -
                                                                head->byte_len;
                        head->arg.page_len = head->arg.len -
                                                head->sge[0].length;
                }
        }

        /* Point rq_arg.pages past header */
        rqstp->rq_arg.pages = &rqstp->rq_pages[head->hdr_count];
        rqstp->rq_arg.page_len = head->arg.page_len;
        rqstp->rq_arg.page_base = head->arg.page_base;

        /* rq_respages starts after the last arg page */
        rqstp->rq_respages = &rqstp->rq_pages[page_no];
        rqstp->rq_next_page = rqstp->rq_respages + 1;

        /* Rebuild rq_arg head and tail. */
        rqstp->rq_arg.head[0] = head->arg.head[0];
        rqstp->rq_arg.tail[0] = head->arg.tail[0];
        rqstp->rq_arg.len = head->arg.len;
        rqstp->rq_arg.buflen = head->arg.buflen;
}

static void svc_rdma_send_error(struct svcxprt_rdma *xprt,
                                __be32 *rdma_argp, int status)
{
        struct svc_rdma_op_ctxt *ctxt;
        __be32 *p, *err_msgp;
        unsigned int length;
        struct page *page;
        int ret;

        ret = svc_rdma_repost_recv(xprt, GFP_KERNEL);
        if (ret)
                return;

        page = alloc_page(GFP_KERNEL);
        if (!page)
                return;
        err_msgp = page_address(page);

        p = err_msgp;
        *p++ = *rdma_argp;
        *p++ = *(rdma_argp + 1);
        *p++ = xprt->sc_fc_credits;
        *p++ = rdma_error;
        if (status == -EPROTONOSUPPORT) {
                *p++ = err_vers;
                *p++ = rpcrdma_version;
                *p++ = rpcrdma_version;
        } else {
                *p++ = err_chunk;
        }
        length = (unsigned long)p - (unsigned long)err_msgp;

        /* Map transport header; no RPC message payload */
        ctxt = svc_rdma_get_context(xprt);
        ret = svc_rdma_map_reply_hdr(xprt, ctxt, err_msgp, length);
        if (ret) {
                dprintk("svcrdma: Error %d mapping send for protocol error\n",
                        ret);
                return;
        }

        ret = svc_rdma_post_send_wr(xprt, ctxt, 1, 0);
        if (ret) {
                dprintk("svcrdma: Error %d posting send for protocol error\n",
                        ret);
                svc_rdma_unmap_dma(ctxt);
                svc_rdma_put_context(ctxt, 1);
        }
}
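
/* For reference: the error reply built above echoes the request's XID
 * and version word, advertises the server's credit limit, and sets the
 * procedure to rdma_error, followed by either err_vers plus the
 * supported version range (here rpcrdma_version twice) or err_chunk.
 */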

/* By convention, backchannel calls arrive via rdma_msg type
 * messages, and never populate the chunk lists. This makes
 * the RPC/RDMA header small and fixed in size, so it is
 * straightforward to check the RPC header's direction field.
 */
static bool svc_rdma_is_backchannel_reply(struct svc_xprt *xprt,
                                          __be32 *rdma_resp)
{
        __be32 *p;

        if (!xprt->xpt_bc_xprt)
                return false;

        p = rdma_resp + 3;
        if (*p++ != rdma_msg)
                return false;

        if (*p++ != xdr_zero)
                return false;
        if (*p++ != xdr_zero)
                return false;
        if (*p++ != xdr_zero)
                return false;

        /* XID sanity */
        if (*p++ != *rdma_resp)
                return false;
        /* call direction */
        if (*p == cpu_to_be32(RPC_CALL))
                return false;

        return true;
}

/*
 * Set up the rqstp thread context to point to the RQ buffer. If
 * necessary, pull additional data from the client with an RDMA_READ
 * request.
 */
int svc_rdma_recvfrom(struct svc_rqst *rqstp)
{
        struct svc_xprt *xprt = rqstp->rq_xprt;
        struct svcxprt_rdma *rdma_xprt =
                container_of(xprt, struct svcxprt_rdma, sc_xprt);
        struct svc_rdma_op_ctxt *ctxt = NULL;
        struct rpcrdma_msg *rmsgp;
        int ret = 0;

        dprintk("svcrdma: rqstp=%p\n", rqstp);

        spin_lock(&rdma_xprt->sc_rq_dto_lock);
        if (!list_empty(&rdma_xprt->sc_read_complete_q)) {
                ctxt = list_first_entry(&rdma_xprt->sc_read_complete_q,
                                        struct svc_rdma_op_ctxt, list);
                list_del(&ctxt->list);
                spin_unlock(&rdma_xprt->sc_rq_dto_lock);
                rdma_read_complete(rqstp, ctxt);
                goto complete;
        } else if (!list_empty(&rdma_xprt->sc_rq_dto_q)) {
                ctxt = list_first_entry(&rdma_xprt->sc_rq_dto_q,
                                        struct svc_rdma_op_ctxt, list);
                list_del(&ctxt->list);
        } else {
                atomic_inc(&rdma_stat_rq_starve);
                clear_bit(XPT_DATA, &xprt->xpt_flags);
                ctxt = NULL;
        }
        spin_unlock(&rdma_xprt->sc_rq_dto_lock);
        if (!ctxt) {
                /* This is the EAGAIN path. The svc_recv routine will
                 * return -EAGAIN, the nfsd thread will call svc_recv
                 * again, and this transport should no longer be on the
                 * active transport list.
                 */
                if (test_bit(XPT_CLOSE, &xprt->xpt_flags))
                        goto defer;
                goto out;
        }
        dprintk("svcrdma: processing ctxt=%p on xprt=%p, rqstp=%p\n",
                ctxt, rdma_xprt, rqstp);
        atomic_inc(&rdma_stat_recv);

        /* Build up the XDR from the receive buffers. */
        rdma_build_arg_xdr(rqstp, ctxt, ctxt->byte_len);

        /* Decode the RDMA header. */
        rmsgp = (struct rpcrdma_msg *)rqstp->rq_arg.head[0].iov_base;
        ret = svc_rdma_xdr_decode_req(&rqstp->rq_arg);
        if (ret < 0)
                goto out_err;
        if (ret == 0)
                goto out_drop;
        rqstp->rq_xprt_hlen = ret;

        if (svc_rdma_is_backchannel_reply(xprt, &rmsgp->rm_xid)) {
                ret = svc_rdma_handle_bc_reply(xprt->xpt_bc_xprt,
                                               &rmsgp->rm_xid,
                                               &rqstp->rq_arg);
                svc_rdma_put_context(ctxt, 0);
                if (ret)
                        goto repost;
                return ret;
        }

        /* Read read-list data. */
        ret = rdma_read_chunks(rdma_xprt, rmsgp, rqstp, ctxt);
        if (ret > 0) {
                /* read-list posted, defer until data received from client. */
                goto defer;
        } else if (ret < 0) {
                /* Post of read-list failed, free context. */
                svc_rdma_put_context(ctxt, 1);
                return 0;
        }

complete:
        ret = rqstp->rq_arg.head[0].iov_len
                + rqstp->rq_arg.page_len
                + rqstp->rq_arg.tail[0].iov_len;
        svc_rdma_put_context(ctxt, 0);
 out:
        dprintk("svcrdma: ret=%d, rq_arg.len=%u, "
                "rq_arg.head[0].iov_base=%p, rq_arg.head[0].iov_len=%zd\n",
                ret, rqstp->rq_arg.len,
                rqstp->rq_arg.head[0].iov_base,
                rqstp->rq_arg.head[0].iov_len);
        rqstp->rq_prot = IPPROTO_MAX;
        svc_xprt_copy_addrs(rqstp, xprt);
        return ret;

out_err:
        svc_rdma_send_error(rdma_xprt, &rmsgp->rm_xid, ret);
        svc_rdma_put_context(ctxt, 0);
        return 0;

defer:
        return 0;

out_drop:
        svc_rdma_put_context(ctxt, 1);
repost:
        return svc_rdma_repost_recv(rdma_xprt, GFP_KERNEL);
}