/*
 * Copyright (c) 2005-2006 Network Appliance, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the BSD-type
 * license below:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *      Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *
 *      Redistributions in binary form must reproduce the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer in the documentation and/or other materials provided
 *      with the distribution.
 *
 *      Neither the name of the Network Appliance, Inc. nor the names of
 *      its contributors may be used to endorse or promote products
 *      derived from this software without specific prior written
 *      permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Author: Tom Tucker <tom@opengridcomputing.com>
 */

#include <linux/sunrpc/debug.h>
#include <linux/sunrpc/rpc_rdma.h>
#include <linux/spinlock.h>
#include <asm/unaligned.h>
#include <rdma/ib_verbs.h>
#include <rdma/rdma_cm.h>
#include <linux/sunrpc/svc_rdma.h>

#define RPCDBG_FACILITY RPCDBG_SVCXPRT

/* Encode an XDR as an array of IB SGE
 *
 * Assumptions:
 * - head[0] is physically contiguous.
 * - tail[0] is physically contiguous.
 * - pages[] is not physically or virtually contiguous and consists of
 *   PAGE_SIZE elements.
 *
 * Output:
 * SGE[0]              reserved for RPCRDMA header
 * SGE[1]              data from xdr->head[]
 * SGE[2..sge_count-2] data from xdr->pages[]
 * SGE[sge_count-1]    data from xdr->tail.
 *
 * The maximum number of SGEs we need is the length of the XDR divided
 * by the page size, plus one for the head, one for the tail, and one
 * for the RPCRDMA header. Since RPCSVC_MAXPAGES reserves a page for
 * both the request and the reply header, and this array is only
 * concerned with the reply, we are assured of one extra page for the
 * RPCRDMA header.
 */
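/* Note: when the underlying device supports fast memory registration
 * (the transport's sc_frmr_pg_list_len is non-zero), map_xdr() below
 * delegates to fast_reg_xdr(), which maps the head, pagelist and tail
 * under a single FRMR; the resulting SGEs then all reference the
 * FRMR's lkey rather than individual per-buffer DMA mappings.
 */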
static int fast_reg_xdr(struct svcxprt_rdma *xprt,
			struct xdr_buf *xdr,
			struct svc_rdma_req_map *vec)
{
	int sge_no;
	u32 sge_bytes;
	u32 page_bytes;
	u32 page_off;
	int page_no = 0;
	u8 *frva;
	struct svc_rdma_fastreg_mr *frmr;

	frmr = svc_rdma_get_frmr(xprt);
	if (IS_ERR(frmr))
		return -ENOMEM;
	vec->frmr = frmr;

	/* Skip the RPCRDMA header */
	sge_no = 1;

	/* Map the head. */
	frva = (void *)((unsigned long)(xdr->head[0].iov_base) & PAGE_MASK);
	vec->sge[sge_no].iov_base = xdr->head[0].iov_base;
	vec->sge[sge_no].iov_len = xdr->head[0].iov_len;
	vec->count = 2;
	sge_no++;

	/* Build the FRMR */
	frmr->kva = frva;
	frmr->direction = DMA_TO_DEVICE;
	frmr->access_flags = 0;
	frmr->map_len = PAGE_SIZE;
	frmr->page_list_len = 1;
	frmr->page_list->page_list[page_no] =
		ib_dma_map_single(xprt->sc_cm_id->device,
				  (void *)xdr->head[0].iov_base,
				  PAGE_SIZE, DMA_TO_DEVICE);
	if (ib_dma_mapping_error(xprt->sc_cm_id->device,
				 frmr->page_list->page_list[page_no]))
		goto fatal_err;
	atomic_inc(&xprt->sc_dma_used);

	page_off = xdr->page_base;
	page_bytes = xdr->page_len + page_off;
	if (!page_bytes)
		goto encode_tail;

	/* Map the pages */
	vec->sge[sge_no].iov_base = frva + frmr->map_len + page_off;
	vec->sge[sge_no].iov_len = page_bytes;
	sge_no++;
	while (page_bytes) {
		struct page *page;

		page = xdr->pages[page_no++];
		sge_bytes = min_t(u32, page_bytes, (PAGE_SIZE - page_off));
		page_bytes -= sge_bytes;

		frmr->page_list->page_list[page_no] =
			ib_dma_map_page(xprt->sc_cm_id->device, page, 0,
					PAGE_SIZE, DMA_TO_DEVICE);
		if (ib_dma_mapping_error(xprt->sc_cm_id->device,
					 frmr->page_list->page_list[page_no]))
			goto fatal_err;

		atomic_inc(&xprt->sc_dma_used);
		page_off = 0; /* reset for next time through loop */
		frmr->map_len += PAGE_SIZE;
		frmr->page_list_len++;
	}
	vec->count++;

 encode_tail:
	/* Map tail */
	if (0 == xdr->tail[0].iov_len)
		goto done;

	vec->count++;
	vec->sge[sge_no].iov_len = xdr->tail[0].iov_len;

	if (((unsigned long)xdr->tail[0].iov_base & PAGE_MASK) ==
	    ((unsigned long)xdr->head[0].iov_base & PAGE_MASK)) {
		/*
		 * If head and tail use the same page, we don't need
		 * to map it again.
		 */
		vec->sge[sge_no].iov_base = xdr->tail[0].iov_base;
	} else {
		void *va;

		/* Map another page for the tail */
		page_off = (unsigned long)xdr->tail[0].iov_base & ~PAGE_MASK;
		va = (void *)((unsigned long)xdr->tail[0].iov_base & PAGE_MASK);
		vec->sge[sge_no].iov_base = frva + frmr->map_len + page_off;

		frmr->page_list->page_list[page_no] =
			ib_dma_map_single(xprt->sc_cm_id->device, va, PAGE_SIZE,
					  DMA_TO_DEVICE);
		if (ib_dma_mapping_error(xprt->sc_cm_id->device,
					 frmr->page_list->page_list[page_no]))
			goto fatal_err;
		atomic_inc(&xprt->sc_dma_used);
		frmr->map_len += PAGE_SIZE;
		frmr->page_list_len++;
	}

 done:
	if (svc_rdma_fastreg(xprt, frmr))
		goto fatal_err;

	return 0;

 fatal_err:
	printk("svcrdma: Error fast registering memory for xprt %p\n", xprt);
	svc_rdma_put_frmr(xprt, frmr);
	return -EIO;
}

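/* Build an iovec in vec->sge[] describing the reply. In the non-FRMR
 * case no DMA mapping is done here; send_write() and send_reply()
 * map each SGE just before posting it.
 */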
static int map_xdr(struct svcxprt_rdma *xprt,
		   struct xdr_buf *xdr,
		   struct svc_rdma_req_map *vec)
{
	int sge_no;
	u32 sge_bytes;
	u32 page_bytes;
	u32 page_off;
	int page_no;

	BUG_ON(xdr->len !=
	       (xdr->head[0].iov_len + xdr->page_len + xdr->tail[0].iov_len));

	if (xprt->sc_frmr_pg_list_len)
		return fast_reg_xdr(xprt, xdr, vec);

	/* Skip the first sge, this is for the RPCRDMA header */
	sge_no = 1;

	/* Head SGE */
	vec->sge[sge_no].iov_base = xdr->head[0].iov_base;
	vec->sge[sge_no].iov_len = xdr->head[0].iov_len;
	sge_no++;

	/* pages SGE */
	page_no = 0;
	page_bytes = xdr->page_len;
	page_off = xdr->page_base;
	while (page_bytes) {
		vec->sge[sge_no].iov_base =
			page_address(xdr->pages[page_no]) + page_off;
		sge_bytes = min_t(u32, page_bytes, (PAGE_SIZE - page_off));
		page_bytes -= sge_bytes;
		vec->sge[sge_no].iov_len = sge_bytes;

		sge_no++;
		page_no++;
		page_off = 0; /* reset for next time through loop */
	}

	/* Tail SGE */
	if (xdr->tail[0].iov_len) {
		vec->sge[sge_no].iov_base = xdr->tail[0].iov_base;
		vec->sge[sge_no].iov_len = xdr->tail[0].iov_len;
		sge_no++;
	}

	dprintk("svcrdma: map_xdr: sge_no %d page_no %d "
		"page_base %u page_len %u head_len %zu tail_len %zu\n",
		sge_no, page_no, xdr->page_base, xdr->page_len,
		xdr->head[0].iov_len, xdr->tail[0].iov_len);

	vec->count = sge_no;
	return 0;
}

/* Assumptions:
 * - We are using FRMR
 *     - or -
 * - The specified write_len can be represented in sc_max_sge * PAGE_SIZE
 */
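/* Post a single RDMA_WRITE work request that pushes write_len bytes of
 * the mapped reply, starting xdr_off bytes into it, to the client
 * address described by <rmr, to>. The SGEs are either DMA-mapped here
 * (no FRMR) or taken from the FRMR mapping built by fast_reg_xdr().
 */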
static int send_write(struct svcxprt_rdma *xprt, struct svc_rqst *rqstp,
		      u32 rmr, u64 to,
		      u32 xdr_off, int write_len,
		      struct svc_rdma_req_map *vec)
{
	struct ib_send_wr write_wr;
	struct ib_sge *sge;
	int xdr_sge_no;
	int sge_no;
	int sge_bytes;
	int sge_off;
	int bc;
	struct svc_rdma_op_ctxt *ctxt;

	BUG_ON(vec->count > RPCSVC_MAXPAGES);
	dprintk("svcrdma: RDMA_WRITE rmr=%x, to=%llx, xdr_off=%d, "
		"write_len=%d, vec->sge=%p, vec->count=%lu\n",
		rmr, (unsigned long long)to, xdr_off,
		write_len, vec->sge, vec->count);

	ctxt = svc_rdma_get_context(xprt);
	ctxt->direction = DMA_TO_DEVICE;
	sge = ctxt->sge;

	/* Find the SGE associated with xdr_off */
	for (bc = xdr_off, xdr_sge_no = 1; bc && xdr_sge_no < vec->count;
	     xdr_sge_no++) {
		if (vec->sge[xdr_sge_no].iov_len > bc)
			break;
		bc -= vec->sge[xdr_sge_no].iov_len;
	}

	sge_off = bc;
	bc = write_len;
	sge_no = 0;

	/* Copy the remaining SGE */
	while (bc != 0) {
		sge_bytes = min_t(size_t,
			  bc, vec->sge[xdr_sge_no].iov_len-sge_off);
		sge[sge_no].length = sge_bytes;
		if (!vec->frmr) {
			sge[sge_no].addr =
				ib_dma_map_single(xprt->sc_cm_id->device,
						  (void *)
						  vec->sge[xdr_sge_no].iov_base + sge_off,
						  sge_bytes, DMA_TO_DEVICE);
			if (ib_dma_mapping_error(xprt->sc_cm_id->device,
						 sge[sge_no].addr))
				goto err;
			atomic_inc(&xprt->sc_dma_used);
			sge[sge_no].lkey = xprt->sc_dma_lkey;
		} else {
			sge[sge_no].addr = (unsigned long)
				vec->sge[xdr_sge_no].iov_base + sge_off;
			sge[sge_no].lkey = vec->frmr->mr->lkey;
		}
		ctxt->count++;
		ctxt->frmr = vec->frmr;
		sge_off = 0;
		sge_no++;
		xdr_sge_no++;
		BUG_ON(xdr_sge_no > vec->count);
		bc -= sge_bytes;
	}

	/* Prepare WRITE WR */
	memset(&write_wr, 0, sizeof write_wr);
	ctxt->wr_op = IB_WR_RDMA_WRITE;
	write_wr.wr_id = (unsigned long)ctxt;
	write_wr.sg_list = &sge[0];
	write_wr.num_sge = sge_no;
	write_wr.opcode = IB_WR_RDMA_WRITE;
	write_wr.send_flags = IB_SEND_SIGNALED;
	write_wr.wr.rdma.rkey = rmr;
	write_wr.wr.rdma.remote_addr = to;

	/* Post It */
	atomic_inc(&rdma_stat_write);
	if (svc_rdma_send(xprt, &write_wr))
		goto err;
	return 0;
 err:
	svc_rdma_put_context(ctxt, 0);
	/* Fatal error, close transport */
	return -EIO;
}

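/* Walk the write chunk list supplied in the client's request, issue
 * one or more RDMA_WRITEs per chunk to move the pagelist and tail
 * portion of the reply into the client's buffers, and encode the
 * corresponding write list in the reply header. Returns the pagelist
 * plus tail length so the caller can reduce the inline byte count
 * accordingly, or 0 if the request carried no write chunk list.
 */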
static int send_write_chunks(struct svcxprt_rdma *xprt,
			     struct rpcrdma_msg *rdma_argp,
			     struct rpcrdma_msg *rdma_resp,
			     struct svc_rqst *rqstp,
			     struct svc_rdma_req_map *vec)
{
	u32 xfer_len = rqstp->rq_res.page_len + rqstp->rq_res.tail[0].iov_len;
	int write_len;
	int max_write;
	u32 xdr_off;
	int chunk_off;
	int chunk_no;
	struct rpcrdma_write_array *arg_ary;
	struct rpcrdma_write_array *res_ary;
	int ret;

	arg_ary = svc_rdma_get_write_array(rdma_argp);
	if (!arg_ary)
		return 0;
	res_ary = (struct rpcrdma_write_array *)
		&rdma_resp->rm_body.rm_chunks[1];

	if (vec->frmr)
		max_write = vec->frmr->map_len;
	else
		max_write = xprt->sc_max_sge * PAGE_SIZE;

	/* Write chunks start at the pagelist */
	for (xdr_off = rqstp->rq_res.head[0].iov_len, chunk_no = 0;
	     xfer_len && chunk_no < arg_ary->wc_nchunks;
	     chunk_no++) {
		struct rpcrdma_segment *arg_ch;
		u64 rs_offset;

		arg_ch = &arg_ary->wc_array[chunk_no].wc_target;
		write_len = min(xfer_len, arg_ch->rs_length);

		/* Prepare the response chunk given the length actually
		 * written */
		rs_offset = get_unaligned(&(arg_ch->rs_offset));
		svc_rdma_xdr_encode_array_chunk(res_ary, chunk_no,
						arg_ch->rs_handle,
						rs_offset,
						write_len);
		chunk_off = 0;
		while (write_len) {
			int this_write;

			this_write = min(write_len, max_write);
			ret = send_write(xprt, rqstp,
					 arg_ch->rs_handle,
					 rs_offset + chunk_off,
					 xdr_off,
					 this_write,
					 vec);
			if (ret) {
				dprintk("svcrdma: RDMA_WRITE failed, ret=%d\n",
					ret);
				return -EIO;
			}
			chunk_off += this_write;
			xdr_off += this_write;
			xfer_len -= this_write;
			write_len -= this_write;
		}
	}
	/* Update the reply with the number of chunks actually used */
	svc_rdma_xdr_encode_write_list(rdma_resp, chunk_no);

	return rqstp->rq_res.page_len + rqstp->rq_res.tail[0].iov_len;
}

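/* Same idea as send_write_chunks(), but for a reply chunk: when the
 * client provided a reply array, the entire RPC reply (starting at
 * xdr offset 0) is RDMA-written into the client-supplied segments and
 * the reply array in the response header is updated. Returns the full
 * reply length so that only the RPCRDMA header goes out inline.
 */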
static int send_reply_chunks(struct svcxprt_rdma *xprt,
			     struct rpcrdma_msg *rdma_argp,
			     struct rpcrdma_msg *rdma_resp,
			     struct svc_rqst *rqstp,
			     struct svc_rdma_req_map *vec)
{
	u32 xfer_len = rqstp->rq_res.len;
	int write_len;
	int max_write;
	u32 xdr_off;
	int chunk_no;
	int chunk_off;
	struct rpcrdma_segment *ch;
	struct rpcrdma_write_array *arg_ary;
	struct rpcrdma_write_array *res_ary;
	int ret;

	arg_ary = svc_rdma_get_reply_array(rdma_argp);
	if (!arg_ary)
		return 0;
	/* XXX: need to fix when reply lists occur with read-list and/or
	 * write-list */
	res_ary = (struct rpcrdma_write_array *)
		&rdma_resp->rm_body.rm_chunks[2];

	if (vec->frmr)
		max_write = vec->frmr->map_len;
	else
		max_write = xprt->sc_max_sge * PAGE_SIZE;

	/* xdr offset starts at RPC message */
	for (xdr_off = 0, chunk_no = 0;
	     xfer_len && chunk_no < arg_ary->wc_nchunks;
	     chunk_no++) {
		u64 rs_offset;

		ch = &arg_ary->wc_array[chunk_no].wc_target;
		write_len = min(xfer_len, ch->rs_length);

		/* Prepare the reply chunk given the length actually
		 * written */
		rs_offset = get_unaligned(&(ch->rs_offset));
		svc_rdma_xdr_encode_array_chunk(res_ary, chunk_no,
						ch->rs_handle, rs_offset,
						write_len);
		chunk_off = 0;
		while (write_len) {
			int this_write;

			this_write = min(write_len, max_write);
			ret = send_write(xprt, rqstp,
					 ch->rs_handle,
					 rs_offset + chunk_off,
					 xdr_off,
					 this_write,
					 vec);
			if (ret) {
				dprintk("svcrdma: RDMA_WRITE failed, ret=%d\n",
					ret);
				return -EIO;
			}
			chunk_off += this_write;
			xdr_off += this_write;
			xfer_len -= this_write;
			write_len -= this_write;
		}
	}
	/* Update the reply with the number of chunks actually used */
	svc_rdma_xdr_encode_reply_array(res_ary, chunk_no);

	return rqstp->rq_res.len;
}

/* This function prepares the portion of the RPCRDMA message to be
 * sent in the RDMA_SEND. This function is called after data sent via
 * RDMA has already been transmitted. There are three cases:
 * - The RPCRDMA header, RPC header, and payload are all sent in a
 *   single RDMA_SEND. This is the "inline" case.
 * - The RPCRDMA header and some portion of the RPC header and data
 *   are sent via this RDMA_SEND and another portion of the data is
 *   sent via RDMA.
 * - The RPCRDMA header [NOMSG] is sent in this RDMA_SEND and the RPC
 *   header and data are all transmitted via RDMA.
 * In all three cases, this function prepares the RPCRDMA header in
 * sge[0]; the 'type' parameter indicates the type to place in the
 * RPCRDMA header, and the 'byte_count' parameter indicates how much
 * of the XDR to include in this RDMA_SEND.
 */
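/* Note: when the reply was mapped with an FRMR, a LOCAL_INV work
 * request is chained after the SEND below so the fast registration is
 * invalidated once the reply has been transmitted; RDMACTXT_F_FAST_UNREG
 * is set on the context so that the send completion path (handled
 * elsewhere in the transport code) can release the FRMR.
 */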
static int send_reply(struct svcxprt_rdma *rdma,
		      struct svc_rqst *rqstp,
		      struct page *page,
		      struct rpcrdma_msg *rdma_resp,
		      struct svc_rdma_op_ctxt *ctxt,
		      struct svc_rdma_req_map *vec,
		      int byte_count)
{
	struct ib_send_wr send_wr;
	struct ib_send_wr inv_wr;
	int sge_no;
	int sge_bytes;
	int page_no;
	int ret;

	/* Post a recv buffer to handle another request. */
	ret = svc_rdma_post_recv(rdma);
	if (ret) {
		printk(KERN_INFO
		       "svcrdma: could not post a receive buffer, err=%d. "
		       "Closing transport %p.\n", ret, rdma);
		set_bit(XPT_CLOSE, &rdma->sc_xprt.xpt_flags);
		svc_rdma_put_context(ctxt, 0);
		return -ENOTCONN;
	}

	/* Prepare the context */
	ctxt->pages[0] = page;
	ctxt->count = 1;
	ctxt->frmr = vec->frmr;
	if (vec->frmr)
		set_bit(RDMACTXT_F_FAST_UNREG, &ctxt->flags);
	else
		clear_bit(RDMACTXT_F_FAST_UNREG, &ctxt->flags);

	/* Prepare the SGE for the RPCRDMA Header */
	ctxt->sge[0].addr =
		ib_dma_map_page(rdma->sc_cm_id->device,
				page, 0, PAGE_SIZE, DMA_TO_DEVICE);
	if (ib_dma_mapping_error(rdma->sc_cm_id->device, ctxt->sge[0].addr))
		goto err;
	atomic_inc(&rdma->sc_dma_used);

	ctxt->direction = DMA_TO_DEVICE;

	ctxt->sge[0].length = svc_rdma_xdr_get_reply_hdr_len(rdma_resp);
	ctxt->sge[0].lkey = rdma->sc_dma_lkey;

	/* Determine how many of our SGE are to be transmitted */
	for (sge_no = 1; byte_count && sge_no < vec->count; sge_no++) {
		sge_bytes = min_t(size_t, vec->sge[sge_no].iov_len, byte_count);
		byte_count -= sge_bytes;
		if (!vec->frmr) {
			ctxt->sge[sge_no].addr =
				ib_dma_map_single(rdma->sc_cm_id->device,
						  vec->sge[sge_no].iov_base,
						  sge_bytes, DMA_TO_DEVICE);
			if (ib_dma_mapping_error(rdma->sc_cm_id->device,
						 ctxt->sge[sge_no].addr))
				goto err;
			atomic_inc(&rdma->sc_dma_used);
			ctxt->sge[sge_no].lkey = rdma->sc_dma_lkey;
		} else {
			ctxt->sge[sge_no].addr = (unsigned long)
				vec->sge[sge_no].iov_base;
			ctxt->sge[sge_no].lkey = vec->frmr->mr->lkey;
		}
		ctxt->sge[sge_no].length = sge_bytes;
	}
	BUG_ON(byte_count != 0);

	/* Save all respages in the ctxt and remove them from the
	 * respages array. They are our pages until the I/O
	 * completes.
	 */
	for (page_no = 0; page_no < rqstp->rq_resused; page_no++) {
		ctxt->pages[page_no+1] = rqstp->rq_respages[page_no];
		ctxt->count++;
		rqstp->rq_respages[page_no] = NULL;
		/*
		 * If there are more pages than SGE, terminate SGE
		 * list so that svc_rdma_unmap_dma doesn't attempt to
		 * unmap garbage.
		 */
		if (page_no+1 >= sge_no)
			ctxt->sge[page_no+1].length = 0;
	}
	BUG_ON(sge_no > rdma->sc_max_sge);
	memset(&send_wr, 0, sizeof send_wr);
	ctxt->wr_op = IB_WR_SEND;
	send_wr.wr_id = (unsigned long)ctxt;
	send_wr.sg_list = ctxt->sge;
	send_wr.num_sge = sge_no;
	send_wr.opcode = IB_WR_SEND;
	send_wr.send_flags = IB_SEND_SIGNALED;
	if (vec->frmr) {
		/* Prepare INVALIDATE WR */
		memset(&inv_wr, 0, sizeof inv_wr);
		inv_wr.opcode = IB_WR_LOCAL_INV;
		inv_wr.send_flags = IB_SEND_SIGNALED;
		inv_wr.ex.invalidate_rkey =
			vec->frmr->mr->lkey;
		send_wr.next = &inv_wr;
	}

	ret = svc_rdma_send(rdma, &send_wr);
	if (ret)
		goto err;

	return 0;

 err:
	svc_rdma_put_frmr(rdma, vec->frmr);
	svc_rdma_put_context(ctxt, 1);
	return -EIO;
}

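/* The generic svc code calls this to let a transport reserve space for
 * its reply header; for RDMA the RPCRDMA reply header is built later,
 * in svc_rdma_sendto() itself, so there is nothing to do here.
 */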
void svc_rdma_prep_reply_hdr(struct svc_rqst *rqstp)
{
}

/*
 * Return the start of an xdr buffer.
 */
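/* (It backs up from head[0].iov_base by however many bytes of xdr->len
 * are not accounted for by head, pages and tail. svc_rdma_sendto()
 * uses it on rq_arg to locate the RPCRDMA request header that precedes
 * the RPC call body in the receive buffer.)
 */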
static void *xdr_start(struct xdr_buf *xdr)
{
	return xdr->head[0].iov_base -
		(xdr->len -
		 xdr->page_len -
		 xdr->tail[0].iov_len -
		 xdr->head[0].iov_len);
}

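/* Send the reply for rqstp: map the reply xdr_buf into an SGE vector,
 * build the RPCRDMA reply header, push any write-chunk and reply-chunk
 * data to the client with RDMA_WRITEs, then RDMA_SEND the header plus
 * whatever remains inline.
 */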
int svc_rdma_sendto(struct svc_rqst *rqstp)
{
	struct svc_xprt *xprt = rqstp->rq_xprt;
	struct svcxprt_rdma *rdma =
		container_of(xprt, struct svcxprt_rdma, sc_xprt);
	struct rpcrdma_msg *rdma_argp;
	struct rpcrdma_msg *rdma_resp;
	struct rpcrdma_write_array *reply_ary;
	enum rpcrdma_proc reply_type;
	int ret;
	int inline_bytes;
	struct page *res_page;
	struct svc_rdma_op_ctxt *ctxt;
	struct svc_rdma_req_map *vec;

	dprintk("svcrdma: sending response for rqstp=%p\n", rqstp);

	/* Get the RDMA request header. */
	rdma_argp = xdr_start(&rqstp->rq_arg);

	/* Build a req vec for the XDR */
	ctxt = svc_rdma_get_context(rdma);
	ctxt->direction = DMA_TO_DEVICE;
	vec = svc_rdma_get_req_map();
	ret = map_xdr(rdma, &rqstp->rq_res, vec);
	if (ret)
		goto err0;
	inline_bytes = rqstp->rq_res.len;

	/* Create the RDMA response header */
	res_page = svc_rdma_get_page();
	rdma_resp = page_address(res_page);
	reply_ary = svc_rdma_get_reply_array(rdma_argp);
	if (reply_ary)
		reply_type = RDMA_NOMSG;
	else
		reply_type = RDMA_MSG;
	svc_rdma_xdr_encode_reply_header(rdma, rdma_argp,
					 rdma_resp, reply_type);

	/* Send any write-chunk data and build resp write-list */
	ret = send_write_chunks(rdma, rdma_argp, rdma_resp,
				rqstp, vec);
	if (ret < 0) {
		printk(KERN_ERR "svcrdma: failed to send write chunks, rc=%d\n",
		       ret);
		goto err1;
	}
	inline_bytes -= ret;

	/* Send any reply-list data and update resp reply-list */
	ret = send_reply_chunks(rdma, rdma_argp, rdma_resp,
				rqstp, vec);
	if (ret < 0) {
		printk(KERN_ERR "svcrdma: failed to send reply chunks, rc=%d\n",
		       ret);
		goto err1;
	}
	inline_bytes -= ret;

	ret = send_reply(rdma, rqstp, res_page, rdma_resp, ctxt, vec,
			 inline_bytes);
	svc_rdma_put_req_map(vec);
	dprintk("svcrdma: send_reply returns %d\n", ret);
	return ret;

 err1:
	put_page(res_page);
 err0:
	svc_rdma_put_req_map(vec);
	svc_rdma_put_context(ctxt, 0);
	return ret;
}