/*
 * Copyright (c) 2003-2007 Network Appliance, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the BSD-type
 * license below:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *      Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *
 *      Redistributions in binary form must reproduce the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer in the documentation and/or other materials provided
 *      with the distribution.
 *
 *      Neither the name of the Network Appliance, Inc. nor the names of
 *      its contributors may be used to endorse or promote products
 *      derived from this software without specific prior written
 *      permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
/*
 * verbs.c
 *
 * Encapsulates the major functions managing:
 *  o adapters
 *  o endpoints
 *  o connections
 *  o buffer memory
 */
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <asm/bitops.h>

#include "xprt_rdma.h"

/*
 * Globals/Macros
 */

#ifdef RPC_DEBUG
# define RPCDBG_FACILITY	RPCDBG_TRANS
#endif
/*
 * internal functions
 */

/*
 * handle replies in tasklet context, using a single, global list
 * rdma tasklet function -- just turn around and call the func
 * for all replies on the list
 */

static DEFINE_SPINLOCK(rpcrdma_tk_lock_g);
static LIST_HEAD(rpcrdma_tasklets_g);

static void
rpcrdma_run_tasklet(unsigned long data)
{
	struct rpcrdma_rep *rep;
	void (*func)(struct rpcrdma_rep *);
	unsigned long flags;

	spin_lock_irqsave(&rpcrdma_tk_lock_g, flags);
	while (!list_empty(&rpcrdma_tasklets_g)) {
		rep = list_entry(rpcrdma_tasklets_g.next,
				 struct rpcrdma_rep, rr_list);
		list_del(&rep->rr_list);
		func = rep->rr_func;
		rep->rr_func = NULL;
		spin_unlock_irqrestore(&rpcrdma_tk_lock_g, flags);

		if (func)
			func(rep);
		else
			rpcrdma_recv_buffer_put(rep);

		spin_lock_irqsave(&rpcrdma_tk_lock_g, flags);
	}
	spin_unlock_irqrestore(&rpcrdma_tk_lock_g, flags);
}

static DECLARE_TASKLET(rpcrdma_tasklet_g, rpcrdma_run_tasklet, 0UL);

static void
rpcrdma_schedule_tasklet(struct rpcrdma_rep *rep)
{
	unsigned long flags;

	spin_lock_irqsave(&rpcrdma_tk_lock_g, flags);
	list_add_tail(&rep->rr_list, &rpcrdma_tasklets_g);
	spin_unlock_irqrestore(&rpcrdma_tk_lock_g, flags);
	tasklet_schedule(&rpcrdma_tasklet_g);
}
static void
rpcrdma_qp_async_error_upcall(struct ib_event *event, void *context)
{
	struct rpcrdma_ep *ep = context;

	dprintk("RPC:       %s: QP error %X on device %s ep %p\n",
		__func__, event->event, event->device->name, context);
	if (ep->rep_connected == 1) {
		ep->rep_connected = -EIO;
		ep->rep_func(ep);
		wake_up_all(&ep->rep_connect_wait);
	}
}
static void
rpcrdma_cq_async_error_upcall(struct ib_event *event, void *context)
{
	struct rpcrdma_ep *ep = context;

	dprintk("RPC:       %s: CQ error %X on device %s ep %p\n",
		__func__, event->event, event->device->name, context);
	if (ep->rep_connected == 1) {
		ep->rep_connected = -EIO;
		ep->rep_func(ep);
		wake_up_all(&ep->rep_connect_wait);
	}
}
static void
rpcrdma_sendcq_process_wc(struct ib_wc *wc)
{
	struct rpcrdma_mw *frmr = (struct rpcrdma_mw *)(unsigned long)wc->wr_id;

	dprintk("RPC:       %s: frmr %p status %X opcode %d\n",
		__func__, frmr, wc->status, wc->opcode);

	if (wc->wr_id == 0ULL)
		return;
	if (wc->status != IB_WC_SUCCESS)
		return;

	if (wc->opcode == IB_WC_FAST_REG_MR)
		frmr->r.frmr.fr_state = FRMR_IS_VALID;
	else if (wc->opcode == IB_WC_LOCAL_INV)
		frmr->r.frmr.fr_state = FRMR_IS_INVALID;
}
static int
rpcrdma_sendcq_poll(struct ib_cq *cq, struct rpcrdma_ep *ep)
{
	struct ib_wc *wcs;
	int budget, count, rc;

	budget = RPCRDMA_WC_BUDGET / RPCRDMA_POLLSIZE;
	do {
		wcs = ep->rep_send_wcs;

		rc = ib_poll_cq(cq, RPCRDMA_POLLSIZE, wcs);
		if (rc <= 0)
			return rc;

		count = rc;
		while (count-- > 0)
			rpcrdma_sendcq_process_wc(wcs++);
	} while (rc == RPCRDMA_POLLSIZE && --budget);
	return 0;
}
/*
 * Handle send, fast_reg_mr, and local_inv completions.
 *
 * Send events are typically suppressed and thus do not result
 * in an upcall. Occasionally one is signaled, however. This
 * prevents the provider's completion queue from wrapping and
 * losing a completion.
 */
static void
rpcrdma_sendcq_upcall(struct ib_cq *cq, void *cq_context)
{
	struct rpcrdma_ep *ep = (struct rpcrdma_ep *)cq_context;
	int rc;

	rc = rpcrdma_sendcq_poll(cq, ep);
	if (rc) {
		dprintk("RPC:       %s: ib_poll_cq failed: %i\n",
			__func__, rc);
		return;
	}

	rc = ib_req_notify_cq(cq,
			IB_CQ_NEXT_COMP | IB_CQ_REPORT_MISSED_EVENTS);
	if (rc == 0)
		return;
	if (rc < 0) {
		dprintk("RPC:       %s: ib_req_notify_cq failed: %i\n",
			__func__, rc);
		return;
	}

	rpcrdma_sendcq_poll(cq, ep);
}
static void
rpcrdma_recvcq_process_wc(struct ib_wc *wc)
{
	struct rpcrdma_rep *rep =
			(struct rpcrdma_rep *)(unsigned long)wc->wr_id;

	dprintk("RPC:       %s: rep %p status %X opcode %X length %u\n",
		__func__, rep, wc->status, wc->opcode, wc->byte_len);

	if (wc->status != IB_WC_SUCCESS) {
		rep->rr_len = ~0U;
		goto out_schedule;
	}
	if (wc->opcode != IB_WC_RECV)
		return;

	rep->rr_len = wc->byte_len;
	ib_dma_sync_single_for_cpu(rdmab_to_ia(rep->rr_buffer)->ri_id->device,
		rep->rr_iov.addr, rep->rr_len, DMA_FROM_DEVICE);

	if (rep->rr_len >= 16) {
		struct rpcrdma_msg *p = (struct rpcrdma_msg *)rep->rr_base;
		unsigned int credits = ntohl(p->rm_credit);

		if (credits == 0)
			credits = 1;	/* don't deadlock */
		else if (credits > rep->rr_buffer->rb_max_requests)
			credits = rep->rr_buffer->rb_max_requests;
		atomic_set(&rep->rr_buffer->rb_credits, credits);
	}

out_schedule:
	rpcrdma_schedule_tasklet(rep);
}
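/*
 * Illustrative note on the credit handling above: the credit value the
 * server advertises in rm_credit is clamped to [1, rb_max_requests].
 * For example, with rb_max_requests = 32, an advertised credit of 0
 * becomes 1 (so the client can still send one request and avoid
 * deadlock) and an advertised 128 becomes 32.
 */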
static int
rpcrdma_recvcq_poll(struct ib_cq *cq, struct rpcrdma_ep *ep)
{
	struct ib_wc *wcs;
	int budget, count, rc;

	budget = RPCRDMA_WC_BUDGET / RPCRDMA_POLLSIZE;
	do {
		wcs = ep->rep_recv_wcs;

		rc = ib_poll_cq(cq, RPCRDMA_POLLSIZE, wcs);
		if (rc <= 0)
			return rc;

		count = rc;
		while (count-- > 0)
			rpcrdma_recvcq_process_wc(wcs++);
	} while (rc == RPCRDMA_POLLSIZE && --budget);
	return 0;
}
/*
 * Handle receive completions.
 *
 * It is reentrant but processes single events in order to maintain
 * ordering of receives to keep server credits.
 *
 * It is the responsibility of the scheduled tasklet to return
 * recv buffers to the pool. NOTE: this affects synchronization of
 * connection shutdown. That is, the structures required for
 * the completion of the reply handler must remain intact until
 * all memory has been reclaimed.
 */
static void
rpcrdma_recvcq_upcall(struct ib_cq *cq, void *cq_context)
{
	struct rpcrdma_ep *ep = (struct rpcrdma_ep *)cq_context;
	int rc;

	rc = rpcrdma_recvcq_poll(cq, ep);
	if (rc) {
		dprintk("RPC:       %s: ib_poll_cq failed: %i\n",
			__func__, rc);
		return;
	}

	rc = ib_req_notify_cq(cq,
			IB_CQ_NEXT_COMP | IB_CQ_REPORT_MISSED_EVENTS);
	if (rc == 0)
		return;
	if (rc < 0) {
		dprintk("RPC:       %s: ib_req_notify_cq failed: %i\n",
			__func__, rc);
		return;
	}

	rpcrdma_recvcq_poll(cq, ep);
}
static void
rpcrdma_flush_cqs(struct rpcrdma_ep *ep)
{
	rpcrdma_recvcq_upcall(ep->rep_attr.recv_cq, ep);
	rpcrdma_sendcq_upcall(ep->rep_attr.send_cq, ep);
}
#ifdef RPC_DEBUG
static const char * const conn[] = {
	"address resolved",
	"address error",
	"route resolved",
	"route error",
	"connect request",
	"connect response",
	"connect error",
	"unreachable",
	"rejected",
	"established",
	"disconnected",
	"device removal"
};
#endif
static int
rpcrdma_conn_upcall(struct rdma_cm_id *id, struct rdma_cm_event *event)
{
	struct rpcrdma_xprt *xprt = id->context;
	struct rpcrdma_ia *ia = &xprt->rx_ia;
	struct rpcrdma_ep *ep = &xprt->rx_ep;
#ifdef RPC_DEBUG
	struct sockaddr_in *addr = (struct sockaddr_in *) &ep->rep_remote_addr;
#endif
	struct ib_qp_attr attr;
	struct ib_qp_init_attr iattr;
	int connstate = 0;

	switch (event->event) {
	case RDMA_CM_EVENT_ADDR_RESOLVED:
	case RDMA_CM_EVENT_ROUTE_RESOLVED:
		ia->ri_async_rc = 0;
		complete(&ia->ri_done);
		break;
	case RDMA_CM_EVENT_ADDR_ERROR:
		ia->ri_async_rc = -EHOSTUNREACH;
		dprintk("RPC:       %s: CM address resolution error, ep 0x%p\n",
			__func__, ep);
		complete(&ia->ri_done);
		break;
	case RDMA_CM_EVENT_ROUTE_ERROR:
		ia->ri_async_rc = -ENETUNREACH;
		dprintk("RPC:       %s: CM route resolution error, ep 0x%p\n",
			__func__, ep);
		complete(&ia->ri_done);
		break;
	case RDMA_CM_EVENT_ESTABLISHED:
		connstate = 1;
		ib_query_qp(ia->ri_id->qp, &attr,
			IB_QP_MAX_QP_RD_ATOMIC | IB_QP_MAX_DEST_RD_ATOMIC,
			&iattr);
		dprintk("RPC:       %s: %d responder resources"
			" (%d initiator)\n",
			__func__, attr.max_dest_rd_atomic, attr.max_rd_atomic);
		goto connected;
	case RDMA_CM_EVENT_CONNECT_ERROR:
		connstate = -ENOTCONN;
		goto connected;
	case RDMA_CM_EVENT_UNREACHABLE:
		connstate = -ENETDOWN;
		goto connected;
	case RDMA_CM_EVENT_REJECTED:
		connstate = -ECONNREFUSED;
		goto connected;
	case RDMA_CM_EVENT_DISCONNECTED:
		connstate = -ECONNABORTED;
		goto connected;
	case RDMA_CM_EVENT_DEVICE_REMOVAL:
		connstate = -ENODEV;
connected:
		dprintk("RPC:       %s: %s: %pI4:%u (ep 0x%p event 0x%x)\n",
			__func__,
			(event->event <= 11) ? conn[event->event] :
						"unknown connection error",
			&addr->sin_addr.s_addr,
			ntohs(addr->sin_port),
			ep, event->event);
		atomic_set(&rpcx_to_rdmax(ep->rep_xprt)->rx_buf.rb_credits, 1);
		dprintk("RPC:       %s: %sconnected\n",
			__func__, connstate > 0 ? "" : "dis");
		ep->rep_connected = connstate;
		ep->rep_func(ep);
		wake_up_all(&ep->rep_connect_wait);
		/*FALLTHROUGH*/
	default:
		dprintk("RPC:       %s: unexpected CM event %d\n",
			__func__, event->event);
		break;
	}
#ifdef RPC_DEBUG
	if (connstate == 1) {
		int ird = attr.max_dest_rd_atomic;
		int tird = ep->rep_remote_cma.responder_resources;
		printk(KERN_INFO "rpcrdma: connection to %pI4:%u "
			"on %s, memreg %d slots %d ird %d%s\n",
			&addr->sin_addr.s_addr,
			ntohs(addr->sin_port),
			ia->ri_id->device->name,
			ia->ri_memreg_strategy,
			xprt->rx_buf.rb_max_requests,
			ird, ird < 4 && ird < tird / 2 ? " (low!)" : "");
	} else if (connstate < 0) {
		printk(KERN_INFO "rpcrdma: connection to %pI4:%u closed (%d)\n",
			&addr->sin_addr.s_addr,
			ntohs(addr->sin_port),
			connstate);
	}
#endif

	return 0;
}
static struct rdma_cm_id *
rpcrdma_create_id(struct rpcrdma_xprt *xprt,
			struct rpcrdma_ia *ia, struct sockaddr *addr)
{
	struct rdma_cm_id *id;
	int rc;

	init_completion(&ia->ri_done);

	id = rdma_create_id(rpcrdma_conn_upcall, xprt, RDMA_PS_TCP, IB_QPT_RC);
	if (IS_ERR(id)) {
		rc = PTR_ERR(id);
		dprintk("RPC:       %s: rdma_create_id() failed %i\n",
			__func__, rc);
		return id;
	}

	ia->ri_async_rc = -ETIMEDOUT;
	rc = rdma_resolve_addr(id, NULL, addr, RDMA_RESOLVE_TIMEOUT);
	if (rc) {
		dprintk("RPC:       %s: rdma_resolve_addr() failed %i\n",
			__func__, rc);
		goto out;
	}
	wait_for_completion_interruptible_timeout(&ia->ri_done,
				msecs_to_jiffies(RDMA_RESOLVE_TIMEOUT) + 1);
	rc = ia->ri_async_rc;
	if (rc)
		goto out;

	ia->ri_async_rc = -ETIMEDOUT;
	rc = rdma_resolve_route(id, RDMA_RESOLVE_TIMEOUT);
	if (rc) {
		dprintk("RPC:       %s: rdma_resolve_route() failed %i\n",
			__func__, rc);
		goto out;
	}
	wait_for_completion_interruptible_timeout(&ia->ri_done,
				msecs_to_jiffies(RDMA_RESOLVE_TIMEOUT) + 1);
	rc = ia->ri_async_rc;
	if (rc)
		goto out;

	return id;

out:
	rdma_destroy_id(id);
	return ERR_PTR(rc);
}
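/*
 * Note on the flow above: rdma_resolve_addr() and rdma_resolve_route()
 * complete asynchronously.  Each call arms ri_async_rc with -ETIMEDOUT,
 * and the CM event handler (rpcrdma_conn_upcall) overwrites it with the
 * real result before completing ri_done.  If neither an event nor the
 * wait timeout arrives, the pre-armed -ETIMEDOUT is what the caller sees.
 */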
/*
 * Drain any cq, prior to teardown.
 */
static void
rpcrdma_clean_cq(struct ib_cq *cq)
{
	struct ib_wc wc;
	int count = 0;

	while (1 == ib_poll_cq(cq, 1, &wc))
		++count;

	if (count)
		dprintk("RPC:       %s: flushed %d events (last 0x%x)\n",
			__func__, count, wc.opcode);
}
/*
 * Exported functions.
 */

/*
 * Open and initialize an Interface Adapter.
 *  o initializes fields of struct rpcrdma_ia, including
 *    interface and provider attributes and protection zone.
 */
int
rpcrdma_ia_open(struct rpcrdma_xprt *xprt, struct sockaddr *addr, int memreg)
{
	int rc, mem_priv;
	struct ib_device_attr devattr;
	struct rpcrdma_ia *ia = &xprt->rx_ia;

	ia->ri_id = rpcrdma_create_id(xprt, ia, addr);
	if (IS_ERR(ia->ri_id)) {
		rc = PTR_ERR(ia->ri_id);
		goto out1;
	}

	ia->ri_pd = ib_alloc_pd(ia->ri_id->device);
	if (IS_ERR(ia->ri_pd)) {
		rc = PTR_ERR(ia->ri_pd);
		dprintk("RPC:       %s: ib_alloc_pd() failed %i\n",
			__func__, rc);
		goto out2;
	}

	/*
	 * Query the device to determine if the requested memory
	 * registration strategy is supported. If it isn't, set the
	 * strategy to a globally supported model.
	 */
	rc = ib_query_device(ia->ri_id->device, &devattr);
	if (rc) {
		dprintk("RPC:       %s: ib_query_device failed %d\n",
			__func__, rc);
		goto out2;
	}

	if (devattr.device_cap_flags & IB_DEVICE_LOCAL_DMA_LKEY) {
		ia->ri_have_dma_lkey = 1;
		ia->ri_dma_lkey = ia->ri_id->device->local_dma_lkey;
	}
	if (memreg == RPCRDMA_FRMR) {
		/* Requires both frmr reg and local dma lkey */
		if ((devattr.device_cap_flags &
		     (IB_DEVICE_MEM_MGT_EXTENSIONS|IB_DEVICE_LOCAL_DMA_LKEY)) !=
		    (IB_DEVICE_MEM_MGT_EXTENSIONS|IB_DEVICE_LOCAL_DMA_LKEY)) {
			dprintk("RPC:       %s: FRMR registration "
				"not supported by HCA\n", __func__);
			memreg = RPCRDMA_MTHCAFMR;
		} else {
			/* Mind the ia limit on FRMR page list depth */
			ia->ri_max_frmr_depth = min_t(unsigned int,
				RPCRDMA_MAX_DATA_SEGS,
				devattr.max_fast_reg_page_list_len);
		}
	}
	if (memreg == RPCRDMA_MTHCAFMR) {
		if (!ia->ri_id->device->alloc_fmr) {
			dprintk("RPC:       %s: MTHCAFMR registration "
				"not supported by HCA\n", __func__);
#if RPCRDMA_PERSISTENT_REGISTRATION
			memreg = RPCRDMA_ALLPHYSICAL;
#else
			rc = -ENOMEM;
			goto out2;
#endif
		}
	}
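	/*
	 * Summary of the fallback logic above: the requested mode is
	 * downgraded in order -- FRMR, then MTHCAFMR, then (only when
	 * RPCRDMA_PERSISTENT_REGISTRATION is enabled) ALLPHYSICAL --
	 * based on device capabilities, so "memreg" now names a mode
	 * this HCA can actually support, or the open fails.
	 */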
	/*
	 * Optionally obtain an underlying physical identity mapping in
	 * order to do a memory window-based bind. This base registration
	 * is protected from remote access - that is enabled only by binding
	 * for the specific bytes targeted during each RPC operation, and
	 * revoked after the corresponding completion similar to a storage
	 * adapter.
	 */
	switch (memreg) {
	case RPCRDMA_FRMR:
		break;
#if RPCRDMA_PERSISTENT_REGISTRATION
	case RPCRDMA_ALLPHYSICAL:
		mem_priv = IB_ACCESS_LOCAL_WRITE |
				IB_ACCESS_REMOTE_WRITE |
				IB_ACCESS_REMOTE_READ;
		goto register_setup;
#endif
	case RPCRDMA_MTHCAFMR:
		if (ia->ri_have_dma_lkey)
			break;
		mem_priv = IB_ACCESS_LOCAL_WRITE;
#if RPCRDMA_PERSISTENT_REGISTRATION
	register_setup:
#endif
		ia->ri_bind_mem = ib_get_dma_mr(ia->ri_pd, mem_priv);
		if (IS_ERR(ia->ri_bind_mem)) {
			printk(KERN_ALERT "%s: ib_get_dma_mr for "
				"phys register failed with %lX\n",
				__func__, PTR_ERR(ia->ri_bind_mem));
			rc = -ENOMEM;
			goto out2;
		}
		break;
	default:
		printk(KERN_ERR "RPC: Unsupported memory "
				"registration mode: %d\n", memreg);
		rc = -ENOMEM;
		goto out2;
	}
	dprintk("RPC:       %s: memory registration strategy is %d\n",
		__func__, memreg);

	/* Else will do memory reg/dereg for each chunk */
	ia->ri_memreg_strategy = memreg;

	rwlock_init(&ia->ri_qplock);
	return 0;
out2:
	rdma_destroy_id(ia->ri_id);
	ia->ri_id = NULL;
out1:
	return rc;
}
/*
 * Clean up/close an IA.
 *   o if event handles and PD have been initialized, free them.
 */
void
rpcrdma_ia_close(struct rpcrdma_ia *ia)
{
	int rc;

	dprintk("RPC:       %s: entering\n", __func__);
	if (ia->ri_bind_mem != NULL) {
		rc = ib_dereg_mr(ia->ri_bind_mem);
		dprintk("RPC:       %s: ib_dereg_mr returned %i\n",
			__func__, rc);
	}
	if (ia->ri_id != NULL && !IS_ERR(ia->ri_id)) {
		if (ia->ri_id->qp)
			rdma_destroy_qp(ia->ri_id);
		rdma_destroy_id(ia->ri_id);
		ia->ri_id = NULL;
	}
	if (ia->ri_pd != NULL && !IS_ERR(ia->ri_pd)) {
		rc = ib_dealloc_pd(ia->ri_pd);
		dprintk("RPC:       %s: ib_dealloc_pd returned %i\n",
			__func__, rc);
	}
}
/*
 * Create unconnected endpoint.
 */
int
rpcrdma_ep_create(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia,
				struct rpcrdma_create_data_internal *cdata)
{
	struct ib_device_attr devattr;
	struct ib_cq *sendcq, *recvcq;
	int rc, err;

	rc = ib_query_device(ia->ri_id->device, &devattr);
	if (rc) {
		dprintk("RPC:       %s: ib_query_device failed %d\n",
			__func__, rc);
		return rc;
	}

	/* check provider's send/recv wr limits */
	if (cdata->max_requests > devattr.max_qp_wr)
		cdata->max_requests = devattr.max_qp_wr;

	ep->rep_attr.event_handler = rpcrdma_qp_async_error_upcall;
	ep->rep_attr.qp_context = ep;
	/* send_cq and recv_cq initialized below */
	ep->rep_attr.srq = NULL;
	ep->rep_attr.cap.max_send_wr = cdata->max_requests;
	switch (ia->ri_memreg_strategy) {
	case RPCRDMA_FRMR: {
		int depth = 7;

		/* Add room for frmr register and invalidate WRs.
		 * 1. FRMR reg WR for head
		 * 2. FRMR invalidate WR for head
		 * 3. N FRMR reg WRs for pagelist
		 * 4. N FRMR invalidate WRs for pagelist
		 * 5. FRMR reg WR for tail
		 * 6. FRMR invalidate WR for tail
		 * 7. The RDMA_SEND WR
		 */

		/* Calculate N if the device max FRMR depth is smaller than
		 * RPCRDMA_MAX_DATA_SEGS.
		 */
		if (ia->ri_max_frmr_depth < RPCRDMA_MAX_DATA_SEGS) {
			int delta = RPCRDMA_MAX_DATA_SEGS -
				    ia->ri_max_frmr_depth;

			do {
				depth += 2;	/* FRMR reg + invalidate */
				delta -= ia->ri_max_frmr_depth;
			} while (delta > 0);
		}
		ep->rep_attr.cap.max_send_wr *= depth;
		if (ep->rep_attr.cap.max_send_wr > devattr.max_qp_wr) {
			cdata->max_requests = devattr.max_qp_wr / depth;
			if (!cdata->max_requests)
				return -EINVAL;
			ep->rep_attr.cap.max_send_wr = cdata->max_requests *
						       depth;
		}
		break;
	}
	default:
		break;
	}
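	/*
	 * Worked example of the sizing above (illustrative numbers only):
	 * with RPCRDMA_MAX_DATA_SEGS = 64 and a device limited to
	 * ri_max_frmr_depth = 16, delta starts at 48 and the loop runs
	 * three times, so depth = 7 + 3 * 2 = 13 work requests per RPC.
	 * max_send_wr is then scaled by 13 and, if that exceeds the
	 * device's max_qp_wr, max_requests is reduced until it fits.
	 */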
	ep->rep_attr.cap.max_recv_wr = cdata->max_requests;
	ep->rep_attr.cap.max_send_sge = (cdata->padding ? 4 : 2);
	ep->rep_attr.cap.max_recv_sge = 1;
	ep->rep_attr.cap.max_inline_data = 0;
	ep->rep_attr.sq_sig_type = IB_SIGNAL_REQ_WR;
	ep->rep_attr.qp_type = IB_QPT_RC;
	ep->rep_attr.port_num = ~0;

	dprintk("RPC:       %s: requested max: dtos: send %d recv %d; "
		"iovs: send %d recv %d\n",
		__func__,
		ep->rep_attr.cap.max_send_wr,
		ep->rep_attr.cap.max_recv_wr,
		ep->rep_attr.cap.max_send_sge,
		ep->rep_attr.cap.max_recv_sge);

	/* set trigger for requesting send completion */
	ep->rep_cqinit = ep->rep_attr.cap.max_send_wr/2 - 1;
	if (ep->rep_cqinit <= 2)
		ep->rep_cqinit = 0;
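	/*
	 * Illustrative note: rep_cqinit seeds the DECR_CQCOUNT() countdown
	 * used at send time.  For example, with max_send_wr = 128 the
	 * trigger is 63, so roughly every 64th send WR is posted with
	 * IB_SEND_SIGNALED and generates a completion, which keeps the
	 * send CQ from overflowing while suppressing most send events.
	 */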
	init_waitqueue_head(&ep->rep_connect_wait);
	INIT_DELAYED_WORK(&ep->rep_connect_worker, rpcrdma_connect_worker);

	sendcq = ib_create_cq(ia->ri_id->device, rpcrdma_sendcq_upcall,
				  rpcrdma_cq_async_error_upcall, ep,
				  ep->rep_attr.cap.max_send_wr + 1, 0);
	if (IS_ERR(sendcq)) {
		rc = PTR_ERR(sendcq);
		dprintk("RPC:       %s: failed to create send CQ: %i\n",
			__func__, rc);
		goto out1;
	}

	rc = ib_req_notify_cq(sendcq, IB_CQ_NEXT_COMP);
	if (rc) {
		dprintk("RPC:       %s: ib_req_notify_cq failed: %i\n",
			__func__, rc);
		goto out2;
	}

	recvcq = ib_create_cq(ia->ri_id->device, rpcrdma_recvcq_upcall,
				  rpcrdma_cq_async_error_upcall, ep,
				  ep->rep_attr.cap.max_recv_wr + 1, 0);
	if (IS_ERR(recvcq)) {
		rc = PTR_ERR(recvcq);
		dprintk("RPC:       %s: failed to create recv CQ: %i\n",
			__func__, rc);
		goto out2;
	}

	rc = ib_req_notify_cq(recvcq, IB_CQ_NEXT_COMP);
	if (rc) {
		dprintk("RPC:       %s: ib_req_notify_cq failed: %i\n",
			__func__, rc);
		ib_destroy_cq(recvcq);
		goto out2;
	}
	ep->rep_attr.send_cq = sendcq;
	ep->rep_attr.recv_cq = recvcq;

	/* Initialize cma parameters */

	/* RPC/RDMA does not use private data */
	ep->rep_remote_cma.private_data = NULL;
	ep->rep_remote_cma.private_data_len = 0;

	/* Client offers RDMA Read but does not initiate */
	ep->rep_remote_cma.initiator_depth = 0;
	if (devattr.max_qp_rd_atom > 32)	/* arbitrary but <= 255 */
		ep->rep_remote_cma.responder_resources = 32;
	else
		ep->rep_remote_cma.responder_resources = devattr.max_qp_rd_atom;

	ep->rep_remote_cma.retry_count = 7;
	ep->rep_remote_cma.flow_control = 0;
	ep->rep_remote_cma.rnr_retry_count = 0;

	return 0;

out2:
	err = ib_destroy_cq(sendcq);
	if (err)
		dprintk("RPC:       %s: ib_destroy_cq returned %i\n",
			__func__, err);
out1:
	return rc;
}
/*
 * rpcrdma_ep_destroy
 *
 * Disconnect and destroy endpoint. After this, the only
 * valid operations on the ep are to free it (if dynamically
 * allocated) or re-create it.
 */
void
rpcrdma_ep_destroy(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia)
{
	int rc;

	dprintk("RPC:       %s: entering, connected is %d\n",
		__func__, ep->rep_connected);

	cancel_delayed_work_sync(&ep->rep_connect_worker);

	if (ia->ri_id->qp) {
		rc = rpcrdma_ep_disconnect(ep, ia);
		if (rc)
			dprintk("RPC:       %s: rpcrdma_ep_disconnect"
				" returned %i\n", __func__, rc);
		rdma_destroy_qp(ia->ri_id);
		ia->ri_id->qp = NULL;
	}

	/* padding - could be done in rpcrdma_buffer_destroy... */
	if (ep->rep_pad_mr) {
		rpcrdma_deregister_internal(ia, ep->rep_pad_mr, &ep->rep_pad);
		ep->rep_pad_mr = NULL;
	}

	rpcrdma_clean_cq(ep->rep_attr.recv_cq);
	rc = ib_destroy_cq(ep->rep_attr.recv_cq);
	if (rc)
		dprintk("RPC:       %s: ib_destroy_cq returned %i\n",
			__func__, rc);

	rpcrdma_clean_cq(ep->rep_attr.send_cq);
	rc = ib_destroy_cq(ep->rep_attr.send_cq);
	if (rc)
		dprintk("RPC:       %s: ib_destroy_cq returned %i\n",
			__func__, rc);
}
/*
 * Connect unconnected endpoint.
 */
int
rpcrdma_ep_connect(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia)
{
	struct rdma_cm_id *id, *old;
	int rc = 0;
	int retry_count = 0;

	if (ep->rep_connected != 0) {
		struct rpcrdma_xprt *xprt;
retry:
		dprintk("RPC:       %s: reconnecting...\n", __func__);

		rc = rpcrdma_ep_disconnect(ep, ia);
		if (rc && rc != -ENOTCONN)
			dprintk("RPC:       %s: rpcrdma_ep_disconnect"
				" status %i\n", __func__, rc);
		rpcrdma_flush_cqs(ep);

		xprt = container_of(ia, struct rpcrdma_xprt, rx_ia);
		id = rpcrdma_create_id(xprt, ia,
				(struct sockaddr *)&xprt->rx_data.addr);
		if (IS_ERR(id)) {
			rc = -EHOSTUNREACH;
			goto out;
		}
		/* TEMP TEMP TEMP - fail if new device:
		 * Deregister/remarshal *all* requests!
		 * Close and recreate adapter, pd, etc!
		 * Re-determine all attributes still sane!
		 * More stuff I haven't thought of!
		 * Rrrgh!
		 */
		if (ia->ri_id->device != id->device) {
			printk("RPC:       %s: can't reconnect on "
				"different device!\n", __func__);
			rdma_destroy_id(id);
			rc = -ENETUNREACH;
			goto out;
		}
		/* END TEMP */
		rc = rdma_create_qp(id, ia->ri_pd, &ep->rep_attr);
		if (rc) {
			dprintk("RPC:       %s: rdma_create_qp failed %i\n",
				__func__, rc);
			rdma_destroy_id(id);
			rc = -ENETUNREACH;
			goto out;
		}

		write_lock(&ia->ri_qplock);
		old = ia->ri_id;
		ia->ri_id = id;
		write_unlock(&ia->ri_qplock);

		rdma_destroy_qp(old);
		rdma_destroy_id(old);
923 dprintk("RPC: %s: connecting...\n", __func__);
924 rc = rdma_create_qp(ia->ri_id, ia->ri_pd, &ep->rep_attr);
926 dprintk("RPC: %s: rdma_create_qp failed %i\n",
928 /* do not update ep->rep_connected */
933 ep->rep_connected = 0;
935 rc = rdma_connect(ia->ri_id, &ep->rep_remote_cma);
937 dprintk("RPC: %s: rdma_connect() failed with %i\n",
942 wait_event_interruptible(ep->rep_connect_wait, ep->rep_connected != 0);
945 * Check state. A non-peer reject indicates no listener
946 * (ECONNREFUSED), which may be a transient state. All
947 * others indicate a transport condition which has already
948 * undergone a best-effort.
950 if (ep->rep_connected == -ECONNREFUSED &&
951 ++retry_count <= RDMA_CONNECT_RETRY_MAX) {
952 dprintk("RPC: %s: non-peer_reject, retry\n", __func__);
955 if (ep->rep_connected <= 0) {
956 /* Sometimes, the only way to reliably connect to remote
957 * CMs is to use same nonzero values for ORD and IRD. */
958 if (retry_count++ <= RDMA_CONNECT_RETRY_MAX + 1 &&
959 (ep->rep_remote_cma.responder_resources == 0 ||
960 ep->rep_remote_cma.initiator_depth !=
961 ep->rep_remote_cma.responder_resources)) {
962 if (ep->rep_remote_cma.responder_resources == 0)
963 ep->rep_remote_cma.responder_resources = 1;
964 ep->rep_remote_cma.initiator_depth =
965 ep->rep_remote_cma.responder_resources;
968 rc = ep->rep_connected;
970 dprintk("RPC: %s: connected\n", __func__);
975 ep->rep_connected = rc;
/*
 * rpcrdma_ep_disconnect
 *
 * This is separate from destroy to facilitate the ability
 * to reconnect without recreating the endpoint.
 *
 * This call is not reentrant, and must not be made in parallel
 * on the same endpoint.
 */
int
rpcrdma_ep_disconnect(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia)
{
	int rc;

	rpcrdma_flush_cqs(ep);
	rc = rdma_disconnect(ia->ri_id);
	if (!rc) {
		/* returns without wait if not connected */
		wait_event_interruptible(ep->rep_connect_wait,
							ep->rep_connected != 1);
		dprintk("RPC:       %s: after wait, %sconnected\n", __func__,
			(ep->rep_connected == 1) ? "still " : "dis");
	} else {
		dprintk("RPC:       %s: rdma_disconnect %i\n", __func__, rc);
		ep->rep_connected = rc;
	}
	return rc;
}
/*
 * Initialize buffer memory
 */
int
rpcrdma_buffer_create(struct rpcrdma_buffer *buf, struct rpcrdma_ep *ep,
	struct rpcrdma_ia *ia, struct rpcrdma_create_data_internal *cdata)
{
	char *p;
	size_t len, rlen, wlen;
	int i, rc;
	struct rpcrdma_mw *r;

	buf->rb_max_requests = cdata->max_requests;
	spin_lock_init(&buf->rb_lock);
	atomic_set(&buf->rb_credits, 1);
	/* Need to allocate:
	 *   1.  arrays for send and recv pointers
	 *   2.  arrays of struct rpcrdma_req to fill in pointers
	 *   3.  array of struct rpcrdma_rep for replies
	 *   4.  padding, if any
	 *   5.  mw's, fmr's or frmr's, if any
	 * Send/recv buffers in req/rep need to be registered
	 */

	len = buf->rb_max_requests *
		(sizeof(struct rpcrdma_req *) + sizeof(struct rpcrdma_rep *));
	len += cdata->padding;
	switch (ia->ri_memreg_strategy) {
	case RPCRDMA_FRMR:
		len += buf->rb_max_requests * RPCRDMA_MAX_SEGS *
				sizeof(struct rpcrdma_mw);
		break;
	case RPCRDMA_MTHCAFMR:
		/* TBD we are perhaps overallocating here */
		len += (buf->rb_max_requests + 1) * RPCRDMA_MAX_SEGS *
				sizeof(struct rpcrdma_mw);
		break;
	default:
		break;
	}
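	/*
	 * Illustrative sizing (example numbers, not constants from this
	 * file): with rb_max_requests = 32, the pointer arrays cost
	 * 32 * (2 * sizeof(void *)) bytes, padding adds cdata->padding,
	 * and an FRMR pool adds 32 * RPCRDMA_MAX_SEGS rpcrdma_mw entries.
	 * All of it comes from the single kzalloc() below.
	 */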
	/* allocate 1, 4 and 5 in one shot */
	p = kzalloc(len, GFP_KERNEL);
	if (p == NULL) {
		dprintk("RPC:       %s: req_t/rep_t/pad kzalloc(%zd) failed\n",
			__func__, len);
		rc = -ENOMEM;
		goto out;
	}
	buf->rb_pool = p;	/* for freeing it later */

	buf->rb_send_bufs = (struct rpcrdma_req **) p;
	p = (char *) &buf->rb_send_bufs[buf->rb_max_requests];
	buf->rb_recv_bufs = (struct rpcrdma_rep **) p;
	p = (char *) &buf->rb_recv_bufs[buf->rb_max_requests];
	/*
	 * Register the zeroed pad buffer, if any.
	 */
	if (cdata->padding) {
		rc = rpcrdma_register_internal(ia, p, cdata->padding,
					    &ep->rep_pad_mr, &ep->rep_pad);
		if (rc)
			goto out;
	}
	p += cdata->padding;

	INIT_LIST_HEAD(&buf->rb_mws);
	INIT_LIST_HEAD(&buf->rb_all);
	r = (struct rpcrdma_mw *)p;
	switch (ia->ri_memreg_strategy) {
	case RPCRDMA_FRMR:
		for (i = buf->rb_max_requests * RPCRDMA_MAX_SEGS; i; i--) {
			r->r.frmr.fr_mr = ib_alloc_fast_reg_mr(ia->ri_pd,
						ia->ri_max_frmr_depth);
			if (IS_ERR(r->r.frmr.fr_mr)) {
				rc = PTR_ERR(r->r.frmr.fr_mr);
				dprintk("RPC:       %s: ib_alloc_fast_reg_mr"
					" failed %i\n", __func__, rc);
				goto out;
			}
			r->r.frmr.fr_pgl = ib_alloc_fast_reg_page_list(
						ia->ri_id->device,
						ia->ri_max_frmr_depth);
			if (IS_ERR(r->r.frmr.fr_pgl)) {
				rc = PTR_ERR(r->r.frmr.fr_pgl);
				dprintk("RPC:       %s: "
					"ib_alloc_fast_reg_page_list "
					"failed %i\n", __func__, rc);

				ib_dereg_mr(r->r.frmr.fr_mr);
				goto out;
			}
			list_add(&r->mw_all, &buf->rb_all);
			list_add(&r->mw_list, &buf->rb_mws);
			++r;
		}
		break;
	case RPCRDMA_MTHCAFMR:
		/* TBD we are perhaps overallocating here */
		for (i = (buf->rb_max_requests+1) * RPCRDMA_MAX_SEGS; i; i--) {
			static struct ib_fmr_attr fa =
				{ RPCRDMA_MAX_DATA_SEGS, 1, PAGE_SHIFT };
			r->r.fmr = ib_alloc_fmr(ia->ri_pd,
				IB_ACCESS_REMOTE_WRITE | IB_ACCESS_REMOTE_READ,
				&fa);
			if (IS_ERR(r->r.fmr)) {
				rc = PTR_ERR(r->r.fmr);
				dprintk("RPC:       %s: ib_alloc_fmr"
					" failed %i\n", __func__, rc);
				goto out;
			}
			list_add(&r->mw_all, &buf->rb_all);
			list_add(&r->mw_list, &buf->rb_mws);
			++r;
		}
		break;
	default:
		break;
	}
	/*
	 * Allocate/init the request/reply buffers. Doing this
	 * using kmalloc for now -- one for each buf.
	 */
	wlen = 1 << fls(cdata->inline_wsize + sizeof(struct rpcrdma_req));
	rlen = 1 << fls(cdata->inline_rsize + sizeof(struct rpcrdma_rep));
	dprintk("RPC:       %s: wlen = %zu, rlen = %zu\n",
		__func__, wlen, rlen);
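	/*
	 * Illustrative note: "1 << fls(x)" rounds x up to a power of two
	 * (the next one above x).  For example, an inline_wsize of 1024
	 * plus a hypothetical 200-byte struct rpcrdma_req gives 1224,
	 * fls(1224) = 11, so wlen = 2048.  The struct size here is only
	 * for illustration, not the real sizeof value.
	 */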
	for (i = 0; i < buf->rb_max_requests; i++) {
		struct rpcrdma_req *req;
		struct rpcrdma_rep *rep;

		req = kmalloc(wlen, GFP_KERNEL);
		if (req == NULL) {
			dprintk("RPC:       %s: request buffer %d alloc"
				" failed\n", __func__, i);
			rc = -ENOMEM;
			goto out;
		}
		memset(req, 0, sizeof(struct rpcrdma_req));
		buf->rb_send_bufs[i] = req;
		buf->rb_send_bufs[i]->rl_buffer = buf;

		rc = rpcrdma_register_internal(ia, req->rl_base,
				wlen - offsetof(struct rpcrdma_req, rl_base),
				&buf->rb_send_bufs[i]->rl_handle,
				&buf->rb_send_bufs[i]->rl_iov);
		if (rc)
			goto out;

		buf->rb_send_bufs[i]->rl_size = wlen -
						sizeof(struct rpcrdma_req);

		rep = kmalloc(rlen, GFP_KERNEL);
		if (rep == NULL) {
			dprintk("RPC:       %s: reply buffer %d alloc failed\n",
				__func__, i);
			rc = -ENOMEM;
			goto out;
		}
		memset(rep, 0, sizeof(struct rpcrdma_rep));
		buf->rb_recv_bufs[i] = rep;
		buf->rb_recv_bufs[i]->rr_buffer = buf;

		rc = rpcrdma_register_internal(ia, rep->rr_base,
				rlen - offsetof(struct rpcrdma_rep, rr_base),
				&buf->rb_recv_bufs[i]->rr_handle,
				&buf->rb_recv_bufs[i]->rr_iov);
		if (rc)
			goto out;
	}
	dprintk("RPC:       %s: max_requests %d\n",
		__func__, buf->rb_max_requests);
	/* done */
	return 0;
out:
	rpcrdma_buffer_destroy(buf);
	return rc;
}
/*
 * Unregister and destroy buffer memory. Need to deal with
 * partial initialization, so it's callable from failed create.
 * Must be called before destroying endpoint, as registrations
 * reference it.
 */
void
rpcrdma_buffer_destroy(struct rpcrdma_buffer *buf)
{
	int rc, i;
	struct rpcrdma_ia *ia = rdmab_to_ia(buf);
	struct rpcrdma_mw *r;

	/* clean up in reverse order from create
	 *   1.  recv mr memory (mr free, then kfree)
	 *   2.  send mr memory (mr free, then kfree)
	 *   3.  padding (if any) [moved to rpcrdma_ep_destroy]
	 *   4.  arrays
	 */
	dprintk("RPC:       %s: entering\n", __func__);

	for (i = 0; i < buf->rb_max_requests; i++) {
		if (buf->rb_recv_bufs && buf->rb_recv_bufs[i]) {
			rpcrdma_deregister_internal(ia,
					buf->rb_recv_bufs[i]->rr_handle,
					&buf->rb_recv_bufs[i]->rr_iov);
			kfree(buf->rb_recv_bufs[i]);
		}
		if (buf->rb_send_bufs && buf->rb_send_bufs[i]) {
			rpcrdma_deregister_internal(ia,
					buf->rb_send_bufs[i]->rl_handle,
					&buf->rb_send_bufs[i]->rl_iov);
			kfree(buf->rb_send_bufs[i]);
		}
	}

	while (!list_empty(&buf->rb_mws)) {
		r = list_entry(buf->rb_mws.next,
			struct rpcrdma_mw, mw_list);
		list_del(&r->mw_all);
		list_del(&r->mw_list);
		switch (ia->ri_memreg_strategy) {
		case RPCRDMA_FRMR:
			rc = ib_dereg_mr(r->r.frmr.fr_mr);
			if (rc)
				dprintk("RPC:       %s: ib_dereg_mr"
					" failed %i\n", __func__, rc);
			ib_free_fast_reg_page_list(r->r.frmr.fr_pgl);
			break;
		case RPCRDMA_MTHCAFMR:
			rc = ib_dealloc_fmr(r->r.fmr);
			if (rc)
				dprintk("RPC:       %s: ib_dealloc_fmr"
					" failed %i\n", __func__, rc);
			break;
		default:
			break;
		}
	}

	kfree(buf->rb_pool);
}
1259 /* "*mw" can be NULL when rpcrdma_buffer_get_mrs() fails, leaving
1260 * some req segments uninitialized.
1263 rpcrdma_buffer_put_mr(struct rpcrdma_mw **mw, struct rpcrdma_buffer *buf)
1266 list_add_tail(&(*mw)->mw_list, &buf->rb_mws);
1271 /* Cycle mw's back in reverse order, and "spin" them.
1272 * This delays and scrambles reuse as much as possible.
1275 rpcrdma_buffer_put_mrs(struct rpcrdma_req *req, struct rpcrdma_buffer *buf)
1277 struct rpcrdma_mr_seg *seg = req->rl_segments;
1278 struct rpcrdma_mr_seg *seg1 = seg;
1281 for (i = 1, seg++; i < RPCRDMA_MAX_SEGS; seg++, i++)
1282 rpcrdma_buffer_put_mr(&seg->mr_chunk.rl_mw, buf);
1283 rpcrdma_buffer_put_mr(&seg1->mr_chunk.rl_mw, buf);
static void
rpcrdma_buffer_put_sendbuf(struct rpcrdma_req *req, struct rpcrdma_buffer *buf)
{
	buf->rb_send_bufs[--buf->rb_send_index] = req;
	req->rl_niovs = 0;
	if (req->rl_reply) {
		buf->rb_recv_bufs[--buf->rb_recv_index] = req->rl_reply;
		req->rl_reply->rr_func = NULL;
		req->rl_reply = NULL;
	}
}

static struct rpcrdma_req *
rpcrdma_buffer_get_mrs(struct rpcrdma_req *req, struct rpcrdma_buffer *buf)
{
	struct rpcrdma_mw *r;
	int i;

	i = RPCRDMA_MAX_SEGS - 1;
	while (!list_empty(&buf->rb_mws)) {
		r = list_entry(buf->rb_mws.next,
			       struct rpcrdma_mw, mw_list);
		list_del(&r->mw_list);
		req->rl_segments[i].mr_chunk.rl_mw = r;
		if (unlikely(i-- == 0))
			return req;	/* Success */
	}

	/* Not enough entries on rb_mws for this req */
	rpcrdma_buffer_put_sendbuf(req, buf);
	rpcrdma_buffer_put_mrs(req, buf);
	return NULL;
}
/*
 * Get a set of request/reply buffers.
 *
 * Reply buffer (if needed) is attached to send buffer upon return.
 * Rule:
 *    rb_send_index and rb_recv_index MUST always be pointing to the
 *    *next* available buffer (non-NULL). They are incremented after
 *    removing buffers, and decremented *before* returning them.
 */
struct rpcrdma_req *
rpcrdma_buffer_get(struct rpcrdma_buffer *buffers)
{
	struct rpcrdma_ia *ia = rdmab_to_ia(buffers);
	struct rpcrdma_req *req;
	unsigned long flags;

	spin_lock_irqsave(&buffers->rb_lock, flags);
	if (buffers->rb_send_index == buffers->rb_max_requests) {
		spin_unlock_irqrestore(&buffers->rb_lock, flags);
		dprintk("RPC:       %s: out of request buffers\n", __func__);
		return ((struct rpcrdma_req *)NULL);
	}

	req = buffers->rb_send_bufs[buffers->rb_send_index];
	if (buffers->rb_send_index < buffers->rb_recv_index) {
		dprintk("RPC:       %s: %d extra receives outstanding (ok)\n",
			__func__,
			buffers->rb_recv_index - buffers->rb_send_index);
		req->rl_reply = NULL;
	} else {
		req->rl_reply = buffers->rb_recv_bufs[buffers->rb_recv_index];
		buffers->rb_recv_bufs[buffers->rb_recv_index++] = NULL;
	}
	buffers->rb_send_bufs[buffers->rb_send_index++] = NULL;
	switch (ia->ri_memreg_strategy) {
	case RPCRDMA_FRMR:
	case RPCRDMA_MTHCAFMR:
		req = rpcrdma_buffer_get_mrs(req, buffers);
		break;
	default:
		break;
	}
	spin_unlock_irqrestore(&buffers->rb_lock, flags);
	return req;
}
/*
 * Put request/reply buffers back into pool.
 * Pre-decrement counter/array index.
 */
void
rpcrdma_buffer_put(struct rpcrdma_req *req)
{
	struct rpcrdma_buffer *buffers = req->rl_buffer;
	struct rpcrdma_ia *ia = rdmab_to_ia(buffers);
	unsigned long flags;

	spin_lock_irqsave(&buffers->rb_lock, flags);
	rpcrdma_buffer_put_sendbuf(req, buffers);
	switch (ia->ri_memreg_strategy) {
	case RPCRDMA_FRMR:
	case RPCRDMA_MTHCAFMR:
		rpcrdma_buffer_put_mrs(req, buffers);
		break;
	default:
		break;
	}
	spin_unlock_irqrestore(&buffers->rb_lock, flags);
}
/*
 * Recover reply buffers from pool.
 * This happens when recovering from error conditions.
 * Post-increment counter/array index.
 */
void
rpcrdma_recv_buffer_get(struct rpcrdma_req *req)
{
	struct rpcrdma_buffer *buffers = req->rl_buffer;
	unsigned long flags;

	if (req->rl_iov.length == 0)	/* special case xprt_rdma_allocate() */
		buffers = ((struct rpcrdma_req *) buffers)->rl_buffer;
	spin_lock_irqsave(&buffers->rb_lock, flags);
	if (buffers->rb_recv_index < buffers->rb_max_requests) {
		req->rl_reply = buffers->rb_recv_bufs[buffers->rb_recv_index];
		buffers->rb_recv_bufs[buffers->rb_recv_index++] = NULL;
	}
	spin_unlock_irqrestore(&buffers->rb_lock, flags);
}
/*
 * Put reply buffers back into pool when not attached to
 * request. This happens in error conditions.
 */
void
rpcrdma_recv_buffer_put(struct rpcrdma_rep *rep)
{
	struct rpcrdma_buffer *buffers = rep->rr_buffer;
	unsigned long flags;

	rep->rr_func = NULL;
	spin_lock_irqsave(&buffers->rb_lock, flags);
	buffers->rb_recv_bufs[--buffers->rb_recv_index] = rep;
	spin_unlock_irqrestore(&buffers->rb_lock, flags);
}
/*
 * Wrappers for internal-use kmalloc memory registration, used by buffer code.
 */

int
rpcrdma_register_internal(struct rpcrdma_ia *ia, void *va, int len,
				struct ib_mr **mrp, struct ib_sge *iov)
{
	struct ib_phys_buf ipb;
	struct ib_mr *mr;
	int rc;

	/*
	 * All memory passed here was kmalloc'ed, therefore phys-contiguous.
	 */
	iov->addr = ib_dma_map_single(ia->ri_id->device,
			va, len, DMA_BIDIRECTIONAL);
	if (ib_dma_mapping_error(ia->ri_id->device, iov->addr))
		return -ENOMEM;

	iov->length = len;

	if (ia->ri_have_dma_lkey) {
		*mrp = NULL;
		iov->lkey = ia->ri_dma_lkey;
		return 0;
	} else if (ia->ri_bind_mem != NULL) {
		*mrp = NULL;
		iov->lkey = ia->ri_bind_mem->lkey;
		return 0;
	}

	ipb.addr = iov->addr;
	ipb.size = iov->length;
	mr = ib_reg_phys_mr(ia->ri_pd, &ipb, 1,
			IB_ACCESS_LOCAL_WRITE, &iov->addr);

	dprintk("RPC:       %s: phys convert: 0x%llx "
			"registered 0x%llx length %d\n",
			__func__, (unsigned long long)ipb.addr,
			(unsigned long long)iov->addr, len);

	if (IS_ERR(mr)) {
		*mrp = NULL;
		rc = PTR_ERR(mr);
		dprintk("RPC:       %s: failed with %i\n", __func__, rc);
	} else {
		*mrp = mr;
		iov->lkey = mr->lkey;
		rc = 0;
	}

	return rc;
}
int
rpcrdma_deregister_internal(struct rpcrdma_ia *ia,
				struct ib_mr *mr, struct ib_sge *iov)
{
	int rc;

	ib_dma_unmap_single(ia->ri_id->device,
			iov->addr, iov->length, DMA_BIDIRECTIONAL);

	if (NULL == mr)
		return 0;

	rc = ib_dereg_mr(mr);
	if (rc)
		dprintk("RPC:       %s: ib_dereg_mr failed %i\n", __func__, rc);
	return rc;
}
/*
 * Wrappers for chunk registration, shared by read/write chunk code.
 */

static void
rpcrdma_map_one(struct rpcrdma_ia *ia, struct rpcrdma_mr_seg *seg, int writing)
{
	seg->mr_dir = writing ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
	seg->mr_dmalen = seg->mr_len;
	if (seg->mr_page)
		seg->mr_dma = ib_dma_map_page(ia->ri_id->device,
				seg->mr_page, offset_in_page(seg->mr_offset),
				seg->mr_dmalen, seg->mr_dir);
	else
		seg->mr_dma = ib_dma_map_single(ia->ri_id->device,
				seg->mr_offset,
				seg->mr_dmalen, seg->mr_dir);
	if (ib_dma_mapping_error(ia->ri_id->device, seg->mr_dma)) {
		dprintk("RPC:       %s: mr_dma %llx mr_offset %p mr_dma_len %zu\n",
			__func__,
			(unsigned long long)seg->mr_dma,
			seg->mr_offset, seg->mr_dmalen);
	}
}

static void
rpcrdma_unmap_one(struct rpcrdma_ia *ia, struct rpcrdma_mr_seg *seg)
{
	if (seg->mr_page)
		ib_dma_unmap_page(ia->ri_id->device,
				seg->mr_dma, seg->mr_dmalen, seg->mr_dir);
	else
		ib_dma_unmap_single(ia->ri_id->device,
				seg->mr_dma, seg->mr_dmalen, seg->mr_dir);
}
static int
rpcrdma_register_frmr_external(struct rpcrdma_mr_seg *seg,
			int *nsegs, int writing, struct rpcrdma_ia *ia,
			struct rpcrdma_xprt *r_xprt)
{
	struct rpcrdma_mr_seg *seg1 = seg;
	struct rpcrdma_mw *mw = seg1->mr_chunk.rl_mw;
	struct rpcrdma_frmr *frmr = &mw->r.frmr;
	struct ib_mr *mr = frmr->fr_mr;
	struct ib_send_wr invalidate_wr, frmr_wr, *bad_wr, *post_wr;
	u8 key;
	int len, pageoff;
	int i, rc;
	int seg_len;
	u64 pa;
	int page_no;

	pageoff = offset_in_page(seg1->mr_offset);
	seg1->mr_offset -= pageoff;	/* start of page */
	seg1->mr_len += pageoff;
	len = -pageoff;
	if (*nsegs > ia->ri_max_frmr_depth)
		*nsegs = ia->ri_max_frmr_depth;
	for (page_no = i = 0; i < *nsegs;) {
		rpcrdma_map_one(ia, seg, writing);
		pa = seg->mr_dma;
		for (seg_len = seg->mr_len; seg_len > 0; seg_len -= PAGE_SIZE) {
			frmr->fr_pgl->page_list[page_no++] = pa;
			pa += PAGE_SIZE;
		}
		len += seg->mr_len;
		++seg;
		++i;
		/* Check for holes */
		if ((i < *nsegs && offset_in_page(seg->mr_offset)) ||
		    offset_in_page((seg-1)->mr_offset + (seg-1)->mr_len))
			break;
	}
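	/* Illustrative note on the "holes" check above: segments are
	 * coalesced into one FRMR only while they remain page-contiguous.
	 * For example, a segment that ends mid-page, or a following
	 * segment that starts at a nonzero page offset, terminates the
	 * loop so the chunk is registered without a gap in the middle.
	 */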
1575 dprintk("RPC: %s: Using frmr %p to map %d segments\n",
1578 if (unlikely(frmr->fr_state == FRMR_IS_VALID)) {
1579 dprintk("RPC: %s: frmr %x left valid, posting invalidate.\n",
1580 __func__, mr->rkey);
1581 /* Invalidate before using. */
1582 memset(&invalidate_wr, 0, sizeof invalidate_wr);
1583 invalidate_wr.wr_id = (unsigned long)(void *)mw;
1584 invalidate_wr.next = &frmr_wr;
1585 invalidate_wr.opcode = IB_WR_LOCAL_INV;
1586 invalidate_wr.send_flags = IB_SEND_SIGNALED;
1587 invalidate_wr.ex.invalidate_rkey = mr->rkey;
1588 DECR_CQCOUNT(&r_xprt->rx_ep);
1589 post_wr = &invalidate_wr;
1593 /* Prepare FRMR WR */
1594 memset(&frmr_wr, 0, sizeof frmr_wr);
1595 frmr_wr.wr_id = (unsigned long)(void *)mw;
1596 frmr_wr.opcode = IB_WR_FAST_REG_MR;
1597 frmr_wr.send_flags = IB_SEND_SIGNALED;
1598 frmr_wr.wr.fast_reg.iova_start = seg1->mr_dma;
1599 frmr_wr.wr.fast_reg.page_list = frmr->fr_pgl;
1600 frmr_wr.wr.fast_reg.page_list_len = page_no;
1601 frmr_wr.wr.fast_reg.page_shift = PAGE_SHIFT;
1602 frmr_wr.wr.fast_reg.length = page_no << PAGE_SHIFT;
1603 if (frmr_wr.wr.fast_reg.length < len) {
1609 key = (u8)(mr->rkey & 0x000000FF);
1610 ib_update_fast_reg_key(mr, ++key);
1612 frmr_wr.wr.fast_reg.access_flags = (writing ?
1613 IB_ACCESS_REMOTE_WRITE | IB_ACCESS_LOCAL_WRITE :
1614 IB_ACCESS_REMOTE_READ);
1615 frmr_wr.wr.fast_reg.rkey = mr->rkey;
1616 DECR_CQCOUNT(&r_xprt->rx_ep);
1618 rc = ib_post_send(ia->ri_id->qp, post_wr, &bad_wr);
1621 dprintk("RPC: %s: failed ib_post_send for register,"
1622 " status %i\n", __func__, rc);
1623 ib_update_fast_reg_key(mr, --key);
1626 seg1->mr_rkey = mr->rkey;
1627 seg1->mr_base = seg1->mr_dma + pageoff;
1635 rpcrdma_unmap_one(ia, --seg);
static int
rpcrdma_deregister_frmr_external(struct rpcrdma_mr_seg *seg,
			struct rpcrdma_ia *ia, struct rpcrdma_xprt *r_xprt)
{
	struct rpcrdma_mr_seg *seg1 = seg;
	struct ib_send_wr invalidate_wr, *bad_wr;
	int rc;

	memset(&invalidate_wr, 0, sizeof invalidate_wr);
	invalidate_wr.wr_id = (unsigned long)(void *)seg1->mr_chunk.rl_mw;
	invalidate_wr.opcode = IB_WR_LOCAL_INV;
	invalidate_wr.send_flags = IB_SEND_SIGNALED;
	invalidate_wr.ex.invalidate_rkey = seg1->mr_chunk.rl_mw->r.frmr.fr_mr->rkey;
	DECR_CQCOUNT(&r_xprt->rx_ep);

	read_lock(&ia->ri_qplock);
	while (seg1->mr_nsegs--)
		rpcrdma_unmap_one(ia, seg++);
	rc = ib_post_send(ia->ri_id->qp, &invalidate_wr, &bad_wr);
	read_unlock(&ia->ri_qplock);
	if (rc)
		dprintk("RPC:       %s: failed ib_post_send for invalidate,"
			" status %i\n", __func__, rc);
	return rc;
}
static int
rpcrdma_register_fmr_external(struct rpcrdma_mr_seg *seg,
			int *nsegs, int writing, struct rpcrdma_ia *ia)
{
	struct rpcrdma_mr_seg *seg1 = seg;
	u64 physaddrs[RPCRDMA_MAX_DATA_SEGS];
	int len, pageoff, i, rc;

	pageoff = offset_in_page(seg1->mr_offset);
	seg1->mr_offset -= pageoff;	/* start of page */
	seg1->mr_len += pageoff;
	len = -pageoff;
	if (*nsegs > RPCRDMA_MAX_DATA_SEGS)
		*nsegs = RPCRDMA_MAX_DATA_SEGS;
	for (i = 0; i < *nsegs;) {
		rpcrdma_map_one(ia, seg, writing);
		physaddrs[i] = seg->mr_dma;
		len += seg->mr_len;
		++seg;
		++i;
		/* Check for holes */
		if ((i < *nsegs && offset_in_page(seg->mr_offset)) ||
		    offset_in_page((seg-1)->mr_offset + (seg-1)->mr_len))
			break;
	}
	rc = ib_map_phys_fmr(seg1->mr_chunk.rl_mw->r.fmr,
				physaddrs, i, seg1->mr_dma);
	if (rc) {
		dprintk("RPC:       %s: failed ib_map_phys_fmr "
			"%u@0x%llx+%i (%d)... status %i\n", __func__,
			len, (unsigned long long)seg1->mr_dma,
			pageoff, i, rc);
		while (i--)
			rpcrdma_unmap_one(ia, --seg);
	} else {
		seg1->mr_rkey = seg1->mr_chunk.rl_mw->r.fmr->rkey;
		seg1->mr_base = seg1->mr_dma + pageoff;
		seg1->mr_nsegs = i;
		seg1->mr_len = len;
	}
	*nsegs = i;
	return rc;
}
static int
rpcrdma_deregister_fmr_external(struct rpcrdma_mr_seg *seg,
			struct rpcrdma_ia *ia)
{
	struct rpcrdma_mr_seg *seg1 = seg;
	LIST_HEAD(l);
	int rc;

	list_add(&seg1->mr_chunk.rl_mw->r.fmr->list, &l);
	rc = ib_unmap_fmr(&l);
	read_lock(&ia->ri_qplock);
	while (seg1->mr_nsegs--)
		rpcrdma_unmap_one(ia, seg++);
	read_unlock(&ia->ri_qplock);
	if (rc)
		dprintk("RPC:       %s: failed ib_unmap_fmr,"
			" status %i\n", __func__, rc);
	return rc;
}
int
rpcrdma_register_external(struct rpcrdma_mr_seg *seg,
			int nsegs, int writing, struct rpcrdma_xprt *r_xprt)
{
	struct rpcrdma_ia *ia = &r_xprt->rx_ia;
	int rc = 0;

	switch (ia->ri_memreg_strategy) {

#if RPCRDMA_PERSISTENT_REGISTRATION
	case RPCRDMA_ALLPHYSICAL:
		rpcrdma_map_one(ia, seg, writing);
		seg->mr_rkey = ia->ri_bind_mem->rkey;
		seg->mr_base = seg->mr_dma;
		seg->mr_nsegs = 1;
		nsegs = 1;
		break;
#endif

	/* Registration using frmr registration */
	case RPCRDMA_FRMR:
		rc = rpcrdma_register_frmr_external(seg, &nsegs, writing, ia, r_xprt);
		break;

	/* Registration using fmr memory registration */
	case RPCRDMA_MTHCAFMR:
		rc = rpcrdma_register_fmr_external(seg, &nsegs, writing, ia);
		break;

	default:
		return -EIO;
	}
	if (rc)
		return rc;

	return nsegs;
}
int
rpcrdma_deregister_external(struct rpcrdma_mr_seg *seg,
		struct rpcrdma_xprt *r_xprt)
{
	struct rpcrdma_ia *ia = &r_xprt->rx_ia;
	int nsegs = seg->mr_nsegs, rc;

	switch (ia->ri_memreg_strategy) {

#if RPCRDMA_PERSISTENT_REGISTRATION
	case RPCRDMA_ALLPHYSICAL:
		read_lock(&ia->ri_qplock);
		rpcrdma_unmap_one(ia, seg);
		read_unlock(&ia->ri_qplock);
		break;
#endif

	case RPCRDMA_FRMR:
		rc = rpcrdma_deregister_frmr_external(seg, ia, r_xprt);
		break;

	case RPCRDMA_MTHCAFMR:
		rc = rpcrdma_deregister_fmr_external(seg, ia);
		break;

	default:
		break;
	}
	return nsegs;
}
/*
 * Prepost any receive buffer, then post send.
 *
 * Receive buffer is donated to hardware, reclaimed upon recv completion.
 */
int
rpcrdma_ep_post(struct rpcrdma_ia *ia,
		struct rpcrdma_ep *ep,
		struct rpcrdma_req *req)
{
	struct ib_send_wr send_wr, *send_wr_fail;
	struct rpcrdma_rep *rep = req->rl_reply;
	int rc;

	if (rep) {
		rc = rpcrdma_ep_post_recv(ia, ep, rep);
		if (rc)
			goto out;
		req->rl_reply = NULL;
	}

	send_wr.next = NULL;
	send_wr.wr_id = 0ULL;	/* no send cookie */
	send_wr.sg_list = req->rl_send_iov;
	send_wr.num_sge = req->rl_niovs;
	send_wr.opcode = IB_WR_SEND;
	if (send_wr.num_sge == 4)	/* no need to sync any pad (constant) */
		ib_dma_sync_single_for_device(ia->ri_id->device,
			req->rl_send_iov[3].addr, req->rl_send_iov[3].length,
			DMA_TO_DEVICE);
	ib_dma_sync_single_for_device(ia->ri_id->device,
		req->rl_send_iov[1].addr, req->rl_send_iov[1].length,
		DMA_TO_DEVICE);
	ib_dma_sync_single_for_device(ia->ri_id->device,
		req->rl_send_iov[0].addr, req->rl_send_iov[0].length,
		DMA_TO_DEVICE);

	if (DECR_CQCOUNT(ep) > 0)
		send_wr.send_flags = 0;
	else { /* Provider must take a send completion every now and then */
		INIT_CQCOUNT(ep);
		send_wr.send_flags = IB_SEND_SIGNALED;
	}

	rc = ib_post_send(ia->ri_id->qp, &send_wr, &send_wr_fail);
	if (rc)
		dprintk("RPC:       %s: ib_post_send returned %i\n", __func__,
			rc);
out:
	return rc;
}
/*
 * (Re)post a receive buffer.
 */
int
rpcrdma_ep_post_recv(struct rpcrdma_ia *ia,
		     struct rpcrdma_ep *ep,
		     struct rpcrdma_rep *rep)
{
	struct ib_recv_wr recv_wr, *recv_wr_fail;
	int rc;

	recv_wr.next = NULL;
	recv_wr.wr_id = (u64) (unsigned long) rep;
	recv_wr.sg_list = &rep->rr_iov;
	recv_wr.num_sge = 1;

	ib_dma_sync_single_for_cpu(ia->ri_id->device,
		rep->rr_iov.addr, rep->rr_iov.length, DMA_BIDIRECTIONAL);

	rc = ib_post_recv(ia->ri_id->qp, &recv_wr, &recv_wr_fail);
	if (rc)
		dprintk("RPC:       %s: ib_post_recv returned %i\n", __func__,
			rc);
	return rc;
}
/* Physical mapping means one Read/Write list entry per-page.
 * All list entries must fit within an inline buffer.
 *
 * NB: The server must return a Write list for NFS READ,
 *     which has the same constraint. Factor in the inline
 *     rsize as well.
 */
static size_t
rpcrdma_physical_max_payload(struct rpcrdma_xprt *r_xprt)
{
	struct rpcrdma_create_data_internal *cdata = &r_xprt->rx_data;
	unsigned int inline_size, pages;

	inline_size = min_t(unsigned int,
			    cdata->inline_wsize, cdata->inline_rsize);
	inline_size -= RPCRDMA_HDRLEN_MIN;
	pages = inline_size / sizeof(struct rpcrdma_segment);
	return pages << PAGE_SHIFT;
}
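/*
 * Worked example (illustrative values only): with inline_wsize and
 * inline_rsize both 1024, an RPCRDMA_HDRLEN_MIN of 28, and a 16-byte
 * struct rpcrdma_segment, inline_size is 996 and pages is 62, so the
 * maximum physical-mode payload would be 62 pages (about 248 KB with
 * 4 KB pages).  The constants here are assumptions for the example,
 * not values taken from this file.
 */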
static size_t
rpcrdma_mr_max_payload(struct rpcrdma_xprt *r_xprt)
{
	return RPCRDMA_MAX_DATA_SEGS << PAGE_SHIFT;
}

size_t
rpcrdma_max_payload(struct rpcrdma_xprt *r_xprt)
{
	size_t result;

	switch (r_xprt->rx_ia.ri_memreg_strategy) {
	case RPCRDMA_ALLPHYSICAL:
		result = rpcrdma_physical_max_payload(r_xprt);
		break;
	default:
		result = rpcrdma_mr_max_payload(r_xprt);
	}
	return result;
}