/*
 * Copyright (c) 2015 Oracle.  All rights reserved.
 * Copyright (c) 2003-2007 Network Appliance, Inc.  All rights reserved.
 */

/* Lightweight memory registration using Fast Registration Work
 * Requests (FRWR), sometimes also referred to as FRMR mode.
 *
 * FRWR features ordered asynchronous registration and deregistration
 * of arbitrarily sized memory regions. This is the fastest and safest
 * but most complex memory registration mode.
 */

/* Normal operation:
 *
 * A Memory Region is prepared for RDMA READ or WRITE using a FAST_REG
 * Work Request (frwr_op_map). When the RDMA operation is finished, this
 * Memory Region is invalidated using a LOCAL_INV Work Request
 * (frwr_op_unmap).
 *
 * Typically these Work Requests are not signaled, and neither are RDMA
 * SEND Work Requests (with the exception of signaling occasionally to
 * prevent provider work queue overflows). This greatly reduces HCA
 * interrupt workload.
 *
 * As an optimization, frwr_op_unmap marks MRs INVALID before the
 * LOCAL_INV WR is posted. If posting succeeds, the MR is placed on
 * rb_mws immediately so that no work (like managing a linked list
 * under a spinlock) is needed in the completion upcall.
 *
 * But this means that frwr_op_map() can occasionally encounter an MR
 * that is INVALID but whose LOCAL_INV WR has not yet completed. Work
 * Queue ordering prevents a subsequent FAST_REG WR from executing
 * against that MR while it is still being invalidated.
 */
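
/* Illustrative timeline (an editorial addition; assumes one registered
 * chunk and no errors): the client posts a FAST_REG WR to advertise an
 * rkey, then an RDMA SEND carrying the RPC call; the server performs
 * RDMA READ or WRITE against the Memory Region; when the reply is
 * processed, the client posts a LOCAL_INV WR to revoke the rkey. Only
 * the occasional WR in this stream is signaled, as described above.
 */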

/* Transport recovery:
 *
 * ->op_map and the transport connect worker cannot run at the same
 * time, but ->op_unmap can fire while the transport connect worker
 * is running. Thus MR recovery is handled in ->op_map, to guarantee
 * that recovered MRs are owned by a sending RPC, and not one where
 * ->op_unmap could fire at the same time transport reconnect is
 * happening.
 *
 * When the underlying transport disconnects, MRs are left in one of
 * three states:
 *
 * INVALID:  The MR was not in use before the QP entered ERROR state.
 *           (Or, its LOCAL_INV WR has not yet completed or been flushed.)
 *
 * STALE:    The MR was being registered or unregistered when the QP
 *           entered ERROR state, and the pending WR was flushed.
 *
 * VALID:    The MR was registered before the QP entered ERROR state.
 *
 * When frwr_op_map encounters STALE and VALID MRs, they are recovered
 * with ib_dereg_mr and then re-initialized. Because MR recovery
 * allocates fresh resources, it is deferred to a workqueue, and the
 * recovered MRs are placed back on the rb_mws list when recovery is
 * complete. frwr_op_map allocates another MR for the current RPC while
 * the broken MR is reset.
 *
 * To ensure that frwr_op_map doesn't encounter an MR that is marked
 * INVALID but that is about to be flushed due to a previous transport
 * disconnect, the transport connect worker attempts to drain all
 * pending send queue WRs before the transport is reconnected.
 */

#include "xprt_rdma.h"

#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
# define RPCDBG_FACILITY	RPCDBG_TRANS
#endif

static struct workqueue_struct *frwr_recovery_wq;

#define FRWR_RECOVERY_WQ_FLAGS	(WQ_UNBOUND | WQ_MEM_RECLAIM)

int
frwr_alloc_recovery_wq(void)
{
	frwr_recovery_wq = alloc_workqueue("frwr_recovery",
					   FRWR_RECOVERY_WQ_FLAGS, 0);
	return !frwr_recovery_wq ? -ENOMEM : 0;
}

void
frwr_destroy_recovery_wq(void)
{
	struct workqueue_struct *wq;

	if (!frwr_recovery_wq)
		return;

	wq = frwr_recovery_wq;
	frwr_recovery_wq = NULL;
	destroy_workqueue(wq);
}

/* Deferred reset of a single FRMR. Generate a fresh rkey by
 * replacing the MR.
 *
 * There's no recovery if this fails. The FRMR is abandoned, but
 * remains in rb_all. It will be cleaned up when the transport is
 * destroyed.
 */
static void
__frwr_recovery_worker(struct work_struct *work)
{
	struct rpcrdma_mw *r = container_of(work, struct rpcrdma_mw,
					    r.frmr.fr_work);
	struct rpcrdma_xprt *r_xprt = r->r.frmr.fr_xprt;
	unsigned int depth = r_xprt->rx_ia.ri_max_frmr_depth;
	struct ib_pd *pd = r_xprt->rx_ia.ri_pd;

	if (ib_dereg_mr(r->r.frmr.fr_mr))
		goto out_fail;

	r->r.frmr.fr_mr = ib_alloc_fast_reg_mr(pd, depth);
	if (IS_ERR(r->r.frmr.fr_mr))
		goto out_fail;

	dprintk("RPC: %s: recovered FRMR %p\n", __func__, r);
	r->r.frmr.fr_state = FRMR_IS_INVALID;
	rpcrdma_put_mw(r_xprt, r);
	return;

out_fail:
	pr_warn("RPC: %s: FRMR %p unrecovered\n",
		__func__, r);
}

/* A broken MR was discovered in a context that can't sleep.
 * Defer recovery to the recovery worker.
 */
static void
__frwr_queue_recovery(struct rpcrdma_mw *r)
{
	INIT_WORK(&r->r.frmr.fr_work, __frwr_recovery_worker);
	queue_work(frwr_recovery_wq, &r->r.frmr.fr_work);
}

static int
__frwr_init(struct rpcrdma_mw *r, struct ib_pd *pd, struct ib_device *device,
	    unsigned int depth)
{
	struct rpcrdma_frmr *f = &r->r.frmr;
	int rc;

	f->fr_mr = ib_alloc_fast_reg_mr(pd, depth);
	if (IS_ERR(f->fr_mr))
		goto out_mr_err;
	f->fr_pgl = ib_alloc_fast_reg_page_list(device, depth);
	if (IS_ERR(f->fr_pgl))
		goto out_list_err;
	return 0;

out_mr_err:
	rc = PTR_ERR(f->fr_mr);
	dprintk("RPC: %s: ib_alloc_fast_reg_mr status %i\n",
		__func__, rc);
	return rc;

out_list_err:
	rc = PTR_ERR(f->fr_pgl);
	dprintk("RPC: %s: ib_alloc_fast_reg_page_list status %i\n",
		__func__, rc);
	ib_dereg_mr(f->fr_mr);
	return rc;
}

static void
__frwr_release(struct rpcrdma_mw *r)
{
	int rc;

	rc = ib_dereg_mr(r->r.frmr.fr_mr);
	if (rc)
		dprintk("RPC: %s: ib_dereg_mr status %i\n",
			__func__, rc);
	ib_free_fast_reg_page_list(r->r.frmr.fr_pgl);
}

static int
frwr_op_open(struct rpcrdma_ia *ia, struct rpcrdma_ep *ep,
	     struct rpcrdma_create_data_internal *cdata)
{
	struct ib_device_attr *devattr = &ia->ri_devattr;
	int depth, delta;

	ia->ri_max_frmr_depth =
			min_t(unsigned int, RPCRDMA_MAX_DATA_SEGS,
			      devattr->max_fast_reg_page_list_len);
	dprintk("RPC: %s: device's max FR page list len = %u\n",
		__func__, ia->ri_max_frmr_depth);

	/* Add room for FRMR register and invalidate WRs.
	 * 1. FRMR reg WR for head
	 * 2. FRMR invalidate WR for head
	 * 3. N FRMR reg WRs for pagelist
	 * 4. N FRMR invalidate WRs for pagelist
	 * 5. FRMR reg WR for tail
	 * 6. FRMR invalidate WR for tail
	 * 7. The RDMA_SEND WR
	 */
	depth = 7;

	/* Calculate N if the device max FRMR depth is smaller than
	 * RPCRDMA_MAX_DATA_SEGS.
	 */
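	/* Worked example (illustrative numbers, not taken from this file):
	 * if RPCRDMA_MAX_DATA_SEGS were 64 and the device reported a max
	 * FR page list length of 16, delta would start at 48 and the loop
	 * below would add two WRs per extra FRMR: depth goes 7 -> 9 -> 11
	 * -> 13 while delta goes 48 -> 32 -> 16 -> 0. Each RPC slot would
	 * then reserve 13 send queue entries, subject to the max_qp_wr
	 * clamp that follows.
	 */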
	if (ia->ri_max_frmr_depth < RPCRDMA_MAX_DATA_SEGS) {
		delta = RPCRDMA_MAX_DATA_SEGS - ia->ri_max_frmr_depth;
		do {
			depth += 2;	/* FRMR reg + invalidate */
			delta -= ia->ri_max_frmr_depth;
		} while (delta > 0);
	}

	ep->rep_attr.cap.max_send_wr *= depth;
	if (ep->rep_attr.cap.max_send_wr > devattr->max_qp_wr) {
		cdata->max_requests = devattr->max_qp_wr / depth;
		if (!cdata->max_requests)
			return -EINVAL;
		ep->rep_attr.cap.max_send_wr = cdata->max_requests *
					       depth;
	}

	return 0;
}

/* FRWR mode conveys a list of pages per chunk segment. The
 * maximum length of that list is the FRWR page list depth.
 */
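/* For example (illustrative numbers only): if rpcrdma_max_segments()
 * returned 8 and ri_max_frmr_depth were 16, the product below would be
 * 128, and the result would be capped at RPCRDMA_MAX_DATA_SEGS pages
 * per chunk.
 */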
static size_t
frwr_op_maxpages(struct rpcrdma_xprt *r_xprt)
{
	struct rpcrdma_ia *ia = &r_xprt->rx_ia;

	return min_t(unsigned int, RPCRDMA_MAX_DATA_SEGS,
		     rpcrdma_max_segments(r_xprt) * ia->ri_max_frmr_depth);
}

/* If FAST_REG or LOCAL_INV failed, indicate the frmr needs to be reset. */
static void
frwr_sendcompletion(struct ib_wc *wc)
{
	struct rpcrdma_mw *r;

	if (likely(wc->status == IB_WC_SUCCESS))
		return;

	/* WARNING: Only wr_id and status are reliable at this point */
	r = (struct rpcrdma_mw *)(unsigned long)wc->wr_id;
	pr_warn("RPC: %s: frmr %p flushed, status %s (%d)\n",
		__func__, r, ib_wc_status_msg(wc->status), wc->status);
	r->r.frmr.fr_state = FRMR_IS_STALE;
}

static int
frwr_op_init(struct rpcrdma_xprt *r_xprt)
{
	struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
	struct ib_device *device = r_xprt->rx_ia.ri_device;
	unsigned int depth = r_xprt->rx_ia.ri_max_frmr_depth;
	struct ib_pd *pd = r_xprt->rx_ia.ri_pd;
	int i;

	spin_lock_init(&buf->rb_mwlock);
	INIT_LIST_HEAD(&buf->rb_mws);
	INIT_LIST_HEAD(&buf->rb_all);

	i = max_t(int, RPCRDMA_MAX_DATA_SEGS / depth, 1);
	i += 2;				/* head + tail */
	i *= buf->rb_max_requests;	/* one set for each RPC slot */
	dprintk("RPC: %s: initializing %d FRMRs\n", __func__, i);

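	/* Worked example (illustrative numbers, not taken from this file):
	 * with RPCRDMA_MAX_DATA_SEGS at 64, a device FRMR depth of 16, and
	 * rb_max_requests of 32, each RPC slot needs 64 / 16 = 4 FRMRs for
	 * the pagelist plus 2 for head and tail, so the loop below
	 * allocates (4 + 2) * 32 = 192 MWs up front.
	 */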
	while (i--) {
		struct rpcrdma_mw *r;
		int rc;

		r = kzalloc(sizeof(*r), GFP_KERNEL);
		if (!r)
			return -ENOMEM;

		rc = __frwr_init(r, pd, device, depth);
		if (rc) {
			kfree(r);
			return rc;
		}

		list_add(&r->mw_list, &buf->rb_mws);
		list_add(&r->mw_all, &buf->rb_all);
		r->mw_sendcompletion = frwr_sendcompletion;
		r->r.frmr.fr_xprt = r_xprt;
	}

	return 0;
}

/* Post a FAST_REG Work Request to register a memory region
 * for remote access via RDMA READ or RDMA WRITE.
 */
static int
frwr_op_map(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr_seg *seg,
	    int nsegs, bool writing)
{
	struct rpcrdma_ia *ia = &r_xprt->rx_ia;
	struct ib_device *device = ia->ri_device;
	enum dma_data_direction direction = rpcrdma_data_dir(writing);
	struct rpcrdma_mr_seg *seg1 = seg;
	struct rpcrdma_mw *mw;
	struct rpcrdma_frmr *frmr;
	struct ib_mr *mr;
	struct ib_send_wr fastreg_wr, *bad_wr;
	u8 key;
	int len, pageoff;
	int i, rc;
	int seg_len;
	u64 pa;
	int page_no;

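	/* Note on the loop below: an MW taken from rb_mws can still be
	 * marked VALID or STALE if a transport disconnect flushed its
	 * pending FAST_REG or LOCAL_INV WR. Such MWs are handed to the
	 * recovery workqueue, and fresh ones are drawn until an MW that is
	 * safely INVALID is found (see "Transport recovery" above).
	 */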
	mw = seg1->rl_mw;
	seg1->rl_mw = NULL;
	do {
		if (mw)
			__frwr_queue_recovery(mw);
		mw = rpcrdma_get_mw(r_xprt);
		if (!mw)
			return -ENOMEM;
	} while (mw->r.frmr.fr_state != FRMR_IS_INVALID);
	frmr = &mw->r.frmr;
	frmr->fr_state = FRMR_IS_VALID;

	pageoff = offset_in_page(seg1->mr_offset);
	seg1->mr_offset -= pageoff;	/* start of page */
	seg1->mr_len += pageoff;
	len = -pageoff;
	if (nsegs > ia->ri_max_frmr_depth)
		nsegs = ia->ri_max_frmr_depth;

	for (page_no = i = 0; i < nsegs;) {
		rpcrdma_map_one(device, seg, direction);
		pa = seg->mr_dma;
		for (seg_len = seg->mr_len; seg_len > 0; seg_len -= PAGE_SIZE) {
			frmr->fr_pgl->page_list[page_no++] = pa;
			pa += PAGE_SIZE;
		}
		len += seg->mr_len;
		++seg;
		++i;

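		/* A registered chunk must be page-contiguous: only the
		 * first segment may start mid-page and only the last may
		 * end mid-page. The check below stops building the page
		 * list at the first such hole so the caller can map the
		 * remaining segments in a later call.
		 */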
		/* Check for holes */
		if ((i < nsegs && offset_in_page(seg->mr_offset)) ||
		    offset_in_page((seg-1)->mr_offset + (seg-1)->mr_len))
			break;
	}
	dprintk("RPC: %s: Using frmr %p to map %d segments (%d bytes)\n",
		__func__, mw, i, len);

	memset(&fastreg_wr, 0, sizeof(fastreg_wr));
	fastreg_wr.wr_id = (unsigned long)(void *)mw;
	fastreg_wr.opcode = IB_WR_FAST_REG_MR;
	fastreg_wr.wr.fast_reg.iova_start = seg1->mr_dma + pageoff;
	fastreg_wr.wr.fast_reg.page_list = frmr->fr_pgl;
	fastreg_wr.wr.fast_reg.page_shift = PAGE_SHIFT;
	fastreg_wr.wr.fast_reg.page_list_len = page_no;
	fastreg_wr.wr.fast_reg.length = len;
	fastreg_wr.wr.fast_reg.access_flags = writing ?
				IB_ACCESS_REMOTE_WRITE | IB_ACCESS_LOCAL_WRITE :
				IB_ACCESS_REMOTE_READ;

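	/* The low-order byte of the rkey is bumped below so that an rkey
	 * advertised for a previous use of this MR cannot simply be
	 * replayed against the newly registered memory.
	 */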
	mr = frmr->fr_mr;
	key = (u8)(mr->rkey & 0x000000FF);
	ib_update_fast_reg_key(mr, ++key);
	fastreg_wr.wr.fast_reg.rkey = mr->rkey;

	DECR_CQCOUNT(&r_xprt->rx_ep);
	rc = ib_post_send(ia->ri_id->qp, &fastreg_wr, &bad_wr);
	if (rc)
		goto out_senderr;

	seg1->rl_mw = mw;
	seg1->mr_rkey = mr->rkey;
	seg1->mr_base = seg1->mr_dma + pageoff;
	seg1->mr_nsegs = i;
	seg1->mr_len = len;
	return i;

out_senderr:
	dprintk("RPC: %s: ib_post_send status %i\n", __func__, rc);
	while (i--)
		rpcrdma_unmap_one(device, --seg);
	__frwr_queue_recovery(mw);
	return rc;
}

/* Post a LOCAL_INV Work Request to prevent further remote access
 * via RDMA READ or RDMA WRITE.
 */
static int
frwr_op_unmap(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr_seg *seg)
{
	struct rpcrdma_mr_seg *seg1 = seg;
	struct rpcrdma_ia *ia = &r_xprt->rx_ia;
	struct rpcrdma_mw *mw = seg1->rl_mw;
	struct ib_send_wr invalidate_wr, *bad_wr;
	int rc, nsegs = seg->mr_nsegs;

	dprintk("RPC: %s: FRMR %p\n", __func__, mw);

	seg1->rl_mw = NULL;
	mw->r.frmr.fr_state = FRMR_IS_INVALID;

	memset(&invalidate_wr, 0, sizeof(invalidate_wr));
	invalidate_wr.wr_id = (unsigned long)(void *)mw;
	invalidate_wr.opcode = IB_WR_LOCAL_INV;
	invalidate_wr.ex.invalidate_rkey = mw->r.frmr.fr_mr->rkey;
	DECR_CQCOUNT(&r_xprt->rx_ep);

	while (seg1->mr_nsegs--)
		rpcrdma_unmap_one(ia->ri_device, seg++);

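	/* ->op_unmap can run while the transport connect worker is
	 * replacing the QP (see "Transport recovery" above), so the post
	 * below is done under ri_qplock to keep ia->ri_id->qp stable.
	 */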
	read_lock(&ia->ri_qplock);
	rc = ib_post_send(ia->ri_id->qp, &invalidate_wr, &bad_wr);
	read_unlock(&ia->ri_qplock);
	if (rc)
		goto out_err;

	rpcrdma_put_mw(r_xprt, mw);
	return nsegs;

out_err:
	dprintk("RPC: %s: ib_post_send status %i\n", __func__, rc);
	__frwr_queue_recovery(mw);
	return nsegs;
}

static void
frwr_op_destroy(struct rpcrdma_buffer *buf)
{
	struct rpcrdma_mw *r;

	/* Ensure stale MWs for "buf" are no longer in flight */
	flush_workqueue(frwr_recovery_wq);

	while (!list_empty(&buf->rb_all)) {
		r = list_entry(buf->rb_all.next, struct rpcrdma_mw, mw_all);
		list_del(&r->mw_all);
		__frwr_release(r);
		kfree(r);
	}
}

const struct rpcrdma_memreg_ops rpcrdma_frwr_memreg_ops = {
	.ro_map		= frwr_op_map,
	.ro_unmap	= frwr_op_unmap,
	.ro_open	= frwr_op_open,
	.ro_maxpages	= frwr_op_maxpages,
	.ro_init	= frwr_op_init,
	.ro_destroy	= frwr_op_destroy,
	.ro_displayname	= "frwr",
};