/*
 * Copyright (c) 2015 Oracle. All rights reserved.
 * Copyright (c) 2003-2007 Network Appliance, Inc. All rights reserved.
 */
/* Lightweight memory registration using Fast Registration Work
 * Requests (FRWR). Also sometimes referred to as FRMR mode.
 *
 * FRWR features ordered asynchronous registration and deregistration
 * of arbitrarily sized memory regions. This is the fastest and safest
 * but most complex memory registration mode.
 */
#include "xprt_rdma.h"

#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
# define RPCDBG_FACILITY        RPCDBG_TRANS
#endif
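
/* How the pieces below fit together (summarized from the ops table at
 * the end of this file): frwr_op_open() sizes the send queue when the
 * transport is set up, frwr_op_init() pre-allocates a pool of FRMRs,
 * frwr_op_map() posts a FAST_REG_MR WR to register each chunk's pages,
 * frwr_op_unmap() posts the matching LOCAL_INV WR, frwr_op_reset()
 * recovers FRMRs left unusable by a disconnect, and frwr_op_destroy()
 * releases the pool at transport teardown.
 */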
static int
__frwr_init(struct rpcrdma_mw *r, struct ib_pd *pd, struct ib_device *device,
            unsigned int depth)
{
        struct rpcrdma_frmr *f = &r->r.frmr;
        int rc;

        f->fr_mr = ib_alloc_fast_reg_mr(pd, depth);
        if (IS_ERR(f->fr_mr))
                goto out_mr_err;
        f->fr_pgl = ib_alloc_fast_reg_page_list(device, depth);
        if (IS_ERR(f->fr_pgl))
                goto out_list_err;
        return 0;

out_mr_err:
        rc = PTR_ERR(f->fr_mr);
        dprintk("RPC: %s: ib_alloc_fast_reg_mr status %i\n",
                __func__, rc);
        return rc;

out_list_err:
        rc = PTR_ERR(f->fr_pgl);
        dprintk("RPC: %s: ib_alloc_fast_reg_page_list status %i\n",
                __func__, rc);
        ib_dereg_mr(f->fr_mr);
        return rc;
}
static void
__frwr_release(struct rpcrdma_mw *r)
{
        int rc;

        rc = ib_dereg_mr(r->r.frmr.fr_mr);
        if (rc)
                dprintk("RPC: %s: ib_dereg_mr status %i\n",
                        __func__, rc);
        ib_free_fast_reg_page_list(r->r.frmr.fr_pgl);
}
static int
frwr_op_open(struct rpcrdma_ia *ia, struct rpcrdma_ep *ep,
             struct rpcrdma_create_data_internal *cdata)
{
        struct ib_device_attr *devattr = &ia->ri_devattr;
        int depth, delta;

        ia->ri_max_frmr_depth =
                        min_t(unsigned int, RPCRDMA_MAX_DATA_SEGS,
                              devattr->max_fast_reg_page_list_len);
        dprintk("RPC: %s: device's max FR page list len = %u\n",
                __func__, ia->ri_max_frmr_depth);

        /* Add room for frmr register and invalidate WRs.
         * 1. FRMR reg WR for head
         * 2. FRMR invalidate WR for head
         * 3. N FRMR reg WRs for pagelist
         * 4. N FRMR invalidate WRs for pagelist
         * 5. FRMR reg WR for tail
         * 6. FRMR invalidate WR for tail
         * 7. The RDMA_SEND WR
         */
        depth = 7;

        /* Calculate N if the device max FRMR depth is smaller than
         * RPCRDMA_MAX_DATA_SEGS.
         */
        if (ia->ri_max_frmr_depth < RPCRDMA_MAX_DATA_SEGS) {
                delta = RPCRDMA_MAX_DATA_SEGS - ia->ri_max_frmr_depth;
                do {
                        depth += 2;     /* FRMR reg + invalidate */
                        delta -= ia->ri_max_frmr_depth;
                } while (delta > 0);
        }

        ep->rep_attr.cap.max_send_wr *= depth;
        if (ep->rep_attr.cap.max_send_wr > devattr->max_qp_wr) {
                cdata->max_requests = devattr->max_qp_wr / depth;
                if (!cdata->max_requests)
                        return -EINVAL;
                ep->rep_attr.cap.max_send_wr = cdata->max_requests *
                                               depth;
        }

        return 0;
}
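
/* Worked example of the send queue sizing above (illustrative numbers
 * only): if RPCRDMA_MAX_DATA_SEGS were 64 and the device reported a
 * max_fast_reg_page_list_len of 16, ri_max_frmr_depth would be 16 and
 * delta would start at 48. The do/while loop would then run three
 * times, adding one reg/invalidate WR pair per pass, so depth would
 * grow from 7 to 13 and max_send_wr would be scaled by 13 (and then
 * clamped against the device's max_qp_wr if necessary).
 */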
/* FRWR mode conveys a list of pages per chunk segment. The
 * maximum length of that list is the FRWR page list depth.
 */
static size_t
frwr_op_maxpages(struct rpcrdma_xprt *r_xprt)
{
        struct rpcrdma_ia *ia = &r_xprt->rx_ia;

        return min_t(unsigned int, RPCRDMA_MAX_DATA_SEGS,
                     rpcrdma_max_segments(r_xprt) * ia->ri_max_frmr_depth);
}
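
/* For instance (illustrative values only): if rpcrdma_max_segments()
 * returned 4 and ri_max_frmr_depth were 16, the transport would
 * advertise min(RPCRDMA_MAX_DATA_SEGS, 64) pages per RPC.
 */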
/* If FAST_REG or LOCAL_INV failed, indicate the frmr needs to be reset. */
static void
frwr_sendcompletion(struct ib_wc *wc)
{
        struct rpcrdma_mw *r;

        if (likely(wc->status == IB_WC_SUCCESS))
                return;

        /* WARNING: Only wr_id and status are reliable at this point */
        r = (struct rpcrdma_mw *)(unsigned long)wc->wr_id;
        dprintk("RPC: %s: frmr %p (stale), status %d\n",
                __func__, r, wc->status);
        r->r.frmr.fr_state = FRMR_IS_STALE;
}
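
/* Note: when a QP enters the error state, outstanding WRs complete
 * with a flush status. For flushed completions, little beyond wr_id
 * and status is guaranteed to be valid, which is why the handler
 * above reads nothing else before marking the FRMR stale.
 */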
static int
frwr_op_init(struct rpcrdma_xprt *r_xprt)
{
        struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
        struct ib_device *device = r_xprt->rx_ia.ri_id->device;
        unsigned int depth = r_xprt->rx_ia.ri_max_frmr_depth;
        struct ib_pd *pd = r_xprt->rx_ia.ri_pd;
        int i;

        INIT_LIST_HEAD(&buf->rb_mws);
        INIT_LIST_HEAD(&buf->rb_all);

        i = (buf->rb_max_requests + 1) * RPCRDMA_MAX_SEGS;
        dprintk("RPC: %s: initializing %d FRMRs\n", __func__, i);

        while (i--) {
                struct rpcrdma_mw *r;
                int rc;

                r = kzalloc(sizeof(*r), GFP_KERNEL);
                if (!r)
                        return -ENOMEM;

                rc = __frwr_init(r, pd, device, depth);
                if (rc) {
                        kfree(r);
                        return rc;
                }

                list_add(&r->mw_list, &buf->rb_mws);
                list_add(&r->mw_all, &buf->rb_all);
                r->mw_sendcompletion = frwr_sendcompletion;
        }

        return 0;
}
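
/* Pool sizing sketch (illustrative): with rb_max_requests == 32, the
 * loop above allocates (32 + 1) * RPCRDMA_MAX_SEGS rpcrdma_mw's, each
 * backed by a fast_reg MR and a page list of ri_max_frmr_depth pages,
 * so every outstanding request can map a full set of chunk segments.
 */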
/* Post a FAST_REG Work Request to register a memory region
 * for remote access via RDMA READ or RDMA WRITE.
 */
static int
frwr_op_map(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr_seg *seg,
            int nsegs, bool writing)
{
        struct rpcrdma_ia *ia = &r_xprt->rx_ia;
        struct rpcrdma_mr_seg *seg1 = seg;
        struct rpcrdma_mw *mw = seg1->rl_mw;
        struct rpcrdma_frmr *frmr = &mw->r.frmr;
        struct ib_mr *mr = frmr->fr_mr;
        struct ib_send_wr fastreg_wr, *bad_wr;
        int i, rc, len, pageoff, page_no, seg_len;
        u64 pa;
        u8 key;

        pageoff = offset_in_page(seg1->mr_offset);
        seg1->mr_offset -= pageoff;     /* start of page */
        seg1->mr_len += pageoff;
        len = -pageoff;
        if (nsegs > ia->ri_max_frmr_depth)
                nsegs = ia->ri_max_frmr_depth;
        for (page_no = i = 0; i < nsegs;) {
                rpcrdma_map_one(ia, seg, writing);
                pa = seg->mr_dma;
                for (seg_len = seg->mr_len; seg_len > 0; seg_len -= PAGE_SIZE) {
                        frmr->fr_pgl->page_list[page_no++] = pa;
                        pa += PAGE_SIZE;
                }
                len += seg->mr_len;
                ++seg;
                ++i;
                /* Check for holes */
                if ((i < nsegs && offset_in_page(seg->mr_offset)) ||
                    offset_in_page((seg-1)->mr_offset + (seg-1)->mr_len))
                        break;
        }
        dprintk("RPC: %s: Using frmr %p to map %d segments (%d bytes)\n",
                __func__, mw, i, len);

        frmr->fr_state = FRMR_IS_VALID;

        memset(&fastreg_wr, 0, sizeof(fastreg_wr));
        fastreg_wr.wr_id = (unsigned long)(void *)mw;
        fastreg_wr.opcode = IB_WR_FAST_REG_MR;
        fastreg_wr.wr.fast_reg.iova_start = seg1->mr_dma + pageoff;
        fastreg_wr.wr.fast_reg.page_list = frmr->fr_pgl;
        fastreg_wr.wr.fast_reg.page_shift = PAGE_SHIFT;
        fastreg_wr.wr.fast_reg.page_list_len = page_no;
        fastreg_wr.wr.fast_reg.length = len;
        fastreg_wr.wr.fast_reg.access_flags = writing ?
                                IB_ACCESS_REMOTE_WRITE | IB_ACCESS_LOCAL_WRITE :
                                IB_ACCESS_REMOTE_READ;
        key = (u8)(mr->rkey & 0x000000FF);
        ib_update_fast_reg_key(mr, ++key);
        fastreg_wr.wr.fast_reg.rkey = mr->rkey;

        DECR_CQCOUNT(&r_xprt->rx_ep);
        rc = ib_post_send(ia->ri_id->qp, &fastreg_wr, &bad_wr);
        if (rc)
                goto out_senderr;

        seg1->mr_rkey = mr->rkey;
        seg1->mr_base = seg1->mr_dma + pageoff;
        seg1->mr_nsegs = i;
        seg1->mr_len = len;
        return i;

out_senderr:
        dprintk("RPC: %s: ib_post_send status %i\n", __func__, rc);
        ib_update_fast_reg_key(mr, --key);
        frmr->fr_state = FRMR_IS_INVALID;
        while (i--)
                rpcrdma_unmap_one(ia, --seg);
        return rc;
}
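
/* A note on the rkey handling in frwr_op_map(): the low-order byte of
 * an rkey is the consumer-owned "key" portion. Bumping it with
 * ib_update_fast_reg_key() before each registration means a peer that
 * reuses a previously granted rkey after invalidation gets a
 * protection error instead of access to re-registered memory.
 */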
/* Post a LOCAL_INV Work Request to prevent further remote access
 * via RDMA READ or RDMA WRITE.
 */
static int
frwr_op_unmap(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr_seg *seg)
{
        struct rpcrdma_mr_seg *seg1 = seg;
        struct rpcrdma_ia *ia = &r_xprt->rx_ia;
        struct ib_send_wr invalidate_wr, *bad_wr;
        int rc, nsegs = seg->mr_nsegs;

        seg1->rl_mw->r.frmr.fr_state = FRMR_IS_INVALID;

        memset(&invalidate_wr, 0, sizeof(invalidate_wr));
        invalidate_wr.wr_id = (unsigned long)(void *)seg1->rl_mw;
        invalidate_wr.opcode = IB_WR_LOCAL_INV;
        invalidate_wr.ex.invalidate_rkey = seg1->rl_mw->r.frmr.fr_mr->rkey;
        DECR_CQCOUNT(&r_xprt->rx_ep);

        read_lock(&ia->ri_qplock);
        while (seg1->mr_nsegs--)
                rpcrdma_unmap_one(ia, seg++);
        rc = ib_post_send(ia->ri_id->qp, &invalidate_wr, &bad_wr);
        read_unlock(&ia->ri_qplock);
        if (rc)
                goto out_err;
        return nsegs;

out_err:
        /* Force rpcrdma_buffer_get() to retry */
        seg1->rl_mw->r.frmr.fr_state = FRMR_IS_STALE;
        dprintk("RPC: %s: ib_post_send status %i\n", __func__, rc);
        return nsegs;
}
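
/* The invalidate path above DMA-unmaps the segments and posts the
 * LOCAL_INV while holding ri_qplock, so the QP cannot be replaced
 * between the unmap and the post. Even if ib_post_send() fails, the
 * segments are already unmapped; the FRMR is only marked stale so
 * that a later rpcrdma_buffer_get() will recover it.
 */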
/* After a disconnect, a flushed FAST_REG_MR can leave an FRMR in
 * an unusable state. Find FRMRs in this state and dereg / reg
 * each. FRMRs that are VALID and attached to an rpcrdma_req are
 * also torn down.
 *
 * This gives all in-use FRMRs a fresh rkey and leaves them INVALID.
 *
 * This is invoked only in the transport connect worker in order
 * to serialize with rpcrdma_register_frmr_external().
 */
static void
frwr_op_reset(struct rpcrdma_xprt *r_xprt)
{
        struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
        struct ib_device *device = r_xprt->rx_ia.ri_id->device;
        unsigned int depth = r_xprt->rx_ia.ri_max_frmr_depth;
        struct ib_pd *pd = r_xprt->rx_ia.ri_pd;
        struct rpcrdma_mw *r;
        int rc;

        list_for_each_entry(r, &buf->rb_all, mw_all) {
                if (r->r.frmr.fr_state == FRMR_IS_INVALID)
                        continue;

                __frwr_release(r);
                rc = __frwr_init(r, pd, device, depth);
                if (rc) {
                        dprintk("RPC: %s: mw %p left %s\n",
                                __func__, r,
                                (r->r.frmr.fr_state == FRMR_IS_STALE ?
                                        "stale" : "valid"));
                        continue;
                }

                r->r.frmr.fr_state = FRMR_IS_INVALID;
        }
}
static void
frwr_op_destroy(struct rpcrdma_buffer *buf)
{
        struct rpcrdma_mw *r;

        while (!list_empty(&buf->rb_all)) {
                r = list_entry(buf->rb_all.next, struct rpcrdma_mw, mw_all);
                list_del(&r->mw_all);
                __frwr_release(r);
                kfree(r);
        }
}
const struct rpcrdma_memreg_ops rpcrdma_frwr_memreg_ops = {
        .ro_map = frwr_op_map,
        .ro_unmap = frwr_op_unmap,
        .ro_open = frwr_op_open,
        .ro_maxpages = frwr_op_maxpages,
        .ro_init = frwr_op_init,
        .ro_reset = frwr_op_reset,
        .ro_destroy = frwr_op_destroy,
        .ro_displayname = "frwr",
};
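
/* Sketch of how a caller is expected to reach these ops through the
 * memreg-ops indirection (the ri_ops field name lives outside this
 * file and is an assumption here):
 *
 *      struct rpcrdma_ia *ia = &r_xprt->rx_ia;
 *
 *      nsegs = ia->ri_ops->ro_map(r_xprt, seg, nsegs, writing);
 *      ...
 *      ia->ri_ops->ro_unmap(r_xprt, seg);
 */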