--- a/net/sunrpc/xprtrdma/rpc_rdma.c
+++ b/net/sunrpc/xprtrdma/rpc_rdma.c
ssize_t
rpcrdma_marshal_chunks(struct rpc_rqst *rqst, ssize_t result)
{
	struct rpcrdma_req *req = rpcr_to_rdmar(rqst);
-	struct rpcrdma_msg *headerp = (struct rpcrdma_msg *)req->rl_base;
+	struct rpcrdma_msg *headerp = rdmab_to_msg(req->rl_rdmabuf);

	if (req->rl_rtype != rpcrdma_noch)
		result = rpcrdma_create_chunks(rqst, &rqst->rq_snd_buf,
					       headerp, req->rl_rtype);
@@ ... @@ rpcrdma_marshal_req(struct rpc_rqst *rqst)
	base = rqst->rq_svec[0].iov_base;
	rpclen = rqst->rq_svec[0].iov_len;

-	/* build RDMA header in private area at front */
-	headerp = (struct rpcrdma_msg *) req->rl_base;
+	headerp = rdmab_to_msg(req->rl_rdmabuf);
	/* don't byte-swap XID, it's already done in request */
	headerp->rm_xid = rqst->rq_xid;
	headerp->rm_vers = rpcrdma_version;
dprintk("RPC: %s: %s: hdrlen %zd rpclen %zd padlen %zd"
" headerp 0x%p base 0x%p lkey 0x%x\n",
__func__, transfertypes[req->rl_wtype], hdrlen, rpclen, padlen,
- headerp, base, req->rl_iov.lkey);
+ headerp, base, rdmab_lkey(req->rl_rdmabuf));
/*
* initialize send_iov's - normally only two: rdma chunk header and
* header and any write data. In all non-rdma cases, any following
* data has been copied into the RPC header buffer.
*/
-	req->rl_send_iov[0].addr = req->rl_iov.addr;
+	req->rl_send_iov[0].addr = rdmab_addr(req->rl_rdmabuf);
	req->rl_send_iov[0].length = hdrlen;
-	req->rl_send_iov[0].lkey = req->rl_iov.lkey;
+	req->rl_send_iov[0].lkey = rdmab_lkey(req->rl_rdmabuf);

	req->rl_send_iov[1].addr = rdmab_addr(req->rl_sendbuf);
	req->rl_send_iov[1].length = rpclen;
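The marshaling changes above reach the header buffer only through the regbuf accessors. For reference, a minimal sketch of those helpers as plain field lookups on struct rpcrdma_regbuf, assuming the rg_iov/rg_base layout sketched at the end of this patch:

static inline u64
rdmab_addr(struct rpcrdma_regbuf *rb)
{
	return rb->rg_iov.addr;
}

static inline u32
rdmab_lkey(struct rpcrdma_regbuf *rb)
{
	return rb->rg_iov.lkey;
}

static inline struct rpcrdma_msg *
rdmab_to_msg(struct rpcrdma_regbuf *rb)
{
	/* the RPC/RDMA call header is built at the start of the data area */
	return (struct rpcrdma_msg *)rb->rg_base;
}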
--- a/net/sunrpc/xprtrdma/transport.c
+++ b/net/sunrpc/xprtrdma/transport.c
@@ ... @@ xprt_rdma_allocate(struct rpc_task *task, size_t size)
	if (req == NULL)
		return NULL;

+	if (req->rl_rdmabuf == NULL)
+		goto out_rdmabuf;
	if (req->rl_sendbuf == NULL)
		goto out_sendbuf;
	if (size > req->rl_sendbuf->rg_size)
		goto out_sendbuf;

	req->rl_connect_cookie = 0;	/* our reserved value */
	return req->rl_sendbuf->rg_base;
+out_rdmabuf:
+	min_size = RPCRDMA_INLINE_WRITE_THRESHOLD(task->tk_rqstp);
+	rb = rpcrdma_alloc_regbuf(&r_xprt->rx_ia, min_size, flags);
+	if (IS_ERR(rb))
+		goto out_fail;
+	req->rl_rdmabuf = rb;
+
out_sendbuf:
	/* XDR encoding and RPC/RDMA marshaling of this request has not
	 * yet occurred. Thus a lower bound is needed to prevent buffer
	 * overrun during marshaling.
	 */
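The out_rdmabuf path defers allocating the RPC/RDMA header buffer until a request is first used, instead of pre-registering one per request at transport setup. rpcrdma_alloc_regbuf() belongs to the regbuf helpers this series builds on; a simplified sketch of its shape, with error handling condensed and field names taken from the regbuf layout assumed after the struct changes below:

struct rpcrdma_regbuf *
rpcrdma_alloc_regbuf(struct rpcrdma_ia *ia, size_t size, gfp_t flags)
{
	struct rpcrdma_regbuf *rb;
	int rc;

	/* metadata and data area come from a single allocation */
	rb = kmalloc(sizeof(*rb) + size, flags);
	if (rb == NULL)
		return ERR_PTR(-ENOMEM);

	rb->rg_size = size;
	rc = rpcrdma_register_internal(ia, rb->rg_base, size,
				       &rb->rg_mr, &rb->rg_iov);
	if (rc) {
		kfree(rb);
		return ERR_PTR(rc);
	}
	return rb;
}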
--- a/net/sunrpc/xprtrdma/verbs.c
+++ b/net/sunrpc/xprtrdma/verbs.c
static struct rpcrdma_req *
rpcrdma_create_req(struct rpcrdma_xprt *r_xprt)
{
-	struct rpcrdma_create_data_internal *cdata = &r_xprt->rx_data;
-	size_t wlen = cdata->inline_wsize;
-	struct rpcrdma_ia *ia = &r_xprt->rx_ia;
	struct rpcrdma_req *req;
-	int rc;

-	rc = -ENOMEM;
-	req = kmalloc(sizeof(*req) + wlen, GFP_KERNEL);
+	req = kzalloc(sizeof(*req), GFP_KERNEL);
	if (req == NULL)
-		goto out;
-
-	memset(req, 0, sizeof(*req));
-
-	rc = rpcrdma_register_internal(ia, req->rl_base, wlen,
-				       &req->rl_handle, &req->rl_iov);
-	if (rc)
-		goto out_free;
+		return ERR_PTR(-ENOMEM);

	req->rl_buffer = &r_xprt->rx_buf;
	return req;
-
-out_free:
-	kfree(req);
-out:
-	return ERR_PTR(rc);
}

static struct rpcrdma_rep *
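With the header buffer no longer embedded in struct rpcrdma_req, rpcrdma_create_req() reduces to a kzalloc(), and failure is reported with ERR_PTR() rather than NULL. A hedged sketch of what a call site must now do; rpcrdma_fill_send_bufs() is a hypothetical wrapper standing in for the request-ring loop in rpcrdma_buffer_create(), and rb_send_bufs is that ring:

static int
rpcrdma_fill_send_bufs(struct rpcrdma_xprt *r_xprt,
		       struct rpcrdma_buffer *buf, int count)
{
	int i;

	for (i = 0; i < count; i++) {
		struct rpcrdma_req *req;

		req = rpcrdma_create_req(r_xprt);
		/* an ERR_PTR, not NULL, now signals failure */
		if (IS_ERR(req))
			return PTR_ERR(req);
		buf->rb_send_bufs[i] = req;
	}
	return 0;
}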
@@ ... @@ rpcrdma_destroy_req
	if (req == NULL)
		return;

	rpcrdma_free_regbuf(ia, req->rl_sendbuf);
-	rpcrdma_deregister_internal(ia, req->rl_handle, &req->rl_iov);
+	rpcrdma_free_regbuf(ia, req->rl_rdmabuf);
	kfree(req);
}
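A request that was never used for an RPC still has rl_rdmabuf == NULL at teardown, so this path depends on rpcrdma_free_regbuf() tolerating a NULL regbuf. A minimal sketch of that expected behavior, with deregistration details elided:

void
rpcrdma_free_regbuf(struct rpcrdma_ia *ia, struct rpcrdma_regbuf *rb)
{
	/* NULL-safe: the regbuf may never have been allocated */
	if (rb) {
		rpcrdma_deregister_internal(ia, rb->rg_mr, &rb->rg_iov);
		kfree(rb);
	}
}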
--- a/net/sunrpc/xprtrdma/xprt_rdma.h
+++ b/net/sunrpc/xprtrdma/xprt_rdma.h
@@ ... @@ struct rpcrdma_req {
	enum rpcrdma_chunktype	rl_rtype, rl_wtype;
	struct rpcrdma_buffer	*rl_buffer;	/* home base for this structure */
	struct rpcrdma_rep	*rl_reply;	/* holder for reply buffer */
-	struct rpcrdma_mr_seg	rl_segments[RPCRDMA_MAX_SEGS]; /* chunk segments */
	struct ib_sge		rl_send_iov[4];	/* for active requests */
+	struct rpcrdma_regbuf	*rl_rdmabuf;
	struct rpcrdma_regbuf	*rl_sendbuf;
-	struct ib_sge		rl_iov;		/* for posting */
-	struct ib_mr		*rl_handle;	/* handle for mem in rl_iov */
-	char			rl_base[MAX_RPCRDMAHDR]; /* start of actual buffer */
+	struct rpcrdma_mr_seg	rl_segments[RPCRDMA_MAX_SEGS];
};

static inline struct rpcrdma_req *
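rl_rdmabuf and rl_sendbuf each point to a separately allocated, pre-registered buffer, while the large rl_segments array moves to the end of the struct, presumably to keep the fields touched on the send path near the front. For context, struct rpcrdma_regbuf from the helpers this series builds on looks roughly like this; the exact field set and the alignment attribute on the data area are assumptions:

struct rpcrdma_regbuf {
	size_t		rg_size;	/* size of the data area */
	struct ib_mr	*rg_mr;		/* registration handle */
	struct ib_sge	rg_iov;		/* addr/length/lkey for posting */
	__be32		rg_base[0] __attribute__((aligned(256)));
};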