The svc_sock_release function releases pages allocated to a thread. For
UDP this frees the receive skb. For RDMA it will post a receive WR
and bump the client credit count.
Signed-off-by: Tom Tucker <tom@opengridcomputing.com>
Acked-by: Neil Brown <neilb@suse.de>
Reviewed-by: Chuck Lever <chuck.lever@oracle.com>
Reviewed-by: Greg Banks <gnb@sgi.com>
Signed-off-by: J. Bruce Fields <bfields@citi.umich.edu>
struct auth_ops * rq_authop; /* authentication flavour */
u32 rq_flavor; /* pseudoflavor */
struct svc_cred rq_cred; /* auth info */
- struct sk_buff * rq_skbuff; /* fast recv inet buffer */
+ void * rq_xprt_ctxt; /* transport specific context ptr */
struct svc_deferred_req*rq_deferred; /* deferred request we are replaying */
struct xdr_buf rq_arg;
struct svc_xprt_ops {
int (*xpo_recvfrom)(struct svc_rqst *);
int (*xpo_sendto)(struct svc_rqst *);
+ void (*xpo_release_rqst)(struct svc_rqst *);
};
struct svc_xprt_class {
/*
 * Release an skbuff after use
 */
-static inline void
-svc_release_skb(struct svc_rqst *rqstp)
+static void svc_release_skb(struct svc_rqst *rqstp)
- struct sk_buff *skb = rqstp->rq_skbuff;
+ struct sk_buff *skb = rqstp->rq_xprt_ctxt;
struct svc_deferred_req *dr = rqstp->rq_deferred;
if (skb) {
- rqstp->rq_skbuff = NULL;
+ rqstp->rq_xprt_ctxt = NULL;
dprintk("svc: service %p, releasing skb %p\n", rqstp, skb);
skb_free_datagram(rqstp->rq_sock->sk_sk, skb);
{
struct svc_sock *svsk = rqstp->rq_sock;
- svc_release_skb(rqstp);
+ rqstp->rq_xprt->xpt_ops->xpo_release_rqst(rqstp);
svc_free_res_pages(rqstp);
rqstp->rq_res.page_len = 0;
skb_free_datagram(svsk->sk_sk, skb);
return 0;
}
- rqstp->rq_skbuff = skb;
+ rqstp->rq_xprt_ctxt = skb;
}
rqstp->rq_arg.page_base = 0;
static struct svc_xprt_ops svc_udp_ops = {
.xpo_recvfrom = svc_udp_recvfrom,
.xpo_sendto = svc_udp_sendto,
+ .xpo_release_rqst = svc_release_skb,
};
static struct svc_xprt_class svc_udp_class = {
rqstp->rq_arg.page_len = len - rqstp->rq_arg.head[0].iov_len;
}
- rqstp->rq_skbuff = NULL;
+ rqstp->rq_xprt_ctxt = NULL;
rqstp->rq_prot = IPPROTO_TCP;
/* Reset TCP read info */
static struct svc_xprt_ops svc_tcp_ops = {
.xpo_recvfrom = svc_tcp_recvfrom,
.xpo_sendto = svc_tcp_sendto,
+ .xpo_release_rqst = svc_release_skb,
};
static struct svc_xprt_class svc_tcp_class = {
}
/* release the receive skb before sending the reply */
- svc_release_skb(rqstp);
+ rqstp->rq_xprt->xpt_ops->xpo_release_rqst(rqstp);
/* calculate over-all length */
xb = & rqstp->rq_res;