svcrdma: Remove unused Read completion handlers
author		Chuck Lever <chuck.lever@oracle.com>
		Fri, 23 Jun 2017 21:18:49 +0000 (17:18 -0400)
committer	J. Bruce Fields <bfields@redhat.com>
		Wed, 12 Jul 2017 19:54:57 +0000 (15:54 -0400)
Clean up:

The generic RDMA R/W API conversion of svc_rdma_recvfrom replaced
the Register, Read, and Invalidate completion handlers. Remove the
old ones, which are no longer used.

These handlers shared some helper code with svc_rdma_wc_send. Fold
the wc_common helper back into the one remaining completion handler.

Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
Signed-off-by: J. Bruce Fields <bfields@redhat.com>
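
[Editor's note] The hunks below depend on the ib_cqe completion pattern:
each Send WR carries a pointer to an ib_cqe embedded in the posting
svc_rdma_op_ctxt, and the cqe's .done callback recovers that context with
container_of(). A minimal sketch of the wiring (illustrative only, not part
of this patch; post_send_sketch is a made-up name, and error handling and
SGE setup are elided):

	static int post_send_sketch(struct svcxprt_rdma *xprt,
				    struct svc_rdma_op_ctxt *ctxt)
	{
		struct ib_send_wr *bad_wr;

		/* The WR carries the embedded cqe; the provider hands it
		 * back in wc->wr_cqe when the Send completes. */
		ctxt->cqe.done = svc_rdma_wc_send;
		ctxt->send_wr.wr_cqe = &ctxt->cqe;

		return ib_post_send(xprt->sc_qp, &ctxt->send_wr, &bad_wr);
	}

	/* Inside the .done handler, the per-WR context is recovered with:
	 *
	 *	ctxt = container_of(wc->wr_cqe, struct svc_rdma_op_ctxt, cqe);
	 */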
include/linux/sunrpc/svc_rdma.h
net/sunrpc/xprtrdma/svc_rdma_transport.c

diff --git a/include/linux/sunrpc/svc_rdma.h b/include/linux/sunrpc/svc_rdma.h
index b1ba19ba10719971f18e43b5644334e363dc6013..06d58a3f74bc637e26420c2ee7a094add5dc658f 100644
--- a/include/linux/sunrpc/svc_rdma.h
+++ b/include/linux/sunrpc/svc_rdma.h
@@ -77,17 +77,15 @@ extern atomic_t rdma_stat_sq_prod;
  */
 struct svc_rdma_op_ctxt {
        struct list_head list;
-       struct svc_rdma_op_ctxt *read_hdr;
        struct svc_rdma_fastreg_mr *frmr;
-       int hdr_count;
        struct xdr_buf arg;
        struct ib_cqe cqe;
        u32 byte_len;
        struct svcxprt_rdma *xprt;
-       unsigned long flags;
        enum dma_data_direction direction;
        int count;
        unsigned int mapped_sges;
+       int hdr_count;
        struct ib_send_wr send_wr;
        struct ib_sge sge[1 + RPCRDMA_MAX_INLINE_THRESH / PAGE_SIZE];
        struct page *pages[RPCSVC_MAXPAGES];
diff --git a/net/sunrpc/xprtrdma/svc_rdma_transport.c b/net/sunrpc/xprtrdma/svc_rdma_transport.c
index 72d2dcdca2e1251fdf9031d5f0588a3291daa755..c915cba0f8e60003bbea63d84771f637eca20a1d 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_transport.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_transport.c
@@ -346,36 +346,6 @@ out:
        svc_xprt_put(&xprt->sc_xprt);
 }
 
-static void svc_rdma_send_wc_common(struct svcxprt_rdma *xprt,
-                                   struct ib_wc *wc,
-                                   const char *opname)
-{
-       if (wc->status != IB_WC_SUCCESS)
-               goto err;
-
-out:
-       atomic_inc(&xprt->sc_sq_avail);
-       wake_up(&xprt->sc_send_wait);
-       return;
-
-err:
-       set_bit(XPT_CLOSE, &xprt->sc_xprt.xpt_flags);
-       if (wc->status != IB_WC_WR_FLUSH_ERR)
-               pr_err("svcrdma: %s: %s (%u/0x%x)\n",
-                      opname, ib_wc_status_msg(wc->status),
-                      wc->status, wc->vendor_err);
-       goto out;
-}
-
-static void svc_rdma_send_wc_common_put(struct ib_cq *cq, struct ib_wc *wc,
-                                       const char *opname)
-{
-       struct svcxprt_rdma *xprt = cq->cq_context;
-
-       svc_rdma_send_wc_common(xprt, wc, opname);
-       svc_xprt_put(&xprt->sc_xprt);
-}
-
 /**
  * svc_rdma_wc_send - Invoked by RDMA provider for each polled Send WC
  * @cq:        completion queue
@@ -383,74 +353,29 @@ static void svc_rdma_send_wc_common_put(struct ib_cq *cq, struct ib_wc *wc,
  *
  */
 void svc_rdma_wc_send(struct ib_cq *cq, struct ib_wc *wc)
-{
-       struct ib_cqe *cqe = wc->wr_cqe;
-       struct svc_rdma_op_ctxt *ctxt;
-
-       svc_rdma_send_wc_common_put(cq, wc, "send");
-
-       ctxt = container_of(cqe, struct svc_rdma_op_ctxt, cqe);
-       svc_rdma_unmap_dma(ctxt);
-       svc_rdma_put_context(ctxt, 1);
-}
-
-/**
- * svc_rdma_wc_reg - Invoked by RDMA provider for each polled FASTREG WC
- * @cq:        completion queue
- * @wc:        completed WR
- *
- */
-void svc_rdma_wc_reg(struct ib_cq *cq, struct ib_wc *wc)
-{
-       svc_rdma_send_wc_common_put(cq, wc, "fastreg");
-}
-
-/**
- * svc_rdma_wc_read - Invoked by RDMA provider for each polled Read WC
- * @cq:        completion queue
- * @wc:        completed WR
- *
- */
-void svc_rdma_wc_read(struct ib_cq *cq, struct ib_wc *wc)
 {
        struct svcxprt_rdma *xprt = cq->cq_context;
        struct ib_cqe *cqe = wc->wr_cqe;
        struct svc_rdma_op_ctxt *ctxt;
 
-       svc_rdma_send_wc_common(xprt, wc, "read");
+       atomic_inc(&xprt->sc_sq_avail);
+       wake_up(&xprt->sc_send_wait);
 
        ctxt = container_of(cqe, struct svc_rdma_op_ctxt, cqe);
        svc_rdma_unmap_dma(ctxt);
-       svc_rdma_put_frmr(xprt, ctxt->frmr);
-
-       if (test_bit(RDMACTXT_F_LAST_CTXT, &ctxt->flags)) {
-               struct svc_rdma_op_ctxt *read_hdr;
-
-               read_hdr = ctxt->read_hdr;
-               spin_lock(&xprt->sc_rq_dto_lock);
-               list_add_tail(&read_hdr->list,
-                             &xprt->sc_read_complete_q);
-               spin_unlock(&xprt->sc_rq_dto_lock);
+       svc_rdma_put_context(ctxt, 1);
 
-               set_bit(XPT_DATA, &xprt->sc_xprt.xpt_flags);
-               svc_xprt_enqueue(&xprt->sc_xprt);
+       if (unlikely(wc->status != IB_WC_SUCCESS)) {
+               set_bit(XPT_CLOSE, &xprt->sc_xprt.xpt_flags);
+               if (wc->status != IB_WC_WR_FLUSH_ERR)
+                       pr_err("svcrdma: Send: %s (%u/0x%x)\n",
+                              ib_wc_status_msg(wc->status),
+                              wc->status, wc->vendor_err);
        }
 
-       svc_rdma_put_context(ctxt, 0);
        svc_xprt_put(&xprt->sc_xprt);
 }
 
-/**
- * svc_rdma_wc_inv - Invoked by RDMA provider for each polled LOCAL_INV WC
- * @cq:        completion queue
- * @wc:        completed WR
- *
- */
-void svc_rdma_wc_inv(struct ib_cq *cq, struct ib_wc *wc)
-{
-       svc_rdma_send_wc_common_put(cq, wc, "localInv");
-}
-
 static struct svcxprt_rdma *rdma_create_xprt(struct svc_serv *serv,
                                             int listener)
 {
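
[Editor's note] For reference, the single remaining Send completion handler,
reassembled from the context and added lines in the second hunk above, reads
as follows once this patch is applied:

	void svc_rdma_wc_send(struct ib_cq *cq, struct ib_wc *wc)
	{
		struct svcxprt_rdma *xprt = cq->cq_context;
		struct ib_cqe *cqe = wc->wr_cqe;
		struct svc_rdma_op_ctxt *ctxt;

		/* Release the SQ slot before anything else so that
		 * waiting senders can make progress. */
		atomic_inc(&xprt->sc_sq_avail);
		wake_up(&xprt->sc_send_wait);

		ctxt = container_of(cqe, struct svc_rdma_op_ctxt, cqe);
		svc_rdma_unmap_dma(ctxt);
		svc_rdma_put_context(ctxt, 1);

		/* Error handling folded in from svc_rdma_send_wc_common;
		 * flush errors are expected at teardown and not logged. */
		if (unlikely(wc->status != IB_WC_SUCCESS)) {
			set_bit(XPT_CLOSE, &xprt->sc_xprt.xpt_flags);
			if (wc->status != IB_WC_WR_FLUSH_ERR)
				pr_err("svcrdma: Send: %s (%u/0x%x)\n",
				       ib_wc_status_msg(wc->status),
				       wc->status, wc->vendor_err);
		}

		svc_xprt_put(&xprt->sc_xprt);
	}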