RDMA/cxgb3: QP flush fixes
author     Steve Wise <swise@opengridcomputing.com>
           Fri, 2 May 2008 16:17:41 +0000 (11:17 -0500)
committer  Roland Dreier <rolandd@cisco.com>
           Fri, 2 May 2008 17:56:57 +0000 (10:56 -0700)
- Flush the QP only after the HW disables the connection.  Currently
  we flush the QP when transitioning to CLOSING.  This exposes a race
  condition where the HW can complete a RECV WR, for instance, -and-
  the SW can flush that same WR.

- Only call the CQ event handlers on a flush if we actually flushed
  something.

Signed-off-by: Steve Wise <swise@opengridcomputing.com>
Signed-off-by: Roland Dreier <rolandd@cisco.com>
drivers/infiniband/hw/cxgb3/cxio_hal.c
drivers/infiniband/hw/cxgb3/cxio_hal.h
drivers/infiniband/hw/cxgb3/iwch_qp.c
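
The shape of the fix, reduced to a standalone sketch (illustrative only;
the demo_* names are hypothetical, not the driver's API): the flush
helpers now return how many entries they inserted into the CQ, and the
caller invokes the CQ completion handler only when that count is
non-zero, so consumers are no longer woken to poll an empty queue.

    #include <stdio.h>

    struct demo_cq {
            int pending;                    /* entries awaiting the consumer */
            void (*comp_handler)(struct demo_cq *cq);
    };

    /* Insert 'count' flush entries into the CQ; report how many went in. */
    static int demo_flush(struct demo_cq *cq, int count)
    {
            cq->pending += count;
            return count;
    }

    static void demo_handler(struct demo_cq *cq)
    {
            printf("consumer woken, %d pending\n", cq->pending);
    }

    int main(void)
    {
            struct demo_cq cq = { 0, demo_handler };
            int flushed;

            flushed = demo_flush(&cq, 0);   /* nothing was outstanding */
            if (flushed)                    /* the fix: gate the upcall */
                    (*cq.comp_handler)(&cq);
            else
                    printf("nothing flushed, handler not called\n");
            return 0;
    }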

drivers/infiniband/hw/cxgb3/cxio_hal.c
index ed2ee4ba4b7c3e9e50ed8d162fbc40b5482945ca..5fd8506a865735a87df96738a4677ad227e8e381 100644
@@ -359,9 +359,10 @@ static void insert_recv_cqe(struct t3_wq *wq, struct t3_cq *cq)
        cq->sw_wptr++;
 }
 
-void cxio_flush_rq(struct t3_wq *wq, struct t3_cq *cq, int count)
+int cxio_flush_rq(struct t3_wq *wq, struct t3_cq *cq, int count)
 {
        u32 ptr;
+       int flushed = 0;
 
        PDBG("%s wq %p cq %p\n", __func__, wq, cq);
 
@@ -369,8 +370,11 @@ void cxio_flush_rq(struct t3_wq *wq, struct t3_cq *cq, int count)
        PDBG("%s rq_rptr %u rq_wptr %u skip count %u\n", __func__,
            wq->rq_rptr, wq->rq_wptr, count);
        ptr = wq->rq_rptr + count;
-       while (ptr++ != wq->rq_wptr)
+       while (ptr++ != wq->rq_wptr) {
                insert_recv_cqe(wq, cq);
+               flushed++;
+       }
+       return flushed;
 }
 
 static void insert_sq_cqe(struct t3_wq *wq, struct t3_cq *cq,
@@ -394,9 +398,10 @@ static void insert_sq_cqe(struct t3_wq *wq, struct t3_cq *cq,
        cq->sw_wptr++;
 }
 
-void cxio_flush_sq(struct t3_wq *wq, struct t3_cq *cq, int count)
+int cxio_flush_sq(struct t3_wq *wq, struct t3_cq *cq, int count)
 {
        __u32 ptr;
+       int flushed = 0;
        struct t3_swsq *sqp = wq->sq + Q_PTR2IDX(wq->sq_rptr, wq->sq_size_log2);
 
        ptr = wq->sq_rptr + count;
@@ -405,7 +410,9 @@ void cxio_flush_sq(struct t3_wq *wq, struct t3_cq *cq, int count)
                insert_sq_cqe(wq, cq, sqp);
                sqp++;
                ptr++;
+               flushed++;
        }
+       return flushed;
 }
 
 /*
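
A standalone note on the two loops above (not part of the patch): the
WQ read/write pointers are free-running u32 counters, so the loops
compare with != while post-incrementing. Unsigned wrap-around keeps the
iteration count correct across the 2^32 boundary, where a < comparison
would terminate early. Q_PTR2IDX below is modelled on the driver's
macro; the rest is illustrative.

    #include <stdint.h>
    #include <stdio.h>

    #define Q_PTR2IDX(ptr, size_log2) ((ptr) & ((1u << (size_log2)) - 1))

    int main(void)
    {
            uint32_t rptr = 0xfffffffe;   /* about to wrap */
            uint32_t wptr = rptr + 5;     /* 5 outstanding WRs, wraps past 0 */
            uint32_t ptr = rptr;          /* a skip count would be added here */
            int flushed = 0;

            while (ptr++ != wptr) {       /* '<' would stop immediately here */
                    printf("flush slot %u\n",
                           (unsigned)Q_PTR2IDX(ptr - 1, 10));
                    flushed++;
            }
            printf("flushed %d entries\n", flushed);
            return 0;
    }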
drivers/infiniband/hw/cxgb3/cxio_hal.h
index 2bcff7f5046e3a00c10675643f40872319bb51ef..69ab08ebc680166292ffe8f4b040f96b21dcabb3 100644
@@ -173,8 +173,8 @@ u32 cxio_hal_get_pdid(struct cxio_hal_resource *rscp);
 void cxio_hal_put_pdid(struct cxio_hal_resource *rscp, u32 pdid);
 int __init cxio_hal_init(void);
 void __exit cxio_hal_exit(void);
-void cxio_flush_rq(struct t3_wq *wq, struct t3_cq *cq, int count);
-void cxio_flush_sq(struct t3_wq *wq, struct t3_cq *cq, int count);
+int cxio_flush_rq(struct t3_wq *wq, struct t3_cq *cq, int count);
+int cxio_flush_sq(struct t3_wq *wq, struct t3_cq *cq, int count);
 void cxio_count_rcqes(struct t3_cq *cq, struct t3_wq *wq, int *count);
 void cxio_count_scqes(struct t3_cq *cq, struct t3_wq *wq, int *count);
 void cxio_flush_hw_cq(struct t3_cq *cq);
drivers/infiniband/hw/cxgb3/iwch_qp.c
index 9b4be889c58ea8d9d378200e0297958a337ef16c..79dbe5beae52b3ee3095ac24aa78f12ee0217b4e 100644
@@ -655,6 +655,7 @@ static void __flush_qp(struct iwch_qp *qhp, unsigned long *flag)
 {
        struct iwch_cq *rchp, *schp;
        int count;
+       int flushed;
 
        rchp = get_chp(qhp->rhp, qhp->attr.rcq);
        schp = get_chp(qhp->rhp, qhp->attr.scq);
@@ -669,20 +670,22 @@ static void __flush_qp(struct iwch_qp *qhp, unsigned long *flag)
        spin_lock(&qhp->lock);
        cxio_flush_hw_cq(&rchp->cq);
        cxio_count_rcqes(&rchp->cq, &qhp->wq, &count);
-       cxio_flush_rq(&qhp->wq, &rchp->cq, count);
+       flushed = cxio_flush_rq(&qhp->wq, &rchp->cq, count);
        spin_unlock(&qhp->lock);
        spin_unlock_irqrestore(&rchp->lock, *flag);
-       (*rchp->ibcq.comp_handler)(&rchp->ibcq, rchp->ibcq.cq_context);
+       if (flushed)
+               (*rchp->ibcq.comp_handler)(&rchp->ibcq, rchp->ibcq.cq_context);
 
        /* locking heirarchy: cq lock first, then qp lock. */
        spin_lock_irqsave(&schp->lock, *flag);
        spin_lock(&qhp->lock);
        cxio_flush_hw_cq(&schp->cq);
        cxio_count_scqes(&schp->cq, &qhp->wq, &count);
-       cxio_flush_sq(&qhp->wq, &schp->cq, count);
+       flushed = cxio_flush_sq(&qhp->wq, &schp->cq, count);
        spin_unlock(&qhp->lock);
        spin_unlock_irqrestore(&schp->lock, *flag);
-       (*schp->ibcq.comp_handler)(&schp->ibcq, schp->ibcq.cq_context);
+       if (flushed)
+               (*schp->ibcq.comp_handler)(&schp->ibcq, schp->ibcq.cq_context);
 
        /* deref */
        if (atomic_dec_and_test(&qhp->refcnt))
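
Two details of the hunk above are worth spelling out (a sketch under
stated assumptions, with pthread stand-ins rather than the kernel's
spinlocks): the comment's lock hierarchy, CQ lock first and then QP
lock, only rules out an ABBA deadlock if every path honours the same
order, and the completion handler is deliberately invoked only after
both locks have been dropped, so the upcall can safely re-enter CQ
code.

    #include <pthread.h>

    static pthread_mutex_t cq_lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_mutex_t qp_lock = PTHREAD_MUTEX_INITIALIZER;

    static void flush_path(void)
    {
            pthread_mutex_lock(&cq_lock);   /* outer: CQ lock first */
            pthread_mutex_lock(&qp_lock);   /* inner: then QP lock  */
            /* ... move WQ entries into the CQ ... */
            pthread_mutex_unlock(&qp_lock);
            pthread_mutex_unlock(&cq_lock);
            /* completion upcall happens only here, with no locks held */
    }

    static void poll_path(void)
    {
            pthread_mutex_lock(&cq_lock);   /* same order on every path,  */
            pthread_mutex_lock(&qp_lock);   /* or two threads can deadlock */
            /* ... reap completions ... */
            pthread_mutex_unlock(&qp_lock);
            pthread_mutex_unlock(&cq_lock);
    }

    int main(void)
    {
            flush_path();
            poll_path();
            return 0;
    }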
@@ -880,7 +883,6 @@ int iwch_modify_qp(struct iwch_dev *rhp, struct iwch_qp *qhp,
                                ep = qhp->ep;
                                get_ep(&ep->com);
                        }
-                       flush_qp(qhp, &flag);
                        break;
                case IWCH_QP_STATE_TERMINATE:
                        qhp->attr.state = IWCH_QP_STATE_TERMINATE;
@@ -911,6 +913,7 @@ int iwch_modify_qp(struct iwch_dev *rhp, struct iwch_qp *qhp,
                }
                switch (attrs->next_state) {
                        case IWCH_QP_STATE_IDLE:
+                               flush_qp(qhp, &flag);
                                qhp->attr.state = IWCH_QP_STATE_IDLE;
                                qhp->attr.llp_stream_handle = NULL;
                                put_ep(&qhp->ep->com);
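
Finally, the two iwch_modify_qp hunks move the flush from entry into
CLOSING to the CLOSING -> IDLE transition, which is what closes the
race in the first bullet of the commit message: once the QP reaches
IDLE the HW has disabled the connection and can no longer complete WRs
behind the SW's back. A reduced state-machine sketch (hypothetical
demo_* names, not the driver's):

    #include <stdio.h>

    enum demo_qp_state { DEMO_RTS, DEMO_CLOSING, DEMO_IDLE };

    struct demo_qp {
            enum demo_qp_state state;
    };

    static void demo_flush_qp(struct demo_qp *qp)
    {
            printf("flushing QP in state %d\n", qp->state);
    }

    static void demo_modify_qp(struct demo_qp *qp, enum demo_qp_state next)
    {
            switch (next) {
            case DEMO_CLOSING:
                    /* Old behaviour flushed here, racing HW completions. */
                    qp->state = DEMO_CLOSING;
                    break;
            case DEMO_IDLE:
                    /* New behaviour: HW is quiesced, now safe to flush. */
                    demo_flush_qp(qp);
                    qp->state = DEMO_IDLE;
                    break;
            default:
                    qp->state = next;
                    break;
            }
    }

    int main(void)
    {
            struct demo_qp qp = { DEMO_RTS };

            demo_modify_qp(&qp, DEMO_CLOSING);  /* no flush yet */
            demo_modify_qp(&qp, DEMO_IDLE);     /* flush happens here */
            return 0;
    }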