IB/ehca: Wait for async events to finish before destroying QP
author     Stefan Roscher <ossrosch@linux.vnet.ibm.com>
           Wed, 7 May 2008 18:35:06 +0000 (11:35 -0700)
committer  Roland Dreier <rolandd@cisco.com>
           Wed, 7 May 2008 18:35:06 +0000 (11:35 -0700)
This is necessary because, in a multicore environment, a race between the
uverbs async event handler and destroy QP could occur: the QP could be torn
down while the async handler is still dispatching events for it.

Signed-off-by: Stefan Roscher <stefan.roscher at de.ibm.com>
Signed-off-by: Roland Dreier <rolandd@cisco.com>
drivers/infiniband/hw/ehca/ehca_classes.h
drivers/infiniband/hw/ehca/ehca_irq.c
drivers/infiniband/hw/ehca/ehca_qp.c
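
The patch follows a simple "count pending events, then drain" pattern: every
async event handler that looks a QP up in the idr bumps a per-QP counter while
it still holds ehca_qp_idr_lock, and drops it (waking any waiter) once the
event has been dispatched; internal_destroy_qp() first removes the QP from the
idr, so no new handler can find it, and then sleeps until the counter reaches
zero before tearing down the hardware resources. Below is a minimal user-space
sketch of that pattern, written with pthreads instead of the kernel's
atomic_t/waitqueue primitives; the names (fake_qp, handle_async_event,
destroy_qp, event_thread) are invented for illustration and do not exist in
the ehca driver.

/*
 * User-space analogue of the scheme introduced by this patch:
 * handlers pin the QP with a counter, destroy drains the counter.
 * Build with: cc -pthread sketch.c
 */
#include <pthread.h>
#include <stdio.h>

struct fake_qp {
	int nr_events;                   /* events currently being handled */
	int destroyed;                   /* stands in for idr_remove() */
	pthread_mutex_t lock;            /* stands in for ehca_qp_idr_lock */
	pthread_cond_t wait_completion;  /* stands in for the wait queue */
};

/* Async event handler: pin the QP before using it, unpin when done. */
static void handle_async_event(struct fake_qp *qp)
{
	pthread_mutex_lock(&qp->lock);
	if (qp->destroyed) {             /* QP already unreachable: bail out */
		pthread_mutex_unlock(&qp->lock);
		return;
	}
	qp->nr_events++;                 /* mirrors atomic_inc(&qp->nr_events) */
	pthread_mutex_unlock(&qp->lock);

	/* ... dispatch the event, dereferencing the QP ... */

	pthread_mutex_lock(&qp->lock);
	if (--qp->nr_events == 0)        /* mirrors atomic_dec_and_test() */
		pthread_cond_signal(&qp->wait_completion);  /* wake_up() */
	pthread_mutex_unlock(&qp->lock);
}

/* Destroy path: make the QP unreachable, then wait for handlers to drain. */
static void destroy_qp(struct fake_qp *qp)
{
	pthread_mutex_lock(&qp->lock);
	qp->destroyed = 1;               /* mirrors idr_remove() */
	while (qp->nr_events)            /* mirrors wait_event(..., !nr_events) */
		pthread_cond_wait(&qp->wait_completion, &qp->lock);
	pthread_mutex_unlock(&qp->lock);

	/* now it is safe to free the QP's resources */
	printf("QP destroyed with no handlers in flight\n");
}

static void *event_thread(void *arg)
{
	handle_async_event(arg);
	return NULL;
}

int main(void)
{
	struct fake_qp qp = {
		.lock = PTHREAD_MUTEX_INITIALIZER,
		.wait_completion = PTHREAD_COND_INITIALIZER,
	};
	pthread_t t;

	pthread_create(&t, NULL, event_thread, &qp);  /* concurrent "async event" */
	destroy_qp(&qp);    /* must not proceed while the handler uses the QP */
	pthread_join(t, NULL);
	return 0;
}

Removing the QP from the idr before waiting is what makes the counter
monotonically non-increasing from destroy's point of view: once idr_find() can
no longer return the QP, nr_events can only drop to zero, so the wait
terminates.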

diff --git a/drivers/infiniband/hw/ehca/ehca_classes.h b/drivers/infiniband/hw/ehca/ehca_classes.h
index 00bab60f6de474a1ed24e77ee9331f46efe7103f..1e9e99a13933bd7dfa671cc8a125c9abddb526fc 100644
--- a/drivers/infiniband/hw/ehca/ehca_classes.h
+++ b/drivers/infiniband/hw/ehca/ehca_classes.h
@@ -192,6 +192,8 @@ struct ehca_qp {
        int mtu_shift;
        u32 message_count;
        u32 packet_count;
+       atomic_t nr_events; /* events seen */
+       wait_queue_head_t wait_completion;
 };
 
 #define IS_SRQ(qp) (qp->ext_type == EQPT_SRQ)
diff --git a/drivers/infiniband/hw/ehca/ehca_irq.c b/drivers/infiniband/hw/ehca/ehca_irq.c
index ca5eb0cb628cf74cb4df726667cd2f378199448c..ce1ab0571be38303b8554bd69dcde739f252c826 100644
--- a/drivers/infiniband/hw/ehca/ehca_irq.c
+++ b/drivers/infiniband/hw/ehca/ehca_irq.c
@@ -204,6 +204,8 @@ static void qp_event_callback(struct ehca_shca *shca, u64 eqe,
 
        read_lock(&ehca_qp_idr_lock);
        qp = idr_find(&ehca_qp_idr, token);
+       if (qp)
+               atomic_inc(&qp->nr_events);
        read_unlock(&ehca_qp_idr_lock);
 
        if (!qp)
@@ -223,6 +225,8 @@ static void qp_event_callback(struct ehca_shca *shca, u64 eqe,
        if (fatal && qp->ext_type == EQPT_SRQBASE)
                dispatch_qp_event(shca, qp, IB_EVENT_QP_LAST_WQE_REACHED);
 
+       if (atomic_dec_and_test(&qp->nr_events))
+               wake_up(&qp->wait_completion);
        return;
 }
 
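For readers not fluent in the kernel atomics API: atomic_dec_and_test()
decrements the counter and returns true only when the result is zero, so the
wake_up() above is issued solely by the handler that finishes the last
outstanding event. Combined with the increment being done while
ehca_qp_idr_lock is still held, any handler that has already found the QP via
idr_find() is guaranteed to be counted before internal_destroy_qp() checks the
counter. A rough user-space illustration of the dec-and-test semantic, using
C11 atomics (dec_and_test here is a hypothetical helper, not a kernel API):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_int nr_events;

/* Returns true only if this decrement brought the counter to zero. */
static bool dec_and_test(atomic_int *v)
{
	return atomic_fetch_sub(v, 1) == 1;  /* fetch_sub returns the old value */
}

int main(void)
{
	atomic_store(&nr_events, 2);              /* two events in flight */
	printf("%d\n", dec_and_test(&nr_events)); /* 0: another event pending */
	printf("%d\n", dec_and_test(&nr_events)); /* 1: last event -> wake waiter */
	return 0;
}
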
diff --git a/drivers/infiniband/hw/ehca/ehca_qp.c b/drivers/infiniband/hw/ehca/ehca_qp.c
index 18fba92fa7ae107311121b7cc591aa077c7b51eb..3f59587338ea5fc03a2404500f0d9633a468439f 100644
--- a/drivers/infiniband/hw/ehca/ehca_qp.c
+++ b/drivers/infiniband/hw/ehca/ehca_qp.c
@@ -566,6 +566,8 @@ static struct ehca_qp *internal_create_qp(
                return ERR_PTR(-ENOMEM);
        }
 
+       atomic_set(&my_qp->nr_events, 0);
+       init_waitqueue_head(&my_qp->wait_completion);
        spin_lock_init(&my_qp->spinlock_s);
        spin_lock_init(&my_qp->spinlock_r);
        my_qp->qp_type = qp_type;
@@ -1934,6 +1936,9 @@ static int internal_destroy_qp(struct ib_device *dev, struct ehca_qp *my_qp,
        idr_remove(&ehca_qp_idr, my_qp->token);
        write_unlock_irqrestore(&ehca_qp_idr_lock, flags);
 
+       /* now wait until all pending events have completed */
+       wait_event(my_qp->wait_completion, !atomic_read(&my_qp->nr_events));
+
        h_ret = hipz_h_destroy_qp(shca->ipz_hca_handle, my_qp);
        if (h_ret != H_SUCCESS) {
                ehca_err(dev, "hipz_h_destroy_qp() failed h_ret=%li "