return ret;
}
-static inline unsigned qpn_hash(struct qib_ibdev *dev, u32 qpn)
-{
- return hash_32(qpn, dev->rdi.qp_dev->qp_table_bits);
-}
-
-/*
- * Remove the QP from the table so it can't be found asynchronously by
- * the receive interrupt routine.
- */
-static void remove_qp(struct qib_ibdev *dev, struct rvt_qp *qp)
-{
- struct qib_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
- unsigned n = qpn_hash(dev, qp->ibqp.qp_num);
- unsigned long flags;
- int removed = 1;
- spinlock_t *qpt_lock_ptr; /* Pointer to make checkpatch happy */
-
- spin_lock_irqsave(&dev->rdi.qp_dev->qpt_lock, flags);
-
- qpt_lock_ptr = &dev->rdi.qp_dev->qpt_lock;
- if (rcu_dereference_protected(ibp->rvp.qp[0],
- lockdep_is_held(qpt_lock_ptr)) == qp) {
- RCU_INIT_POINTER(ibp->rvp.qp[0], NULL);
- } else if (rcu_dereference_protected(ibp->rvp.qp[1],
- lockdep_is_held(&dev->rdi.qp_dev->qpt_lock)) == qp) {
- RCU_INIT_POINTER(ibp->rvp.qp[1], NULL);
- } else {
- struct rvt_qp *q;
- struct rvt_qp __rcu **qpp;
-
- removed = 0;
- qpp = &dev->rdi.qp_dev->qp_table[n];
- for (; (q = rcu_dereference_protected(*qpp,
- lockdep_is_held(qpt_lock_ptr))) != NULL;
- qpp = &q->next)
- if (q == qp) {
- RCU_INIT_POINTER(*qpp,
- rcu_dereference_protected(qp->next,
- lockdep_is_held(qpt_lock_ptr)));
- removed = 1;
- break;
- }
- }
-
- spin_unlock_irqrestore(&dev->rdi.qp_dev->qpt_lock, flags);
- if (removed) {
- synchronize_rcu();
- if (atomic_dec_and_test(&qp->refcount))
- wake_up(&qp->wait);
- }
-}
-
/**
* qib_free_all_qps - check for QPs still in use
*/
spin_unlock(&dev->rdi.pending_lock);
}
-/**
- * qib_destroy_qp - destroy a queue pair
- * @ibqp: the queue pair to destroy
- *
- * Returns 0 on success.
- *
- * Note that this can be called while the QP is actively sending or
- * receiving!
- */
-int qib_destroy_qp(struct ib_qp *ibqp)
-{
- struct rvt_qp *qp = ibqp_to_rvtqp(ibqp);
- struct qib_ibdev *dev = to_idev(ibqp->device);
- struct qib_qp_priv *priv = qp->priv;
-
- /* Make sure HW and driver activity is stopped. */
- spin_lock_irq(&qp->s_lock);
- if (qp->state != IB_QPS_RESET) {
- qp->state = IB_QPS_RESET;
- spin_lock(&dev->rdi.pending_lock);
- if (!list_empty(&priv->iowait))
- list_del_init(&priv->iowait);
- spin_unlock(&dev->rdi.pending_lock);
- qp->s_flags &= ~(RVT_S_TIMER | RVT_S_ANY_WAIT);
- spin_unlock_irq(&qp->s_lock);
- cancel_work_sync(&priv->s_work);
- del_timer_sync(&qp->s_timer);
- wait_event(priv->wait_dma, !atomic_read(&priv->s_dma_busy));
- if (priv->s_tx) {
- qib_put_txreq(priv->s_tx);
- priv->s_tx = NULL;
- }
- remove_qp(dev, qp);
- wait_event(qp->wait, !atomic_read(&qp->refcount));
- rvt_clear_mr_refs(qp, 1);
- } else
- spin_unlock_irq(&qp->s_lock);
-
- /* all user's cleaned up, mark it available */
- rvt_free_qpn(&dev->rdi.qp_dev->qpn_table, qp->ibqp.qp_num);
- rvt_dec_qp_cnt(&dev->rdi);
-
- if (qp->ip)
- kref_put(&qp->ip->ref, rvt_release_mmap_info);
- else
- vfree(qp->r_rq.wq);
- vfree(qp->s_wq);
- kfree(priv->s_hdr);
- kfree(priv);
- kfree(qp);
- return 0;
-}
-
/**
* qib_get_credit - flush the send work queue of a QP
 * @qp: the qp whose send work queue to flush