/* Queue pairs */
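+/*
+ * XRC TGT QPs are tracked on their xrcd's tgt_qp_list so that
+ * ib_dealloc_xrcd() can destroy any QPs that were released with
+ * ib_release_qp() but never explicitly destroyed.
+ */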
+static void __ib_insert_xrcd_qp(struct ib_xrcd *xrcd, struct ib_qp *qp)
+{
+ mutex_lock(&xrcd->tgt_qp_mutex);
+ list_add(&qp->xrcd_list, &xrcd->tgt_qp_list);
+ mutex_unlock(&xrcd->tgt_qp_mutex);
+}
+
+static void __ib_remove_xrcd_qp(struct ib_xrcd *xrcd, struct ib_qp *qp)
+{
+ mutex_lock(&xrcd->tgt_qp_mutex);
+ list_del(&qp->xrcd_list);
+ mutex_unlock(&xrcd->tgt_qp_mutex);
+}
+
struct ib_qp *ib_create_qp(struct ib_pd *pd,
struct ib_qp_init_attr *qp_init_attr)
{
qp->srq = NULL;
qp->xrcd = qp_init_attr->xrcd;
atomic_inc(&qp_init_attr->xrcd->usecnt);
+ __ib_insert_xrcd_qp(qp_init_attr->xrcd, qp);
} else {
if (qp_init_attr->qp_type == IB_QPT_XRC_INI) {
qp->recv_cq = NULL;
rcq = qp->recv_cq;
srq = qp->srq;
xrcd = qp->xrcd;
+ if (xrcd)
+ __ib_remove_xrcd_qp(xrcd, qp);
ret = qp->device->destroy_qp(qp);
if (!ret) {
atomic_dec(&srq->usecnt);
if (xrcd)
atomic_dec(&xrcd->usecnt);
+ } else if (xrcd) {
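+ /* destroy_qp() failed: put the QP back on the xrcd's list */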
+ __ib_insert_xrcd_qp(xrcd, qp);
}
return ret;
}
EXPORT_SYMBOL(ib_destroy_qp);
+int ib_release_qp(struct ib_qp *qp)
+{
+ unsigned long flags;
+
+ if (qp->qp_type != IB_QPT_XRC_TGT)
+ return -EINVAL;
+
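+ /* Detach the event handler so no further QP events reach the caller. */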
+ spin_lock_irqsave(&qp->device->event_handler_lock, flags);
+ qp->event_handler = NULL;
+ spin_unlock_irqrestore(&qp->device->event_handler_lock, flags);
+
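+ /*
+  * Drop the creator's reference to the xrcd.  The QP remains on the
+  * xrcd's tgt_qp_list and is destroyed later by ib_dealloc_xrcd().
+  */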
+ atomic_dec(&qp->xrcd->usecnt);
+ return 0;
+}
+EXPORT_SYMBOL(ib_release_qp);
+
/* Completion queues */
struct ib_cq *ib_create_cq(struct ib_device *device,
if (!IS_ERR(xrcd)) {
xrcd->device = device;
atomic_set(&xrcd->usecnt, 0);
+ mutex_init(&xrcd->tgt_qp_mutex);
+ INIT_LIST_HEAD(&xrcd->tgt_qp_list);
}
return xrcd;
int ib_dealloc_xrcd(struct ib_xrcd *xrcd)
{
+ struct ib_qp *qp;
+ int ret;
+
if (atomic_read(&xrcd->usecnt))
return -EBUSY;
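+
+ /* Destroy any XRC TGT QPs still tracked on this xrcd. */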
+ while (!list_empty(&xrcd->tgt_qp_list)) {
+ qp = list_entry(xrcd->tgt_qp_list.next, struct ib_qp, xrcd_list);
+ ret = ib_destroy_qp(qp);
+ if (ret)
+ return ret;
+ }
+
return xrcd->device->dealloc_xrcd(xrcd);
}
EXPORT_SYMBOL(ib_dealloc_xrcd);
struct ib_xrcd {
struct ib_device *device;
- atomic_t usecnt; /* count all resources */
+ atomic_t usecnt; /* count all exposed resources */
+
+ struct mutex tgt_qp_mutex; /* protects tgt_qp_list */
+ struct list_head tgt_qp_list; /* tracked XRC TGT QPs */
};
struct ib_ah {
struct ib_cq *recv_cq;
struct ib_srq *srq;
struct ib_xrcd *xrcd; /* XRC TGT QPs only */
+ struct list_head xrcd_list; /* entry on xrcd->tgt_qp_list */
struct ib_uobject *uobject;
void (*event_handler)(struct ib_event *, void *);
void *qp_context;
*/
int ib_destroy_qp(struct ib_qp *qp);
+/**
+ * ib_release_qp - Release an external reference to a QP.
+ * @qp: The QP handle to release
+ *
+ * Releases the caller's reference to the QP without destroying it.  If
+ * the QP is still referenced internally (e.g. by its XRC domain), it is
+ * not destroyed until those internal references are released.  After
+ * releasing the QP, the caller can no longer access it, and all further
+ * events on the QP are discarded.
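+ *
+ * A minimal usage sketch (assuming the QP was created by ib_create_qp()
+ * with qp_type IB_QPT_XRC_TGT and init_attr.xrcd set; error handling
+ * omitted):
+ *
+ *   qp = ib_create_qp(pd, &init_attr);
+ *   ...
+ *   ib_release_qp(qp);
+ *
+ * After this, the xrcd tracks the QP and ib_dealloc_xrcd() destroys it.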
+ */
+int ib_release_qp(struct ib_qp *qp);
+
/**
* ib_post_send - Posts a list of work requests to the send queue of
* the specified QP.