[PATCH] IB/mthca: Factor out common queue alloc code
author Roland Dreier <roland@eddore.topspincom.com>
Thu, 18 Aug 2005 20:39:31 +0000 (13:39 -0700)
committer Roland Dreier <rolandd@cisco.com>
Sat, 27 Aug 2005 03:37:37 +0000 (20:37 -0700)
Clean up the allocation of memory for queues by factoring out the
common code into mthca_buf_alloc() and mthca_buf_free().  Now CQs and
QPs share the same queue allocation code, which we'll also use for SRQs.

Signed-off-by: Roland Dreier <rolandd@cisco.com>
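
For reference, the sketch below shows how a caller drives the new helpers. It is illustrative only, modeled on the mthca_init_cq() call site in this patch; the example_* wrappers are not part of the patch itself.

static int example_alloc_cq_queue(struct mthca_dev *dev, struct mthca_cq *cq,
                                  int size)
{
        /* mthca_buf_alloc() picks a single contiguous buffer when
         * size <= MTHCA_MAX_DIRECT_CQ_SIZE, otherwise a page list,
         * and registers the memory in an MR on the given PD.
         */
        return mthca_buf_alloc(dev, size, MTHCA_MAX_DIRECT_CQ_SIZE,
                               &cq->queue, &cq->is_direct,
                               &dev->driver_pd, 1 /* HCA may write */, &cq->mr);
}

static void example_free_cq_queue(struct mthca_dev *dev, struct mthca_cq *cq,
                                  int size)
{
        /* Passing &cq->mr frees the MR as well, so callers no longer
         * need a separate mthca_free_mr() call.
         */
        mthca_buf_free(dev, size, &cq->queue, cq->is_direct, &cq->mr);
}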
drivers/infiniband/hw/mthca/mthca_allocator.c
drivers/infiniband/hw/mthca/mthca_cq.c
drivers/infiniband/hw/mthca/mthca_dev.h
drivers/infiniband/hw/mthca/mthca_provider.h
drivers/infiniband/hw/mthca/mthca_qp.c

diff --git a/drivers/infiniband/hw/mthca/mthca_allocator.c b/drivers/infiniband/hw/mthca/mthca_allocator.c
index b1db48dd91d6ee8ba484c178fa1e851cdb00bfe5..9ba3211cef7cb2a7747348420d5dbad3ff0ee18d 100644
--- a/drivers/infiniband/hw/mthca/mthca_allocator.c
+++ b/drivers/infiniband/hw/mthca/mthca_allocator.c
@@ -177,3 +177,119 @@ void mthca_array_cleanup(struct mthca_array *array, int nent)
 
        kfree(array->page_list);
 }
+
+/*
+ * Handling for queue buffers -- we allocate a bunch of memory and
+ * register it in a memory region at HCA virtual address 0.  If the
+ * requested size is > max_direct, we split the allocation into
+ * multiple pages, so we don't require too much contiguous memory.
+ */
+
+int mthca_buf_alloc(struct mthca_dev *dev, int size, int max_direct,
+                   union mthca_buf *buf, int *is_direct, struct mthca_pd *pd,
+                   int hca_write, struct mthca_mr *mr)
+{
+       int err = -ENOMEM;
+       int npages, shift;
+       u64 *dma_list = NULL;
+       dma_addr_t t;
+       int i;
+
+       if (size <= max_direct) {
+               *is_direct = 1;
+               npages     = 1;
+               shift      = get_order(size) + PAGE_SHIFT;
+
+               buf->direct.buf = dma_alloc_coherent(&dev->pdev->dev,
+                                                    size, &t, GFP_KERNEL);
+               if (!buf->direct.buf)
+                       return -ENOMEM;
+
+               pci_unmap_addr_set(&buf->direct, mapping, t);
+
+               memset(buf->direct.buf, 0, size);
+
+               while (t & ((1 << shift) - 1)) {
+                       --shift;
+                       npages *= 2;
+               }
+
+               dma_list = kmalloc(npages * sizeof *dma_list, GFP_KERNEL);
+               if (!dma_list)
+                       goto err_free;
+
+               for (i = 0; i < npages; ++i)
+                       dma_list[i] = t + i * (1 << shift);
+       } else {
+               *is_direct = 0;
+               npages     = (size + PAGE_SIZE - 1) / PAGE_SIZE;
+               shift      = PAGE_SHIFT;
+
+               dma_list = kmalloc(npages * sizeof *dma_list, GFP_KERNEL);
+               if (!dma_list)
+                       return -ENOMEM;
+
+               buf->page_list = kmalloc(npages * sizeof *buf->page_list,
+                                        GFP_KERNEL);
+               if (!buf->page_list)
+                       goto err_out;
+
+               for (i = 0; i < npages; ++i)
+                       buf->page_list[i].buf = NULL;
+
+               for (i = 0; i < npages; ++i) {
+                       buf->page_list[i].buf =
+                               dma_alloc_coherent(&dev->pdev->dev, PAGE_SIZE,
+                                                  &t, GFP_KERNEL);
+                       if (!buf->page_list[i].buf)
+                               goto err_free;
+
+                       dma_list[i] = t;
+                       pci_unmap_addr_set(&buf->page_list[i], mapping, t);
+
+                       memset(buf->page_list[i].buf, 0, PAGE_SIZE);
+               }
+       }
+
+       err = mthca_mr_alloc_phys(dev, pd->pd_num,
+                                 dma_list, shift, npages,
+                                 0, size,
+                                 MTHCA_MPT_FLAG_LOCAL_READ |
+                                 (hca_write ? MTHCA_MPT_FLAG_LOCAL_WRITE : 0),
+                                 mr);
+       if (err)
+               goto err_free;
+
+       kfree(dma_list);
+
+       return 0;
+
+err_free:
+       mthca_buf_free(dev, size, buf, *is_direct, NULL);
+
+err_out:
+       kfree(dma_list);
+
+       return err;
+}
+
+void mthca_buf_free(struct mthca_dev *dev, int size, union mthca_buf *buf,
+                   int is_direct, struct mthca_mr *mr)
+{
+       int i;
+
+       if (mr)
+               mthca_free_mr(dev, mr);
+
+       if (is_direct)
+               dma_free_coherent(&dev->pdev->dev, size, buf->direct.buf,
+                                 pci_unmap_addr(&buf->direct, mapping));
+       else {
+               for (i = 0; i < (size + PAGE_SIZE - 1) / PAGE_SIZE; ++i)
+                       dma_free_coherent(&dev->pdev->dev, PAGE_SIZE,
+                                         buf->page_list[i].buf,
+                                         pci_unmap_addr(&buf->page_list[i],
+                                                        mapping));
+               kfree(buf->page_list);
+       }
+}
diff --git a/drivers/infiniband/hw/mthca/mthca_cq.c b/drivers/infiniband/hw/mthca/mthca_cq.c
index 907867d1f2e08389eb5d20b96458d9efa15b4fb6..8afb9ee2fbc64eddc8a59b4d29fc2a6eb7ef6720 100644
--- a/drivers/infiniband/hw/mthca/mthca_cq.c
+++ b/drivers/infiniband/hw/mthca/mthca_cq.c
@@ -639,113 +639,8 @@ int mthca_arbel_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify notify)
 
 static void mthca_free_cq_buf(struct mthca_dev *dev, struct mthca_cq *cq)
 {
-       int i;
-       int size;
-
-       if (cq->is_direct)
-               dma_free_coherent(&dev->pdev->dev,
-                                 (cq->ibcq.cqe + 1) * MTHCA_CQ_ENTRY_SIZE,
-                                 cq->queue.direct.buf,
-                                 pci_unmap_addr(&cq->queue.direct,
-                                                mapping));
-       else {
-               size = (cq->ibcq.cqe + 1) * MTHCA_CQ_ENTRY_SIZE;
-               for (i = 0; i < (size + PAGE_SIZE - 1) / PAGE_SIZE; ++i)
-                       if (cq->queue.page_list[i].buf)
-                               dma_free_coherent(&dev->pdev->dev, PAGE_SIZE,
-                                                 cq->queue.page_list[i].buf,
-                                                 pci_unmap_addr(&cq->queue.page_list[i],
-                                                                mapping));
-
-               kfree(cq->queue.page_list);
-       }
-}
-
-static int mthca_alloc_cq_buf(struct mthca_dev *dev, int size,
-                             struct mthca_cq *cq)
-{
-       int err = -ENOMEM;
-       int npages, shift;
-       u64 *dma_list = NULL;
-       dma_addr_t t;
-       int i;
-
-       if (size <= MTHCA_MAX_DIRECT_CQ_SIZE) {
-               cq->is_direct = 1;
-               npages        = 1;
-               shift         = get_order(size) + PAGE_SHIFT;
-
-               cq->queue.direct.buf = dma_alloc_coherent(&dev->pdev->dev,
-                                                         size, &t, GFP_KERNEL);
-               if (!cq->queue.direct.buf)
-                       return -ENOMEM;
-
-               pci_unmap_addr_set(&cq->queue.direct, mapping, t);
-
-               memset(cq->queue.direct.buf, 0, size);
-
-               while (t & ((1 << shift) - 1)) {
-                       --shift;
-                       npages *= 2;
-               }
-
-               dma_list = kmalloc(npages * sizeof *dma_list, GFP_KERNEL);
-               if (!dma_list)
-                       goto err_free;
-
-               for (i = 0; i < npages; ++i)
-                       dma_list[i] = t + i * (1 << shift);
-       } else {
-               cq->is_direct = 0;
-               npages        = (size + PAGE_SIZE - 1) / PAGE_SIZE;
-               shift         = PAGE_SHIFT;
-
-               dma_list = kmalloc(npages * sizeof *dma_list, GFP_KERNEL);
-               if (!dma_list)
-                       return -ENOMEM;
-
-               cq->queue.page_list = kmalloc(npages * sizeof *cq->queue.page_list,
-                                             GFP_KERNEL);
-               if (!cq->queue.page_list)
-                       goto err_out;
-
-               for (i = 0; i < npages; ++i)
-                       cq->queue.page_list[i].buf = NULL;
-
-               for (i = 0; i < npages; ++i) {
-                       cq->queue.page_list[i].buf =
-                               dma_alloc_coherent(&dev->pdev->dev, PAGE_SIZE,
-                                                  &t, GFP_KERNEL);
-                       if (!cq->queue.page_list[i].buf)
-                               goto err_free;
-
-                       dma_list[i] = t;
-                       pci_unmap_addr_set(&cq->queue.page_list[i], mapping, t);
-
-                       memset(cq->queue.page_list[i].buf, 0, PAGE_SIZE);
-               }
-       }
-
-       err = mthca_mr_alloc_phys(dev, dev->driver_pd.pd_num,
-                                 dma_list, shift, npages,
-                                 0, size,
-                                 MTHCA_MPT_FLAG_LOCAL_WRITE |
-                                 MTHCA_MPT_FLAG_LOCAL_READ,
-                                 &cq->mr);
-       if (err)
-               goto err_free;
-
-       kfree(dma_list);
-
-       return 0;
-
-err_free:
-       mthca_free_cq_buf(dev, cq);
-
-err_out:
-       kfree(dma_list);
-
-       return err;
+       mthca_buf_free(dev, (cq->ibcq.cqe + 1) * MTHCA_CQ_ENTRY_SIZE,
+                      &cq->queue, cq->is_direct, &cq->mr);
 }
 
 int mthca_init_cq(struct mthca_dev *dev, int nent,
@@ -797,7 +692,9 @@ int mthca_init_cq(struct mthca_dev *dev, int nent,
        cq_context = mailbox->buf;
 
        if (cq->is_kernel) {
-               err = mthca_alloc_cq_buf(dev, size, cq);
+               err = mthca_buf_alloc(dev, size, MTHCA_MAX_DIRECT_CQ_SIZE,
+                                     &cq->queue, &cq->is_direct,
+                                     &dev->driver_pd, 1, &cq->mr);
                if (err)
                        goto err_out_mailbox;
 
@@ -858,10 +755,8 @@ int mthca_init_cq(struct mthca_dev *dev, int nent,
        return 0;
 
 err_out_free_mr:
-       if (cq->is_kernel) {
-               mthca_free_mr(dev, &cq->mr);
+       if (cq->is_kernel)
                mthca_free_cq_buf(dev, cq);
-       }
 
 err_out_mailbox:
        mthca_free_mailbox(dev, mailbox);
@@ -929,7 +824,6 @@ void mthca_free_cq(struct mthca_dev *dev,
        wait_event(cq->wait, !atomic_read(&cq->refcount));
 
        if (cq->is_kernel) {
-               mthca_free_mr(dev, &cq->mr);
                mthca_free_cq_buf(dev, cq);
                if (mthca_is_memfree(dev)) {
                        mthca_free_db(dev, MTHCA_DB_TYPE_CQ_ARM,    cq->arm_db_index);
diff --git a/drivers/infiniband/hw/mthca/mthca_dev.h b/drivers/infiniband/hw/mthca/mthca_dev.h
index 0f90a173ecee01a029ec4a6f9f9334ffff19d91a..cb78b5d07201893a69a1def238e486a0b29589be 100644
--- a/drivers/infiniband/hw/mthca/mthca_dev.h
+++ b/drivers/infiniband/hw/mthca/mthca_dev.h
@@ -361,6 +361,11 @@ int mthca_array_set(struct mthca_array *array, int index, void *value);
 void mthca_array_clear(struct mthca_array *array, int index);
 int mthca_array_init(struct mthca_array *array, int nent);
 void mthca_array_cleanup(struct mthca_array *array, int nent);
+int mthca_buf_alloc(struct mthca_dev *dev, int size, int max_direct,
+                   union mthca_buf *buf, int *is_direct, struct mthca_pd *pd,
+                   int hca_write, struct mthca_mr *mr);
+void mthca_buf_free(struct mthca_dev *dev, int size, union mthca_buf *buf,
+                   int is_direct, struct mthca_mr *mr);
 
 int mthca_init_uar_table(struct mthca_dev *dev);
 int mthca_init_pd_table(struct mthca_dev *dev);
diff --git a/drivers/infiniband/hw/mthca/mthca_provider.h b/drivers/infiniband/hw/mthca/mthca_provider.h
index 624651edf577bd5d4991f6ea81751a7df2534dda..b95249ee46cf2da1959acef7e9b520ce309b57d7 100644
--- a/drivers/infiniband/hw/mthca/mthca_provider.h
+++ b/drivers/infiniband/hw/mthca/mthca_provider.h
@@ -51,6 +51,11 @@ struct mthca_buf_list {
        DECLARE_PCI_UNMAP_ADDR(mapping)
 };
 
+union mthca_buf {
+       struct mthca_buf_list direct;
+       struct mthca_buf_list *page_list;
+};
+
 struct mthca_uar {
        unsigned long pfn;
        int           index;
@@ -187,10 +192,7 @@ struct mthca_cq {
        __be32                *arm_db;
        int                    arm_sn;
 
-       union {
-               struct mthca_buf_list direct;
-               struct mthca_buf_list *page_list;
-       }                      queue;
+       union mthca_buf        queue;
        struct mthca_mr        mr;
        wait_queue_head_t      wait;
 };
@@ -228,10 +230,7 @@ struct mthca_qp {
        int                    send_wqe_offset;
 
        u64                   *wrid;
-       union {
-               struct mthca_buf_list direct;
-               struct mthca_buf_list *page_list;
-       }                      queue;
+       union mthca_buf        queue;
 
        wait_queue_head_t      wait;
 };
diff --git a/drivers/infiniband/hw/mthca/mthca_qp.c b/drivers/infiniband/hw/mthca/mthca_qp.c
index b7e3d2342799086174de85a1d8ab3f44f4248456..b5a0bef15b7e612de702623a4cc726551a1aeb73 100644
--- a/drivers/infiniband/hw/mthca/mthca_qp.c
+++ b/drivers/infiniband/hw/mthca/mthca_qp.c
@@ -926,10 +926,6 @@ static int mthca_alloc_wqe_buf(struct mthca_dev *dev,
                               struct mthca_qp *qp)
 {
        int size;
-       int i;
-       int npages, shift;
-       dma_addr_t t;
-       u64 *dma_list = NULL;
        int err = -ENOMEM;
 
        size = sizeof (struct mthca_next_seg) +
@@ -979,116 +975,24 @@ static int mthca_alloc_wqe_buf(struct mthca_dev *dev,
        if (!qp->wrid)
                goto err_out;
 
-       if (size <= MTHCA_MAX_DIRECT_QP_SIZE) {
-               qp->is_direct = 1;
-               npages = 1;
-               shift = get_order(size) + PAGE_SHIFT;
-
-               if (0)
-                       mthca_dbg(dev, "Creating direct QP of size %d (shift %d)\n",
-                                 size, shift);
-
-               qp->queue.direct.buf = dma_alloc_coherent(&dev->pdev->dev, size,
-                                                         &t, GFP_KERNEL);
-               if (!qp->queue.direct.buf)
-                       goto err_out;
-
-               pci_unmap_addr_set(&qp->queue.direct, mapping, t);
-
-               memset(qp->queue.direct.buf, 0, size);
-
-               while (t & ((1 << shift) - 1)) {
-                       --shift;
-                       npages *= 2;
-               }
-
-               dma_list = kmalloc(npages * sizeof *dma_list, GFP_KERNEL);
-               if (!dma_list)
-                       goto err_out_free;
-
-               for (i = 0; i < npages; ++i)
-                       dma_list[i] = t + i * (1 << shift);
-       } else {
-               qp->is_direct = 0;
-               npages = size / PAGE_SIZE;
-               shift = PAGE_SHIFT;
-
-               if (0)
-                       mthca_dbg(dev, "Creating indirect QP with %d pages\n", npages);
-
-               dma_list = kmalloc(npages * sizeof *dma_list, GFP_KERNEL);
-               if (!dma_list)
-                       goto err_out;
-
-               qp->queue.page_list = kmalloc(npages *
-                                             sizeof *qp->queue.page_list,
-                                             GFP_KERNEL);
-               if (!qp->queue.page_list)
-                       goto err_out;
-
-               for (i = 0; i < npages; ++i) {
-                       qp->queue.page_list[i].buf =
-                               dma_alloc_coherent(&dev->pdev->dev, PAGE_SIZE,
-                                                  &t, GFP_KERNEL);
-                       if (!qp->queue.page_list[i].buf)
-                               goto err_out_free;
-
-                       memset(qp->queue.page_list[i].buf, 0, PAGE_SIZE);
-
-                       pci_unmap_addr_set(&qp->queue.page_list[i], mapping, t);
-                       dma_list[i] = t;
-               }
-       }
-
-       err = mthca_mr_alloc_phys(dev, pd->pd_num, dma_list, shift,
-                                 npages, 0, size,
-                                 MTHCA_MPT_FLAG_LOCAL_READ,
-                                 &qp->mr);
+       err = mthca_buf_alloc(dev, size, MTHCA_MAX_DIRECT_QP_SIZE,
+                             &qp->queue, &qp->is_direct, pd, 0, &qp->mr);
        if (err)
-               goto err_out_free;
+               goto err_out;
 
-       kfree(dma_list);
        return 0;
 
- err_out_free:
-       if (qp->is_direct) {
-               dma_free_coherent(&dev->pdev->dev, size, qp->queue.direct.buf,
-                                 pci_unmap_addr(&qp->queue.direct, mapping));
-       } else
-               for (i = 0; i < npages; ++i) {
-                       if (qp->queue.page_list[i].buf)
-                               dma_free_coherent(&dev->pdev->dev, PAGE_SIZE,
-                                                 qp->queue.page_list[i].buf,
-                                                 pci_unmap_addr(&qp->queue.page_list[i],
-                                                                mapping));
-
-               }
-
- err_out:
+err_out:
        kfree(qp->wrid);
-       kfree(dma_list);
        return err;
 }
 
 static void mthca_free_wqe_buf(struct mthca_dev *dev,
                               struct mthca_qp *qp)
 {
-       int i;
-       int size = PAGE_ALIGN(qp->send_wqe_offset +
-                             (qp->sq.max << qp->sq.wqe_shift));
-
-       if (qp->is_direct) {
-               dma_free_coherent(&dev->pdev->dev, size, qp->queue.direct.buf,
-                                 pci_unmap_addr(&qp->queue.direct, mapping));
-       } else {
-               for (i = 0; i < size / PAGE_SIZE; ++i) {
-                       dma_free_coherent(&dev->pdev->dev, PAGE_SIZE,
-                                         qp->queue.page_list[i].buf,
-                                         pci_unmap_addr(&qp->queue.page_list[i],
-                                                        mapping));
-               }
-       }
-
+       mthca_buf_free(dev, PAGE_ALIGN(qp->send_wqe_offset +
+                                      (qp->sq.max << qp->sq.wqe_shift)),
+                      &qp->queue, qp->is_direct, &qp->mr);
        kfree(qp->wrid);
 }
 
@@ -1433,7 +1337,6 @@ void mthca_free_qp(struct mthca_dev *dev,
                if (qp->ibqp.send_cq != qp->ibqp.recv_cq)
                        mthca_cq_clean(dev, to_mcq(qp->ibqp.recv_cq)->cqn, qp->qpn);
 
-               mthca_free_mr(dev, &qp->mr);
                mthca_free_memfree(dev, qp);
                mthca_free_wqe_buf(dev, qp);
        }