drm/exynos: stop copying sg table
author     Joonyoung Shim <jy0922.shim@samsung.com>
           Tue, 28 Jul 2015 08:53:20 +0000 (17:53 +0900)
committer  Inki Dae <inki.dae@samsung.com>
           Sun, 16 Aug 2015 04:33:44 +0000 (13:33 +0900)
struct exynos_drm_gem_buf already holds the pages of the buffer, so there
is no need to copy the buffer's sg table into the dma-buf attachment's sg
table; the attachment's sg table can simply be built from the buffer's pages.

Signed-off-by: Joonyoung Shim <jy0922.shim@samsung.com>
Signed-off-by: Inki Dae <inki.dae@samsung.com>
drivers/gpu/drm/exynos/exynos_drm_dmabuf.c
drivers/gpu/drm/exynos/exynos_drm_gem.c
drivers/gpu/drm/exynos/exynos_drm_gem.h
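
For context, drm_prime_pages_to_sg() used by this patch allocates a fresh
sg_table and fills it directly from the page array, which is why the
attachment now owns the table and must sg_free_table() and kfree() it on
detach. A minimal sketch of the equivalent logic, assuming a page-aligned
buffer and ignoring any merging of contiguous pages the helper may do (the
helper name pages_to_sgt below is made up for illustration):

    #include <linux/err.h>
    #include <linux/mm.h>
    #include <linux/scatterlist.h>
    #include <linux/slab.h>

    /* Sketch only: build an sg table from an array of pages.
     * drm_prime_pages_to_sg() does roughly this on top of
     * sg_alloc_table_from_pages(); error handling is abbreviated. */
    static struct sg_table *pages_to_sgt(struct page **pages,
                                         unsigned int npages)
    {
            struct sg_table *sgt;
            int ret;

            sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
            if (!sgt)
                    return ERR_PTR(-ENOMEM);

            /* One entry per page; physically adjacent pages may be merged. */
            ret = sg_alloc_table_from_pages(sgt, pages, npages, 0,
                                            (unsigned long)npages << PAGE_SHIFT,
                                            GFP_KERNEL);
            if (ret) {
                    kfree(sgt);
                    return ERR_PTR(ret);
            }

            return sgt;
    }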

diff --git a/drivers/gpu/drm/exynos/exynos_drm_dmabuf.c b/drivers/gpu/drm/exynos/exynos_drm_dmabuf.c
index d10f9b602bf720dadf2bef5eaf584898fa057cac..619ecddf35fac44f3e38af7e75b75d67797dada5 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_dmabuf.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_dmabuf.c
@@ -18,7 +18,7 @@
 #include <linux/dma-buf.h>
 
 struct exynos_drm_dmabuf_attachment {
-       struct sg_table sgt;
+       struct sg_table *sgt;
        enum dma_data_direction dir;
        bool is_mapped;
 };
@@ -53,13 +53,15 @@ static void exynos_gem_detach_dma_buf(struct dma_buf *dmabuf,
        if (!exynos_attach)
                return;
 
-       sgt = &exynos_attach->sgt;
-
-       if (exynos_attach->dir != DMA_NONE)
-               dma_unmap_sg(attach->dev, sgt->sgl, sgt->nents,
-                               exynos_attach->dir);
+       sgt = exynos_attach->sgt;
+       if (sgt) {
+               if (exynos_attach->dir != DMA_NONE)
+                       dma_unmap_sg(attach->dev, sgt->sgl, sgt->nents,
+                                       exynos_attach->dir);
+               sg_free_table(sgt);
+       }
 
-       sg_free_table(sgt);
+       kfree(sgt);
        kfree(exynos_attach);
        attach->priv = NULL;
 }
@@ -70,16 +72,13 @@ static struct sg_table *
 {
        struct exynos_drm_dmabuf_attachment *exynos_attach = attach->priv;
        struct exynos_drm_gem_obj *gem_obj = dma_buf_to_obj(attach->dmabuf);
-       struct drm_device *dev = gem_obj->base.dev;
        struct exynos_drm_gem_buf *buf;
-       struct scatterlist *rd, *wr;
-       struct sg_table *sgt = NULL;
-       unsigned int i;
-       int nents, ret;
+       struct sg_table *sgt;
+       int npages;
 
        /* just return current sgt if already requested. */
        if (exynos_attach->dir == dir && exynos_attach->is_mapped)
-               return &exynos_attach->sgt;
+               return exynos_attach->sgt;
 
        buf = gem_obj->buffer;
        if (!buf) {
@@ -87,42 +86,29 @@ static struct sg_table *
                return ERR_PTR(-ENOMEM);
        }
 
-       sgt = &exynos_attach->sgt;
+       npages = buf->size >> PAGE_SHIFT;
 
-       ret = sg_alloc_table(sgt, buf->sgt->orig_nents, GFP_KERNEL);
-       if (ret) {
-               DRM_ERROR("failed to alloc sgt.\n");
-               return ERR_PTR(-ENOMEM);
-       }
-
-       mutex_lock(&dev->struct_mutex);
-
-       rd = buf->sgt->sgl;
-       wr = sgt->sgl;
-       for (i = 0; i < sgt->orig_nents; ++i) {
-               sg_set_page(wr, sg_page(rd), rd->length, rd->offset);
-               rd = sg_next(rd);
-               wr = sg_next(wr);
-       }
+       sgt = drm_prime_pages_to_sg(buf->pages, npages);
+       if (IS_ERR(sgt))
+               goto err;
 
        if (dir != DMA_NONE) {
-               nents = dma_map_sg(attach->dev, sgt->sgl, sgt->orig_nents, dir);
-               if (!nents) {
+               if (!dma_map_sg(attach->dev, sgt->sgl, sgt->orig_nents, dir)) {
                        DRM_ERROR("failed to map sgl with iommu.\n");
                        sg_free_table(sgt);
                        sgt = ERR_PTR(-EIO);
-                       goto err_unlock;
+                       goto err;
                }
        }
 
        exynos_attach->is_mapped = true;
+       exynos_attach->sgt = sgt;
        exynos_attach->dir = dir;
        attach->priv = exynos_attach;
 
        DRM_DEBUG_PRIME("buffer size = 0x%lx\n", buf->size);
 
-err_unlock:
-       mutex_unlock(&dev->struct_mutex);
+err:
        return sgt;
 }
 
@@ -280,7 +266,6 @@ struct drm_gem_object *exynos_dmabuf_prime_import(struct drm_device *drm_dev,
        }
 
        exynos_gem_obj->buffer = buffer;
-       buffer->sgt = sgt;
        exynos_gem_obj->base.import_attach = attach;
 
        DRM_DEBUG_PRIME("dma_addr = %pad, size = 0x%lx\n", &buffer->dma_addr,
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.c b/drivers/gpu/drm/exynos/exynos_drm_gem.c
index 4d9a099076073473ac449d21746f1ce441d5c13a..fa04b9add09aaf79cbb5e8c1c945adbe4b2c40b0 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gem.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_gem.c
@@ -455,9 +455,6 @@ void exynos_drm_gem_free_object(struct drm_gem_object *obj)
        exynos_gem_obj = to_exynos_gem_obj(obj);
        buf = exynos_gem_obj->buffer;
 
-       if (obj->import_attach)
-               drm_prime_gem_destroy(obj, buf->sgt);
-
        exynos_drm_gem_destroy(to_exynos_gem_obj(obj));
 }
 
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.h b/drivers/gpu/drm/exynos/exynos_drm_gem.h
index 6f42e224828890cd116b2d32c40bf4daf329adc4..5979f22828d47fed5dd5ad5d66cedeb5a8575429 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gem.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_gem.h
@@ -30,7 +30,6 @@
  *     device address with IOMMU.
  * @write: whether pages will be written to by the caller.
  * @pages: Array of backing pages.
- * @sgt: sg table to transfer page data.
  * @size: size of allocated memory region.
  * @pfnmap: indicate whether memory region from userptr is mmaped with
  *     VM_PFNMAP or not.
@@ -43,7 +42,6 @@ struct exynos_drm_gem_buf {
        struct dma_attrs        dma_attrs;
        unsigned int            write;
        struct page             **pages;
-       struct sg_table         *sgt;
        unsigned long           size;
        bool                    pfnmap;
 };