/*
 * Copyright (c) 2011 Samsung Electronics Co., Ltd.
 * Author: Inki Dae <inki.dae@samsung.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */
13 #include <drm/exynos_drm.h>
15 #include "exynos_drm_drv.h"
16 #include "exynos_drm_gem.h"
17 #include "exynos_drm_buf.h"
18 #include "exynos_drm_iommu.h"
20 static int lowlevel_buffer_allocate(struct drm_device *dev,
21 unsigned int flags, struct exynos_drm_gem_buf *buf)
25 unsigned int nr_pages;
28 DRM_DEBUG_KMS("already allocated.\n");
32 init_dma_attrs(&buf->dma_attrs);
35 * if EXYNOS_BO_CONTIG, fully physically contiguous memory
36 * region will be allocated else physically contiguous
39 if (!(flags & EXYNOS_BO_NONCONTIG))
40 dma_set_attr(DMA_ATTR_FORCE_CONTIGUOUS, &buf->dma_attrs);
43 * if EXYNOS_BO_WC or EXYNOS_BO_NONCACHABLE, writecombine mapping
44 * else cachable mapping.
46 if (flags & EXYNOS_BO_WC || !(flags & EXYNOS_BO_CACHABLE))
47 attr = DMA_ATTR_WRITE_COMBINE;
49 attr = DMA_ATTR_NON_CONSISTENT;
51 dma_set_attr(attr, &buf->dma_attrs);
52 dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &buf->dma_attrs);
54 nr_pages = buf->size >> PAGE_SHIFT;
56 if (!is_drm_iommu_supported(dev)) {
57 dma_addr_t start_addr;
60 buf->pages = drm_calloc_large(nr_pages, sizeof(struct page *));
62 DRM_ERROR("failed to allocate pages.\n");
66 buf->kvaddr = dma_alloc_attrs(dev->dev, buf->size,
67 &buf->dma_addr, GFP_KERNEL,
70 DRM_ERROR("failed to allocate buffer.\n");
75 start_addr = buf->dma_addr;
76 while (i < nr_pages) {
77 buf->pages[i] = phys_to_page(start_addr);
78 start_addr += PAGE_SIZE;
83 buf->pages = dma_alloc_attrs(dev->dev, buf->size,
84 &buf->dma_addr, GFP_KERNEL,
87 DRM_ERROR("failed to allocate buffer.\n");
92 buf->sgt = drm_prime_pages_to_sg(buf->pages, nr_pages);
94 DRM_ERROR("failed to get sg table.\n");
99 DRM_DEBUG_KMS("dma_addr(0x%lx), size(0x%lx)\n",
100 (unsigned long)buf->dma_addr,
106 dma_free_attrs(dev->dev, buf->size, buf->pages,
107 (dma_addr_t)buf->dma_addr, &buf->dma_attrs);
108 buf->dma_addr = (dma_addr_t)NULL;
110 if (!is_drm_iommu_supported(dev))
111 drm_free_large(buf->pages);
116 static void lowlevel_buffer_deallocate(struct drm_device *dev,
117 unsigned int flags, struct exynos_drm_gem_buf *buf)
119 if (!buf->dma_addr) {
120 DRM_DEBUG_KMS("dma_addr is invalid.\n");
124 DRM_DEBUG_KMS("dma_addr(0x%lx), size(0x%lx)\n",
125 (unsigned long)buf->dma_addr,
128 sg_free_table(buf->sgt);
133 if (!is_drm_iommu_supported(dev)) {
134 dma_free_attrs(dev->dev, buf->size, buf->kvaddr,
135 (dma_addr_t)buf->dma_addr, &buf->dma_attrs);
136 drm_free_large(buf->pages);
138 dma_free_attrs(dev->dev, buf->size, buf->pages,
139 (dma_addr_t)buf->dma_addr, &buf->dma_attrs);
141 buf->dma_addr = (dma_addr_t)NULL;
144 struct exynos_drm_gem_buf *exynos_drm_init_buf(struct drm_device *dev,
147 struct exynos_drm_gem_buf *buffer;
149 DRM_DEBUG_KMS("desired size = 0x%x\n", size);
151 buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
/*
 * Free a buffer descriptor created by exynos_drm_init_buf().
 *
 * @dev:    DRM device (unused, kept for API symmetry)
 * @buffer: descriptor to free; NULL is tolerated with a debug message.
 *
 * Frees only the descriptor itself — any backing storage must already
 * have been released via exynos_drm_free_buf().
 */
void exynos_drm_fini_buf(struct drm_device *dev,
				struct exynos_drm_gem_buf *buffer)
{
	if (!buffer) {
		DRM_DEBUG_KMS("buffer is null.\n");
		return;
	}

	kfree(buffer);
}
166 int exynos_drm_alloc_buf(struct drm_device *dev,
167 struct exynos_drm_gem_buf *buf, unsigned int flags)
171 * allocate memory region and set the memory information
172 * to vaddr and dma_addr of a buffer object.
174 if (lowlevel_buffer_allocate(dev, flags, buf) < 0)
/*
 * Release the backing storage of a buffer descriptor.
 *
 * @dev:    DRM device the buffer belongs to
 * @flags:  EXYNOS_BO_* flags the buffer was allocated with
 * @buffer: descriptor whose storage is freed; the descriptor itself
 *          remains valid and is later freed by exynos_drm_fini_buf().
 */
void exynos_drm_free_buf(struct drm_device *dev,
		unsigned int flags, struct exynos_drm_gem_buf *buffer)
{
	lowlevel_buffer_deallocate(dev, flags, buffer);
}