]> git.karo-electronics.de Git - karo-tx-linux.git/blob - drivers/gpu/drm/exynos/exynos_drm_buf.c
rt2x00: rt2800pci: use module_pci_driver macro
[karo-tx-linux.git] / drivers / gpu / drm / exynos / exynos_drm_buf.c
1 /* exynos_drm_buf.c
2  *
3  * Copyright (c) 2011 Samsung Electronics Co., Ltd.
4  * Author: Inki Dae <inki.dae@samsung.com>
5  *
6  * This program is free software; you can redistribute  it and/or modify it
7  * under  the terms of  the GNU General  Public License as published by the
8  * Free Software Foundation;  either version 2 of the  License, or (at your
9  * option) any later version.
10  */
11
12 #include <drm/drmP.h>
13 #include <drm/exynos_drm.h>
14
15 #include "exynos_drm_drv.h"
16 #include "exynos_drm_gem.h"
17 #include "exynos_drm_buf.h"
18 #include "exynos_drm_iommu.h"
19
20 static int lowlevel_buffer_allocate(struct drm_device *dev,
21                 unsigned int flags, struct exynos_drm_gem_buf *buf)
22 {
23         int ret = 0;
24         enum dma_attr attr;
25         unsigned int nr_pages;
26
27         if (buf->dma_addr) {
28                 DRM_DEBUG_KMS("already allocated.\n");
29                 return 0;
30         }
31
32         init_dma_attrs(&buf->dma_attrs);
33
34         /*
35          * if EXYNOS_BO_CONTIG, fully physically contiguous memory
36          * region will be allocated else physically contiguous
37          * as possible.
38          */
39         if (!(flags & EXYNOS_BO_NONCONTIG))
40                 dma_set_attr(DMA_ATTR_FORCE_CONTIGUOUS, &buf->dma_attrs);
41
42         /*
43          * if EXYNOS_BO_WC or EXYNOS_BO_NONCACHABLE, writecombine mapping
44          * else cachable mapping.
45          */
46         if (flags & EXYNOS_BO_WC || !(flags & EXYNOS_BO_CACHABLE))
47                 attr = DMA_ATTR_WRITE_COMBINE;
48         else
49                 attr = DMA_ATTR_NON_CONSISTENT;
50
51         dma_set_attr(attr, &buf->dma_attrs);
52         dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &buf->dma_attrs);
53
54         nr_pages = buf->size >> PAGE_SHIFT;
55
56         if (!is_drm_iommu_supported(dev)) {
57                 dma_addr_t start_addr;
58                 unsigned int i = 0;
59
60                 buf->pages = drm_calloc_large(nr_pages, sizeof(struct page *));
61                 if (!buf->pages) {
62                         DRM_ERROR("failed to allocate pages.\n");
63                         return -ENOMEM;
64                 }
65
66                 buf->kvaddr = dma_alloc_attrs(dev->dev, buf->size,
67                                         &buf->dma_addr, GFP_KERNEL,
68                                         &buf->dma_attrs);
69                 if (!buf->kvaddr) {
70                         DRM_ERROR("failed to allocate buffer.\n");
71                         ret = -ENOMEM;
72                         goto err_free;
73                 }
74
75                 start_addr = buf->dma_addr;
76                 while (i < nr_pages) {
77                         buf->pages[i] = phys_to_page(start_addr);
78                         start_addr += PAGE_SIZE;
79                         i++;
80                 }
81         } else {
82
83                 buf->pages = dma_alloc_attrs(dev->dev, buf->size,
84                                         &buf->dma_addr, GFP_KERNEL,
85                                         &buf->dma_attrs);
86                 if (!buf->pages) {
87                         DRM_ERROR("failed to allocate buffer.\n");
88                         return -ENOMEM;
89                 }
90         }
91
92         buf->sgt = drm_prime_pages_to_sg(buf->pages, nr_pages);
93         if (!buf->sgt) {
94                 DRM_ERROR("failed to get sg table.\n");
95                 ret = -ENOMEM;
96                 goto err_free_attrs;
97         }
98
99         DRM_DEBUG_KMS("dma_addr(0x%lx), size(0x%lx)\n",
100                         (unsigned long)buf->dma_addr,
101                         buf->size);
102
103         return ret;
104
105 err_free_attrs:
106         dma_free_attrs(dev->dev, buf->size, buf->pages,
107                         (dma_addr_t)buf->dma_addr, &buf->dma_attrs);
108         buf->dma_addr = (dma_addr_t)NULL;
109 err_free:
110         if (!is_drm_iommu_supported(dev))
111                 drm_free_large(buf->pages);
112
113         return ret;
114 }
115
116 static void lowlevel_buffer_deallocate(struct drm_device *dev,
117                 unsigned int flags, struct exynos_drm_gem_buf *buf)
118 {
119         if (!buf->dma_addr) {
120                 DRM_DEBUG_KMS("dma_addr is invalid.\n");
121                 return;
122         }
123
124         DRM_DEBUG_KMS("dma_addr(0x%lx), size(0x%lx)\n",
125                         (unsigned long)buf->dma_addr,
126                         buf->size);
127
128         sg_free_table(buf->sgt);
129
130         kfree(buf->sgt);
131         buf->sgt = NULL;
132
133         if (!is_drm_iommu_supported(dev)) {
134                 dma_free_attrs(dev->dev, buf->size, buf->kvaddr,
135                                 (dma_addr_t)buf->dma_addr, &buf->dma_attrs);
136                 drm_free_large(buf->pages);
137         } else
138                 dma_free_attrs(dev->dev, buf->size, buf->pages,
139                                 (dma_addr_t)buf->dma_addr, &buf->dma_attrs);
140
141         buf->dma_addr = (dma_addr_t)NULL;
142 }
143
144 struct exynos_drm_gem_buf *exynos_drm_init_buf(struct drm_device *dev,
145                                                 unsigned int size)
146 {
147         struct exynos_drm_gem_buf *buffer;
148
149         DRM_DEBUG_KMS("desired size = 0x%x\n", size);
150
151         buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
152         if (!buffer)
153                 return NULL;
154
155         buffer->size = size;
156         return buffer;
157 }
158
/*
 * exynos_drm_fini_buf - destroy a buffer descriptor.
 * @dev: DRM device (unused).
 * @buffer: descriptor created by exynos_drm_init_buf(); may be NULL.
 *
 * kfree(NULL) is a no-op, so no guard is needed. The previous
 * "buffer = NULL" only cleared the local parameter copy (a dead store)
 * and has been dropped; callers must clear their own references.
 */
void exynos_drm_fini_buf(struct drm_device *dev,
				struct exynos_drm_gem_buf *buffer)
{
	kfree(buffer);
}
165
/*
 * exynos_drm_alloc_buf - allocate backing memory for a buffer descriptor.
 * @dev: DRM device to allocate against.
 * @buf: descriptor whose dma_addr/pages/sgt (and kvaddr without an IOMMU)
 *       are populated.
 * @flags: EXYNOS_BO_* allocation flags.
 *
 * Returns 0 on success or a negative errno on failure. The low-level
 * error code is now propagated directly instead of being collapsed to
 * -ENOMEM, so future low-level failure modes reach the caller intact
 * (today the only code produced is -ENOMEM, so callers are unaffected).
 */
int exynos_drm_alloc_buf(struct drm_device *dev,
		struct exynos_drm_gem_buf *buf, unsigned int flags)
{
	/*
	 * allocate memory region and set the memory information
	 * to vaddr and dma_addr of a buffer object.
	 */
	return lowlevel_buffer_allocate(dev, flags, buf);
}
179
/*
 * exynos_drm_free_buf - release the backing memory of a buffer descriptor.
 * @dev: DRM device the buffer belongs to.
 * @flags: EXYNOS_BO_* flags, forwarded to the low-level release path.
 * @buffer: descriptor whose DMA memory and sg table are freed.
 *
 * Thin wrapper around lowlevel_buffer_deallocate(); the descriptor itself
 * is freed separately by exynos_drm_fini_buf().
 */
void exynos_drm_free_buf(struct drm_device *dev,
		unsigned int flags, struct exynos_drm_gem_buf *buffer)
{
	lowlevel_buffer_deallocate(dev, flags, buffer);
}