drivers/gpu/drm/exynos/exynos_drm_buf.c
/* exynos_drm_buf.c
 *
 * Copyright (c) 2011 Samsung Electronics Co., Ltd.
 * Author: Inki Dae <inki.dae@samsung.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include "drmP.h"
#include "drm.h"
#include "exynos_drm.h"

#include "exynos_drm_drv.h"
#include "exynos_drm_gem.h"
#include "exynos_drm_buf.h"

static int lowlevel_buffer_allocate(struct drm_device *dev,
                unsigned int flags, struct exynos_drm_gem_buf *buf)
{
        dma_addr_t start_addr, end_addr;
        unsigned int npages, page_size, i = 0;
        struct scatterlist *sgl;
        int ret = 0;

        DRM_DEBUG_KMS("%s\n", __FILE__);

        if (flags & EXYNOS_BO_NONCONTIG) {
                DRM_DEBUG_KMS("unsupported allocation type.\n");
                return -EINVAL;
        }

        if (buf->dma_addr) {
                DRM_DEBUG_KMS("already allocated.\n");
                return 0;
        }

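        /*
         * Pick a chunk size for the scatterlist below: 1MiB sections for
         * large buffers, 64KiB blocks for mid-sized ones and single pages
         * otherwise.  The "+ 1" leaves one extra entry for a tail chunk
         * that does not fill a whole unit.
         */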
        if (buf->size >= SZ_1M) {
                npages = (buf->size >> SECTION_SHIFT) + 1;
                page_size = SECTION_SIZE;
        } else if (buf->size >= SZ_64K) {
                npages = (buf->size >> 16) + 1;
                page_size = SZ_64K;
        } else {
                npages = (buf->size >> PAGE_SHIFT) + 1;
                page_size = PAGE_SIZE;
        }

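        /* One scatterlist entry per chunk computed above. */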
        buf->sgt = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
        if (!buf->sgt) {
                DRM_ERROR("failed to allocate sg table.\n");
                return -ENOMEM;
        }

        ret = sg_alloc_table(buf->sgt, npages, GFP_KERNEL);
        if (ret < 0) {
                DRM_ERROR("failed to initialize sg table.\n");
                kfree(buf->sgt);
                buf->sgt = NULL;
                return -ENOMEM;
        }

        buf->kvaddr = dma_alloc_writecombine(dev->dev, buf->size,
                        &buf->dma_addr, GFP_KERNEL);
        if (!buf->kvaddr) {
                DRM_ERROR("failed to allocate buffer.\n");
                ret = -ENOMEM;
                goto err1;
        }

        start_addr = buf->dma_addr;
        end_addr = buf->dma_addr + buf->size;

        buf->pages = kzalloc(sizeof(struct page *) * npages, GFP_KERNEL);
        if (!buf->pages) {
                DRM_ERROR("failed to allocate pages.\n");
                ret = -ENOMEM;
                goto err2;
        }

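        /*
         * The allocation above is physically contiguous, so walk it one
         * chunk at a time, derive a struct page for each chunk from its
         * bus address (this assumes a 1:1 DMA/physical address mapping)
         * and record the chunk in both buf->pages and the scatterlist.
         */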
        sgl = buf->sgt->sgl;

        while (i < npages) {
                buf->pages[i] = phys_to_page(start_addr);
                sg_set_page(sgl, buf->pages[i], page_size, 0);
                sg_dma_address(sgl) = start_addr;
                start_addr += page_size;
                if (end_addr - start_addr < page_size)
                        break;
                sgl = sg_next(sgl);
                i++;
        }

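        /* Map whatever is left (less than one full chunk) into the next entry. */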
        buf->pages[i + 1] = phys_to_page(start_addr);

        sgl = sg_next(sgl);
        sg_set_page(sgl, buf->pages[i + 1], end_addr - start_addr, 0);

        DRM_DEBUG_KMS("vaddr(0x%lx), dma_addr(0x%lx), size(0x%lx)\n",
                        (unsigned long)buf->kvaddr,
                        (unsigned long)buf->dma_addr,
                        buf->size);

        return ret;
err2:
        dma_free_writecombine(dev->dev, buf->size, buf->kvaddr,
                        (dma_addr_t)buf->dma_addr);
        buf->dma_addr = (dma_addr_t)NULL;
err1:
        sg_free_table(buf->sgt);
        kfree(buf->sgt);
        buf->sgt = NULL;

        return ret;
}

static void lowlevel_buffer_deallocate(struct drm_device *dev,
                unsigned int flags, struct exynos_drm_gem_buf *buf)
{
        DRM_DEBUG_KMS("%s.\n", __FILE__);

        /*
         * Release only physically contiguous memory here; non-contiguous
         * memory is released by the exynos gem framework.
         */
        if (flags & EXYNOS_BO_NONCONTIG) {
                DRM_DEBUG_KMS("unsupported allocation type.\n");
                return;
        }

        if (!buf->dma_addr) {
                DRM_DEBUG_KMS("dma_addr is invalid.\n");
                return;
        }

        DRM_DEBUG_KMS("vaddr(0x%lx), dma_addr(0x%lx), size(0x%lx)\n",
                        (unsigned long)buf->kvaddr,
                        (unsigned long)buf->dma_addr,
                        buf->size);

        sg_free_table(buf->sgt);

        kfree(buf->sgt);
        buf->sgt = NULL;

        kfree(buf->pages);
        buf->pages = NULL;

        dma_free_writecombine(dev->dev, buf->size, buf->kvaddr,
                                (dma_addr_t)buf->dma_addr);
        buf->dma_addr = (dma_addr_t)NULL;
}

struct exynos_drm_gem_buf *exynos_drm_init_buf(struct drm_device *dev,
                                                unsigned int size)
{
        struct exynos_drm_gem_buf *buffer;

        DRM_DEBUG_KMS("%s.\n", __FILE__);
        DRM_DEBUG_KMS("desired size = 0x%x\n", size);

        buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
        if (!buffer) {
                DRM_ERROR("failed to allocate exynos_drm_gem_buf.\n");
                return NULL;
        }

        buffer->size = size;
        return buffer;
}

void exynos_drm_fini_buf(struct drm_device *dev,
                                struct exynos_drm_gem_buf *buffer)
{
        DRM_DEBUG_KMS("%s.\n", __FILE__);

        if (!buffer) {
                DRM_DEBUG_KMS("buffer is null.\n");
                return;
        }

        kfree(buffer);
        buffer = NULL;
}

int exynos_drm_alloc_buf(struct drm_device *dev,
                struct exynos_drm_gem_buf *buf, unsigned int flags)
{
        /*
         * Allocate a memory region and store its kvaddr and dma_addr in
         * the buffer object.
         */
        if (lowlevel_buffer_allocate(dev, flags, buf) < 0)
                return -ENOMEM;

        return 0;
}

void exynos_drm_free_buf(struct drm_device *dev,
                unsigned int flags, struct exynos_drm_gem_buf *buffer)
{
        lowlevel_buffer_deallocate(dev, flags, buffer);
}
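
/*
 * Example usage (illustrative sketch): callers such as the exynos gem code
 * are assumed to pair these helpers roughly as follows.
 *
 *	struct exynos_drm_gem_buf *buffer;
 *
 *	buffer = exynos_drm_init_buf(dev, PAGE_ALIGN(size));
 *	if (!buffer)
 *		return -ENOMEM;
 *
 *	if (exynos_drm_alloc_buf(dev, buffer, flags)) {
 *		exynos_drm_fini_buf(dev, buffer);
 *		return -ENOMEM;
 *	}
 *
 *	... use buffer->kvaddr and buffer->dma_addr ...
 *
 *	exynos_drm_free_buf(dev, flags, buffer);
 *	exynos_drm_fini_buf(dev, buffer);
 */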