drm: use common drm_gem_dmabuf_release in i915/exynos drivers
/* exynos_drm_dmabuf.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 * Author: Inki Dae <inki.dae@samsung.com>
 *
 * This program is free software; you can redistribute  it and/or modify it
 * under  the terms of  the GNU General  Public License as published by the
 * Free Software Foundation;  either version 2 of the  License, or (at your
 * option) any later version.
 */

#include <drm/drmP.h>
#include <drm/exynos_drm.h>
#include "exynos_drm_drv.h"
#include "exynos_drm_gem.h"

#include <linux/dma-buf.h>

struct exynos_drm_dmabuf_attachment {
        struct sg_table sgt;
        enum dma_data_direction dir;
        bool is_mapped;
};

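/*
 * exynos_gem_attach_dma_buf - dma_buf attach callback
 *
 * Allocate a per-attachment context that tracks the scatter-gather table
 * and DMA direction for this importer. The direction starts out as
 * DMA_NONE and is only changed once map_dma_buf has actually mapped the
 * buffer.
 */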
static int exynos_gem_attach_dma_buf(struct dma_buf *dmabuf,
                                        struct device *dev,
                                        struct dma_buf_attachment *attach)
{
        struct exynos_drm_dmabuf_attachment *exynos_attach;

        exynos_attach = kzalloc(sizeof(*exynos_attach), GFP_KERNEL);
        if (!exynos_attach)
                return -ENOMEM;

        exynos_attach->dir = DMA_NONE;
        attach->priv = exynos_attach;

        return 0;
}

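/*
 * exynos_gem_detach_dma_buf - dma_buf detach callback
 *
 * Unmap the scatter-gather list if it was mapped for DMA, then release
 * the sg table and free the per-attachment context allocated at attach
 * time.
 */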
static void exynos_gem_detach_dma_buf(struct dma_buf *dmabuf,
                                        struct dma_buf_attachment *attach)
{
        struct exynos_drm_dmabuf_attachment *exynos_attach = attach->priv;
        struct sg_table *sgt;

        if (!exynos_attach)
                return;

        sgt = &exynos_attach->sgt;

        if (exynos_attach->dir != DMA_NONE)
                dma_unmap_sg(attach->dev, sgt->sgl, sgt->nents,
                                exynos_attach->dir);

        sg_free_table(sgt);
        kfree(exynos_attach);
        attach->priv = NULL;
}

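/*
 * exynos_gem_map_dma_buf - dma_buf map_dma_buf callback
 *
 * Clone the exported GEM buffer's sg table into the per-attachment
 * context and map it for DMA in the requested direction. If the
 * attachment has already been mapped with the same direction, the
 * cached sg table is returned without mapping again.
 */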
static struct sg_table *
                exynos_gem_map_dma_buf(struct dma_buf_attachment *attach,
                                        enum dma_data_direction dir)
{
        struct exynos_drm_dmabuf_attachment *exynos_attach = attach->priv;
        struct exynos_drm_gem_obj *gem_obj = attach->dmabuf->priv;
        struct drm_device *dev = gem_obj->base.dev;
        struct exynos_drm_gem_buf *buf;
        struct scatterlist *rd, *wr;
        struct sg_table *sgt = NULL;
        unsigned int i;
        int nents, ret;

        /* just return current sgt if already requested. */
        if (exynos_attach->dir == dir && exynos_attach->is_mapped)
                return &exynos_attach->sgt;

        buf = gem_obj->buffer;
        if (!buf) {
                DRM_ERROR("buffer is null.\n");
                return ERR_PTR(-ENOMEM);
        }

        sgt = &exynos_attach->sgt;

        ret = sg_alloc_table(sgt, buf->sgt->orig_nents, GFP_KERNEL);
        if (ret) {
                DRM_ERROR("failed to alloc sgt.\n");
                return ERR_PTR(-ENOMEM);
        }

        mutex_lock(&dev->struct_mutex);

        rd = buf->sgt->sgl;
        wr = sgt->sgl;
        for (i = 0; i < sgt->orig_nents; ++i) {
                sg_set_page(wr, sg_page(rd), rd->length, rd->offset);
                rd = sg_next(rd);
                wr = sg_next(wr);
        }

        if (dir != DMA_NONE) {
                nents = dma_map_sg(attach->dev, sgt->sgl, sgt->orig_nents, dir);
                if (!nents) {
                        DRM_ERROR("failed to map sgl with iommu.\n");
                        sg_free_table(sgt);
                        sgt = ERR_PTR(-EIO);
                        goto err_unlock;
                }
        }

        exynos_attach->is_mapped = true;
        exynos_attach->dir = dir;
        attach->priv = exynos_attach;

        DRM_DEBUG_PRIME("buffer size = 0x%lx\n", buf->size);

err_unlock:
        mutex_unlock(&dev->struct_mutex);
        return sgt;
}

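/*
 * exynos_gem_unmap_dma_buf - dma_buf unmap_dma_buf callback
 *
 * Intentionally empty: unmapping and freeing of the per-attachment
 * sg table are deferred until detach time.
 */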
static void exynos_gem_unmap_dma_buf(struct dma_buf_attachment *attach,
                                                struct sg_table *sgt,
                                                enum dma_data_direction dir)
{
        /* Nothing to do. */
}

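/*
 * CPU access helpers (kmap/kunmap and their atomic variants) are not
 * implemented yet; the stubs below only exist so the dma_buf_ops table
 * can be fully populated.
 */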
static void *exynos_gem_dmabuf_kmap_atomic(struct dma_buf *dma_buf,
                                                unsigned long page_num)
{
        /* TODO */

        return NULL;
}

static void exynos_gem_dmabuf_kunmap_atomic(struct dma_buf *dma_buf,
                                                unsigned long page_num,
                                                void *addr)
{
        /* TODO */
}

static void *exynos_gem_dmabuf_kmap(struct dma_buf *dma_buf,
                                        unsigned long page_num)
{
        /* TODO */

        return NULL;
}

static void exynos_gem_dmabuf_kunmap(struct dma_buf *dma_buf,
                                        unsigned long page_num, void *addr)
{
        /* TODO */
}

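/*
 * mmap of the exported buffer through the dma-buf file descriptor is
 * not implemented; the callback simply reports the operation as
 * unsupported.
 */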
static int exynos_gem_dmabuf_mmap(struct dma_buf *dma_buf,
        struct vm_area_struct *vma)
{
        return -ENOTTY;
}

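/*
 * dma_buf_ops for buffers exported by the exynos GEM layer. The release
 * callback is the common drm_gem_dmabuf_release() helper, which drops
 * the reference on the exported GEM object when the dma-buf goes away.
 */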
static struct dma_buf_ops exynos_dmabuf_ops = {
        .attach                 = exynos_gem_attach_dma_buf,
        .detach                 = exynos_gem_detach_dma_buf,
        .map_dma_buf            = exynos_gem_map_dma_buf,
        .unmap_dma_buf          = exynos_gem_unmap_dma_buf,
        .kmap                   = exynos_gem_dmabuf_kmap,
        .kmap_atomic            = exynos_gem_dmabuf_kmap_atomic,
        .kunmap                 = exynos_gem_dmabuf_kunmap,
        .kunmap_atomic          = exynos_gem_dmabuf_kunmap_atomic,
        .mmap                   = exynos_gem_dmabuf_mmap,
        .release                = drm_gem_dmabuf_release,
};

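/*
 * exynos_dmabuf_prime_export - export a GEM object as a dma-buf
 *
 * Wrap the exynos GEM object in a dma_buf using the ops above; the GEM
 * object pointer is stored as the dma_buf private data so the map/unmap
 * callbacks and the common release helper can reach it again.
 *
 * Together with exynos_dmabuf_prime_import() below, this is meant to be
 * hooked into the driver's struct drm_driver. A rough sketch of that
 * wiring (the exact set of fields lives in exynos_drm_drv.c and may
 * differ between kernel versions):
 *
 *      .prime_handle_to_fd     = drm_gem_prime_handle_to_fd,
 *      .prime_fd_to_handle     = drm_gem_prime_fd_to_handle,
 *      .gem_prime_export       = exynos_dmabuf_prime_export,
 *      .gem_prime_import       = exynos_dmabuf_prime_import,
 */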
struct dma_buf *exynos_dmabuf_prime_export(struct drm_device *drm_dev,
                                struct drm_gem_object *obj, int flags)
{
        struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj);

        return dma_buf_export(exynos_gem_obj, &exynos_dmabuf_ops,
                                exynos_gem_obj->base.size, flags);
}

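/*
 * exynos_dmabuf_prime_import - import a dma-buf as a GEM object
 *
 * If the dma-buf was exported by this driver and device, reuse the
 * existing GEM object and just take another reference on it. Otherwise
 * attach to the dma-buf, map it for DMA and build a new GEM object
 * around the resulting sg table.
 */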
struct drm_gem_object *exynos_dmabuf_prime_import(struct drm_device *drm_dev,
                                struct dma_buf *dma_buf)
{
        struct dma_buf_attachment *attach;
        struct sg_table *sgt;
        struct scatterlist *sgl;
        struct exynos_drm_gem_obj *exynos_gem_obj;
        struct exynos_drm_gem_buf *buffer;
        int ret;

        /* is this one of our own objects? */
        if (dma_buf->ops == &exynos_dmabuf_ops) {
                struct drm_gem_object *obj;

                exynos_gem_obj = dma_buf->priv;
                obj = &exynos_gem_obj->base;

                /* is it from our device? */
                if (obj->dev == drm_dev) {
                        /*
                         * Importing a dmabuf exported from our own gem
                         * increases the refcount on the gem itself instead
                         * of the f_count of the dmabuf.
                         */
                        drm_gem_object_reference(obj);
                        return obj;
                }
        }

        attach = dma_buf_attach(dma_buf, drm_dev->dev);
        if (IS_ERR(attach))
                return ERR_PTR(-EINVAL);

        get_dma_buf(dma_buf);

        sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
        if (IS_ERR_OR_NULL(sgt)) {
                /* some exporters may return NULL instead of an ERR_PTR */
                ret = sgt ? PTR_ERR(sgt) : -ENOMEM;
                goto err_buf_detach;
        }

        buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
        if (!buffer) {
                DRM_ERROR("failed to allocate exynos_drm_gem_buf.\n");
                ret = -ENOMEM;
                goto err_unmap_attach;
        }

        exynos_gem_obj = exynos_drm_gem_init(drm_dev, dma_buf->size);
        if (!exynos_gem_obj) {
                ret = -ENOMEM;
                goto err_free_buffer;
        }

        sgl = sgt->sgl;

        buffer->size = dma_buf->size;
        buffer->dma_addr = sg_dma_address(sgl);

        if (sgt->nents == 1) {
                /* always physically contiguous memory if sgt->nents is 1. */
                exynos_gem_obj->flags |= EXYNOS_BO_CONTIG;
        } else {
                /*
                 * this case could be CONTIG or NONCONTIG type but for now
                 * it is set to NONCONTIG.
                 * TODO: find a way for the exporter to notify the importer
                 * of its buffer type.
                 */
                exynos_gem_obj->flags |= EXYNOS_BO_NONCONTIG;
        }

        exynos_gem_obj->buffer = buffer;
        buffer->sgt = sgt;
        exynos_gem_obj->base.import_attach = attach;

        DRM_DEBUG_PRIME("dma_addr = 0x%x, size = 0x%lx\n", buffer->dma_addr,
                                                                buffer->size);

        return &exynos_gem_obj->base;

err_free_buffer:
        kfree(buffer);
        buffer = NULL;
err_unmap_attach:
        dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
err_buf_detach:
        dma_buf_detach(dma_buf, attach);
        dma_buf_put(dma_buf);

        return ERR_PTR(ret);
}

MODULE_AUTHOR("Inki Dae <inki.dae@samsung.com>");
MODULE_DESCRIPTION("Samsung SoC DRM DMABUF Module");
MODULE_LICENSE("GPL");