/*
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 * Author: Inki Dae <inki.dae@samsung.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */
13 #include <drm/exynos_drm.h>
14 #include "exynos_drm_drv.h"
15 #include "exynos_drm_gem.h"
17 #include <linux/dma-buf.h>
19 struct exynos_drm_dmabuf_attachment {
21 enum dma_data_direction dir;
25 static int exynos_gem_attach_dma_buf(struct dma_buf *dmabuf,
27 struct dma_buf_attachment *attach)
29 struct exynos_drm_dmabuf_attachment *exynos_attach;
31 exynos_attach = kzalloc(sizeof(*exynos_attach), GFP_KERNEL);
35 exynos_attach->dir = DMA_NONE;
36 attach->priv = exynos_attach;
41 static void exynos_gem_detach_dma_buf(struct dma_buf *dmabuf,
42 struct dma_buf_attachment *attach)
44 struct exynos_drm_dmabuf_attachment *exynos_attach = attach->priv;
50 sgt = &exynos_attach->sgt;
52 if (exynos_attach->dir != DMA_NONE)
53 dma_unmap_sg(attach->dev, sgt->sgl, sgt->nents,
61 static struct sg_table *
62 exynos_gem_map_dma_buf(struct dma_buf_attachment *attach,
63 enum dma_data_direction dir)
65 struct exynos_drm_dmabuf_attachment *exynos_attach = attach->priv;
66 struct exynos_drm_gem_obj *gem_obj = attach->dmabuf->priv;
67 struct drm_device *dev = gem_obj->base.dev;
68 struct exynos_drm_gem_buf *buf;
69 struct scatterlist *rd, *wr;
70 struct sg_table *sgt = NULL;
74 /* just return current sgt if already requested. */
75 if (exynos_attach->dir == dir && exynos_attach->is_mapped)
76 return &exynos_attach->sgt;
78 buf = gem_obj->buffer;
80 DRM_ERROR("buffer is null.\n");
81 return ERR_PTR(-ENOMEM);
84 sgt = &exynos_attach->sgt;
86 ret = sg_alloc_table(sgt, buf->sgt->orig_nents, GFP_KERNEL);
88 DRM_ERROR("failed to alloc sgt.\n");
89 return ERR_PTR(-ENOMEM);
92 mutex_lock(&dev->struct_mutex);
96 for (i = 0; i < sgt->orig_nents; ++i) {
97 sg_set_page(wr, sg_page(rd), rd->length, rd->offset);
102 if (dir != DMA_NONE) {
103 nents = dma_map_sg(attach->dev, sgt->sgl, sgt->orig_nents, dir);
105 DRM_ERROR("failed to map sgl with iommu.\n");
112 exynos_attach->is_mapped = true;
113 exynos_attach->dir = dir;
114 attach->priv = exynos_attach;
116 DRM_DEBUG_PRIME("buffer size = 0x%lx\n", buf->size);
119 mutex_unlock(&dev->struct_mutex);
123 static void exynos_gem_unmap_dma_buf(struct dma_buf_attachment *attach,
124 struct sg_table *sgt,
125 enum dma_data_direction dir)
130 static void *exynos_gem_dmabuf_kmap_atomic(struct dma_buf *dma_buf,
131 unsigned long page_num)
/* dma_buf_ops.kunmap_atomic callback — nothing mapped, nothing to undo. */
static void exynos_gem_dmabuf_kunmap_atomic(struct dma_buf *dma_buf,
						unsigned long page_num,
						void *addr)
{
	/* TODO */
}
145 static void *exynos_gem_dmabuf_kmap(struct dma_buf *dma_buf,
146 unsigned long page_num)
/* dma_buf_ops.kunmap callback — nothing mapped, nothing to undo. */
static void exynos_gem_dmabuf_kunmap(struct dma_buf *dma_buf,
					unsigned long page_num, void *addr)
{
	/* TODO */
}
159 static int exynos_gem_dmabuf_mmap(struct dma_buf *dma_buf,
160 struct vm_area_struct *vma)
165 static struct dma_buf_ops exynos_dmabuf_ops = {
166 .attach = exynos_gem_attach_dma_buf,
167 .detach = exynos_gem_detach_dma_buf,
168 .map_dma_buf = exynos_gem_map_dma_buf,
169 .unmap_dma_buf = exynos_gem_unmap_dma_buf,
170 .kmap = exynos_gem_dmabuf_kmap,
171 .kmap_atomic = exynos_gem_dmabuf_kmap_atomic,
172 .kunmap = exynos_gem_dmabuf_kunmap,
173 .kunmap_atomic = exynos_gem_dmabuf_kunmap_atomic,
174 .mmap = exynos_gem_dmabuf_mmap,
175 .release = drm_gem_dmabuf_release,
178 struct dma_buf *exynos_dmabuf_prime_export(struct drm_device *drm_dev,
179 struct drm_gem_object *obj, int flags)
181 struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj);
183 return dma_buf_export(exynos_gem_obj, &exynos_dmabuf_ops,
184 exynos_gem_obj->base.size, flags);
187 struct drm_gem_object *exynos_dmabuf_prime_import(struct drm_device *drm_dev,
188 struct dma_buf *dma_buf)
190 struct dma_buf_attachment *attach;
191 struct sg_table *sgt;
192 struct scatterlist *sgl;
193 struct exynos_drm_gem_obj *exynos_gem_obj;
194 struct exynos_drm_gem_buf *buffer;
197 /* is this one of own objects? */
198 if (dma_buf->ops == &exynos_dmabuf_ops) {
199 struct drm_gem_object *obj;
201 exynos_gem_obj = dma_buf->priv;
202 obj = &exynos_gem_obj->base;
204 /* is it from our device? */
205 if (obj->dev == drm_dev) {
207 * Importing dmabuf exported from out own gem increases
208 * refcount on gem itself instead of f_count of dmabuf.
210 drm_gem_object_reference(obj);
215 attach = dma_buf_attach(dma_buf, drm_dev->dev);
217 return ERR_PTR(-EINVAL);
219 get_dma_buf(dma_buf);
221 sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
222 if (IS_ERR_OR_NULL(sgt)) {
227 buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
229 DRM_ERROR("failed to allocate exynos_drm_gem_buf.\n");
231 goto err_unmap_attach;
234 exynos_gem_obj = exynos_drm_gem_init(drm_dev, dma_buf->size);
235 if (!exynos_gem_obj) {
237 goto err_free_buffer;
242 buffer->size = dma_buf->size;
243 buffer->dma_addr = sg_dma_address(sgl);
245 if (sgt->nents == 1) {
246 /* always physically continuous memory if sgt->nents is 1. */
247 exynos_gem_obj->flags |= EXYNOS_BO_CONTIG;
250 * this case could be CONTIG or NONCONTIG type but for now
252 * TODO. we have to find a way that exporter can notify
253 * the type of its own buffer to importer.
255 exynos_gem_obj->flags |= EXYNOS_BO_NONCONTIG;
258 exynos_gem_obj->buffer = buffer;
260 exynos_gem_obj->base.import_attach = attach;
262 DRM_DEBUG_PRIME("dma_addr = 0x%x, size = 0x%lx\n", buffer->dma_addr,
265 return &exynos_gem_obj->base;
271 dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
273 dma_buf_detach(dma_buf, attach);
274 dma_buf_put(dma_buf);
/* Module metadata. */
MODULE_AUTHOR("Inki Dae <inki.dae@samsung.com>");
MODULE_DESCRIPTION("Samsung SoC DRM DMABUF Module");
MODULE_LICENSE("GPL");