/*
 * Copyright 2012 Red Hat Inc
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *      Dave Airlie <airlied@redhat.com>
 */
#include "drmP.h"
#include "i915_drv.h"
#include <linux/dma-buf.h>

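/*
 * Populate the object's backing pages if necessary, wrap them in a
 * scatterlist and map that list for DMA by the importing device.
 */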
static struct sg_table *i915_gem_map_dma_buf(struct dma_buf_attachment *attachment,
                                             enum dma_data_direction dir)
{
        struct drm_i915_gem_object *obj = attachment->dmabuf->priv;
        struct drm_device *dev = obj->base.dev;
        int npages = obj->base.size / PAGE_SIZE;
        struct sg_table *sg = NULL;
        int ret;
        int nents;

        ret = i915_mutex_lock_interruptible(dev);
        if (ret)
                return ERR_PTR(ret);

        if (!obj->pages) {
                ret = i915_gem_object_get_pages_gtt(obj, __GFP_NORETRY | __GFP_NOWARN);
                if (ret) {
                        sg = ERR_PTR(ret);
                        goto out;
                }
        }

        /* link the pages into an SG table, then map it for the attached device */
        sg = drm_prime_pages_to_sg(obj->pages, npages);
        nents = dma_map_sg(attachment->dev, sg->sgl, sg->nents, dir);
out:
        mutex_unlock(&dev->struct_mutex);
        return sg;
}

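/* Unmap the scatterlist for the importer's device and free it. */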
static void i915_gem_unmap_dma_buf(struct dma_buf_attachment *attachment,
                                   struct sg_table *sg, enum dma_data_direction dir)
{
        dma_unmap_sg(attachment->dev, sg->sgl, sg->nents, dir);
        sg_free_table(sg);
        kfree(sg);
}

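/*
 * Called when the last reference to the dma-buf is dropped: release the
 * GEM reference that was taken when the object was exported.
 */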
static void i915_gem_dmabuf_release(struct dma_buf *dma_buf)
{
        struct drm_i915_gem_object *obj = dma_buf->priv;

        if (obj->base.export_dma_buf == dma_buf) {
                /* drop the reference the export fd holds */
                obj->base.export_dma_buf = NULL;
                drm_gem_object_unreference_unlocked(&obj->base);
        }
}

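/*
 * Return a kernel virtual mapping of the object's pages.  The mapping is
 * reference counted so that nested vmap/vunmap calls reuse the same
 * address until the last user has gone.
 */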
static void *i915_gem_dmabuf_vmap(struct dma_buf *dma_buf)
{
        struct drm_i915_gem_object *obj = dma_buf->priv;
        struct drm_device *dev = obj->base.dev;
        int ret;

        ret = i915_mutex_lock_interruptible(dev);
        if (ret)
                return ERR_PTR(ret);

        if (obj->dma_buf_vmapping) {
                obj->vmapping_count++;
                goto out_unlock;
        }

        if (!obj->pages) {
                ret = i915_gem_object_get_pages_gtt(obj, __GFP_NORETRY | __GFP_NOWARN);
                if (ret) {
                        mutex_unlock(&dev->struct_mutex);
                        return ERR_PTR(ret);
                }
        }

        obj->dma_buf_vmapping = vmap(obj->pages, obj->base.size / PAGE_SIZE, 0, PAGE_KERNEL);
        if (!obj->dma_buf_vmapping) {
                DRM_ERROR("failed to vmap object\n");
                mutex_unlock(&dev->struct_mutex);
                return ERR_PTR(-ENOMEM);
        }

        obj->vmapping_count = 1;
out_unlock:
        mutex_unlock(&dev->struct_mutex);
        return obj->dma_buf_vmapping;
}

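/* Drop one reference on the kernel mapping and tear it down when unused. */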
static void i915_gem_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr)
{
        struct drm_i915_gem_object *obj = dma_buf->priv;
        struct drm_device *dev = obj->base.dev;
        int ret;

        ret = i915_mutex_lock_interruptible(dev);
        if (ret)
                return;

        --obj->vmapping_count;
        if (obj->vmapping_count == 0) {
                vunmap(obj->dma_buf_vmapping);
                obj->dma_buf_vmapping = NULL;
        }
        mutex_unlock(&dev->struct_mutex);
}

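/*
 * Per-page CPU access (kmap) is part of the dma-buf interface but is not
 * implemented here: the atomic and non-atomic kmap hooks simply return
 * NULL and the matching kunmap hooks are no-ops.
 */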
static void *i915_gem_dmabuf_kmap_atomic(struct dma_buf *dma_buf, unsigned long page_num)
{
        return NULL;
}

static void i915_gem_dmabuf_kunmap_atomic(struct dma_buf *dma_buf, unsigned long page_num, void *addr)
{

}

static void *i915_gem_dmabuf_kmap(struct dma_buf *dma_buf, unsigned long page_num)
{
        return NULL;
}

static void i915_gem_dmabuf_kunmap(struct dma_buf *dma_buf, unsigned long page_num, void *addr)
{

}

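/* CPU mmap of the exported buffer is not supported. */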
static int i915_gem_dmabuf_mmap(struct dma_buf *dma_buf, struct vm_area_struct *vma)
{
        return -EINVAL;
}

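/* dma-buf operations used for every buffer exported by i915 */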
static const struct dma_buf_ops i915_dmabuf_ops = {
        .map_dma_buf = i915_gem_map_dma_buf,
        .unmap_dma_buf = i915_gem_unmap_dma_buf,
        .release = i915_gem_dmabuf_release,
        .kmap = i915_gem_dmabuf_kmap,
        .kmap_atomic = i915_gem_dmabuf_kmap_atomic,
        .kunmap = i915_gem_dmabuf_kunmap,
        .kunmap_atomic = i915_gem_dmabuf_kunmap_atomic,
        .mmap = i915_gem_dmabuf_mmap,
        .vmap = i915_gem_dmabuf_vmap,
        .vunmap = i915_gem_dmabuf_vunmap,
};

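/* Export a GEM object as a dma-buf that other drivers can import. */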
struct dma_buf *i915_gem_prime_export(struct drm_device *dev,
                                      struct drm_gem_object *gem_obj, int flags)
{
        struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);

        /* forward the caller's fd flags (e.g. O_CLOEXEC) to the dma-buf file */
        return dma_buf_export(obj, &i915_dmabuf_ops, obj->base.size, flags);
}

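/*
 * Import a dma-buf as a GEM object.  Buffers that were exported by this
 * device are handled by taking another reference on the underlying object;
 * foreign buffers are attached and mapped through the dma-buf interface.
 */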
struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev,
                                             struct dma_buf *dma_buf)
{
        struct dma_buf_attachment *attach;
        struct sg_table *sg;
        struct drm_i915_gem_object *obj;
        int size;
        int ret;

        /* is this one of our own objects? */
        if (dma_buf->ops == &i915_dmabuf_ops) {
                obj = dma_buf->priv;
                /* is it from our device? */
                if (obj->base.dev == dev) {
                        drm_gem_object_reference(&obj->base);
                        return &obj->base;
                }
        }

        /* need to attach */
        attach = dma_buf_attach(dma_buf, dev->dev);
        if (IS_ERR(attach))
                return ERR_CAST(attach);

        sg = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
        if (IS_ERR(sg)) {
                ret = PTR_ERR(sg);
                goto fail_detach;
        }

        size = dma_buf->size;

        obj = kzalloc(sizeof(*obj), GFP_KERNEL);
        if (obj == NULL) {
                ret = -ENOMEM;
                goto fail_unmap;
        }

        ret = drm_gem_private_object_init(dev, &obj->base, size);
        if (ret) {
                kfree(obj);
                goto fail_unmap;
        }

        obj->sg_table = sg;
        obj->base.import_attach = attach;

        return &obj->base;

fail_unmap:
        dma_buf_unmap_attachment(attach, sg, DMA_BIDIRECTIONAL);
fail_detach:
        dma_buf_detach(dma_buf, attach);
        return ERR_PTR(ret);
}