/* exynos_drm_gem.c
 *
 * Copyright (c) 2011 Samsung Electronics Co., Ltd.
 * Author: Inki Dae <inki.dae@samsung.com>
 *
 * This program is free software; you can redistribute  it and/or modify it
 * under  the terms of  the GNU General  Public License as published by the
 * Free Software Foundation;  either version 2 of the  License, or (at your
 * option) any later version.
 */

#include <drm/drmP.h>
#include <drm/drm_vma_manager.h>

#include <linux/shmem_fs.h>
#include <drm/exynos_drm.h>

#include "exynos_drm_drv.h"
#include "exynos_drm_gem.h"
#include "exynos_drm_buf.h"
#include "exynos_drm_iommu.h"

static int check_gem_flags(unsigned int flags)
{
        if (flags & ~(EXYNOS_BO_MASK)) {
                DRM_ERROR("invalid flags.\n");
                return -EINVAL;
        }

        return 0;
}

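/*
 * Apply the buffer's cache attribute to a userspace mapping: cacheable
 * buffers keep the default protection bits, write-combined buffers get
 * pgprot_writecombine(), and everything else falls back to non-cacheable.
 */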
static void update_vm_cache_attr(struct exynos_drm_gem_obj *obj,
                                        struct vm_area_struct *vma)
{
        DRM_DEBUG_KMS("flags = 0x%x\n", obj->flags);

        /* non-cacheable by default. */
        if (obj->flags & EXYNOS_BO_CACHABLE)
                vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
        else if (obj->flags & EXYNOS_BO_WC)
                vma->vm_page_prot =
                        pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
        else
                vma->vm_page_prot =
                        pgprot_noncached(vm_get_page_prot(vma->vm_flags));
}

static unsigned long roundup_gem_size(unsigned long size, unsigned int flags)
{
        /* TODO */

        return roundup(size, PAGE_SIZE);
}

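/*
 * Register the object with the file's handle table.  On success the
 * handle owns a reference to the object, so the allocation-time
 * reference is dropped before returning.
 */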
static int exynos_drm_gem_handle_create(struct drm_gem_object *obj,
                                        struct drm_file *file_priv,
                                        unsigned int *handle)
{
        int ret;

        /*
         * allocate an id from the idr table, register the object there
         * and return the id to userspace through the handle.
         */
        ret = drm_gem_handle_create(file_priv, obj, handle);
        if (ret)
                return ret;

        DRM_DEBUG_KMS("gem handle = 0x%x\n", *handle);

        /* drop reference from allocate - handle holds it now. */
        drm_gem_object_unreference_unlocked(obj);

        return 0;
}

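/*
 * Tear down a gem object: free its backing buffer (unless it was imported
 * through dma-buf, in which case the exporter owns the memory), drop the
 * mmap offset and release the object itself.
 */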
void exynos_drm_gem_destroy(struct exynos_drm_gem_obj *exynos_gem_obj)
{
        struct drm_gem_object *obj;
        struct exynos_drm_gem_buf *buf;

        obj = &exynos_gem_obj->base;
        buf = exynos_gem_obj->buffer;

        DRM_DEBUG_KMS("handle count = %d\n", obj->handle_count);

        /*
         * do not release memory region from exporter.
         *
         * the region will be released by exporter
         * once dmabuf's refcount becomes 0.
         */
        if (obj->import_attach)
                goto out;

        exynos_drm_free_buf(obj->dev, exynos_gem_obj->flags, buf);

out:
        exynos_drm_fini_buf(obj->dev, buf);
        exynos_gem_obj->buffer = NULL;

        drm_gem_free_mmap_offset(obj);

        /* release file pointer to gem object. */
        drm_gem_object_release(obj);

        kfree(exynos_gem_obj);
        exynos_gem_obj = NULL;
}

unsigned long exynos_drm_gem_get_size(struct drm_device *dev,
                                                unsigned int gem_handle,
                                                struct drm_file *file_priv)
{
        struct exynos_drm_gem_obj *exynos_gem_obj;
        struct drm_gem_object *obj;

        obj = drm_gem_object_lookup(dev, file_priv, gem_handle);
        if (!obj) {
                DRM_ERROR("failed to lookup gem object.\n");
                return 0;
        }

        exynos_gem_obj = to_exynos_gem_obj(obj);

        drm_gem_object_unreference_unlocked(obj);

        return exynos_gem_obj->buffer->size;
}

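/*
 * Allocate and initialize the driver-private gem object for @size bytes;
 * the backing buffer is attached by the caller afterwards.
 */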
struct exynos_drm_gem_obj *exynos_drm_gem_init(struct drm_device *dev,
                                                      unsigned long size)
{
        struct exynos_drm_gem_obj *exynos_gem_obj;
        struct drm_gem_object *obj;
        int ret;

        exynos_gem_obj = kzalloc(sizeof(*exynos_gem_obj), GFP_KERNEL);
        if (!exynos_gem_obj)
                return NULL;

        exynos_gem_obj->size = size;
        obj = &exynos_gem_obj->base;

        ret = drm_gem_object_init(dev, obj, size);
        if (ret < 0) {
                DRM_ERROR("failed to initialize gem object\n");
                kfree(exynos_gem_obj);
                return NULL;
        }

        DRM_DEBUG_KMS("created file object = %p\n", obj->filp);

        return exynos_gem_obj;
}

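/*
 * Allocate a gem object together with its backing buffer.  @flags selects
 * the memory type (contiguous/non-contiguous) and the cache attribute.
 */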
struct exynos_drm_gem_obj *exynos_drm_gem_create(struct drm_device *dev,
                                                unsigned int flags,
                                                unsigned long size)
{
        struct exynos_drm_gem_obj *exynos_gem_obj;
        struct exynos_drm_gem_buf *buf;
        int ret;

        if (!size) {
                DRM_ERROR("invalid size.\n");
                return ERR_PTR(-EINVAL);
        }

        size = roundup_gem_size(size, flags);

        ret = check_gem_flags(flags);
        if (ret)
                return ERR_PTR(ret);

        buf = exynos_drm_init_buf(dev, size);
        if (!buf)
                return ERR_PTR(-ENOMEM);

        exynos_gem_obj = exynos_drm_gem_init(dev, size);
        if (!exynos_gem_obj) {
                ret = -ENOMEM;
                goto err_fini_buf;
        }

        exynos_gem_obj->buffer = buf;

        /* set memory type and cache attribute from user side. */
        exynos_gem_obj->flags = flags;

        ret = exynos_drm_alloc_buf(dev, buf, flags);
        if (ret < 0)
                goto err_gem_fini;

        return exynos_gem_obj;

err_gem_fini:
        drm_gem_object_release(&exynos_gem_obj->base);
        kfree(exynos_gem_obj);
err_fini_buf:
        exynos_drm_fini_buf(dev, buf);
        return ERR_PTR(ret);
}

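/*
 * DRM_IOCTL_EXYNOS_GEM_CREATE handler: allocates a buffer and returns a
 * handle to it in args->handle.
 *
 * Illustrative userspace sketch (not part of this driver; assumes
 * libdrm's drmIoctl() wrapper and an opened exynos DRM fd):
 *
 *      struct drm_exynos_gem_create req = {
 *              .size = length,
 *              .flags = EXYNOS_BO_NONCONTIG | EXYNOS_BO_WC,
 *      };
 *      if (drmIoctl(fd, DRM_IOCTL_EXYNOS_GEM_CREATE, &req) == 0)
 *              use_handle(req.handle);
 */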
int exynos_drm_gem_create_ioctl(struct drm_device *dev, void *data,
                                struct drm_file *file_priv)
{
        struct drm_exynos_gem_create *args = data;
        struct exynos_drm_gem_obj *exynos_gem_obj;
        int ret;

        exynos_gem_obj = exynos_drm_gem_create(dev, args->flags, args->size);
        if (IS_ERR(exynos_gem_obj))
                return PTR_ERR(exynos_gem_obj);

        ret = exynos_drm_gem_handle_create(&exynos_gem_obj->base, file_priv,
                        &args->handle);
        if (ret) {
                exynos_drm_gem_destroy(exynos_gem_obj);
                return ret;
        }

        return 0;
}

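/*
 * Look up a gem object and return a pointer to its buffer's dma address.
 * The lookup reference is intentionally kept; the matching
 * exynos_drm_gem_put_dma_addr() below drops it again.
 */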
dma_addr_t *exynos_drm_gem_get_dma_addr(struct drm_device *dev,
                                        unsigned int gem_handle,
                                        struct drm_file *filp)
{
        struct exynos_drm_gem_obj *exynos_gem_obj;
        struct drm_gem_object *obj;

        obj = drm_gem_object_lookup(dev, filp, gem_handle);
        if (!obj) {
                DRM_ERROR("failed to lookup gem object.\n");
                return ERR_PTR(-EINVAL);
        }

        exynos_gem_obj = to_exynos_gem_obj(obj);

        return &exynos_gem_obj->buffer->dma_addr;
}

void exynos_drm_gem_put_dma_addr(struct drm_device *dev,
                                        unsigned int gem_handle,
                                        struct drm_file *filp)
{
        struct drm_gem_object *obj;

        obj = drm_gem_object_lookup(dev, filp, gem_handle);
        if (!obj) {
                DRM_ERROR("failed to lookup gem object.\n");
                return;
        }

        drm_gem_object_unreference_unlocked(obj);

        /*
         * drop obj->refcount one more time because we have already
         * increased it at exynos_drm_gem_get_dma_addr().
         */
        drm_gem_object_unreference_unlocked(obj);
}

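/*
 * Map the buffer's DMA allocation into a userspace vma with
 * dma_mmap_attrs(); the requested mapping may not be larger than
 * the buffer.
 */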
int exynos_drm_gem_mmap_buffer(struct exynos_drm_gem_obj *exynos_gem_obj,
                                      struct vm_area_struct *vma)
{
        struct drm_device *drm_dev = exynos_gem_obj->base.dev;
        struct exynos_drm_gem_buf *buffer;
        unsigned long vm_size;
        int ret;

        vma->vm_flags &= ~VM_PFNMAP;
        vma->vm_pgoff = 0;

        vm_size = vma->vm_end - vma->vm_start;

        /*
         * a buffer contains information about physically contiguous memory
         * allocated by user request or at framebuffer creation.
         */
        buffer = exynos_gem_obj->buffer;

        /* check if user-requested size is valid. */
        if (vm_size > buffer->size)
                return -EINVAL;

        ret = dma_mmap_attrs(drm_dev->dev, vma, buffer->pages,
                                buffer->dma_addr, buffer->size,
                                &buffer->dma_attrs);
        if (ret < 0) {
                DRM_ERROR("failed to mmap.\n");
                return ret;
        }

        return 0;
}

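/*
 * DRM_IOCTL_EXYNOS_GEM_GET handler: reports the flags and size of the
 * buffer behind a gem handle.
 */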
int exynos_drm_gem_get_ioctl(struct drm_device *dev, void *data,
                                      struct drm_file *file_priv)
{
        struct exynos_drm_gem_obj *exynos_gem_obj;
        struct drm_exynos_gem_info *args = data;
        struct drm_gem_object *obj;

        mutex_lock(&dev->struct_mutex);

        obj = drm_gem_object_lookup(dev, file_priv, args->handle);
        if (!obj) {
                DRM_ERROR("failed to lookup gem object.\n");
                mutex_unlock(&dev->struct_mutex);
                return -EINVAL;
        }

        exynos_gem_obj = to_exynos_gem_obj(obj);

        args->flags = exynos_gem_obj->flags;
        args->size = exynos_gem_obj->size;

        drm_gem_object_unreference(obj);
        mutex_unlock(&dev->struct_mutex);

        return 0;
}

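/*
 * Duplicate a vm_area_struct so the mapping can outlive the original vma.
 * The copy takes its own references through ->open and get_file();
 * exynos_gem_put_vma() releases them again.
 */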
struct vm_area_struct *exynos_gem_get_vma(struct vm_area_struct *vma)
{
        struct vm_area_struct *vma_copy;

        vma_copy = kmalloc(sizeof(*vma_copy), GFP_KERNEL);
        if (!vma_copy)
                return NULL;

        if (vma->vm_ops && vma->vm_ops->open)
                vma->vm_ops->open(vma);

        if (vma->vm_file)
                get_file(vma->vm_file);

        memcpy(vma_copy, vma, sizeof(*vma));

        vma_copy->vm_mm = NULL;
        vma_copy->vm_next = NULL;
        vma_copy->vm_prev = NULL;

        return vma_copy;
}

void exynos_gem_put_vma(struct vm_area_struct *vma)
{
        if (!vma)
                return;

        if (vma->vm_ops && vma->vm_ops->close)
                vma->vm_ops->close(vma);

        if (vma->vm_file)
                fput(vma->vm_file);

        kfree(vma);
}

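/*
 * Pin the pages backing a userptr range.  VM_PFNMAP regions are walked
 * with follow_pfn(); ordinary memory is pinned with get_user_pages(),
 * and any partially pinned pages are released again on failure.
 */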
int exynos_gem_get_pages_from_userptr(unsigned long start,
                                                unsigned int npages,
                                                struct page **pages,
                                                struct vm_area_struct *vma)
{
        int get_npages;

        /* the memory region was mmapped with VM_PFNMAP. */
        if (vma_is_io(vma)) {
                unsigned int i;

                for (i = 0; i < npages; ++i, start += PAGE_SIZE) {
                        unsigned long pfn;
                        int ret = follow_pfn(vma, start, &pfn);
                        if (ret)
                                return ret;

                        pages[i] = pfn_to_page(pfn);
                }

                if (i != npages) {
                        DRM_ERROR("failed to get user_pages.\n");
                        return -EINVAL;
                }

                return 0;
        }

        get_npages = get_user_pages(current, current->mm, start,
                                        npages, 1, 1, pages, NULL);
        get_npages = max(get_npages, 0);
        if (get_npages != npages) {
                DRM_ERROR("failed to get user_pages.\n");
                while (get_npages)
                        put_page(pages[--get_npages]);
                return -EFAULT;
        }

        return 0;
}

void exynos_gem_put_pages_to_userptr(struct page **pages,
                                        unsigned int npages,
                                        struct vm_area_struct *vma)
{
        if (!vma_is_io(vma)) {
                unsigned int i;

                for (i = 0; i < npages; i++) {
                        set_page_dirty_lock(pages[i]);

                        /*
                         * undo the reference we took when populating
                         * the table.
                         */
                        put_page(pages[i]);
                }
        }
}

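/*
 * Map a scatter-gather table for device DMA.  Note that dma_map_sg()
 * returns the number of mapped entries, so zero means failure.
 */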
int exynos_gem_map_sgt_with_dma(struct drm_device *drm_dev,
                                struct sg_table *sgt,
                                enum dma_data_direction dir)
{
        int nents;

        mutex_lock(&drm_dev->struct_mutex);

        nents = dma_map_sg(drm_dev->dev, sgt->sgl, sgt->nents, dir);
        if (!nents) {
                DRM_ERROR("failed to map sgl with dma.\n");
                mutex_unlock(&drm_dev->struct_mutex);
                /* dma_map_sg() returns 0 on failure, not an errno. */
                return -EIO;
        }

        mutex_unlock(&drm_dev->struct_mutex);
        return 0;
}

void exynos_gem_unmap_sgt_from_dma(struct drm_device *drm_dev,
                                struct sg_table *sgt,
                                enum dma_data_direction dir)
{
        dma_unmap_sg(drm_dev->dev, sgt->sgl, sgt->nents, dir);
}

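/*
 * gem free-object callback: releases the prime attachment for imported
 * buffers before tearing the object down.
 */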
void exynos_drm_gem_free_object(struct drm_gem_object *obj)
{
        struct exynos_drm_gem_obj *exynos_gem_obj;
        struct exynos_drm_gem_buf *buf;

        exynos_gem_obj = to_exynos_gem_obj(obj);
        buf = exynos_gem_obj->buffer;

        if (obj->import_attach)
                drm_prime_gem_destroy(obj, buf->sgt);

        exynos_drm_gem_destroy(to_exynos_gem_obj(obj));
}

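/*
 * Dumb-buffer allocation for the generic KMS API.  With an IOMMU the
 * buffer may be non-contiguous; without one it must be physically
 * contiguous.
 *
 * Illustrative userspace sketch of the generic dumb-buffer flow (not
 * part of this driver; assumes libdrm's drmIoctl() wrapper):
 *
 *      struct drm_mode_create_dumb create = {
 *              .width = 1024, .height = 768, .bpp = 32,
 *      };
 *      drmIoctl(fd, DRM_IOCTL_MODE_CREATE_DUMB, &create);
 *
 *      struct drm_mode_map_dumb map = { .handle = create.handle };
 *      drmIoctl(fd, DRM_IOCTL_MODE_MAP_DUMB, &map);
 *      ptr = mmap(NULL, create.size, PROT_READ | PROT_WRITE,
 *                 MAP_SHARED, fd, map.offset);
 */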
int exynos_drm_gem_dumb_create(struct drm_file *file_priv,
                               struct drm_device *dev,
                               struct drm_mode_create_dumb *args)
{
        struct exynos_drm_gem_obj *exynos_gem_obj;
        int ret;

        /*
         * allocate memory to be used for framebuffer.
         * - this callback is invoked by userspace via the
         *      DRM_IOCTL_MODE_CREATE_DUMB ioctl.
         */

        args->pitch = args->width * ((args->bpp + 7) / 8);
        args->size = args->pitch * args->height;

        if (is_drm_iommu_supported(dev)) {
                exynos_gem_obj = exynos_drm_gem_create(dev,
                        EXYNOS_BO_NONCONTIG | EXYNOS_BO_WC,
                        args->size);
        } else {
                exynos_gem_obj = exynos_drm_gem_create(dev,
                        EXYNOS_BO_CONTIG | EXYNOS_BO_WC,
                        args->size);
        }

        if (IS_ERR(exynos_gem_obj)) {
                dev_warn(dev->dev, "FB allocation failed.\n");
                return PTR_ERR(exynos_gem_obj);
        }

        ret = exynos_drm_gem_handle_create(&exynos_gem_obj->base, file_priv,
                        &args->handle);
        if (ret) {
                exynos_drm_gem_destroy(exynos_gem_obj);
                return ret;
        }

        return 0;
}

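/*
 * Resolve a dumb-buffer handle to the fake mmap offset that userspace
 * passes to mmap() on the DRM fd.
 */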
int exynos_drm_gem_dumb_map_offset(struct drm_file *file_priv,
                                   struct drm_device *dev, uint32_t handle,
                                   uint64_t *offset)
{
        struct drm_gem_object *obj;
        int ret = 0;

        mutex_lock(&dev->struct_mutex);

        /*
         * get offset of memory allocated for drm framebuffer.
         * - this callback is invoked by userspace via the
         *      DRM_IOCTL_MODE_MAP_DUMB ioctl.
         */

        obj = drm_gem_object_lookup(dev, file_priv, handle);
        if (!obj) {
                DRM_ERROR("failed to lookup gem object.\n");
                ret = -EINVAL;
                goto unlock;
        }

        ret = drm_gem_create_mmap_offset(obj);
        if (ret)
                goto out;

        *offset = drm_vma_node_offset_addr(&obj->vma_node);
        DRM_DEBUG_KMS("offset = 0x%lx\n", (unsigned long)*offset);

out:
        drm_gem_object_unreference(obj);
unlock:
        mutex_unlock(&dev->struct_mutex);
        return ret;
}

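/*
 * Page-fault handler for mappings of gem objects: inserts the backing
 * page for the faulting address and translates errno values into
 * VM_FAULT_* codes.
 */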
int exynos_drm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
        struct drm_gem_object *obj = vma->vm_private_data;
        struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj);
        struct exynos_drm_gem_buf *buf = exynos_gem_obj->buffer;
        unsigned long pfn;
        pgoff_t page_offset;
        int ret;

        page_offset = ((unsigned long)vmf->virtual_address -
                        vma->vm_start) >> PAGE_SHIFT;

        if (page_offset >= (buf->size >> PAGE_SHIFT)) {
                DRM_ERROR("invalid page offset\n");
                ret = -EINVAL;
                goto out;
        }

        pfn = page_to_pfn(buf->pages[page_offset]);
        ret = vm_insert_mixed(vma, (unsigned long)vmf->virtual_address, pfn);

out:
        switch (ret) {
        case 0:
        case -ERESTARTSYS:
        case -EINTR:
                return VM_FAULT_NOPAGE;
        case -ENOMEM:
                return VM_FAULT_OOM;
        default:
                return VM_FAULT_SIGBUS;
        }
}

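/*
 * mmap entry point for the DRM file: sets up the vma through
 * drm_gem_mmap(), applies the buffer's cache attribute and then maps
 * the whole buffer.
 */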
int exynos_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
        struct exynos_drm_gem_obj *exynos_gem_obj;
        struct drm_gem_object *obj;
        int ret;

        /* set vm_area_struct. */
        ret = drm_gem_mmap(filp, vma);
        if (ret < 0) {
                DRM_ERROR("failed to mmap.\n");
                return ret;
        }

        obj = vma->vm_private_data;
        exynos_gem_obj = to_exynos_gem_obj(obj);

        ret = check_gem_flags(exynos_gem_obj->flags);
        if (ret)
                goto err_close_vm;

        update_vm_cache_attr(exynos_gem_obj, vma);

        ret = exynos_drm_gem_mmap_buffer(exynos_gem_obj, vma);
        if (ret)
                goto err_close_vm;

        return ret;

err_close_vm:
        drm_gem_vm_close(vma);
        drm_gem_free_mmap_offset(obj);

        return ret;
}