/* exynos_drm_gem.c
 *
 * Copyright (c) 2011 Samsung Electronics Co., Ltd.
 * Author: Inki Dae <inki.dae@samsung.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */

#include <drm/drmP.h>
#include <drm/drm_vma_manager.h>

#include <linux/shmem_fs.h>
#include <drm/exynos_drm.h>

#include "exynos_drm_drv.h"
#include "exynos_drm_gem.h"
#include "exynos_drm_buf.h"
#include "exynos_drm_iommu.h"

static unsigned int convert_to_vm_err_msg(int msg)
{
        unsigned int out_msg;

        switch (msg) {
        case 0:
        case -ERESTARTSYS:
        case -EINTR:
                out_msg = VM_FAULT_NOPAGE;
                break;

        case -ENOMEM:
                out_msg = VM_FAULT_OOM;
                break;

        default:
                out_msg = VM_FAULT_SIGBUS;
                break;
        }

        return out_msg;
}

static int check_gem_flags(unsigned int flags)
{
        if (flags & ~(EXYNOS_BO_MASK)) {
                DRM_ERROR("invalid flags.\n");
                return -EINVAL;
        }

        return 0;
}

static void update_vm_cache_attr(struct exynos_drm_gem_obj *obj,
                                        struct vm_area_struct *vma)
{
        DRM_DEBUG_KMS("flags = 0x%x\n", obj->flags);

        /* non-cacheable by default. */
        if (obj->flags & EXYNOS_BO_CACHABLE)
                vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
        else if (obj->flags & EXYNOS_BO_WC)
                vma->vm_page_prot =
                        pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
        else
                vma->vm_page_prot =
                        pgprot_noncached(vm_get_page_prot(vma->vm_flags));
}
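
/*
 * Rough sketch of the resulting CPU mapping attributes (assuming the
 * usual meaning of the EXYNOS_BO_* flags):
 *
 *      EXYNOS_BO_CACHABLE      -> default (cacheable) protection
 *      EXYNOS_BO_WC            -> pgprot_writecombine()
 *      otherwise               -> pgprot_noncached()
 *
 * Write-combining allows fast CPU writes without CPU-side caching,
 * which suits buffers that are filled once and then scanned out or
 * read by a device.
 */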

static unsigned long roundup_gem_size(unsigned long size, unsigned int flags)
{
        /* TODO */

        return roundup(size, PAGE_SIZE);
}

static int exynos_drm_gem_map_buf(struct drm_gem_object *obj,
                                        struct vm_area_struct *vma,
                                        unsigned long f_vaddr,
                                        pgoff_t page_offset)
{
        struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj);
        struct exynos_drm_gem_buf *buf = exynos_gem_obj->buffer;
        struct scatterlist *sgl;
        unsigned long pfn;
        int i;

        if (!buf->sgt)
                return -EINTR;

        if (page_offset >= (buf->size >> PAGE_SHIFT)) {
                DRM_ERROR("invalid page offset\n");
                return -EINVAL;
        }

        sgl = buf->sgt->sgl;
        for_each_sg(buf->sgt->sgl, sgl, buf->sgt->nents, i) {
                if (page_offset < (sgl->length >> PAGE_SHIFT))
                        break;
                page_offset -= (sgl->length >> PAGE_SHIFT);
        }

        pfn = __phys_to_pfn(sg_phys(sgl)) + page_offset;

        return vm_insert_mixed(vma, f_vaddr, pfn);
}
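
/*
 * Note on the loop above: the faulting page offset is walked through
 * the scatter/gather list entry by entry until the entry covering it
 * is found; the pfn inserted into the user mapping is then that
 * entry's physical address plus the remaining offset.
 */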

static int exynos_drm_gem_handle_create(struct drm_gem_object *obj,
                                        struct drm_file *file_priv,
                                        unsigned int *handle)
{
        int ret;

        /*
         * allocate an id from the idr table, where the object is
         * registered, and the handle holds the id that userspace sees.
         */
        ret = drm_gem_handle_create(file_priv, obj, handle);
        if (ret)
                return ret;

        DRM_DEBUG_KMS("gem handle = 0x%x\n", *handle);

        /* drop reference from allocate - handle holds it now. */
        drm_gem_object_unreference_unlocked(obj);

        return 0;
}
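
/*
 * Reference flow for the helper above: the object starts with the
 * single reference taken at allocation time, drm_gem_handle_create()
 * takes an additional reference for the handle, and the allocation
 * reference is dropped here, so the handle is what keeps the object
 * alive afterwards.
 */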

void exynos_drm_gem_destroy(struct exynos_drm_gem_obj *exynos_gem_obj)
{
        struct drm_gem_object *obj;
        struct exynos_drm_gem_buf *buf;

        obj = &exynos_gem_obj->base;
        buf = exynos_gem_obj->buffer;

        DRM_DEBUG_KMS("handle count = %d\n", obj->handle_count);

        /*
         * do not release the memory region of an imported buffer;
         * the exporter releases it once the dmabuf's refcount
         * drops to zero.
         */
        if (obj->import_attach)
                goto out;

        exynos_drm_free_buf(obj->dev, exynos_gem_obj->flags, buf);

out:
        exynos_drm_fini_buf(obj->dev, buf);
        exynos_gem_obj->buffer = NULL;

        drm_gem_free_mmap_offset(obj);

        /* release file pointer to gem object. */
        drm_gem_object_release(obj);

        kfree(exynos_gem_obj);
        exynos_gem_obj = NULL;
}

unsigned long exynos_drm_gem_get_size(struct drm_device *dev,
                                                unsigned int gem_handle,
                                                struct drm_file *file_priv)
{
        struct exynos_drm_gem_obj *exynos_gem_obj;
        struct drm_gem_object *obj;

        obj = drm_gem_object_lookup(dev, file_priv, gem_handle);
        if (!obj) {
                DRM_ERROR("failed to lookup gem object.\n");
                return 0;
        }

        exynos_gem_obj = to_exynos_gem_obj(obj);

        drm_gem_object_unreference_unlocked(obj);

        return exynos_gem_obj->buffer->size;
}

struct exynos_drm_gem_obj *exynos_drm_gem_init(struct drm_device *dev,
                                                      unsigned long size)
{
        struct exynos_drm_gem_obj *exynos_gem_obj;
        struct drm_gem_object *obj;
        int ret;

        exynos_gem_obj = kzalloc(sizeof(*exynos_gem_obj), GFP_KERNEL);
        if (!exynos_gem_obj)
                return NULL;

        exynos_gem_obj->size = size;
        obj = &exynos_gem_obj->base;

        ret = drm_gem_object_init(dev, obj, size);
        if (ret < 0) {
                DRM_ERROR("failed to initialize gem object\n");
                kfree(exynos_gem_obj);
                return NULL;
        }

        DRM_DEBUG_KMS("created file object = 0x%x\n", (unsigned int)obj->filp);

        return exynos_gem_obj;
}

struct exynos_drm_gem_obj *exynos_drm_gem_create(struct drm_device *dev,
                                                unsigned int flags,
                                                unsigned long size)
{
        struct exynos_drm_gem_obj *exynos_gem_obj;
        struct exynos_drm_gem_buf *buf;
        int ret;

        if (!size) {
                DRM_ERROR("invalid size.\n");
                return ERR_PTR(-EINVAL);
        }

        size = roundup_gem_size(size, flags);

        ret = check_gem_flags(flags);
        if (ret)
                return ERR_PTR(ret);

        buf = exynos_drm_init_buf(dev, size);
        if (!buf)
                return ERR_PTR(-ENOMEM);

        exynos_gem_obj = exynos_drm_gem_init(dev, size);
        if (!exynos_gem_obj) {
                ret = -ENOMEM;
                goto err_fini_buf;
        }

        exynos_gem_obj->buffer = buf;

        /* set memory type and cache attribute from user side. */
        exynos_gem_obj->flags = flags;

        ret = exynos_drm_alloc_buf(dev, buf, flags);
        if (ret < 0)
                goto err_gem_fini;

        return exynos_gem_obj;

err_gem_fini:
        drm_gem_object_release(&exynos_gem_obj->base);
        kfree(exynos_gem_obj);
err_fini_buf:
        exynos_drm_fini_buf(dev, buf);
        return ERR_PTR(ret);
}

int exynos_drm_gem_create_ioctl(struct drm_device *dev, void *data,
                                struct drm_file *file_priv)
{
        struct drm_exynos_gem_create *args = data;
        struct exynos_drm_gem_obj *exynos_gem_obj;
        int ret;

        exynos_gem_obj = exynos_drm_gem_create(dev, args->flags, args->size);
        if (IS_ERR(exynos_gem_obj))
                return PTR_ERR(exynos_gem_obj);

        ret = exynos_drm_gem_handle_create(&exynos_gem_obj->base, file_priv,
                        &args->handle);
        if (ret) {
                exynos_drm_gem_destroy(exynos_gem_obj);
                return ret;
        }

        return 0;
}
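
/*
 * Userspace reaches the ioctl above through DRM_IOCTL_EXYNOS_GEM_CREATE.
 * A minimal caller-side sketch (illustrative only; error handling
 * omitted):
 *
 *      struct drm_exynos_gem_create req = {
 *              .size  = length,
 *              .flags = EXYNOS_BO_NONCONTIG | EXYNOS_BO_WC,
 *      };
 *
 *      ioctl(fd, DRM_IOCTL_EXYNOS_GEM_CREATE, &req);
 *
 * On success, req.handle names the new buffer object.
 */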

dma_addr_t *exynos_drm_gem_get_dma_addr(struct drm_device *dev,
                                        unsigned int gem_handle,
                                        struct drm_file *filp)
{
        struct exynos_drm_gem_obj *exynos_gem_obj;
        struct drm_gem_object *obj;

        obj = drm_gem_object_lookup(dev, filp, gem_handle);
        if (!obj) {
                DRM_ERROR("failed to lookup gem object.\n");
                return ERR_PTR(-EINVAL);
        }

        exynos_gem_obj = to_exynos_gem_obj(obj);

        return &exynos_gem_obj->buffer->dma_addr;
}

void exynos_drm_gem_put_dma_addr(struct drm_device *dev,
                                        unsigned int gem_handle,
                                        struct drm_file *filp)
{
        struct exynos_drm_gem_obj *exynos_gem_obj;
        struct drm_gem_object *obj;

        obj = drm_gem_object_lookup(dev, filp, gem_handle);
        if (!obj) {
                DRM_ERROR("failed to lookup gem object.\n");
                return;
        }

        exynos_gem_obj = to_exynos_gem_obj(obj);

        drm_gem_object_unreference_unlocked(obj);

        /*
         * decrease obj->refcount one more time because we have already
         * increased it at exynos_drm_gem_get_dma_addr().
         */
        drm_gem_object_unreference_unlocked(obj);
}
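
/*
 * In-kernel users are expected to pair the two helpers above, roughly:
 *
 *      dma_addr_t *addr = exynos_drm_gem_get_dma_addr(dev, handle, filp);
 *      if (IS_ERR(addr))
 *              return PTR_ERR(addr);
 *      ... program the device with *addr ...
 *      exynos_drm_gem_put_dma_addr(dev, handle, filp);
 *
 * The lookup in exynos_drm_gem_get_dma_addr() leaves an extra object
 * reference in place, which is why the put helper unreferences twice.
 */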

int exynos_drm_gem_map_offset_ioctl(struct drm_device *dev, void *data,
                                    struct drm_file *file_priv)
{
        struct drm_exynos_gem_map_off *args = data;

        DRM_DEBUG_KMS("handle = 0x%x, offset = 0x%lx\n",
                        args->handle, (unsigned long)args->offset);

        if (!(dev->driver->driver_features & DRIVER_GEM)) {
                DRM_ERROR("does not support GEM.\n");
                return -ENODEV;
        }

        return exynos_drm_gem_dumb_map_offset(file_priv, dev, args->handle,
                        &args->offset);
}

int exynos_drm_gem_mmap_buffer(struct file *filp,
                                      struct vm_area_struct *vma)
{
        struct drm_gem_object *obj = filp->private_data;
        struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj);
        struct drm_device *drm_dev = obj->dev;
        struct exynos_drm_gem_buf *buffer;
        unsigned long vm_size;
        int ret;

        WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));

        vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP;
        vma->vm_private_data = obj;
        vma->vm_ops = drm_dev->driver->gem_vm_ops;

        update_vm_cache_attr(exynos_gem_obj, vma);

        vm_size = vma->vm_end - vma->vm_start;

        /*
         * the buffer describes the memory allocated at user request
         * or at framebuffer creation.
         */
        buffer = exynos_gem_obj->buffer;

        /* check if user-requested size is valid. */
        if (vm_size > buffer->size)
                return -EINVAL;

        ret = dma_mmap_attrs(drm_dev->dev, vma, buffer->pages,
                                buffer->dma_addr, buffer->size,
                                &buffer->dma_attrs);
        if (ret < 0) {
                DRM_ERROR("failed to mmap.\n");
                return ret;
        }

        /*
         * take a reference for this mapping of the object; it is
         * dropped by the corresponding vm_close call.
         */
        drm_gem_object_reference(obj);

        drm_vm_open_locked(drm_dev, vma);

        return 0;
}

int exynos_drm_gem_mmap_ioctl(struct drm_device *dev, void *data,
                              struct drm_file *file_priv)
{
        struct drm_exynos_file_private *exynos_file_priv;
        struct drm_exynos_gem_mmap *args = data;
        struct drm_gem_object *obj;
        struct file *anon_filp;
        unsigned long addr;

        if (!(dev->driver->driver_features & DRIVER_GEM)) {
                DRM_ERROR("does not support GEM.\n");
                return -ENODEV;
        }

        mutex_lock(&dev->struct_mutex);

        obj = drm_gem_object_lookup(dev, file_priv, args->handle);
        if (!obj) {
                DRM_ERROR("failed to lookup gem object.\n");
                mutex_unlock(&dev->struct_mutex);
                return -EINVAL;
        }

        exynos_file_priv = file_priv->driver_priv;
        anon_filp = exynos_file_priv->anon_filp;
        anon_filp->private_data = obj;

        addr = vm_mmap(anon_filp, 0, args->size, PROT_READ | PROT_WRITE,
                        MAP_SHARED, 0);

        drm_gem_object_unreference(obj);

        if (IS_ERR_VALUE(addr)) {
                mutex_unlock(&dev->struct_mutex);
                return (int)addr;
        }

        mutex_unlock(&dev->struct_mutex);

        args->mapped = addr;

        DRM_DEBUG_KMS("mapped = 0x%lx\n", (unsigned long)args->mapped);

        return 0;
}
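
/*
 * Note on the ioctl above: the mapping is created through the per-file
 * anonymous file (anon_filp) rather than the DRM device file, so the
 * mmap handler invoked by vm_mmap() is exynos_drm_gem_mmap_buffer()
 * above (assuming anon_filp was set up elsewhere with file operations
 * pointing at it).
 */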

int exynos_drm_gem_get_ioctl(struct drm_device *dev, void *data,
                                      struct drm_file *file_priv)
{
        struct exynos_drm_gem_obj *exynos_gem_obj;
        struct drm_exynos_gem_info *args = data;
        struct drm_gem_object *obj;

        mutex_lock(&dev->struct_mutex);

        obj = drm_gem_object_lookup(dev, file_priv, args->handle);
        if (!obj) {
                DRM_ERROR("failed to lookup gem object.\n");
                mutex_unlock(&dev->struct_mutex);
                return -EINVAL;
        }

        exynos_gem_obj = to_exynos_gem_obj(obj);

        args->flags = exynos_gem_obj->flags;
        args->size = exynos_gem_obj->size;

        drm_gem_object_unreference(obj);
        mutex_unlock(&dev->struct_mutex);

        return 0;
}

struct vm_area_struct *exynos_gem_get_vma(struct vm_area_struct *vma)
{
        struct vm_area_struct *vma_copy;

        vma_copy = kmalloc(sizeof(*vma_copy), GFP_KERNEL);
        if (!vma_copy)
                return NULL;

        if (vma->vm_ops && vma->vm_ops->open)
                vma->vm_ops->open(vma);

        if (vma->vm_file)
                get_file(vma->vm_file);

        memcpy(vma_copy, vma, sizeof(*vma));

        vma_copy->vm_mm = NULL;
        vma_copy->vm_next = NULL;
        vma_copy->vm_prev = NULL;

        return vma_copy;
}

void exynos_gem_put_vma(struct vm_area_struct *vma)
{
        if (!vma)
                return;

        if (vma->vm_ops && vma->vm_ops->close)
                vma->vm_ops->close(vma);

        if (vma->vm_file)
                fput(vma->vm_file);

        kfree(vma);
}

int exynos_gem_get_pages_from_userptr(unsigned long start,
                                                unsigned int npages,
                                                struct page **pages,
                                                struct vm_area_struct *vma)
{
        int get_npages;

        /* the memory region is mmapped with VM_PFNMAP. */
        if (vma_is_io(vma)) {
                unsigned int i;

                for (i = 0; i < npages; ++i, start += PAGE_SIZE) {
                        unsigned long pfn;
                        int ret = follow_pfn(vma, start, &pfn);
                        if (ret)
                                return ret;

                        pages[i] = pfn_to_page(pfn);
                }

                return 0;
        }

        get_npages = get_user_pages(current, current->mm, start,
                                        npages, 1, 1, pages, NULL);
        get_npages = max(get_npages, 0);
        if (get_npages != npages) {
                DRM_ERROR("failed to get user_pages.\n");
                while (get_npages)
                        put_page(pages[--get_npages]);
                return -EFAULT;
        }

        return 0;
}
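
/*
 * Two cases above: for VM_PFNMAP (I/O) areas the pfns are resolved
 * with follow_pfn() and no page references are taken, while for
 * ordinary mappings get_user_pages() pins the pages; those pins are
 * dropped again in exynos_gem_put_pages_to_userptr() below.
 */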

void exynos_gem_put_pages_to_userptr(struct page **pages,
                                        unsigned int npages,
                                        struct vm_area_struct *vma)
{
        if (!vma_is_io(vma)) {
                unsigned int i;

                for (i = 0; i < npages; i++) {
                        set_page_dirty_lock(pages[i]);

                        /*
                         * undo the reference we took when populating
                         * the table.
                         */
                        put_page(pages[i]);
                }
        }
}

int exynos_gem_map_sgt_with_dma(struct drm_device *drm_dev,
                                struct sg_table *sgt,
                                enum dma_data_direction dir)
{
        int nents;

        mutex_lock(&drm_dev->struct_mutex);

        nents = dma_map_sg(drm_dev->dev, sgt->sgl, sgt->nents, dir);
        if (!nents) {
                DRM_ERROR("failed to map sgl with dma.\n");
                mutex_unlock(&drm_dev->struct_mutex);
                /* dma_map_sg() returns 0 on failure; report a real error. */
                return -EIO;
        }

        mutex_unlock(&drm_dev->struct_mutex);
        return 0;
}

void exynos_gem_unmap_sgt_from_dma(struct drm_device *drm_dev,
                                struct sg_table *sgt,
                                enum dma_data_direction dir)
{
        dma_unmap_sg(drm_dev->dev, sgt->sgl, sgt->nents, dir);
}
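
/*
 * A typical call sequence for the two helpers above (sketch):
 *
 *      exynos_gem_map_sgt_with_dma(drm_dev, sgt, DMA_TO_DEVICE);
 *      ... device reads from the buffer ...
 *      exynos_gem_unmap_sgt_from_dma(drm_dev, sgt, DMA_TO_DEVICE);
 */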

void exynos_drm_gem_free_object(struct drm_gem_object *obj)
{
        struct exynos_drm_gem_obj *exynos_gem_obj;
        struct exynos_drm_gem_buf *buf;

        exynos_gem_obj = to_exynos_gem_obj(obj);
        buf = exynos_gem_obj->buffer;

        if (obj->import_attach)
                drm_prime_gem_destroy(obj, buf->sgt);

        exynos_drm_gem_destroy(to_exynos_gem_obj(obj));
}

int exynos_drm_gem_dumb_create(struct drm_file *file_priv,
                               struct drm_device *dev,
                               struct drm_mode_create_dumb *args)
{
        struct exynos_drm_gem_obj *exynos_gem_obj;
        int ret;

        /*
         * allocate memory to be used for the framebuffer.
         * - this callback is invoked by user applications through the
         *      DRM_IOCTL_MODE_CREATE_DUMB command.
         */

        args->pitch = args->width * ((args->bpp + 7) / 8);
        args->size = args->pitch * args->height;

        exynos_gem_obj = exynos_drm_gem_create(dev, EXYNOS_BO_CONTIG |
                                                EXYNOS_BO_WC, args->size);
        /*
         * If physically contiguous memory allocation fails and IOMMU is
         * supported, then try to get the buffer from non physically
         * contiguous memory.
         */
        if (IS_ERR(exynos_gem_obj) && is_drm_iommu_supported(dev)) {
                dev_warn(dev->dev, "contiguous FB allocation failed, falling back to non-contiguous\n");
                exynos_gem_obj = exynos_drm_gem_create(dev,
                                        EXYNOS_BO_NONCONTIG | EXYNOS_BO_WC,
                                        args->size);
        }

        if (IS_ERR(exynos_gem_obj))
                return PTR_ERR(exynos_gem_obj);

        ret = exynos_drm_gem_handle_create(&exynos_gem_obj->base, file_priv,
                        &args->handle);
        if (ret) {
                exynos_drm_gem_destroy(exynos_gem_obj);
                return ret;
        }

        return 0;
}

int exynos_drm_gem_dumb_map_offset(struct drm_file *file_priv,
                                   struct drm_device *dev, uint32_t handle,
                                   uint64_t *offset)
{
        struct drm_gem_object *obj;
        int ret = 0;

        mutex_lock(&dev->struct_mutex);

        /*
         * get the mmap offset of the memory allocated for a drm
         * framebuffer.
         * - this callback is invoked by user applications through the
         *      DRM_IOCTL_MODE_MAP_DUMB command.
         */

        obj = drm_gem_object_lookup(dev, file_priv, handle);
        if (!obj) {
                DRM_ERROR("failed to lookup gem object.\n");
                ret = -EINVAL;
                goto unlock;
        }

        ret = drm_gem_create_mmap_offset(obj);
        if (ret)
                goto out;

        *offset = drm_vma_node_offset_addr(&obj->vma_node);
        DRM_DEBUG_KMS("offset = 0x%lx\n", (unsigned long)*offset);

out:
        drm_gem_object_unreference(obj);
unlock:
        mutex_unlock(&dev->struct_mutex);
        return ret;
}
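
/*
 * The usual userspace sequence that exercises the two callbacks above
 * (sketch; error handling omitted):
 *
 *      struct drm_mode_create_dumb create = {
 *              .width = width, .height = height, .bpp = 32,
 *      };
 *      ioctl(fd, DRM_IOCTL_MODE_CREATE_DUMB, &create);
 *
 *      struct drm_mode_map_dumb map = { .handle = create.handle };
 *      ioctl(fd, DRM_IOCTL_MODE_MAP_DUMB, &map);
 *
 *      void *ptr = mmap(NULL, create.size, PROT_READ | PROT_WRITE,
 *                       MAP_SHARED, fd, map.offset);
 */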

int exynos_drm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
        struct drm_gem_object *obj = vma->vm_private_data;
        struct drm_device *dev = obj->dev;
        unsigned long f_vaddr;
        pgoff_t page_offset;
        int ret;

        page_offset = ((unsigned long)vmf->virtual_address -
                        vma->vm_start) >> PAGE_SHIFT;
        f_vaddr = (unsigned long)vmf->virtual_address;

        mutex_lock(&dev->struct_mutex);

        ret = exynos_drm_gem_map_buf(obj, vma, f_vaddr, page_offset);
        if (ret < 0)
                DRM_ERROR("failed to map the buffer into userspace.\n");

        mutex_unlock(&dev->struct_mutex);

        return convert_to_vm_err_msg(ret);
}

int exynos_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
        struct exynos_drm_gem_obj *exynos_gem_obj;
        struct drm_gem_object *obj;
        int ret;

        /* set up the vm_area_struct. */
        ret = drm_gem_mmap(filp, vma);
        if (ret < 0) {
                DRM_ERROR("failed to mmap.\n");
                return ret;
        }

        obj = vma->vm_private_data;
        exynos_gem_obj = to_exynos_gem_obj(obj);

        ret = check_gem_flags(exynos_gem_obj->flags);
        if (ret) {
                drm_gem_vm_close(vma);
                drm_gem_free_mmap_offset(obj);
                return ret;
        }

        vma->vm_flags &= ~VM_PFNMAP;
        vma->vm_flags |= VM_MIXEDMAP;

        update_vm_cache_attr(exynos_gem_obj, vma);

        return ret;
}