/*
 * Copyright (C) 2012 Red Hat
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License v2. See the file COPYING in the main directory of this archive for
 * more details.
 */

#include "drmP.h"
#include "udl_drv.h"
#include <linux/shmem_fs.h>
#include <linux/dma-buf.h>
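/*
 * Allocate a udl GEM object and initialize its base object with a shmem
 * backing store of @size bytes; returns NULL on failure.
 */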
struct udl_gem_object *udl_gem_alloc_object(struct drm_device *dev,
					    size_t size)
{
	struct udl_gem_object *obj;

	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
	if (obj == NULL)
		return NULL;

	if (drm_gem_object_init(dev, &obj->base, size) != 0) {
		kfree(obj);
		return NULL;
	}

	return obj;
}
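/*
 * Create a GEM object of @size bytes (rounded up to whole pages) and
 * return a userspace handle for it in @handle_p; the handle holds the
 * only reference to the object.
 */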
static int
udl_gem_create(struct drm_file *file,
	       struct drm_device *dev,
	       uint64_t size,
	       uint32_t *handle_p)
{
	struct udl_gem_object *obj;
	int ret;
	u32 handle;

	size = roundup(size, PAGE_SIZE);

	obj = udl_gem_alloc_object(dev, size);
	if (obj == NULL)
		return -ENOMEM;

	ret = drm_gem_handle_create(file, &obj->base, &handle);
	if (ret) {
		drm_gem_object_release(&obj->base);
		kfree(obj);
		return ret;
	}

	/* drop our local reference; the handle now owns the object */
	drm_gem_object_unreference(&obj->base);
	*handle_p = handle;
	return 0;
}
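/*
 * Dumb-buffer allocation for KMS userspace. Pitch is bytes per scanline
 * (width times bytes per pixel) and size is pitch times height, rounded
 * up to a whole page by udl_gem_create(). For example, a 1024x768 buffer
 * at 32 bpp gets pitch = 1024 * 4 = 4096 bytes and size = 4096 * 768 =
 * 3145728 bytes (768 pages).
 */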
int udl_dumb_create(struct drm_file *file,
		    struct drm_device *dev,
		    struct drm_mode_create_dumb *args)
{
	/* round bpp up to a whole number of bytes per pixel */
	args->pitch = args->width * DIV_ROUND_UP(args->bpp, 8);
	args->size = args->pitch * args->height;
	return udl_gem_create(file, dev,
			      args->size, &args->handle);
}
int udl_dumb_destroy(struct drm_file *file, struct drm_device *dev,
		     uint32_t handle)
{
	return drm_gem_handle_delete(file, handle);
}
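/*
 * Wrap drm_gem_mmap() so faults are handled per page by udl_gem_fault():
 * the backing store is ordinary shmem pages, not PFN-mapped device
 * memory, so the VMA is switched from VM_PFNMAP to VM_MIXEDMAP to allow
 * vm_insert_page() on struct pages.
 */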
int udl_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	int ret;

	ret = drm_gem_mmap(filp, vma);
	if (ret)
		return ret;

	vma->vm_flags &= ~VM_PFNMAP;
	vma->vm_flags |= VM_MIXEDMAP;

	return ret;
}
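/*
 * Page-fault handler for mmap()ed GEM objects: look up the pinned page
 * that backs the faulting address and insert it into the user's page
 * tables.
 */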
int udl_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct udl_gem_object *obj = to_udl_bo(vma->vm_private_data);
	struct page *page;
	unsigned int page_offset;
	int ret = 0;

	page_offset = ((unsigned long)vmf->virtual_address - vma->vm_start) >>
		PAGE_SHIFT;

	if (!obj->pages)
		return VM_FAULT_SIGBUS;

	page = obj->pages[page_offset];
	ret = vm_insert_page(vma, (unsigned long)vmf->virtual_address, page);
	switch (ret) {
	case -EAGAIN:
	case 0:
	case -ERESTARTSYS:
		return VM_FAULT_NOPAGE;
	case -ENOMEM:
		return VM_FAULT_OOM;
	default:
		return VM_FAULT_SIGBUS;
	}
}
/*
 * udl never allocates objects through drm_gem_object_alloc(), so this
 * hook should never be reached.
 */
int udl_gem_init_object(struct drm_gem_object *obj)
{
	BUG();

	return 0;
}
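/*
 * Pin the object's backing pages by reading them in from the shmem
 * mapping that drm_gem_object_init() created. Each page comes back with
 * an elevated refcount that udl_gem_put_pages() must drop.
 */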
static int udl_gem_get_pages(struct udl_gem_object *obj, gfp_t gfpmask)
{
	int page_count, i;
	struct page *page;
	struct inode *inode;
	struct address_space *mapping;

	if (obj->pages)
		return 0;

	page_count = obj->base.size / PAGE_SIZE;
	obj->pages = drm_malloc_ab(page_count, sizeof(struct page *));
	if (obj->pages == NULL)
		return -ENOMEM;

	inode = obj->base.filp->f_path.dentry->d_inode;
	mapping = inode->i_mapping;
	gfpmask |= mapping_gfp_mask(mapping);

	for (i = 0; i < page_count; i++) {
		page = shmem_read_mapping_page_gfp(mapping, i, gfpmask);
		if (IS_ERR(page))
			goto err_pages;
		obj->pages[i] = page;
	}

	return 0;
err_pages:
	while (i--)
		page_cache_release(obj->pages[i]);
	drm_free_large(obj->pages);
	obj->pages = NULL;
	return PTR_ERR(page);
}
static void udl_gem_put_pages(struct udl_gem_object *obj)
{
	int page_count = obj->base.size / PAGE_SIZE;
	int i;

	/* imported dma-bufs: we only allocated the page array itself */
	if (obj->base.import_attach) {
		drm_free_large(obj->pages);
		obj->pages = NULL;
		return;
	}

	for (i = 0; i < page_count; i++)
		page_cache_release(obj->pages[i]);

	drm_free_large(obj->pages);
	obj->pages = NULL;
}
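/*
 * Map the object into kernel address space. Imported dma-bufs are
 * vmapped by their exporter (bracketed by begin/end CPU access); native
 * objects pin their shmem pages and vmap() them directly.
 */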
int udl_gem_vmap(struct udl_gem_object *obj)
{
	int page_count = obj->base.size / PAGE_SIZE;
	int ret;

	if (obj->base.import_attach) {
		ret = dma_buf_begin_cpu_access(obj->base.import_attach->dmabuf,
					       0, obj->base.size,
					       DMA_BIDIRECTIONAL);
		if (ret)
			return -EINVAL;

		obj->vmapping = dma_buf_vmap(obj->base.import_attach->dmabuf);
		if (!obj->vmapping)
			return -ENOMEM;
		return 0;
	}

	ret = udl_gem_get_pages(obj, GFP_KERNEL);
	if (ret)
		return ret;

	obj->vmapping = vmap(obj->pages, page_count, 0, PAGE_KERNEL);
	if (!obj->vmapping)
		return -ENOMEM;
	return 0;
}
void udl_gem_vunmap(struct udl_gem_object *obj)
{
	if (obj->base.import_attach) {
		dma_buf_vunmap(obj->base.import_attach->dmabuf, obj->vmapping);
		dma_buf_end_cpu_access(obj->base.import_attach->dmabuf, 0,
				       obj->base.size, DMA_BIDIRECTIONAL);
		return;
	}

	if (obj->vmapping)
		vunmap(obj->vmapping);

	udl_gem_put_pages(obj);
}
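/*
 * Final teardown for a GEM object: undo any kernel mapping, release the
 * PRIME import attachment if there is one, drop the pinned pages and
 * free the fake mmap offset.
 */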
void udl_gem_free_object(struct drm_gem_object *gem_obj)
{
	struct udl_gem_object *obj = to_udl_bo(gem_obj);

	if (obj->vmapping)
		udl_gem_vunmap(obj);

	if (gem_obj->import_attach)
		drm_prime_gem_destroy(gem_obj, obj->sg);

	if (obj->pages)
		udl_gem_put_pages(obj);

	if (gem_obj->map_list.map)
		drm_gem_free_mmap_offset(gem_obj);
}
/*
 * The dumb interface doesn't use GEM's direct mmap path; userspace is
 * expected to mmap() an offset on the DRM fd, so hand back the fake
 * mmap offset for this handle.
 */
int udl_gem_mmap(struct drm_file *file, struct drm_device *dev,
		 uint32_t handle, uint64_t *offset)
{
	struct udl_gem_object *gobj;
	struct drm_gem_object *obj;
	int ret = 0;

	mutex_lock(&dev->struct_mutex);
	obj = drm_gem_object_lookup(dev, file, handle);
	if (obj == NULL) {
		ret = -ENOENT;
		goto unlock;
	}
	gobj = to_udl_bo(obj);

	ret = udl_gem_get_pages(gobj, GFP_KERNEL);
	if (ret)
		goto out;
	if (!gobj->base.map_list.map) {
		ret = drm_gem_create_mmap_offset(obj);
		if (ret)
			goto out;
	}

	*offset = (u64)gobj->base.map_list.hash.key << PAGE_SHIFT;

out:
	drm_gem_object_unreference(&gobj->base);
unlock:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}
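/*
 * For reference, the userspace flow this serves looks roughly like the
 * sketch below (illustrative only, not part of this driver; variables
 * like drm_fd are placeholders and error handling is omitted):
 *
 *	struct drm_mode_create_dumb create = { .width = w, .height = h,
 *					       .bpp = 32 };
 *	ioctl(drm_fd, DRM_IOCTL_MODE_CREATE_DUMB, &create);
 *
 *	struct drm_mode_map_dumb map = { .handle = create.handle };
 *	ioctl(drm_fd, DRM_IOCTL_MODE_MAP_DUMB, &map);
 *
 *	void *fb = mmap(NULL, create.size, PROT_READ | PROT_WRITE,
 *			MAP_SHARED, drm_fd, map.offset);
 */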
static int udl_prime_create(struct drm_device *dev,
			    size_t size,
			    struct sg_table *sg,
			    struct udl_gem_object **obj_p)
{
	struct udl_gem_object *obj;
	int npages;

	npages = size / PAGE_SIZE;

	*obj_p = NULL;
	obj = udl_gem_alloc_object(dev, npages * PAGE_SIZE);
	if (!obj)
		return -ENOMEM;

	obj->sg = sg;
	obj->pages = drm_malloc_ab(npages, sizeof(struct page *));
	if (obj->pages == NULL) {
		DRM_ERROR("obj pages is NULL %d\n", npages);
		drm_gem_object_release(&obj->base);
		kfree(obj);
		return -ENOMEM;
	}

	drm_prime_sg_to_page_addr_arrays(sg, obj->pages, NULL, npages);

	*obj_p = obj;
	return 0;
}
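/*
 * PRIME import: attach to the foreign dma-buf, map its scatterlist, and
 * wrap the resulting pages in a udl GEM object so the rest of the driver
 * can treat it like a native buffer.
 */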
struct drm_gem_object *udl_gem_prime_import(struct drm_device *dev,
					    struct dma_buf *dma_buf)
{
	struct dma_buf_attachment *attach;
	struct sg_table *sg;
	struct udl_gem_object *uobj;
	int ret;

	attach = dma_buf_attach(dma_buf, dev->dev);
	if (IS_ERR(attach))
		return ERR_CAST(attach);

	sg = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
	if (IS_ERR(sg)) {
		ret = PTR_ERR(sg);
		goto fail_detach;
	}

	ret = udl_prime_create(dev, dma_buf->size, sg, &uobj);
	if (ret)
		goto fail_unmap;

	uobj->base.import_attach = attach;

	return &uobj->base;

fail_unmap:
	dma_buf_unmap_attachment(attach, sg, DMA_BIDIRECTIONAL);
fail_detach:
	dma_buf_detach(dma_buf, attach);
	return ERR_PTR(ret);
}