/*
 * videobuf2-dma-sg.c - dma scatter/gather memory allocator for videobuf2
 *
 * Copyright (C) 2010 Samsung Electronics
 *
 * Author: Andrzej Pietrasiewicz <andrzej.p@samsung.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 */
13 #include <linux/module.h>
15 #include <linux/scatterlist.h>
16 #include <linux/sched.h>
17 #include <linux/slab.h>
18 #include <linux/vmalloc.h>
20 #include <media/videobuf2-core.h>
21 #include <media/videobuf2-memops.h>
22 #include <media/videobuf2-dma-sg.h>
/* Runtime-tunable verbosity knob (mode 0644: root may change it via sysfs).
 * NOTE(review): the backing "static int debug;" definition is not visible in
 * this extract — confirm it exists above. */
25 module_param(debug, int, 0644);
/* Level-gated debug printk with a module prefix.
 * NOTE(review): the level comparison against 'debug' and the usual
 * do { ... } while (0) wrapper appear to have been dropped from this
 * extract; only the printk line of the macro body is visible. */
27 #define dprintk(level, fmt, arg...) \
30 printk(KERN_DEBUG "vb2-dma-sg: " fmt, ## arg); \
/* Per-buffer state for the dma-sg allocator.
 * NOTE(review): several members used by the functions below (vaddr, pages,
 * offset, write, refcount) and the closing brace are missing from this
 * extract — only sg_desc and handler are visible. */
33 struct vb2_dma_sg_buf {
/* Scatter/gather descriptor handed out to drivers via the .cookie op. */
38 struct vb2_dma_sg_desc sg_desc;
/* Refcount bookkeeping for mmap'ed VMAs (see vb2_dma_sg_mmap()). */
40 struct vb2_vmarea_handler handler;
/* Forward declaration: vb2_dma_sg_alloc() installs this as the vm-area
 * handler's put callback before the function is defined. */
43 static void vb2_dma_sg_put(void *buf_priv);
/*
 * Fill buf->pages[] and buf->sg_desc.sglist[] with physically "compacted"
 * memory: repeatedly allocate the largest page-order chunk that still fits
 * the remaining size, split it into order-0 pages, and record each page.
 * NOTE(review): the outer while-loop, the second parameter, local
 * declarations (order, i, pages), the order-trimming statement, the
 * allocation-failure unwind and the return statements are all missing from
 * this extract.
 */
45 static int vb2_dma_sg_alloc_compacted(struct vb2_dma_sg_buf *buf,
48 unsigned int last_page = 0;
49 int size = buf->sg_desc.size;
/* Start from the largest order covering the remaining size... */
56 order = get_order(size);
57 /* ...but don't over-allocate: trim the order so the chunk fits. */
58 if ((PAGE_SIZE << order) > size)
/* __GFP_NOWARN: a failed high-order attempt is expected and retried at a
 * lower order (retry loop not visible here); __GFP_ZERO gives zeroed pages. */
63 pages = alloc_pages(GFP_KERNEL | __GFP_ZERO |
64 __GFP_NOWARN | gfp_flags, order);
/* Unwind path: release the pages gathered so far.
 * NOTE(review): the enclosing cleanup loop header is not visible. */
70 __free_page(buf->pages[last_page]);
/* Break the high-order allocation into individually freeable 0-order
 * pages and enter each one into the page array and scatterlist. */
76 split_page(pages, order);
77 for (i = 0; i < (1 << order); i++) {
78 buf->pages[last_page] = &pages[i];
79 sg_set_page(&buf->sg_desc.sglist[last_page],
80 buf->pages[last_page], PAGE_SIZE, 0);
84 size -= PAGE_SIZE << order;
/*
 * MMAP-mode allocation (.alloc op): create a vb2_dma_sg_buf of @size bytes
 * backed by kernel pages, with a scatterlist describing them.
 * Returns the buffer on success; error unwinding frees partially built
 * state via the fail_* labels.
 * NOTE(review): the opening brace, several declarations (ret), NULL checks
 * after the allocations, the success return and the fail_pages_alloc label
 * are missing from this extract.
 */
90 static void *vb2_dma_sg_alloc(void *alloc_ctx, unsigned long size, gfp_t gfp_flags)
92 struct vb2_dma_sg_buf *buf;
95 buf = kzalloc(sizeof *buf, GFP_KERNEL);
102 buf->sg_desc.size = size;
103 /* size is already page aligned */
104 buf->sg_desc.num_pages = size >> PAGE_SHIFT;
/* sglist can be large for big buffers, so use vzalloc rather than kzalloc. */
106 buf->sg_desc.sglist = vzalloc(buf->sg_desc.num_pages *
107 sizeof(*buf->sg_desc.sglist));
108 if (!buf->sg_desc.sglist)
109 goto fail_sglist_alloc;
110 sg_init_table(buf->sg_desc.sglist, buf->sg_desc.num_pages);
112 buf->pages = kzalloc(buf->sg_desc.num_pages * sizeof(struct page *),
115 goto fail_pages_array_alloc;
/* Populate pages[]/sglist[] with compacted physical memory. */
117 ret = vb2_dma_sg_alloc_compacted(buf, gfp_flags);
119 goto fail_pages_alloc;
/* Hook up the common vm-area handler so munmap() drops our refcount. */
121 buf->handler.refcount = &buf->refcount;
122 buf->handler.put = vb2_dma_sg_put;
123 buf->handler.arg = buf;
/* The allocator itself holds one reference until vb2_dma_sg_put(). */
125 atomic_inc(&buf->refcount);
127 dprintk(1, "%s: Allocated buffer of %d pages\n",
128 __func__, buf->sg_desc.num_pages);
/* Error unwind, innermost failure first. */
134 fail_pages_array_alloc:
135 vfree(buf->sg_desc.sglist);
/*
 * Release one reference (.put op); on the last reference tear the buffer
 * down: unmap any kernel mapping, free the scatterlist and every page.
 * NOTE(review): the page-freeing loop header (presumably "while (--i >= 0)"),
 * the kfree of buf->pages and of buf itself, and the closing braces are
 * missing from this extract.
 */
142 static void vb2_dma_sg_put(void *buf_priv)
144 struct vb2_dma_sg_buf *buf = buf_priv;
145 int i = buf->sg_desc.num_pages;
147 if (atomic_dec_and_test(&buf->refcount)) {
148 dprintk(1, "%s: Freeing buffer of %d pages\n", __func__,
149 buf->sg_desc.num_pages);
/* Drop the kernel mapping created lazily by vb2_dma_sg_vaddr(), if any. */
151 vm_unmap_ram(buf->vaddr, buf->sg_desc.num_pages);
152 vfree(buf->sg_desc.sglist);
154 __free_page(buf->pages[i]);
/*
 * USERPTR-mode acquisition (.get_userptr op): pin the user pages covering
 * [vaddr, vaddr + size) with get_user_pages() and build a scatterlist over
 * them, honouring the sub-page offset of the first page.
 * NOTE(review): the NULL check after kzalloc, the write-flag assignment,
 * the trailing get_user_pages() arguments, the mmap_sem locking around it,
 * the success return and the final error return are missing from this
 * extract.
 */
160 static void *vb2_dma_sg_get_userptr(void *alloc_ctx, unsigned long vaddr,
161 unsigned long size, int write)
163 struct vb2_dma_sg_buf *buf;
164 unsigned long first, last;
165 int num_pages_from_user, i;
167 buf = kzalloc(sizeof *buf, GFP_KERNEL);
/* The user pointer need not be page aligned; remember the in-page offset. */
173 buf->offset = vaddr & ~PAGE_MASK;
174 buf->sg_desc.size = size;
/* Number of pages spanned by [vaddr, vaddr + size). */
176 first = (vaddr & PAGE_MASK) >> PAGE_SHIFT;
177 last = ((vaddr + size - 1) & PAGE_MASK) >> PAGE_SHIFT;
178 buf->sg_desc.num_pages = last - first + 1;
180 buf->sg_desc.sglist = vzalloc(
181 buf->sg_desc.num_pages * sizeof(*buf->sg_desc.sglist));
182 if (!buf->sg_desc.sglist)
183 goto userptr_fail_sglist_alloc;
185 sg_init_table(buf->sg_desc.sglist, buf->sg_desc.num_pages);
187 buf->pages = kzalloc(buf->sg_desc.num_pages * sizeof(struct page *),
190 goto userptr_fail_pages_array_alloc;
/* Pin the user pages; each successfully pinned page must later be released
 * with put_page() (see vb2_dma_sg_put_userptr() and the unwind below). */
192 num_pages_from_user = get_user_pages(current, current->mm,
194 buf->sg_desc.num_pages,
/* Partial pin is treated as failure; the unwind releases what was pinned. */
200 if (num_pages_from_user != buf->sg_desc.num_pages)
201 goto userptr_fail_get_user_pages;
/* First sg entry is shortened by the sub-page offset... */
203 sg_set_page(&buf->sg_desc.sglist[0], buf->pages[0],
204 PAGE_SIZE - buf->offset, buf->offset);
205 size -= PAGE_SIZE - buf->offset;
/* ...remaining entries cover a full page each, except possibly the last. */
206 for (i = 1; i < buf->sg_desc.num_pages; ++i) {
207 sg_set_page(&buf->sg_desc.sglist[i], buf->pages[i],
208 min_t(size_t, PAGE_SIZE, size), 0);
209 size -= min_t(size_t, PAGE_SIZE, size);
/* Error unwind, innermost failure first: unpin, then free allocations. */
213 userptr_fail_get_user_pages:
214 dprintk(1, "get_user_pages requested/got: %d/%d]\n",
215 num_pages_from_user, buf->sg_desc.num_pages);
216 while (--num_pages_from_user >= 0)
217 put_page(buf->pages[num_pages_from_user]);
220 userptr_fail_pages_array_alloc:
221 vfree(buf->sg_desc.sglist);
223 userptr_fail_sglist_alloc:
/*
 * @put_userptr: inform the allocator that a USERPTR buffer will no longer
 *		 be used
 */
/*
 * USERPTR-mode release (.put_userptr op): drop any kernel mapping, mark the
 * pinned pages dirty (data may have been written by DMA) and unpin them,
 * then free the bookkeeping structures.
 * NOTE(review): the page loop header (presumably "while (--i >= 0)"), the
 * write-flag guard around set_page_dirty_lock(), the kfree of buf->pages
 * and of buf, and the closing brace are missing from this extract.
 */
232 static void vb2_dma_sg_put_userptr(void *buf_priv)
234 struct vb2_dma_sg_buf *buf = buf_priv;
235 int i = buf->sg_desc.num_pages;
237 dprintk(1, "%s: Releasing userspace buffer of %d pages\n",
238 __func__, buf->sg_desc.num_pages);
/* Undo the lazy vm_map_ram() mapping from vb2_dma_sg_vaddr(), if present. */
240 vm_unmap_ram(buf->vaddr, buf->sg_desc.num_pages);
243 set_page_dirty_lock(buf->pages[i]);
/* Balance the get_user_pages() pin taken in vb2_dma_sg_get_userptr(). */
244 put_page(buf->pages[i]);
246 vfree(buf->sg_desc.sglist);
/*
 * Kernel virtual address of the buffer (.vaddr op): lazily map the page
 * array with vm_map_ram() on first use and cache it in buf->vaddr.
 * NOTE(review): the "if (!buf->vaddr)" guard, the remaining vm_map_ram()
 * arguments and the braces are missing from this extract.
 */
251 static void *vb2_dma_sg_vaddr(void *buf_priv)
253 struct vb2_dma_sg_buf *buf = buf_priv;
258 buf->vaddr = vm_map_ram(buf->pages,
259 buf->sg_desc.num_pages,
263 /* add offset in case userptr is not page-aligned */
264 return buf->vaddr + buf->offset;
/*
 * Current reference count of the buffer (.num_users op): used by the vb2
 * core to tell whether userspace still holds mappings.
 */
267 static unsigned int vb2_dma_sg_num_users(void *buf_priv)
269 struct vb2_dma_sg_buf *buf = buf_priv;
271 return atomic_read(&buf->refcount);
/*
 * Map the buffer into a userspace VMA (.mmap op) by inserting each page
 * with vm_insert_page(), then install the common vm_ops so the VMA's
 * open/close callbacks maintain the buffer refcount.
 * NOTE(review): the NULL-buffer guard, the insertion loop header, the
 * per-page error return, the pointer/size advance and the final "return 0"
 * are missing from this extract.
 */
274 static int vb2_dma_sg_mmap(void *buf_priv, struct vm_area_struct *vma)
276 struct vb2_dma_sg_buf *buf = buf_priv;
277 unsigned long uaddr = vma->vm_start;
278 unsigned long usize = vma->vm_end - vma->vm_start;
282 printk(KERN_ERR "No memory to map\n");
/* Insert pages one by one; vm_insert_page() fails on unsuitable VMAs. */
289 ret = vm_insert_page(vma, uaddr, buf->pages[i++]);
291 printk(KERN_ERR "Remapping memory, error: %d\n", ret);
301 * Use common vm_area operations to track buffer refcount.
303 vma->vm_private_data = &buf->handler;
304 vma->vm_ops = &vb2_common_vm_ops;
/* Take the initial VMA reference explicitly (mmap() itself won't). */
306 vma->vm_ops->open(vma);
/*
 * Allocator-specific cookie (.cookie op): hands the driver the
 * vb2_dma_sg_desc so it can program its scatter/gather DMA engine.
 */
311 static void *vb2_dma_sg_cookie(void *buf_priv)
313 struct vb2_dma_sg_buf *buf = buf_priv;
315 return &buf->sg_desc;
/*
 * vb2 memory-operations table exported to drivers that use the dma-sg
 * allocator (designated initializers; unset ops stay NULL).
 * NOTE(review): the closing "};" of the initializer is missing from this
 * extract.
 */
318 const struct vb2_mem_ops vb2_dma_sg_memops = {
319 .alloc = vb2_dma_sg_alloc,
320 .put = vb2_dma_sg_put,
321 .get_userptr = vb2_dma_sg_get_userptr,
322 .put_userptr = vb2_dma_sg_put_userptr,
323 .vaddr = vb2_dma_sg_vaddr,
324 .mmap = vb2_dma_sg_mmap,
325 .num_users = vb2_dma_sg_num_users,
326 .cookie = vb2_dma_sg_cookie,
328 EXPORT_SYMBOL_GPL(vb2_dma_sg_memops);
/* Standard module metadata; GPL license is required for the *_GPL exports
 * this module itself uses. */
330 MODULE_DESCRIPTION("dma scatter/gather memory handling routines for videobuf2");
331 MODULE_AUTHOR("Andrzej Pietrasiewicz");
332 MODULE_LICENSE("GPL");