/*
 * videobuf2-dma-sg.c - dma scatter/gather memory allocator for videobuf2
 *
 * Copyright (C) 2010 Samsung Electronics
 *
 * Author: Andrzej Pietrasiewicz <andrzej.p@samsung.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#include <media/videobuf2-core.h>
#include <media/videobuf2-memops.h>
#include <media/videobuf2-dma-sg.h>

static int debug;
module_param(debug, int, 0644);

#define dprintk(level, fmt, arg...)                                     \
        do {                                                            \
                if (debug >= level)                                     \
                        printk(KERN_DEBUG "vb2-dma-sg: " fmt, ## arg);  \
        } while (0)

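/*
 * Per-buffer state: an optional kernel mapping (vaddr), the backing
 * pages, the sub-page offset of an unaligned USERPTR buffer, the
 * scatter/gather descriptor handed out to drivers, and a refcount
 * shared with the mmap vm_area handler.
 */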
struct vb2_dma_sg_buf {
        void                            *vaddr;
        struct page                     **pages;
        int                             write;
        int                             offset;
        struct vb2_dma_sg_desc          sg_desc;
        atomic_t                        refcount;
        struct vb2_vmarea_handler       handler;
};

static void vb2_dma_sg_put(void *buf_priv);

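/*
 * Allocate the buffer's pages in chunks that are as contiguous as
 * possible: start with the highest order that still fits the remaining
 * size, fall back to smaller orders when allocation fails, and split
 * each successful high-order allocation into order-0 pages so that the
 * pages array and the scatterlist can be filled one page at a time.
 */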
static int vb2_dma_sg_alloc_compacted(struct vb2_dma_sg_buf *buf,
                gfp_t gfp_flags)
{
        unsigned int last_page = 0;
        int size = buf->sg_desc.size;

        while (size > 0) {
                struct page *pages;
                int order;
                int i;

                order = get_order(size);
                /* Don't over-allocate */
                if ((PAGE_SIZE << order) > size)
                        order--;

                pages = NULL;
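                /* Retry with progressively smaller orders until one fits */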
                while (!pages) {
                        pages = alloc_pages(GFP_KERNEL | __GFP_ZERO |
                                        __GFP_NOWARN | gfp_flags, order);
                        if (pages)
                                break;

                        if (order == 0) {
                                while (last_page--)
                                        __free_page(buf->pages[last_page]);
                                return -ENOMEM;
                        }
                        order--;
                }

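                /* Hand the high-order block out as individual order-0 pages */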
                split_page(pages, order);
                for (i = 0; i < (1 << order); i++) {
                        buf->pages[last_page] = &pages[i];
                        sg_set_page(&buf->sg_desc.sglist[last_page],
                                        buf->pages[last_page], PAGE_SIZE, 0);
                        last_page++;
                }

                size -= PAGE_SIZE << order;
        }

        return 0;
}

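/*
 * MMAP allocation: set up the buffer descriptor, the scatterlist and
 * the pages array, then populate them with freshly allocated pages.
 * The returned buffer starts with a refcount of one, dropped by
 * vb2_dma_sg_put().
 */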
static void *vb2_dma_sg_alloc(void *alloc_ctx, unsigned long size, gfp_t gfp_flags)
{
        struct vb2_dma_sg_buf *buf;
        int ret;

        buf = kzalloc(sizeof *buf, GFP_KERNEL);
        if (!buf)
                return NULL;

        buf->vaddr = NULL;
        buf->write = 0;
        buf->offset = 0;
        buf->sg_desc.size = size;
        /* size is already page aligned */
        buf->sg_desc.num_pages = size >> PAGE_SHIFT;

        buf->sg_desc.sglist = vzalloc(buf->sg_desc.num_pages *
                                      sizeof(*buf->sg_desc.sglist));
        if (!buf->sg_desc.sglist)
                goto fail_sglist_alloc;
        sg_init_table(buf->sg_desc.sglist, buf->sg_desc.num_pages);

        buf->pages = kzalloc(buf->sg_desc.num_pages * sizeof(struct page *),
                             GFP_KERNEL);
        if (!buf->pages)
                goto fail_pages_array_alloc;

        ret = vb2_dma_sg_alloc_compacted(buf, gfp_flags);
        if (ret)
                goto fail_pages_alloc;

        buf->handler.refcount = &buf->refcount;
        buf->handler.put = vb2_dma_sg_put;
        buf->handler.arg = buf;

        atomic_inc(&buf->refcount);

        dprintk(1, "%s: Allocated buffer of %d pages\n",
                __func__, buf->sg_desc.num_pages);
        return buf;

fail_pages_alloc:
        kfree(buf->pages);

fail_pages_array_alloc:
        vfree(buf->sg_desc.sglist);

fail_sglist_alloc:
        kfree(buf);
        return NULL;
}

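/*
 * Drop one reference; on the last put, tear down the kernel mapping
 * (if any) and free the pages, the scatterlist and the buffer itself.
 */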
static void vb2_dma_sg_put(void *buf_priv)
{
        struct vb2_dma_sg_buf *buf = buf_priv;
        int i = buf->sg_desc.num_pages;

        if (atomic_dec_and_test(&buf->refcount)) {
                dprintk(1, "%s: Freeing buffer of %d pages\n", __func__,
                        buf->sg_desc.num_pages);
                if (buf->vaddr)
                        vm_unmap_ram(buf->vaddr, buf->sg_desc.num_pages);
                vfree(buf->sg_desc.sglist);
                while (--i >= 0)
                        __free_page(buf->pages[i]);
                kfree(buf->pages);
                kfree(buf);
        }
}

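/*
 * USERPTR support: pin the user pages with get_user_pages() and build
 * a scatterlist around them. The first entry carries the sub-page
 * offset of the (possibly unaligned) user address; the remaining
 * entries cover one page each, up to the requested size.
 */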
static void *vb2_dma_sg_get_userptr(void *alloc_ctx, unsigned long vaddr,
                                    unsigned long size, int write)
{
        struct vb2_dma_sg_buf *buf;
        unsigned long first, last;
        int num_pages_from_user, i;

        buf = kzalloc(sizeof *buf, GFP_KERNEL);
        if (!buf)
                return NULL;

        buf->vaddr = NULL;
        buf->write = write;
        buf->offset = vaddr & ~PAGE_MASK;
        buf->sg_desc.size = size;

        first = (vaddr              & PAGE_MASK) >> PAGE_SHIFT;
        last  = ((vaddr + size - 1) & PAGE_MASK) >> PAGE_SHIFT;
        buf->sg_desc.num_pages = last - first + 1;

        buf->sg_desc.sglist = vzalloc(
                buf->sg_desc.num_pages * sizeof(*buf->sg_desc.sglist));
        if (!buf->sg_desc.sglist)
                goto userptr_fail_sglist_alloc;

        sg_init_table(buf->sg_desc.sglist, buf->sg_desc.num_pages);

        buf->pages = kzalloc(buf->sg_desc.num_pages * sizeof(struct page *),
                             GFP_KERNEL);
        if (!buf->pages)
                goto userptr_fail_pages_array_alloc;

        num_pages_from_user = get_user_pages(current, current->mm,
                                             vaddr & PAGE_MASK,
                                             buf->sg_desc.num_pages,
                                             write,
                                             1, /* force */
                                             buf->pages,
                                             NULL);

        if (num_pages_from_user != buf->sg_desc.num_pages)
                goto userptr_fail_get_user_pages;

        sg_set_page(&buf->sg_desc.sglist[0], buf->pages[0],
                    PAGE_SIZE - buf->offset, buf->offset);
        size -= PAGE_SIZE - buf->offset;
        for (i = 1; i < buf->sg_desc.num_pages; ++i) {
                sg_set_page(&buf->sg_desc.sglist[i], buf->pages[i],
                            min_t(size_t, PAGE_SIZE, size), 0);
                size -= min_t(size_t, PAGE_SIZE, size);
        }
        return buf;

userptr_fail_get_user_pages:
        dprintk(1, "get_user_pages requested/got: %d/%d\n",
                buf->sg_desc.num_pages, num_pages_from_user);
        while (--num_pages_from_user >= 0)
                put_page(buf->pages[num_pages_from_user]);
        kfree(buf->pages);

userptr_fail_pages_array_alloc:
        vfree(buf->sg_desc.sglist);

userptr_fail_sglist_alloc:
        kfree(buf);
        return NULL;
}

/*
 * @put_userptr: inform the allocator that a USERPTR buffer will no longer
 *               be used
 */
static void vb2_dma_sg_put_userptr(void *buf_priv)
{
        struct vb2_dma_sg_buf *buf = buf_priv;
        int i = buf->sg_desc.num_pages;

        dprintk(1, "%s: Releasing userspace buffer of %d pages\n",
                __func__, buf->sg_desc.num_pages);
        if (buf->vaddr)
                vm_unmap_ram(buf->vaddr, buf->sg_desc.num_pages);
        while (--i >= 0) {
                if (buf->write)
                        set_page_dirty_lock(buf->pages[i]);
                put_page(buf->pages[i]);
        }
        vfree(buf->sg_desc.sglist);
        kfree(buf->pages);
        kfree(buf);
}

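/*
 * Lazily map the buffer's pages into a contiguous kernel virtual range
 * with vm_map_ram(); the mapping is torn down when the buffer is freed.
 */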
static void *vb2_dma_sg_vaddr(void *buf_priv)
{
        struct vb2_dma_sg_buf *buf = buf_priv;

        BUG_ON(!buf);

        if (!buf->vaddr)
                buf->vaddr = vm_map_ram(buf->pages,
                                        buf->sg_desc.num_pages,
                                        -1,
                                        PAGE_KERNEL);

        /* add offset in case userptr is not page-aligned */
        return buf->vaddr + buf->offset;
}

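/* Report how many references (users/mappings) the buffer currently has */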
static unsigned int vb2_dma_sg_num_users(void *buf_priv)
{
        struct vb2_dma_sg_buf *buf = buf_priv;

        return atomic_read(&buf->refcount);
}

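/*
 * Map the buffer into userspace by inserting its pages one by one into
 * the VMA, then hook up the common vb2 vm_ops so that the mapping holds
 * a reference on the buffer for its lifetime.
 */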
static int vb2_dma_sg_mmap(void *buf_priv, struct vm_area_struct *vma)
{
        struct vb2_dma_sg_buf *buf = buf_priv;
        unsigned long uaddr = vma->vm_start;
        unsigned long usize = vma->vm_end - vma->vm_start;
        int i = 0;

        if (!buf) {
                printk(KERN_ERR "No memory to map\n");
                return -EINVAL;
        }

        do {
                int ret;

                ret = vm_insert_page(vma, uaddr, buf->pages[i++]);
                if (ret) {
                        printk(KERN_ERR "Remapping memory, error: %d\n", ret);
                        return ret;
                }

                uaddr += PAGE_SIZE;
                usize -= PAGE_SIZE;
        } while (usize > 0);

        /*
         * Use common vm_area operations to track buffer refcount.
         */
        vma->vm_private_data    = &buf->handler;
        vma->vm_ops             = &vb2_common_vm_ops;

        vma->vm_ops->open(vma);

        return 0;
}

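/* The "cookie" handed to drivers is the scatter/gather descriptor */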
static void *vb2_dma_sg_cookie(void *buf_priv)
{
        struct vb2_dma_sg_buf *buf = buf_priv;

        return &buf->sg_desc;
}

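/*
 * Operation table that drivers plug into their vb2_queue, typically as
 * q->mem_ops = &vb2_dma_sg_memops during queue setup.
 */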
const struct vb2_mem_ops vb2_dma_sg_memops = {
        .alloc          = vb2_dma_sg_alloc,
        .put            = vb2_dma_sg_put,
        .get_userptr    = vb2_dma_sg_get_userptr,
        .put_userptr    = vb2_dma_sg_put_userptr,
        .vaddr          = vb2_dma_sg_vaddr,
        .mmap           = vb2_dma_sg_mmap,
        .num_users      = vb2_dma_sg_num_users,
        .cookie         = vb2_dma_sg_cookie,
};
EXPORT_SYMBOL_GPL(vb2_dma_sg_memops);

MODULE_DESCRIPTION("dma scatter/gather memory handling routines for videobuf2");
MODULE_AUTHOR("Andrzej Pietrasiewicz");
MODULE_LICENSE("GPL");