/*
 * Framework for buffer objects that can be shared across devices/subsystems.
 *
 * Copyright(C) 2011 Linaro Limited. All rights reserved.
 * Author: Sumit Semwal <sumit.semwal@ti.com>
 *
 * Many thanks to linaro-mm-sig list, and specially
 * Arnd Bergmann <arnd@arndb.de>, Rob Clark <rob@ti.com> and
 * Daniel Vetter <daniel@ffwll.ch> for their support in creation and
 * refining of this idea.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/dma-buf.h>
#include <linux/fence.h>
#include <linux/anon_inodes.h>
#include <linux/export.h>
#include <linux/debugfs.h>
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/poll.h>
#include <linux/reservation.h>

static inline int is_dma_buf_file(struct file *);

struct dma_buf_list {
	struct list_head head;
	struct mutex lock;
};

static struct dma_buf_list db_list;

static int dma_buf_release(struct inode *inode, struct file *file)
{
	struct dma_buf *dmabuf;

	if (!is_dma_buf_file(file))
		return -EINVAL;

	dmabuf = file->private_data;

	BUG_ON(dmabuf->vmapping_counter);

	/*
	 * Any fences that a dma-buf poll can wait on should be signaled
	 * before releasing dma-buf. This is the responsibility of each
	 * driver that uses the reservation objects.
	 *
	 * If you hit this BUG() it means someone dropped their ref to the
	 * dma-buf while still having pending operation to the buffer.
	 */
	BUG_ON(dmabuf->cb_shared.active || dmabuf->cb_excl.active);

	dmabuf->ops->release(dmabuf);

	mutex_lock(&db_list.lock);
	list_del(&dmabuf->list_node);
	mutex_unlock(&db_list.lock);

	if (dmabuf->resv == (struct reservation_object *)&dmabuf[1])
		reservation_object_fini(dmabuf->resv);

	module_put(dmabuf->owner);
	kfree(dmabuf);
	return 0;
}

static int dma_buf_mmap_internal(struct file *file, struct vm_area_struct *vma)
{
	struct dma_buf *dmabuf;

	if (!is_dma_buf_file(file))
		return -EINVAL;

	dmabuf = file->private_data;

	/* check for overflowing the buffer's size */
	if (vma->vm_pgoff + ((vma->vm_end - vma->vm_start) >> PAGE_SHIFT) >
	    dmabuf->size >> PAGE_SHIFT)
		return -EINVAL;

	return dmabuf->ops->mmap(dmabuf, vma);
}

static loff_t dma_buf_llseek(struct file *file, loff_t offset, int whence)
{
	struct dma_buf *dmabuf;
	loff_t base;

	if (!is_dma_buf_file(file))
		return -EBADF;

	dmabuf = file->private_data;

	/* only support discovering the end of the buffer,
	   but also allow SEEK_SET to maintain the idiomatic
	   SEEK_END(0), SEEK_CUR(0) pattern */
	if (whence == SEEK_END)
		base = dmabuf->size;
	else if (whence == SEEK_SET)
		base = 0;
	else
		return -EINVAL;

	if (offset != 0)
		return -EINVAL;

	return base + offset;
}

static void dma_buf_poll_cb(struct fence *fence, struct fence_cb *cb)
{
	struct dma_buf_poll_cb_t *dcb = (struct dma_buf_poll_cb_t *)cb;
	unsigned long flags;

	spin_lock_irqsave(&dcb->poll->lock, flags);
	wake_up_locked_poll(dcb->poll, dcb->active);
	dcb->active = 0;
	spin_unlock_irqrestore(&dcb->poll->lock, flags);
}

static unsigned int dma_buf_poll(struct file *file, poll_table *poll)
{
	struct dma_buf *dmabuf;
	struct reservation_object *resv;
	struct reservation_object_list *fobj;
	struct fence *fence_excl;
	unsigned long events;
	unsigned shared_count, seq;

	dmabuf = file->private_data;
	if (!dmabuf || !dmabuf->resv)
		return POLLERR;

	resv = dmabuf->resv;

	poll_wait(file, &dmabuf->poll, poll);

	events = poll_requested_events(poll) & (POLLIN | POLLOUT);
	if (!events)
		return 0;

retry:
	seq = read_seqcount_begin(&resv->seq);
	rcu_read_lock();

	fobj = rcu_dereference(resv->fence);
	if (fobj)
		shared_count = fobj->shared_count;
	else
		shared_count = 0;
	fence_excl = rcu_dereference(resv->fence_excl);
	if (read_seqcount_retry(&resv->seq, seq)) {
		rcu_read_unlock();
		goto retry;
	}

	if (fence_excl && (!(events & POLLOUT) || shared_count == 0)) {
		struct dma_buf_poll_cb_t *dcb = &dmabuf->cb_excl;
		unsigned long pevents = POLLIN;

		if (shared_count == 0)
			pevents |= POLLOUT;

		spin_lock_irq(&dmabuf->poll.lock);
		if (dcb->active) {
			dcb->active |= pevents;
			events &= ~pevents;
		} else
			dcb->active = pevents;
		spin_unlock_irq(&dmabuf->poll.lock);

		if (events & pevents) {
			if (!fence_get_rcu(fence_excl)) {
				/* force a recheck */
				events &= ~pevents;
				dma_buf_poll_cb(NULL, &dcb->cb);
			} else if (!fence_add_callback(fence_excl, &dcb->cb,
						       dma_buf_poll_cb)) {
				events &= ~pevents;
				fence_put(fence_excl);
			} else {
				/*
				 * No callback queued, wake up any additional
				 * waiters.
				 */
				fence_put(fence_excl);
				dma_buf_poll_cb(NULL, &dcb->cb);
			}
		}
	}

	if ((events & POLLOUT) && shared_count > 0) {
		struct dma_buf_poll_cb_t *dcb = &dmabuf->cb_shared;
		int i;

		/* Only queue a new callback if no event has fired yet */
		spin_lock_irq(&dmabuf->poll.lock);
		if (dcb->active)
			events &= ~POLLOUT;
		else
			dcb->active = POLLOUT;
		spin_unlock_irq(&dmabuf->poll.lock);

		if (!(events & POLLOUT))
			goto out;

		for (i = 0; i < shared_count; ++i) {
			struct fence *fence = rcu_dereference(fobj->shared[i]);

			if (!fence_get_rcu(fence)) {
				/*
				 * fence refcount dropped to zero, this means
				 * that fobj has been freed
				 *
				 * call dma_buf_poll_cb and force a recheck!
				 */
				events &= ~POLLOUT;
				dma_buf_poll_cb(NULL, &dcb->cb);
				break;
			}
			if (!fence_add_callback(fence, &dcb->cb,
						dma_buf_poll_cb)) {
				fence_put(fence);
				events &= ~POLLOUT;
				break;
			}
			fence_put(fence);
		}

		/* No callback queued, wake up any additional waiters. */
		if (i == shared_count)
			dma_buf_poll_cb(NULL, &dcb->cb);
	}

out:
	rcu_read_unlock();
	return events;
}

static const struct file_operations dma_buf_fops = {
	.release	= dma_buf_release,
	.mmap		= dma_buf_mmap_internal,
	.llseek		= dma_buf_llseek,
	.poll		= dma_buf_poll,
};

/*
 * is_dma_buf_file - Check if struct file* is associated with dma_buf
 */
static inline int is_dma_buf_file(struct file *file)
{
	return file->f_op == &dma_buf_fops;
}

/**
 * dma_buf_export - Creates a new dma_buf, and associates an anon file
 * with this buffer, so it can be exported.
 * Also connect the allocator specific data and ops to the buffer.
 * Additionally, provide a name string for the exporter; useful in debugging.
 *
 * @exp_info:	[in]	holds all the export related information provided
 *			by the exporter. see struct dma_buf_export_info
 *			for further details.
 *
 * Returns, on success, a newly created dma_buf object, which wraps the
 * supplied private data and operations for dma_buf_ops. On missing ops or
 * failure to allocate the struct dma_buf, an ERR_PTR() encoding a negative
 * error code is returned.
 */
struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info)
{
	struct dma_buf *dmabuf;
	struct reservation_object *resv = exp_info->resv;
	struct file *file;
	size_t alloc_size = sizeof(struct dma_buf);

	if (!exp_info->resv)
		alloc_size += sizeof(struct reservation_object);
	else
		/* prevent &dma_buf[1] == dma_buf->resv */
		alloc_size += 1;

	if (WARN_ON(!exp_info->priv
			  || !exp_info->ops
			  || !exp_info->ops->map_dma_buf
			  || !exp_info->ops->unmap_dma_buf
			  || !exp_info->ops->release
			  || !exp_info->ops->kmap_atomic
			  || !exp_info->ops->kmap
			  || !exp_info->ops->mmap)) {
		return ERR_PTR(-EINVAL);
	}

	if (!try_module_get(exp_info->owner))
		return ERR_PTR(-ENOENT);

	dmabuf = kzalloc(alloc_size, GFP_KERNEL);
	if (!dmabuf) {
		module_put(exp_info->owner);
		return ERR_PTR(-ENOMEM);
	}

	dmabuf->priv = exp_info->priv;
	dmabuf->ops = exp_info->ops;
	dmabuf->size = exp_info->size;
	dmabuf->exp_name = exp_info->exp_name;
	dmabuf->owner = exp_info->owner;
	init_waitqueue_head(&dmabuf->poll);
	dmabuf->cb_excl.poll = dmabuf->cb_shared.poll = &dmabuf->poll;
	dmabuf->cb_excl.active = dmabuf->cb_shared.active = 0;

	if (!resv) {
		resv = (struct reservation_object *)&dmabuf[1];
		reservation_object_init(resv);
	}
	dmabuf->resv = resv;

	file = anon_inode_getfile("dmabuf", &dma_buf_fops, dmabuf,
					exp_info->flags);
	if (IS_ERR(file)) {
		kfree(dmabuf);
		return ERR_CAST(file);
	}

	file->f_mode |= FMODE_LSEEK;
	dmabuf->file = file;

	mutex_init(&dmabuf->lock);
	INIT_LIST_HEAD(&dmabuf->attachments);

	mutex_lock(&db_list.lock);
	list_add(&dmabuf->list_node, &db_list.head);
	mutex_unlock(&db_list.lock);

	return dmabuf;
}
EXPORT_SYMBOL_GPL(dma_buf_export);
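
/*
 * Example (illustrative sketch, not part of this file): a minimal exporter
 * might fill in a struct dma_buf_export_info and call dma_buf_export(). The
 * ops table "my_dmabuf_ops" and the object "my_obj" are hypothetical; the
 * sketch assumes the DEFINE_DMA_BUF_EXPORT_INFO() helper from
 * <linux/dma-buf.h>. The ops table must implement the mandatory callbacks
 * checked above (map_dma_buf, unmap_dma_buf, release, kmap_atomic, kmap and
 * mmap).
 *
 *	static struct dma_buf *my_export(struct my_obj *obj)
 *	{
 *		DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
 *
 *		exp_info.ops = &my_dmabuf_ops;
 *		exp_info.size = obj->size;
 *		exp_info.flags = O_CLOEXEC;
 *		exp_info.priv = obj;
 *
 *		return dma_buf_export(&exp_info);
 *	}
 *
 * The returned dma_buf can then be handed to userspace with dma_buf_fd().
 */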

/**
 * dma_buf_fd - returns a file descriptor for the given dma_buf
 * @dmabuf:	[in]	pointer to dma_buf for which fd is required.
 * @flags:	[in]	flags to give to fd
 *
 * On success, returns an associated 'fd'. Else, returns error.
 */
int dma_buf_fd(struct dma_buf *dmabuf, int flags)
{
	int fd;

	if (!dmabuf || !dmabuf->file)
		return -EINVAL;

	fd = get_unused_fd_flags(flags);
	if (fd < 0)
		return fd;

	fd_install(fd, dmabuf->file);

	return fd;
}
EXPORT_SYMBOL_GPL(dma_buf_fd);

/**
 * dma_buf_get - returns the dma_buf structure related to an fd
 * @fd:	[in]	fd associated with the dma_buf to be returned
 *
 * On success, returns the dma_buf structure associated with an fd; uses
 * file's refcounting done by fget to increase refcount. returns ERR_PTR
 * otherwise.
 */
struct dma_buf *dma_buf_get(int fd)
{
	struct file *file;

	file = fget(fd);

	if (!file)
		return ERR_PTR(-EBADF);

	if (!is_dma_buf_file(file)) {
		fput(file);
		return ERR_PTR(-EINVAL);
	}

	return file->private_data;
}
EXPORT_SYMBOL_GPL(dma_buf_get);

/**
 * dma_buf_put - decreases refcount of the buffer
 * @dmabuf:	[in]	buffer to reduce refcount of
 *
 * Uses file's refcounting done implicitly by fput()
 */
void dma_buf_put(struct dma_buf *dmabuf)
{
	if (WARN_ON(!dmabuf || !dmabuf->file))
		return;

	fput(dmabuf->file);
}
EXPORT_SYMBOL_GPL(dma_buf_put);
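
/*
 * Example (illustrative sketch, not part of this file): an importer that
 * receives a dma-buf fd from userspace brackets its use with dma_buf_get()
 * and dma_buf_put(), so the underlying file's refcount keeps the buffer
 * alive while it is in use. "my_import" is a hypothetical helper.
 *
 *	static int my_import(int fd)
 *	{
 *		struct dma_buf *dmabuf = dma_buf_get(fd);
 *
 *		if (IS_ERR(dmabuf))
 *			return PTR_ERR(dmabuf);
 *
 *		// ... attach, map and use the buffer ...
 *
 *		dma_buf_put(dmabuf);
 *		return 0;
 *	}
 */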

/**
 * dma_buf_attach - Add the device to dma_buf's attachments list; optionally,
 * calls attach() of dma_buf_ops to allow device-specific attach functionality
 * @dmabuf:	[in]	buffer to attach device to.
 * @dev:	[in]	device to be attached.
 *
 * Returns struct dma_buf_attachment * for this attachment; returns ERR_PTR on
 * error.
 */
struct dma_buf_attachment *dma_buf_attach(struct dma_buf *dmabuf,
					  struct device *dev)
{
	struct dma_buf_attachment *attach;
	int ret;

	if (WARN_ON(!dmabuf || !dev))
		return ERR_PTR(-EINVAL);

	attach = kzalloc(sizeof(struct dma_buf_attachment), GFP_KERNEL);
	if (attach == NULL)
		return ERR_PTR(-ENOMEM);

	attach->dev = dev;
	attach->dmabuf = dmabuf;

	mutex_lock(&dmabuf->lock);

	if (dmabuf->ops->attach) {
		ret = dmabuf->ops->attach(dmabuf, dev, attach);
		if (ret)
			goto err_attach;
	}
	list_add(&attach->node, &dmabuf->attachments);

	mutex_unlock(&dmabuf->lock);
	return attach;

err_attach:
	kfree(attach);
	mutex_unlock(&dmabuf->lock);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(dma_buf_attach);

/**
 * dma_buf_detach - Remove the given attachment from dmabuf's attachments list;
 * optionally calls detach() of dma_buf_ops for device-specific detach
 * @dmabuf:	[in]	buffer to detach from.
 * @attach:	[in]	attachment to be detached; is free'd after this call.
 *
 */
void dma_buf_detach(struct dma_buf *dmabuf, struct dma_buf_attachment *attach)
{
	if (WARN_ON(!dmabuf || !attach))
		return;

	mutex_lock(&dmabuf->lock);
	list_del(&attach->node);
	if (dmabuf->ops->detach)
		dmabuf->ops->detach(dmabuf, attach);

	mutex_unlock(&dmabuf->lock);
	kfree(attach);
}
EXPORT_SYMBOL_GPL(dma_buf_detach);

/**
 * dma_buf_map_attachment - Returns the scatterlist table of the attachment;
 * mapped into _device_ address space. Is a wrapper for map_dma_buf() of the
 * dma_buf_ops.
 * @attach:	[in]	attachment whose scatterlist is to be returned
 * @direction:	[in]	direction of DMA transfer
 *
 * Returns sg_table containing the scatterlist to be returned; returns ERR_PTR
 * on error.
 */
struct sg_table *dma_buf_map_attachment(struct dma_buf_attachment *attach,
					enum dma_data_direction direction)
{
	struct sg_table *sg_table = ERR_PTR(-EINVAL);

	might_sleep();

	if (WARN_ON(!attach || !attach->dmabuf))
		return ERR_PTR(-EINVAL);

	sg_table = attach->dmabuf->ops->map_dma_buf(attach, direction);
	if (!sg_table)
		sg_table = ERR_PTR(-ENOMEM);

	return sg_table;
}
EXPORT_SYMBOL_GPL(dma_buf_map_attachment);

/**
 * dma_buf_unmap_attachment - unmaps and decreases usecount of the buffer;
 * might deallocate the associated scatterlist. Is a wrapper for
 * unmap_dma_buf() of dma_buf_ops.
 * @attach:	[in]	attachment to unmap buffer from
 * @sg_table:	[in]	scatterlist info of the buffer to unmap
 * @direction:	[in]	direction of DMA transfer
 *
 */
void dma_buf_unmap_attachment(struct dma_buf_attachment *attach,
				struct sg_table *sg_table,
				enum dma_data_direction direction)
{
	might_sleep();

	if (WARN_ON(!attach || !attach->dmabuf || !sg_table))
		return;

	attach->dmabuf->ops->unmap_dma_buf(attach, sg_table,
						direction);
}
EXPORT_SYMBOL_GPL(dma_buf_unmap_attachment);
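
/*
 * Example (illustrative sketch, not part of this file): the usual importer
 * sequence is attach -> map -> DMA -> unmap -> detach. "dev" is the
 * importing device and do_dma() stands in for whatever work the driver
 * performs on the returned sg_table; both are hypothetical.
 *
 *	static int my_use_buffer(struct dma_buf *dmabuf, struct device *dev)
 *	{
 *		struct dma_buf_attachment *attach;
 *		struct sg_table *sgt;
 *		int ret = 0;
 *
 *		attach = dma_buf_attach(dmabuf, dev);
 *		if (IS_ERR(attach))
 *			return PTR_ERR(attach);
 *
 *		sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
 *		if (IS_ERR(sgt)) {
 *			ret = PTR_ERR(sgt);
 *			goto out_detach;
 *		}
 *
 *		do_dma(dev, sgt);
 *
 *		dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
 *	out_detach:
 *		dma_buf_detach(dmabuf, attach);
 *		return ret;
 *	}
 */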


/**
 * dma_buf_begin_cpu_access - Must be called before accessing a dma_buf from the
 * cpu in the kernel context. Calls begin_cpu_access to allow exporter-specific
 * preparations. Coherency is only guaranteed for the specified access
 * direction.
 * @dmabuf:	[in]	buffer to prepare cpu access for.
 * @direction:	[in]	direction of CPU access (read and/or write).
 *
 * Can return negative error values, returns 0 on success.
 */
int dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
			     enum dma_data_direction direction)
{
	int ret = 0;

	if (WARN_ON(!dmabuf))
		return -EINVAL;

	if (dmabuf->ops->begin_cpu_access)
		ret = dmabuf->ops->begin_cpu_access(dmabuf, direction);

	return ret;
}
EXPORT_SYMBOL_GPL(dma_buf_begin_cpu_access);

/**
 * dma_buf_end_cpu_access - Must be called after accessing a dma_buf from the
 * cpu in the kernel context. Calls end_cpu_access to allow exporter-specific
 * actions. Coherency is only guaranteed for the specified access direction.
 * @dmabuf:	[in]	buffer to complete cpu access for.
 * @direction:	[in]	direction of CPU access (read and/or write).
 *
 * This call must always succeed.
 */
void dma_buf_end_cpu_access(struct dma_buf *dmabuf,
			    enum dma_data_direction direction)
{
	WARN_ON(!dmabuf);

	if (dmabuf->ops->end_cpu_access)
		dmabuf->ops->end_cpu_access(dmabuf, direction);
}
EXPORT_SYMBOL_GPL(dma_buf_end_cpu_access);

/**
 * dma_buf_kmap_atomic - Map a page of the buffer object into kernel address
 * space. The same restrictions as for kmap_atomic and friends apply.
 * @dmabuf:	[in]	buffer to map page from.
 * @page_num:	[in]	page in PAGE_SIZE units to map.
 *
 * This call must always succeed, any necessary preparations that might fail
 * need to be done in begin_cpu_access.
 */
void *dma_buf_kmap_atomic(struct dma_buf *dmabuf, unsigned long page_num)
{
	WARN_ON(!dmabuf);

	return dmabuf->ops->kmap_atomic(dmabuf, page_num);
}
EXPORT_SYMBOL_GPL(dma_buf_kmap_atomic);

/**
 * dma_buf_kunmap_atomic - Unmap a page obtained by dma_buf_kmap_atomic.
 * @dmabuf:	[in]	buffer to unmap page from.
 * @page_num:	[in]	page in PAGE_SIZE units to unmap.
 * @vaddr:	[in]	kernel space pointer obtained from dma_buf_kmap_atomic.
 *
 * This call must always succeed.
 */
void dma_buf_kunmap_atomic(struct dma_buf *dmabuf, unsigned long page_num,
			   void *vaddr)
{
	WARN_ON(!dmabuf);

	if (dmabuf->ops->kunmap_atomic)
		dmabuf->ops->kunmap_atomic(dmabuf, page_num, vaddr);
}
EXPORT_SYMBOL_GPL(dma_buf_kunmap_atomic);

/**
 * dma_buf_kmap - Map a page of the buffer object into kernel address space. The
 * same restrictions as for kmap and friends apply.
 * @dmabuf:	[in]	buffer to map page from.
 * @page_num:	[in]	page in PAGE_SIZE units to map.
 *
 * This call must always succeed, any necessary preparations that might fail
 * need to be done in begin_cpu_access.
 */
void *dma_buf_kmap(struct dma_buf *dmabuf, unsigned long page_num)
{
	WARN_ON(!dmabuf);

	return dmabuf->ops->kmap(dmabuf, page_num);
}
EXPORT_SYMBOL_GPL(dma_buf_kmap);

/**
 * dma_buf_kunmap - Unmap a page obtained by dma_buf_kmap.
 * @dmabuf:	[in]	buffer to unmap page from.
 * @page_num:	[in]	page in PAGE_SIZE units to unmap.
 * @vaddr:	[in]	kernel space pointer obtained from dma_buf_kmap.
 *
 * This call must always succeed.
 */
void dma_buf_kunmap(struct dma_buf *dmabuf, unsigned long page_num,
		    void *vaddr)
{
	WARN_ON(!dmabuf);

	if (dmabuf->ops->kunmap)
		dmabuf->ops->kunmap(dmabuf, page_num, vaddr);
}
EXPORT_SYMBOL_GPL(dma_buf_kunmap);
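
/*
 * Example (illustrative sketch, not part of this file): kernel CPU access to
 * a page of the buffer is bracketed by dma_buf_begin_cpu_access() and
 * dma_buf_end_cpu_access(), with the page mapped via dma_buf_kmap() and
 * dma_buf_kunmap(). process_page() is a hypothetical helper.
 *
 *	static int my_cpu_read(struct dma_buf *dmabuf, unsigned long page_num)
 *	{
 *		void *vaddr;
 *		int ret;
 *
 *		ret = dma_buf_begin_cpu_access(dmabuf, DMA_FROM_DEVICE);
 *		if (ret)
 *			return ret;
 *
 *		vaddr = dma_buf_kmap(dmabuf, page_num);
 *		process_page(vaddr);
 *		dma_buf_kunmap(dmabuf, page_num, vaddr);
 *
 *		dma_buf_end_cpu_access(dmabuf, DMA_FROM_DEVICE);
 *		return 0;
 *	}
 */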


/**
 * dma_buf_mmap - Set up a userspace mmap with the given vma
 * @dmabuf:	[in]	buffer that should back the vma
 * @vma:	[in]	vma for the mmap
 * @pgoff:	[in]	offset in pages where this mmap should start within the
 *			dma-buf buffer.
 *
 * This function adjusts the passed in vma so that it points at the file of the
 * dma_buf operation. It also adjusts the starting pgoff and does bounds
 * checking on the size of the vma. Then it calls the exporter's mmap function
 * to set up the mapping.
 *
 * Can return negative error values, returns 0 on success.
 */
int dma_buf_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma,
		 unsigned long pgoff)
{
	struct file *oldfile;
	int ret;

	if (WARN_ON(!dmabuf || !vma))
		return -EINVAL;

	/* check for offset overflow */
	if (pgoff + ((vma->vm_end - vma->vm_start) >> PAGE_SHIFT) < pgoff)
		return -EOVERFLOW;

	/* check for overflowing the buffer's size */
	if (pgoff + ((vma->vm_end - vma->vm_start) >> PAGE_SHIFT) >
	    dmabuf->size >> PAGE_SHIFT)
		return -EINVAL;

	/* readjust the vma */
	get_file(dmabuf->file);
	oldfile = vma->vm_file;
	vma->vm_file = dmabuf->file;
	vma->vm_pgoff = pgoff;

	ret = dmabuf->ops->mmap(dmabuf, vma);
	if (ret) {
		/* restore old parameters on failure */
		vma->vm_file = oldfile;
		fput(dmabuf->file);
	} else {
		if (oldfile)
			fput(oldfile);
	}
	return ret;
}
EXPORT_SYMBOL_GPL(dma_buf_mmap);
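
/*
 * Example (illustrative sketch, not part of this file): a driver that wants
 * to expose a dma-buf through its own mmap handler can simply forward the
 * vma here. "my_drv_mmap" and "struct my_obj" are hypothetical.
 *
 *	static int my_drv_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		struct my_obj *obj = file->private_data;
 *
 *		return dma_buf_mmap(obj->dmabuf, vma, 0);
 *	}
 */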

/**
 * dma_buf_vmap - Create virtual mapping for the buffer object into kernel
 * address space. Same restrictions as for vmap and friends apply.
 * @dmabuf:	[in]	buffer to vmap
 *
 * This call may fail due to lack of virtual mapping address space.
 * These calls are optional in drivers. The intended use for them
 * is for mapping objects linearly into kernel space for high-use objects.
 * Please attempt to use kmap/kunmap before thinking about these interfaces.
 *
 * Returns NULL on error.
 */
void *dma_buf_vmap(struct dma_buf *dmabuf)
{
	void *ptr;

	if (WARN_ON(!dmabuf))
		return NULL;

	if (!dmabuf->ops->vmap)
		return NULL;

	mutex_lock(&dmabuf->lock);
	if (dmabuf->vmapping_counter) {
		dmabuf->vmapping_counter++;
		BUG_ON(!dmabuf->vmap_ptr);
		ptr = dmabuf->vmap_ptr;
		goto out_unlock;
	}

	BUG_ON(dmabuf->vmap_ptr);

	ptr = dmabuf->ops->vmap(dmabuf);
	if (WARN_ON_ONCE(IS_ERR(ptr)))
		ptr = NULL;
	if (!ptr)
		goto out_unlock;

	dmabuf->vmap_ptr = ptr;
	dmabuf->vmapping_counter = 1;

out_unlock:
	mutex_unlock(&dmabuf->lock);
	return ptr;
}
EXPORT_SYMBOL_GPL(dma_buf_vmap);

/**
 * dma_buf_vunmap - Unmap a vmap obtained by dma_buf_vmap.
 * @dmabuf:	[in]	buffer to vunmap
 * @vaddr:	[in]	vmap to vunmap
 */
void dma_buf_vunmap(struct dma_buf *dmabuf, void *vaddr)
{
	if (WARN_ON(!dmabuf))
		return;

	BUG_ON(!dmabuf->vmap_ptr);
	BUG_ON(dmabuf->vmapping_counter == 0);
	BUG_ON(dmabuf->vmap_ptr != vaddr);

	mutex_lock(&dmabuf->lock);
	if (--dmabuf->vmapping_counter == 0) {
		if (dmabuf->ops->vunmap)
			dmabuf->ops->vunmap(dmabuf, vaddr);
		dmabuf->vmap_ptr = NULL;
	}
	mutex_unlock(&dmabuf->lock);
}
EXPORT_SYMBOL_GPL(dma_buf_vunmap);
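
/*
 * Example (illustrative sketch, not part of this file): code that needs a
 * contiguous kernel mapping of the whole buffer pairs dma_buf_vmap() with
 * dma_buf_vunmap(). consume() is a hypothetical helper.
 *
 *	static int my_vmap_use(struct dma_buf *dmabuf)
 *	{
 *		void *vaddr = dma_buf_vmap(dmabuf);
 *
 *		if (!vaddr)
 *			return -ENOMEM;
 *
 *		consume(vaddr, dmabuf->size);
 *		dma_buf_vunmap(dmabuf, vaddr);
 *		return 0;
 *	}
 */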

#ifdef CONFIG_DEBUG_FS
static int dma_buf_describe(struct seq_file *s)
{
	int ret;
	struct dma_buf *buf_obj;
	struct dma_buf_attachment *attach_obj;
	int count = 0, attach_count;
	size_t size = 0;

	ret = mutex_lock_interruptible(&db_list.lock);

	if (ret)
		return ret;

	seq_puts(s, "\nDma-buf Objects:\n");
	seq_puts(s, "size\tflags\tmode\tcount\texp_name\n");

	list_for_each_entry(buf_obj, &db_list.head, list_node) {
		ret = mutex_lock_interruptible(&buf_obj->lock);

		if (ret) {
			seq_puts(s,
				 "\tERROR locking buffer object: skipping\n");
			continue;
		}

		seq_printf(s, "%08zu\t%08x\t%08x\t%08ld\t%s\n",
				buf_obj->size,
				buf_obj->file->f_flags, buf_obj->file->f_mode,
				file_count(buf_obj->file),
				buf_obj->exp_name);

		seq_puts(s, "\tAttached Devices:\n");
		attach_count = 0;

		list_for_each_entry(attach_obj, &buf_obj->attachments, node) {
			seq_puts(s, "\t");

			seq_printf(s, "%s\n", dev_name(attach_obj->dev));
			attach_count++;
		}

		seq_printf(s, "Total %d devices attached\n\n",
				attach_count);

		count++;
		size += buf_obj->size;
		mutex_unlock(&buf_obj->lock);
	}

	seq_printf(s, "\nTotal %d objects, %zu bytes\n", count, size);

	mutex_unlock(&db_list.lock);
	return 0;
}

static int dma_buf_show(struct seq_file *s, void *unused)
{
	int (*func)(struct seq_file *) = s->private;

	return func(s);
}

static int dma_buf_debug_open(struct inode *inode, struct file *file)
{
	return single_open(file, dma_buf_show, inode->i_private);
}

static const struct file_operations dma_buf_debug_fops = {
	.open		= dma_buf_debug_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static struct dentry *dma_buf_debugfs_dir;

static int dma_buf_init_debugfs(void)
{
	int err = 0;

	dma_buf_debugfs_dir = debugfs_create_dir("dma_buf", NULL);

	if (IS_ERR(dma_buf_debugfs_dir)) {
		err = PTR_ERR(dma_buf_debugfs_dir);
		dma_buf_debugfs_dir = NULL;
		return err;
	}

	err = dma_buf_debugfs_create_file("bufinfo", dma_buf_describe);

	if (err)
		pr_debug("dma_buf: debugfs: failed to create node bufinfo\n");

	return err;
}

static void dma_buf_uninit_debugfs(void)
{
	if (dma_buf_debugfs_dir)
		debugfs_remove_recursive(dma_buf_debugfs_dir);
}

int dma_buf_debugfs_create_file(const char *name,
				int (*write)(struct seq_file *))
{
	struct dentry *d;

	d = debugfs_create_file(name, S_IRUGO, dma_buf_debugfs_dir,
			write, &dma_buf_debug_fops);

	return PTR_ERR_OR_ZERO(d);
}
#else
static inline int dma_buf_init_debugfs(void)
{
	return 0;
}
static inline void dma_buf_uninit_debugfs(void)
{
}
#endif

static int __init dma_buf_init(void)
{
	mutex_init(&db_list.lock);
	INIT_LIST_HEAD(&db_list.head);
	dma_buf_init_debugfs();
	return 0;
}
subsys_initcall(dma_buf_init);

static void __exit dma_buf_deinit(void)
{
	dma_buf_uninit_debugfs();
}
__exitcall(dma_buf_deinit);