/*
 * Framework for buffer objects that can be shared across devices/subsystems.
 *
 * Copyright(C) 2011 Linaro Limited. All rights reserved.
 * Author: Sumit Semwal <sumit.semwal@ti.com>
 *
 * Many thanks to linaro-mm-sig list, and especially
 * Arnd Bergmann <arnd@arndb.de>, Rob Clark <rob@ti.com> and
 * Daniel Vetter <daniel@ffwll.ch> for their support in creation and
 * refining of this idea.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program. If not, see <http://www.gnu.org/licenses/>.
 */
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/dma-buf.h>
#include <linux/dma-fence.h>
#include <linux/anon_inodes.h>
#include <linux/export.h>
#include <linux/debugfs.h>
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/poll.h>
#include <linux/reservation.h>
#include <linux/mm.h>

#include <uapi/linux/dma-buf.h>
static inline int is_dma_buf_file(struct file *);
struct dma_buf_list {
	struct list_head head;
	struct mutex lock;
};

static struct dma_buf_list db_list;
static int dma_buf_release(struct inode *inode, struct file *file)
{
	struct dma_buf *dmabuf;

	if (!is_dma_buf_file(file))
		return -EINVAL;

	dmabuf = file->private_data;

	BUG_ON(dmabuf->vmapping_counter);

	/*
	 * Any fences that a dma-buf poll can wait on should be signaled
	 * before releasing dma-buf. This is the responsibility of each
	 * driver that uses the reservation objects.
	 *
	 * If you hit this BUG() it means someone dropped their ref to the
	 * dma-buf while still having pending operations on the buffer.
	 */
	BUG_ON(dmabuf->cb_shared.active || dmabuf->cb_excl.active);

	dmabuf->ops->release(dmabuf);

	mutex_lock(&db_list.lock);
	list_del(&dmabuf->list_node);
	mutex_unlock(&db_list.lock);

	if (dmabuf->resv == (struct reservation_object *)&dmabuf[1])
		reservation_object_fini(dmabuf->resv);

	module_put(dmabuf->owner);
	kfree(dmabuf);
	return 0;
}
static int dma_buf_mmap_internal(struct file *file, struct vm_area_struct *vma)
{
	struct dma_buf *dmabuf;

	if (!is_dma_buf_file(file))
		return -EINVAL;

	dmabuf = file->private_data;

	/* check for overflowing the buffer's size */
	if (vma->vm_pgoff + vma_pages(vma) >
	    dmabuf->size >> PAGE_SHIFT)
		return -EINVAL;

	return dmabuf->ops->mmap(dmabuf, vma);
}
static loff_t dma_buf_llseek(struct file *file, loff_t offset, int whence)
{
	struct dma_buf *dmabuf;
	loff_t base;

	if (!is_dma_buf_file(file))
		return -EBADF;

	dmabuf = file->private_data;

	/* only support discovering the end of the buffer,
	   but also allow SEEK_SET to maintain the idiomatic
	   SEEK_END(0), SEEK_CUR(0) pattern */
	if (whence == SEEK_END)
		base = dmabuf->size;
	else if (whence == SEEK_SET)
		base = 0;
	else
		return -EINVAL;

	if (offset != 0)
		return -EINVAL;

	return base + offset;
}
/**
 * DOC: fence polling
 *
 * To support cross-device and cross-driver synchronization of buffer access
 * implicit fences (represented internally in the kernel with &struct dma_fence)
 * can be attached to a &dma_buf. The glue for that and a few related things are
 * provided in the &reservation_object structure.
 *
 * Userspace can query the state of these implicitly tracked fences using poll()
 * and related system calls:
 *
 * - Checking for POLLIN, i.e. read access, can be used to query the state of
 *   the most recent write or exclusive fence.
 *
 * - Checking for POLLOUT, i.e. write access, can be used to query the state of
 *   all attached fences, shared and exclusive ones.
 *
 * Note that this only signals the completion of the respective fences, i.e. the
 * DMA transfers are complete. Cache flushing and any other necessary
 * preparations before CPU access can begin still need to happen. A minimal
 * userspace polling example follows this comment.
 */
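/*
 * A minimal userspace sketch of the polling described above (illustrative
 * only, not part of this file; the dmabuf_fd variable is an assumption):
 *
 *	#include <poll.h>
 *
 *	struct pollfd pfd = {
 *		.fd = dmabuf_fd,	// obtained via dma_buf_fd() and IPC
 *		.events = POLLOUT,	// wait for all fences, see above
 *	};
 *
 *	// Blocks until the shared and exclusive fences have signaled.
 *	// Cache flushing for CPU access must still be done separately.
 *	if (poll(&pfd, 1, -1) < 0)
 *		perror("poll");
 */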
static void dma_buf_poll_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
{
	struct dma_buf_poll_cb_t *dcb = (struct dma_buf_poll_cb_t *)cb;
	unsigned long flags;

	spin_lock_irqsave(&dcb->poll->lock, flags);
	wake_up_locked_poll(dcb->poll, dcb->active);
	dcb->active = 0;
	spin_unlock_irqrestore(&dcb->poll->lock, flags);
}
static unsigned int dma_buf_poll(struct file *file, poll_table *poll)
{
	struct dma_buf *dmabuf;
	struct reservation_object *resv;
	struct reservation_object_list *fobj;
	struct dma_fence *fence_excl;
	unsigned long events;
	unsigned shared_count, seq;

	dmabuf = file->private_data;
	if (!dmabuf || !dmabuf->resv)
		return POLLERR;

	resv = dmabuf->resv;

	poll_wait(file, &dmabuf->poll, poll);

	events = poll_requested_events(poll) & (POLLIN | POLLOUT);
	if (!events)
		return 0;

retry:
	seq = read_seqcount_begin(&resv->seq);
	rcu_read_lock();

	fobj = rcu_dereference(resv->fence);
	if (fobj)
		shared_count = fobj->shared_count;
	else
		shared_count = 0;
	fence_excl = rcu_dereference(resv->fence_excl);
	if (read_seqcount_retry(&resv->seq, seq)) {
		rcu_read_unlock();
		goto retry;
	}

	if (fence_excl && (!(events & POLLOUT) || shared_count == 0)) {
		struct dma_buf_poll_cb_t *dcb = &dmabuf->cb_excl;
		unsigned long pevents = POLLIN;

		if (shared_count == 0)
			pevents |= POLLOUT;

		spin_lock_irq(&dmabuf->poll.lock);
		if (dcb->active) {
			dcb->active |= pevents;
			events &= ~pevents;
		} else
			dcb->active = pevents;
		spin_unlock_irq(&dmabuf->poll.lock);

		if (events & pevents) {
			if (!dma_fence_get_rcu(fence_excl)) {
				/* force a recheck */
				events &= ~pevents;
				dma_buf_poll_cb(NULL, &dcb->cb);
			} else if (!dma_fence_add_callback(fence_excl, &dcb->cb,
							   dma_buf_poll_cb)) {
				events &= ~pevents;
				dma_fence_put(fence_excl);
			} else {
				/*
				 * No callback queued, wake up any additional
				 * waiters.
				 */
				dma_fence_put(fence_excl);
				dma_buf_poll_cb(NULL, &dcb->cb);
			}
		}
	}

	if ((events & POLLOUT) && shared_count > 0) {
		struct dma_buf_poll_cb_t *dcb = &dmabuf->cb_shared;
		int i;

		/* Only queue a new callback if no event has fired yet */
		spin_lock_irq(&dmabuf->poll.lock);
		if (dcb->active)
			events &= ~POLLOUT;
		else
			dcb->active = POLLOUT;
		spin_unlock_irq(&dmabuf->poll.lock);

		if (!(events & POLLOUT))
			goto out;

		for (i = 0; i < shared_count; ++i) {
			struct dma_fence *fence = rcu_dereference(fobj->shared[i]);

			if (!dma_fence_get_rcu(fence)) {
				/*
				 * fence refcount dropped to zero, this means
				 * that fobj has been freed
				 *
				 * call dma_buf_poll_cb and force a recheck!
				 */
				events &= ~POLLOUT;
				dma_buf_poll_cb(NULL, &dcb->cb);
				break;
			}
			if (!dma_fence_add_callback(fence, &dcb->cb,
						    dma_buf_poll_cb)) {
				dma_fence_put(fence);
				events &= ~POLLOUT;
				break;
			}
			dma_fence_put(fence);
		}

		/* No callback queued, wake up any additional waiters. */
		if (i == shared_count)
			dma_buf_poll_cb(NULL, &dcb->cb);
	}

out:
	rcu_read_unlock();
	return events;
}
static long dma_buf_ioctl(struct file *file,
			  unsigned int cmd, unsigned long arg)
{
	struct dma_buf *dmabuf;
	struct dma_buf_sync sync;
	enum dma_data_direction direction;
	int ret;

	dmabuf = file->private_data;

	switch (cmd) {
	case DMA_BUF_IOCTL_SYNC:
		if (copy_from_user(&sync, (void __user *) arg, sizeof(sync)))
			return -EFAULT;

		if (sync.flags & ~DMA_BUF_SYNC_VALID_FLAGS_MASK)
			return -EINVAL;

		switch (sync.flags & DMA_BUF_SYNC_RW) {
		case DMA_BUF_SYNC_READ:
			direction = DMA_FROM_DEVICE;
			break;
		case DMA_BUF_SYNC_WRITE:
			direction = DMA_TO_DEVICE;
			break;
		case DMA_BUF_SYNC_RW:
			direction = DMA_BIDIRECTIONAL;
			break;
		default:
			return -EINVAL;
		}

		if (sync.flags & DMA_BUF_SYNC_END)
			ret = dma_buf_end_cpu_access(dmabuf, direction);
		else
			ret = dma_buf_begin_cpu_access(dmabuf, direction);

		return ret;
	default:
		return -ENOTTY;
	}
}
static const struct file_operations dma_buf_fops = {
	.release	= dma_buf_release,
	.mmap		= dma_buf_mmap_internal,
	.llseek		= dma_buf_llseek,
	.poll		= dma_buf_poll,
	.unlocked_ioctl	= dma_buf_ioctl,
};
/**
 * is_dma_buf_file - Check if struct file* is associated with dma_buf
 */
static inline int is_dma_buf_file(struct file *file)
{
	return file->f_op == &dma_buf_fops;
}
/**
 * DOC: dma buf device access
 *
 * For device DMA access to a shared DMA buffer the usual sequence of operations
 * is fairly simple:
 *
 * 1. The exporter defines its exporter instance using
 *    DEFINE_DMA_BUF_EXPORT_INFO() and calls dma_buf_export() to wrap a private
 *    buffer object into a &dma_buf. It then exports that &dma_buf to userspace
 *    as a file descriptor by calling dma_buf_fd().
 *
 * 2. Userspace passes these file descriptors to all drivers it wants this
 *    buffer to share with: First the file descriptor is converted to a
 *    &dma_buf using dma_buf_get(). Then the buffer is attached to the device
 *    using dma_buf_attach().
 *
 *    Up to this stage the exporter is still free to migrate or reallocate the
 *    backing storage.
 *
 * 3. Once the buffer is attached to all devices userspace can initiate DMA
 *    access to the shared buffer. In the kernel this is done by calling
 *    dma_buf_map_attachment() and dma_buf_unmap_attachment().
 *
 * 4. Once a driver is done with a shared buffer it needs to call
 *    dma_buf_detach() (after cleaning up any mappings) and then release the
 *    reference acquired with dma_buf_get() by calling dma_buf_put().
 *
 * For the detailed semantics exporters are expected to implement see
 * &dma_buf_ops. A minimal importer-side sketch of this sequence follows below.
 */
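/*
 * A minimal importer sketch of steps 2-4 above (illustrative only; the fd
 * and my_dev variables and the elided error handling are assumptions):
 *
 *	struct dma_buf *buf = dma_buf_get(fd);			// step 2
 *	struct dma_buf_attachment *attach;
 *	struct sg_table *sgt;
 *
 *	attach = dma_buf_attach(buf, my_dev);			// step 2
 *	sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);// step 3
 *
 *	// ... program the device with the addresses in sgt ...
 *
 *	dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
 *	dma_buf_detach(buf, attach);				// step 4
 *	dma_buf_put(buf);
 */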
/**
 * dma_buf_export - Creates a new dma_buf, and associates an anon file
 * with this buffer, so it can be exported.
 * Also connect the allocator specific data and ops to the buffer.
 * Additionally, provide a name string for the exporter; useful in debugging.
 * @exp_info:	[in]	holds all the export related information provided
 *			by the exporter. see &struct dma_buf_export_info
 *			for further details.
 *
 * Returns, on success, a newly created dma_buf object, which wraps the
 * supplied private data and operations for dma_buf_ops. On either missing
 * ops or an error in allocating struct dma_buf, returns a negative error
 * wrapped into a pointer.
 *
 * For most cases the easiest way to create @exp_info is through the
 * %DEFINE_DMA_BUF_EXPORT_INFO macro.
 */
struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info)
{
	struct dma_buf *dmabuf;
	struct reservation_object *resv = exp_info->resv;
	struct file *file;
	size_t alloc_size = sizeof(struct dma_buf);
	int ret;

	if (!exp_info->resv)
		alloc_size += sizeof(struct reservation_object);
	else
		/* prevent &dma_buf[1] == dma_buf->resv */
		alloc_size += 1;

	if (WARN_ON(!exp_info->priv
			  || !exp_info->ops
			  || !exp_info->ops->map_dma_buf
			  || !exp_info->ops->unmap_dma_buf
			  || !exp_info->ops->release
			  || !exp_info->ops->kmap_atomic
			  || !exp_info->ops->kmap
			  || !exp_info->ops->mmap)) {
		return ERR_PTR(-EINVAL);
	}

	if (!try_module_get(exp_info->owner))
		return ERR_PTR(-ENOENT);

	dmabuf = kzalloc(alloc_size, GFP_KERNEL);
	if (!dmabuf) {
		ret = -ENOMEM;
		goto err_module;
	}

	dmabuf->priv = exp_info->priv;
	dmabuf->ops = exp_info->ops;
	dmabuf->size = exp_info->size;
	dmabuf->exp_name = exp_info->exp_name;
	dmabuf->owner = exp_info->owner;
	init_waitqueue_head(&dmabuf->poll);
	dmabuf->cb_excl.poll = dmabuf->cb_shared.poll = &dmabuf->poll;
	dmabuf->cb_excl.active = dmabuf->cb_shared.active = 0;

	if (!resv) {
		resv = (struct reservation_object *)&dmabuf[1];
		reservation_object_init(resv);
	}
	dmabuf->resv = resv;

	file = anon_inode_getfile("dmabuf", &dma_buf_fops, dmabuf,
				  exp_info->flags);
	if (IS_ERR(file)) {
		ret = PTR_ERR(file);
		goto err_dmabuf;
	}

	file->f_mode |= FMODE_LSEEK;
	dmabuf->file = file;

	mutex_init(&dmabuf->lock);
	INIT_LIST_HEAD(&dmabuf->attachments);

	mutex_lock(&db_list.lock);
	list_add(&dmabuf->list_node, &db_list.head);
	mutex_unlock(&db_list.lock);

	return dmabuf;

err_dmabuf:
	kfree(dmabuf);
err_module:
	module_put(exp_info->owner);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(dma_buf_export);
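/*
 * Exporter-side sketch (illustrative only): my_buffer and my_dmabuf_ops are
 * assumptions standing in for a driver's private object and its &dma_buf_ops
 * implementing the mandatory callbacks checked above:
 *
 *	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
 *	struct dma_buf *dmabuf;
 *
 *	exp_info.ops = &my_dmabuf_ops;
 *	exp_info.size = my_buffer->size;
 *	exp_info.flags = O_CLOEXEC;
 *	exp_info.priv = my_buffer;
 *
 *	dmabuf = dma_buf_export(&exp_info);
 *	if (IS_ERR(dmabuf))
 *		return PTR_ERR(dmabuf);
 *
 *	fd = dma_buf_fd(dmabuf, O_CLOEXEC);	// hand fd to userspace
 */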
/**
 * dma_buf_fd - returns a file descriptor for the given dma_buf
 * @dmabuf:	[in]	pointer to dma_buf for which fd is required.
 * @flags:	[in]	flags to give to fd
 *
 * On success, returns an associated 'fd'. Else, returns error.
 */
int dma_buf_fd(struct dma_buf *dmabuf, int flags)
{
	int fd;

	if (!dmabuf || !dmabuf->file)
		return -EINVAL;

	fd = get_unused_fd_flags(flags);
	if (fd < 0)
		return fd;

	fd_install(fd, dmabuf->file);

	return fd;
}
EXPORT_SYMBOL_GPL(dma_buf_fd);
/**
 * dma_buf_get - returns the dma_buf structure related to an fd
 * @fd:	[in]	fd associated with the dma_buf to be returned
 *
 * On success, returns the dma_buf structure associated with an fd; uses
 * file's refcounting done by fget to increase refcount. Returns ERR_PTR
 * otherwise.
 */
struct dma_buf *dma_buf_get(int fd)
{
	struct file *file;

	file = fget(fd);

	if (!file)
		return ERR_PTR(-EBADF);

	if (!is_dma_buf_file(file)) {
		fput(file);
		return ERR_PTR(-EINVAL);
	}

	return file->private_data;
}
EXPORT_SYMBOL_GPL(dma_buf_get);
/**
 * dma_buf_put - decreases refcount of the buffer
 * @dmabuf:	[in]	buffer to reduce refcount of
 *
 * Uses file's refcounting done implicitly by fput().
 *
 * If, as a result of this call, the refcount becomes 0, the 'release' file
 * operation related to this fd is called. It calls &dma_buf_ops.release vfunc
 * in turn, and frees the memory allocated for dmabuf when exported.
 */
void dma_buf_put(struct dma_buf *dmabuf)
{
	if (WARN_ON(!dmabuf || !dmabuf->file))
		return;

	fput(dmabuf->file);
}
EXPORT_SYMBOL_GPL(dma_buf_put);
/**
 * dma_buf_attach - Add the device to dma_buf's attachments list; optionally,
 * calls attach() of dma_buf_ops to allow device-specific attach functionality
 * @dmabuf:	[in]	buffer to attach device to.
 * @dev:	[in]	device to be attached.
 *
 * Returns struct dma_buf_attachment pointer for this attachment. Attachments
 * must be cleaned up by calling dma_buf_detach().
 *
 * Returns:
 *
 * A pointer to newly created &dma_buf_attachment on success, or a negative
 * error code wrapped into a pointer on failure.
 *
 * Note that this can fail if the backing storage of @dmabuf is in a place not
 * accessible to @dev, and cannot be moved to a more suitable place. This is
 * indicated with the error code -EBUSY.
 */
struct dma_buf_attachment *dma_buf_attach(struct dma_buf *dmabuf,
					  struct device *dev)
{
	struct dma_buf_attachment *attach;
	int ret;

	if (WARN_ON(!dmabuf || !dev))
		return ERR_PTR(-EINVAL);

	attach = kzalloc(sizeof(struct dma_buf_attachment), GFP_KERNEL);
	if (!attach)
		return ERR_PTR(-ENOMEM);

	attach->dev = dev;
	attach->dmabuf = dmabuf;

	mutex_lock(&dmabuf->lock);

	if (dmabuf->ops->attach) {
		ret = dmabuf->ops->attach(dmabuf, dev, attach);
		if (ret)
			goto err_attach;
	}
	list_add(&attach->node, &dmabuf->attachments);

	mutex_unlock(&dmabuf->lock);
	return attach;

err_attach:
	kfree(attach);
	mutex_unlock(&dmabuf->lock);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(dma_buf_attach);
/**
 * dma_buf_detach - Remove the given attachment from dmabuf's attachments list;
 * optionally calls detach() of dma_buf_ops for device-specific detach
 * @dmabuf:	[in]	buffer to detach from.
 * @attach:	[in]	attachment to be detached; is freed after this call.
 *
 * Clean up a device attachment obtained by calling dma_buf_attach().
 */
void dma_buf_detach(struct dma_buf *dmabuf, struct dma_buf_attachment *attach)
{
	if (WARN_ON(!dmabuf || !attach))
		return;

	mutex_lock(&dmabuf->lock);
	list_del(&attach->node);
	if (dmabuf->ops->detach)
		dmabuf->ops->detach(dmabuf, attach);

	mutex_unlock(&dmabuf->lock);
	kfree(attach);
}
EXPORT_SYMBOL_GPL(dma_buf_detach);
/**
 * dma_buf_map_attachment - Returns the scatterlist table of the attachment;
 * mapped into _device_ address space. Is a wrapper for map_dma_buf() of the
 * dma_buf_ops.
 * @attach:	[in]	attachment whose scatterlist is to be returned
 * @direction:	[in]	direction of DMA transfer
 *
 * Returns sg_table containing the scatterlist to be returned; returns ERR_PTR
 * on error. May return -EINTR if it is interrupted by a signal.
 *
 * A mapping must be unmapped again using dma_buf_unmap_attachment(). Note that
 * the underlying backing storage is pinned for as long as a mapping exists,
 * therefore users/importers should not hold onto a mapping for undue amounts of
 * time.
 */
struct sg_table *dma_buf_map_attachment(struct dma_buf_attachment *attach,
					enum dma_data_direction direction)
{
	struct sg_table *sg_table = ERR_PTR(-EINVAL);

	might_sleep();

	if (WARN_ON(!attach || !attach->dmabuf))
		return ERR_PTR(-EINVAL);

	sg_table = attach->dmabuf->ops->map_dma_buf(attach, direction);
	if (!sg_table)
		sg_table = ERR_PTR(-ENOMEM);

	return sg_table;
}
EXPORT_SYMBOL_GPL(dma_buf_map_attachment);
/**
 * dma_buf_unmap_attachment - unmaps and decreases usecount of the buffer; might
 * deallocate the scatterlist associated. Is a wrapper for unmap_dma_buf() of
 * dma_buf_ops.
 * @attach:	[in]	attachment to unmap buffer from
 * @sg_table:	[in]	scatterlist info of the buffer to unmap
 * @direction:	[in]	direction of DMA transfer
 *
 * This unmaps a DMA mapping for @attach obtained by dma_buf_map_attachment().
 */
void dma_buf_unmap_attachment(struct dma_buf_attachment *attach,
			      struct sg_table *sg_table,
			      enum dma_data_direction direction)
{
	might_sleep();

	if (WARN_ON(!attach || !attach->dmabuf || !sg_table))
		return;

	attach->dmabuf->ops->unmap_dma_buf(attach, sg_table,
					   direction);
}
EXPORT_SYMBOL_GPL(dma_buf_unmap_attachment);
/**
 * DOC: cpu access
 *
 * There are multiple reasons for supporting CPU access to a dma buffer object:
 *
 * - Fallback operations in the kernel, for example when a device is connected
 *   over USB and the kernel needs to shuffle the data around first before
 *   sending it away. Cache coherency is handled by bracketing any transactions
 *   with calls to dma_buf_begin_cpu_access() and dma_buf_end_cpu_access().
 *
 *   To support dma_buf objects residing in highmem cpu access is page-based
 *   using an api similar to kmap. Accessing a dma_buf is done in aligned chunks
 *   of PAGE_SIZE size. Before accessing a chunk it needs to be mapped, which
 *   returns a pointer in kernel virtual address space. Afterwards the chunk
 *   needs to be unmapped again. There is no limit on how often a given chunk
 *   can be mapped and unmapped, i.e. the importer does not need to call
 *   begin_cpu_access again before mapping the same chunk again.
 *
 *   Interfaces::
 *      void \*dma_buf_kmap(struct dma_buf \*, unsigned long);
 *      void dma_buf_kunmap(struct dma_buf \*, unsigned long, void \*);
 *
 *   There are also atomic variants of these interfaces. Like for kmap they
 *   facilitate non-blocking fast-paths. Neither the importer nor the exporter
 *   (in the callback) is allowed to block when using these.
 *
 *   Interfaces::
 *      void \*dma_buf_kmap_atomic(struct dma_buf \*, unsigned long);
 *      void dma_buf_kunmap_atomic(struct dma_buf \*, unsigned long, void \*);
 *
 *   For importers all the restrictions of using kmap apply, like the limited
 *   supply of kmap_atomic slots. Hence an importer shall only hold onto at
 *   most 2 atomic dma_buf kmaps at the same time (in any given process
 *   context).
 *
 *   dma_buf kmap calls outside of the range specified in begin_cpu_access are
 *   undefined. If the range is not PAGE_SIZE aligned, kmap needs to succeed on
 *   the partial chunks at the beginning and end but may return stale or bogus
 *   data outside of the range (in these partial chunks).
 *
 *   Note that these calls need to always succeed. The exporter needs to
 *   complete any preparations that might fail in begin_cpu_access.
 *
 *   For some cases the overhead of kmap can be too high, so a vmap interface
 *   is introduced. This interface should be used very carefully, as vmalloc
 *   space is a limited resource on many architectures.
 *
 *   Interfaces::
 *      void \*dma_buf_vmap(struct dma_buf \*dmabuf)
 *      void dma_buf_vunmap(struct dma_buf \*dmabuf, void \*vaddr)
 *
 *   The vmap call can fail if there is no vmap support in the exporter, or if
 *   it runs out of vmalloc space. A fallback to kmap should be implemented.
 *   Note that the dma-buf layer keeps a reference count for all vmap access
 *   and calls down into the exporter's vmap function only when no vmapping
 *   exists, and only unmaps it once. Protection against concurrent vmap/vunmap
 *   calls is provided by taking the dma_buf->lock mutex.
 *
 * - For full compatibility on the importer side with existing userspace
 *   interfaces, which might already support mmap'ing buffers. This is needed in
 *   many processing pipelines (e.g. feeding a software rendered image into a
 *   hardware pipeline, thumbnail creation, snapshots, ...). Also, Android's ION
 *   framework already supported this and for DMA buffer file descriptors to
 *   replace ION buffers mmap support was needed.
 *
 *   There are no special interfaces, userspace simply calls mmap on the dma-buf
 *   fd. But like for CPU access there's a need to bracket the actual access,
 *   which is handled by the ioctl (DMA_BUF_IOCTL_SYNC). Note that
 *   DMA_BUF_IOCTL_SYNC can fail with -EAGAIN or -EINTR, in which case it must
 *   be restarted.
 *
 *   Some systems might need some sort of cache coherency management, e.g. when
 *   CPU and GPU domains are being accessed through dma-buf at the same time.
 *   To circumvent this problem there are begin/end coherency markers that
 *   forward directly to existing dma-buf device drivers' vfunc hooks. Userspace
 *   can make use of those markers through the DMA_BUF_IOCTL_SYNC ioctl. The
 *   sequence would be used like the following:
 *
 *     - mmap dma-buf fd
 *     - for each drawing/upload cycle in CPU 1. SYNC_START ioctl, 2. read/write
 *       to mmap area 3. SYNC_END ioctl. This can be repeated as often as you
 *       want (with the new data being consumed by say the GPU or the scanout
 *       device)
 *     - munmap once you don't need the buffer any more
 *
 *   For correctness and optimal performance, it is always required to use
 *   SYNC_START and SYNC_END before and after, respectively, when accessing the
 *   mapped address. Userspace cannot rely on coherent access, even when there
 *   are systems where it just works without calling these ioctls. A short
 *   userspace sketch of this sequence follows this comment block.
 *
 * - And as a CPU fallback in userspace processing pipelines.
 *
 *   Similar to the motivation for kernel cpu access it is again important that
 *   the userspace code of a given importing subsystem can use the same
 *   interfaces with an imported dma-buf buffer object as with a native buffer
 *   object. This is especially important for drm where the userspace part of
 *   contemporary OpenGL, X, and other drivers is huge, and reworking them to
 *   use a different way to mmap a buffer would be rather invasive.
 *
 *   The assumption in the current dma-buf interfaces is that redirecting the
 *   initial mmap is all that's needed. A survey of some of the existing
 *   subsystems shows that no driver seems to do any nefarious thing like
 *   syncing up with outstanding asynchronous processing on the device or
 *   allocating special resources at fault time. So hopefully this is good
 *   enough, since adding interfaces to intercept pagefaults and allow pte
 *   shootdowns would increase the complexity quite a bit.
 *
 *   Interface::
 *      int dma_buf_mmap(struct dma_buf \*, struct vm_area_struct \*,
 *		       unsigned long);
 *
 *   If the importing subsystem simply provides a special-purpose mmap call to
 *   set up a mapping in userspace, calling do_mmap with dma_buf->file will
 *   equally achieve that for a dma-buf object.
 */
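/*
 * A hypothetical userspace sketch of the mmap + SYNC sequence described
 * above (illustrative only; dmabuf_fd and len are assumptions):
 *
 *	#include <sys/ioctl.h>
 *	#include <sys/mman.h>
 *	#include <linux/dma-buf.h>
 *
 *	struct dma_buf_sync sync = { 0 };
 *	void *ptr = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED,
 *			 dmabuf_fd, 0);
 *
 *	sync.flags = DMA_BUF_SYNC_START | DMA_BUF_SYNC_RW;
 *	ioctl(dmabuf_fd, DMA_BUF_IOCTL_SYNC, &sync);	// restart on -EAGAIN/-EINTR
 *
 *	// ... read/write through ptr ...
 *
 *	sync.flags = DMA_BUF_SYNC_END | DMA_BUF_SYNC_RW;
 *	ioctl(dmabuf_fd, DMA_BUF_IOCTL_SYNC, &sync);
 */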
static int __dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
				      enum dma_data_direction direction)
{
	bool write = (direction == DMA_BIDIRECTIONAL ||
		      direction == DMA_TO_DEVICE);
	struct reservation_object *resv = dmabuf->resv;
	long ret;

	/* Wait on any implicit rendering fences */
	ret = reservation_object_wait_timeout_rcu(resv, write, true,
						  MAX_SCHEDULE_TIMEOUT);
	if (ret < 0)
		return ret;

	return 0;
}
/**
 * dma_buf_begin_cpu_access - Must be called before accessing a dma_buf from the
 * cpu in the kernel context. Calls begin_cpu_access to allow exporter-specific
 * preparations. Coherency is only guaranteed in the specified range for the
 * specified access direction.
 * @dmabuf:	[in]	buffer to prepare cpu access for.
 * @direction:	[in]	direction of the cpu access (read and/or write).
 *
 * After the cpu access is complete the caller should call
 * dma_buf_end_cpu_access(). Only when cpu access is bracketed by both calls is
 * it guaranteed to be coherent with other DMA access.
 *
 * Can return negative error values, returns 0 on success.
 */
int dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
			     enum dma_data_direction direction)
{
	int ret = 0;

	if (WARN_ON(!dmabuf))
		return -EINVAL;

	if (dmabuf->ops->begin_cpu_access)
		ret = dmabuf->ops->begin_cpu_access(dmabuf, direction);

	/* Ensure that all fences are waited upon - but we first allow
	 * the native handler the chance to do so more efficiently if it
	 * chooses. A double invocation here will be a reasonably cheap no-op.
	 */
	if (ret == 0)
		ret = __dma_buf_begin_cpu_access(dmabuf, direction);

	return ret;
}
EXPORT_SYMBOL_GPL(dma_buf_begin_cpu_access);
/**
 * dma_buf_end_cpu_access - Must be called after accessing a dma_buf from the
 * cpu in the kernel context. Calls end_cpu_access to allow exporter-specific
 * actions. Coherency is only guaranteed in the specified range for the
 * specified access direction.
 * @dmabuf:	[in]	buffer to complete cpu access for.
 * @direction:	[in]	direction of the cpu access (read and/or write).
 *
 * This terminates CPU access started with dma_buf_begin_cpu_access().
 *
 * Can return negative error values, returns 0 on success.
 */
int dma_buf_end_cpu_access(struct dma_buf *dmabuf,
			   enum dma_data_direction direction)
{
	int ret = 0;

	WARN_ON(!dmabuf);

	if (dmabuf->ops->end_cpu_access)
		ret = dmabuf->ops->end_cpu_access(dmabuf, direction);

	return ret;
}
EXPORT_SYMBOL_GPL(dma_buf_end_cpu_access);
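/*
 * A minimal kernel-side sketch of bracketed CPU access using the calls
 * above (illustrative only; the dmabuf pointer and the use of page 0 are
 * assumptions):
 *
 *	void *vaddr;
 *	int ret;
 *
 *	ret = dma_buf_begin_cpu_access(dmabuf, DMA_FROM_DEVICE);
 *	if (ret)
 *		return ret;
 *
 *	vaddr = dma_buf_kmap(dmabuf, 0);	// map the first PAGE_SIZE chunk
 *	// ... read from vaddr ...
 *	dma_buf_kunmap(dmabuf, 0, vaddr);
 *
 *	ret = dma_buf_end_cpu_access(dmabuf, DMA_FROM_DEVICE);
 */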
/**
 * dma_buf_kmap_atomic - Map a page of the buffer object into kernel address
 * space. The same restrictions as for kmap_atomic and friends apply.
 * @dmabuf:	[in]	buffer to map page from.
 * @page_num:	[in]	page in PAGE_SIZE units to map.
 *
 * This call must always succeed, any necessary preparations that might fail
 * need to be done in begin_cpu_access.
 */
void *dma_buf_kmap_atomic(struct dma_buf *dmabuf, unsigned long page_num)
{
	WARN_ON(!dmabuf);

	return dmabuf->ops->kmap_atomic(dmabuf, page_num);
}
EXPORT_SYMBOL_GPL(dma_buf_kmap_atomic);
/**
 * dma_buf_kunmap_atomic - Unmap a page obtained by dma_buf_kmap_atomic.
 * @dmabuf:	[in]	buffer to unmap page from.
 * @page_num:	[in]	page in PAGE_SIZE units to unmap.
 * @vaddr:	[in]	kernel space pointer obtained from dma_buf_kmap_atomic.
 *
 * This call must always succeed.
 */
void dma_buf_kunmap_atomic(struct dma_buf *dmabuf, unsigned long page_num,
			   void *vaddr)
{
	WARN_ON(!dmabuf);

	if (dmabuf->ops->kunmap_atomic)
		dmabuf->ops->kunmap_atomic(dmabuf, page_num, vaddr);
}
EXPORT_SYMBOL_GPL(dma_buf_kunmap_atomic);
/**
 * dma_buf_kmap - Map a page of the buffer object into kernel address space. The
 * same restrictions as for kmap and friends apply.
 * @dmabuf:	[in]	buffer to map page from.
 * @page_num:	[in]	page in PAGE_SIZE units to map.
 *
 * This call must always succeed, any necessary preparations that might fail
 * need to be done in begin_cpu_access.
 */
void *dma_buf_kmap(struct dma_buf *dmabuf, unsigned long page_num)
{
	WARN_ON(!dmabuf);

	return dmabuf->ops->kmap(dmabuf, page_num);
}
EXPORT_SYMBOL_GPL(dma_buf_kmap);
/**
 * dma_buf_kunmap - Unmap a page obtained by dma_buf_kmap.
 * @dmabuf:	[in]	buffer to unmap page from.
 * @page_num:	[in]	page in PAGE_SIZE units to unmap.
 * @vaddr:	[in]	kernel space pointer obtained from dma_buf_kmap.
 *
 * This call must always succeed.
 */
void dma_buf_kunmap(struct dma_buf *dmabuf, unsigned long page_num,
		    void *vaddr)
{
	WARN_ON(!dmabuf);

	if (dmabuf->ops->kunmap)
		dmabuf->ops->kunmap(dmabuf, page_num, vaddr);
}
EXPORT_SYMBOL_GPL(dma_buf_kunmap);
/**
 * dma_buf_mmap - Set up a userspace mmap with the given vma
 * @dmabuf:	[in]	buffer that should back the vma
 * @vma:	[in]	vma for the mmap
 * @pgoff:	[in]	offset in pages where this mmap should start within the
 *			dma-buf buffer.
 *
 * This function adjusts the passed in vma so that it points at the file of the
 * dma_buf operation. It also adjusts the starting pgoff and does bounds
 * checking on the size of the vma. Then it calls the exporter's mmap function
 * to set up the mapping.
 *
 * Can return negative error values, returns 0 on success.
 */
int dma_buf_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma,
		 unsigned long pgoff)
{
	struct file *oldfile;
	int ret;

	if (WARN_ON(!dmabuf || !vma))
		return -EINVAL;

	/* check for offset overflow */
	if (pgoff + vma_pages(vma) < pgoff)
		return -EOVERFLOW;

	/* check for overflowing the buffer's size */
	if (pgoff + vma_pages(vma) >
	    dmabuf->size >> PAGE_SHIFT)
		return -EINVAL;

	/* readjust the vma */
	get_file(dmabuf->file);
	oldfile = vma->vm_file;
	vma->vm_file = dmabuf->file;
	vma->vm_pgoff = pgoff;

	ret = dmabuf->ops->mmap(dmabuf, vma);
	if (ret) {
		/* restore old parameters on failure */
		vma->vm_file = oldfile;
		fput(dmabuf->file);
	} else {
		if (oldfile)
			fput(oldfile);
	}
	return ret;
}
EXPORT_SYMBOL_GPL(dma_buf_mmap);
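/*
 * Sketch of an importer redirecting its own mmap to the dma-buf, as
 * suggested in the CPU access DOC above (illustrative only; my_obj and its
 * dmabuf member are hypothetical):
 *
 *	static int my_drv_mmap(struct my_obj *obj, struct vm_area_struct *vma)
 *	{
 *		// forward to the exporter, mapping from the buffer's start
 *		return dma_buf_mmap(obj->dmabuf, vma, 0);
 *	}
 */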
/**
 * dma_buf_vmap - Create virtual mapping for the buffer object into kernel
 * address space. Same restrictions as for vmap and friends apply.
 * @dmabuf:	[in]	buffer to vmap
 *
 * This call may fail due to lack of virtual mapping address space.
 * These calls are optional in drivers. The intended use for them
 * is for mapping objects linearly in kernel space for high-use objects.
 * Please attempt to use kmap/kunmap before thinking about these interfaces.
 *
 * Returns NULL on error.
 */
void *dma_buf_vmap(struct dma_buf *dmabuf)
{
	void *ptr;

	if (WARN_ON(!dmabuf))
		return NULL;

	if (!dmabuf->ops->vmap)
		return NULL;

	mutex_lock(&dmabuf->lock);
	if (dmabuf->vmapping_counter) {
		dmabuf->vmapping_counter++;
		BUG_ON(!dmabuf->vmap_ptr);
		ptr = dmabuf->vmap_ptr;
		goto out_unlock;
	}

	BUG_ON(dmabuf->vmap_ptr);

	ptr = dmabuf->ops->vmap(dmabuf);
	if (WARN_ON_ONCE(IS_ERR(ptr)))
		ptr = NULL;
	if (!ptr)
		goto out_unlock;

	dmabuf->vmap_ptr = ptr;
	dmabuf->vmapping_counter = 1;

out_unlock:
	mutex_unlock(&dmabuf->lock);
	return ptr;
}
EXPORT_SYMBOL_GPL(dma_buf_vmap);
/**
 * dma_buf_vunmap - Unmap a vmap obtained by dma_buf_vmap.
 * @dmabuf:	[in]	buffer to vunmap
 * @vaddr:	[in]	vmap to vunmap
 */
void dma_buf_vunmap(struct dma_buf *dmabuf, void *vaddr)
{
	if (WARN_ON(!dmabuf))
		return;

	BUG_ON(!dmabuf->vmap_ptr);
	BUG_ON(dmabuf->vmapping_counter == 0);
	BUG_ON(dmabuf->vmap_ptr != vaddr);

	mutex_lock(&dmabuf->lock);
	if (--dmabuf->vmapping_counter == 0) {
		if (dmabuf->ops->vunmap)
			dmabuf->ops->vunmap(dmabuf, vaddr);
		dmabuf->vmap_ptr = NULL;
	}
	mutex_unlock(&dmabuf->lock);
}
EXPORT_SYMBOL_GPL(dma_buf_vunmap);
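/*
 * Sketch of the kmap fallback recommended in the CPU access DOC when
 * dma_buf_vmap() fails (illustrative only; what is done with each mapping
 * is elided):
 *
 *	void *vaddr = dma_buf_vmap(dmabuf);
 *
 *	if (vaddr) {
 *		// contiguous kernel mapping of the whole buffer
 *		...
 *		dma_buf_vunmap(dmabuf, vaddr);
 *	} else {
 *		// fall back to page-wise access
 *		void *page = dma_buf_kmap(dmabuf, 0);
 *		...
 *		dma_buf_kunmap(dmabuf, 0, page);
 *	}
 */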
#ifdef CONFIG_DEBUG_FS
static int dma_buf_debug_show(struct seq_file *s, void *unused)
{
	int ret;
	struct dma_buf *buf_obj;
	struct dma_buf_attachment *attach_obj;
	int count = 0, attach_count;
	size_t size = 0;

	ret = mutex_lock_interruptible(&db_list.lock);
	if (ret)
		return ret;

	seq_puts(s, "\nDma-buf Objects:\n");
	seq_puts(s, "size\tflags\tmode\tcount\texp_name\n");

	list_for_each_entry(buf_obj, &db_list.head, list_node) {
		ret = mutex_lock_interruptible(&buf_obj->lock);
		if (ret) {
			seq_puts(s,
				 "\tERROR locking buffer object: skipping\n");
			continue;
		}

		seq_printf(s, "%08zu\t%08x\t%08x\t%08ld\t%s\n",
			   buf_obj->size, buf_obj->file->f_flags,
			   buf_obj->file->f_mode, file_count(buf_obj->file),
			   buf_obj->exp_name);

		seq_puts(s, "\tAttached Devices:\n");
		attach_count = 0;
		list_for_each_entry(attach_obj, &buf_obj->attachments, node) {
			seq_printf(s, "\t%s\n", dev_name(attach_obj->dev));
			attach_count++;
		}

		seq_printf(s, "Total %d devices attached\n\n", attach_count);

		count++;
		size += buf_obj->size;
		mutex_unlock(&buf_obj->lock);
	}

	seq_printf(s, "\nTotal %d objects, %zu bytes\n", count, size);

	mutex_unlock(&db_list.lock);
	return 0;
}
static int dma_buf_debug_open(struct inode *inode, struct file *file)
{
	return single_open(file, dma_buf_debug_show, NULL);
}

static const struct file_operations dma_buf_debug_fops = {
	.open		= dma_buf_debug_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
static struct dentry *dma_buf_debugfs_dir;

static int dma_buf_init_debugfs(void)
{
	struct dentry *d;
	int err = 0;

	d = debugfs_create_dir("dma_buf", NULL);
	if (IS_ERR(d))
		return PTR_ERR(d);

	dma_buf_debugfs_dir = d;

	d = debugfs_create_file("bufinfo", S_IRUGO, dma_buf_debugfs_dir,
				NULL, &dma_buf_debug_fops);
	if (IS_ERR(d)) {
		pr_debug("dma_buf: debugfs: failed to create node bufinfo\n");
		debugfs_remove_recursive(dma_buf_debugfs_dir);
		dma_buf_debugfs_dir = NULL;
		err = PTR_ERR(d);
	}

	return err;
}
static void dma_buf_uninit_debugfs(void)
{
	if (dma_buf_debugfs_dir)
		debugfs_remove_recursive(dma_buf_debugfs_dir);
}
#else
static inline int dma_buf_init_debugfs(void)
{
	return 0;
}
static inline void dma_buf_uninit_debugfs(void)
{
}
#endif
static int __init dma_buf_init(void)
{
	mutex_init(&db_list.lock);
	INIT_LIST_HEAD(&db_list.head);
	dma_buf_init_debugfs();
	return 0;
}
subsys_initcall(dma_buf_init);

static void __exit dma_buf_deinit(void)
{
	dma_buf_uninit_debugfs();
}
__exitcall(dma_buf_deinit);