3 * drivers/staging/android/ion/ion.c
5 * Copyright (C) 2011 Google, Inc.
7 * This software is licensed under the terms of the GNU General Public
8 * License version 2, as published by the Free Software Foundation, and
9 * may be copied, distributed, and modified under those terms.
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
18 #include <linux/device.h>
19 #include <linux/err.h>
20 #include <linux/file.h>
21 #include <linux/freezer.h>
23 #include <linux/anon_inodes.h>
24 #include <linux/kthread.h>
25 #include <linux/list.h>
26 #include <linux/memblock.h>
27 #include <linux/miscdevice.h>
28 #include <linux/export.h>
30 #include <linux/mm_types.h>
31 #include <linux/rbtree.h>
32 #include <linux/slab.h>
33 #include <linux/seq_file.h>
34 #include <linux/uaccess.h>
35 #include <linux/vmalloc.h>
36 #include <linux/debugfs.h>
37 #include <linux/dma-buf.h>
38 #include <linux/idr.h>
42 #include "compat_ion.h"
44 bool ion_buffer_fault_user_mappings(struct ion_buffer *buffer)
46 return (buffer->flags & ION_FLAG_CACHED) &&
47 !(buffer->flags & ION_FLAG_CACHED_NEEDS_SYNC);
50 bool ion_buffer_cached(struct ion_buffer *buffer)
52 return !!(buffer->flags & ION_FLAG_CACHED);
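/*
 * For buffers with faulted user mappings, the low bit of each entry in
 * buffer->pages doubles as a per-page dirty flag.  The helpers below set,
 * test and clear that bit; ion_buffer_page() masks it off to recover the
 * real struct page pointer.
 */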
55 static inline struct page *ion_buffer_page(struct page *page)
57 return (struct page *)((unsigned long)page & ~(1UL));
60 static inline bool ion_buffer_page_is_dirty(struct page *page)
62 return !!((unsigned long)page & 1UL);
65 static inline void ion_buffer_page_dirty(struct page **page)
67 *page = (struct page *)((unsigned long)(*page) | 1UL);
70 static inline void ion_buffer_page_clean(struct page **page)
72 *page = (struct page *)((unsigned long)(*page) & ~(1UL));
75 /* this function should only be called while dev->lock is held */
76 static void ion_buffer_add(struct ion_device *dev,
77 struct ion_buffer *buffer)
79 struct rb_node **p = &dev->buffers.rb_node;
80 struct rb_node *parent = NULL;
81 struct ion_buffer *entry;
85 entry = rb_entry(parent, struct ion_buffer, node);
89 } else if (buffer > entry) {
92 pr_err("%s: buffer already found.", __func__);
97 rb_link_node(&buffer->node, parent, p);
98 rb_insert_color(&buffer->node, &dev->buffers);
101 /* this function should only be called while dev->lock is held */
102 static struct ion_buffer *ion_buffer_create(struct ion_heap *heap,
103 struct ion_device *dev,
108 struct ion_buffer *buffer;
109 struct sg_table *table;
110 struct scatterlist *sg;
113 buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
115 return ERR_PTR(-ENOMEM);
118 buffer->flags = flags;
119 kref_init(&buffer->ref);
121 ret = heap->ops->allocate(heap, buffer, len, align, flags);
124 if (!(heap->flags & ION_HEAP_FLAG_DEFER_FREE))
127 ion_heap_freelist_drain(heap, 0);
128 ret = heap->ops->allocate(heap, buffer, len, align,
134 if (buffer->sg_table == NULL) {
135 WARN_ONCE(1, "This heap needs to set the sgtable");
140 table = buffer->sg_table;
144 if (ion_buffer_fault_user_mappings(buffer)) {
145 int num_pages = PAGE_ALIGN(buffer->size) / PAGE_SIZE;
146 struct scatterlist *sg;
149 buffer->pages = vmalloc(sizeof(struct page *) * num_pages);
150 if (!buffer->pages) {
155 for_each_sg(table->sgl, sg, table->nents, i) {
156 struct page *page = sg_page(sg);
158 for (j = 0; j < sg->length / PAGE_SIZE; j++)
159 buffer->pages[k++] = page++;
165 INIT_LIST_HEAD(&buffer->vmas);
166 mutex_init(&buffer->lock);
168 * this will set up dma addresses for the sglist -- it is not
169 * technically correct as per the dma api -- a specific
170 * device isn't really taking ownership here. However, in practice on
171 * our systems the only dma_address space is physical addresses.
172 * Additionally, we can't afford the overhead of invalidating every
173 * allocation via dma_map_sg. The implicit contract here is that
174 * memory coming from the heaps is ready for dma, i.e. if it has a
175 * cached mapping, that mapping has been invalidated.
177 for_each_sg(buffer->sg_table->sgl, sg, buffer->sg_table->nents, i) {
178 sg_dma_address(sg) = sg_phys(sg);
179 sg_dma_len(sg) = sg->length;
181 mutex_lock(&dev->buffer_lock);
182 ion_buffer_add(dev, buffer);
183 mutex_unlock(&dev->buffer_lock);
187 heap->ops->free(buffer);
193 void ion_buffer_destroy(struct ion_buffer *buffer)
195 if (WARN_ON(buffer->kmap_cnt > 0))
196 buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
197 buffer->heap->ops->free(buffer);
198 vfree(buffer->pages);
202 static void _ion_buffer_destroy(struct kref *kref)
204 struct ion_buffer *buffer = container_of(kref, struct ion_buffer, ref);
205 struct ion_heap *heap = buffer->heap;
206 struct ion_device *dev = buffer->dev;
208 mutex_lock(&dev->buffer_lock);
209 rb_erase(&buffer->node, &dev->buffers);
210 mutex_unlock(&dev->buffer_lock);
212 if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
213 ion_heap_freelist_add(heap, buffer);
215 ion_buffer_destroy(buffer);
218 static void ion_buffer_get(struct ion_buffer *buffer)
220 kref_get(&buffer->ref);
223 static int ion_buffer_put(struct ion_buffer *buffer)
225 return kref_put(&buffer->ref, _ion_buffer_destroy);
228 static void ion_buffer_add_to_handle(struct ion_buffer *buffer)
230 mutex_lock(&buffer->lock);
231 buffer->handle_count++;
232 mutex_unlock(&buffer->lock);
235 static void ion_buffer_remove_from_handle(struct ion_buffer *buffer)
238 * when a buffer is removed from a handle, if it is not in
239 * any other handles, copy the taskcomm and the pid of the
240 * process it's being removed from into the buffer. At this
241 * point there will be no way to track what processes this buffer is
242 * being used by, it only exists as a dma_buf file descriptor.
243 * The taskcomm and pid can provide a debug hint as to where this fd
244 * is in the system.
246 mutex_lock(&buffer->lock);
247 buffer->handle_count--;
248 BUG_ON(buffer->handle_count < 0);
249 if (!buffer->handle_count) {
250 struct task_struct *task;
252 task = current->group_leader;
253 get_task_comm(buffer->task_comm, task);
254 buffer->pid = task_pid_nr(task);
256 mutex_unlock(&buffer->lock);
259 static struct ion_handle *ion_handle_create(struct ion_client *client,
260 struct ion_buffer *buffer)
262 struct ion_handle *handle;
264 handle = kzalloc(sizeof(*handle), GFP_KERNEL);
266 return ERR_PTR(-ENOMEM);
267 kref_init(&handle->ref);
268 RB_CLEAR_NODE(&handle->node);
269 handle->client = client;
270 ion_buffer_get(buffer);
271 ion_buffer_add_to_handle(buffer);
272 handle->buffer = buffer;
277 static void ion_handle_kmap_put(struct ion_handle *);
279 static void ion_handle_destroy(struct kref *kref)
281 struct ion_handle *handle = container_of(kref, struct ion_handle, ref);
282 struct ion_client *client = handle->client;
283 struct ion_buffer *buffer = handle->buffer;
285 mutex_lock(&buffer->lock);
286 while (handle->kmap_cnt)
287 ion_handle_kmap_put(handle);
288 mutex_unlock(&buffer->lock);
290 idr_remove(&client->idr, handle->id);
291 if (!RB_EMPTY_NODE(&handle->node))
292 rb_erase(&handle->node, &client->handles);
294 ion_buffer_remove_from_handle(buffer);
295 ion_buffer_put(buffer);
300 static void ion_handle_get(struct ion_handle *handle)
302 kref_get(&handle->ref);
305 int ion_handle_put_nolock(struct ion_handle *handle)
307 return kref_put(&handle->ref, ion_handle_destroy);
310 int ion_handle_put(struct ion_handle *handle)
312 struct ion_client *client = handle->client;
315 mutex_lock(&client->lock);
316 ret = ion_handle_put_nolock(handle);
317 mutex_unlock(&client->lock);
322 static struct ion_handle *ion_handle_lookup(struct ion_client *client,
323 struct ion_buffer *buffer)
325 struct rb_node *n = client->handles.rb_node;
328 struct ion_handle *entry = rb_entry(n, struct ion_handle, node);
330 if (buffer < entry->buffer)
332 else if (buffer > entry->buffer)
337 return ERR_PTR(-EINVAL);
340 struct ion_handle *ion_handle_get_by_id_nolock(struct ion_client *client,
343 struct ion_handle *handle;
345 handle = idr_find(&client->idr, id);
347 ion_handle_get(handle);
349 return handle ? handle : ERR_PTR(-EINVAL);
352 struct ion_handle *ion_handle_get_by_id(struct ion_client *client,
355 struct ion_handle *handle;
357 mutex_lock(&client->lock);
358 handle = ion_handle_get_by_id_nolock(client, id);
359 mutex_unlock(&client->lock);
364 static bool ion_handle_validate(struct ion_client *client,
365 struct ion_handle *handle)
367 WARN_ON(!mutex_is_locked(&client->lock));
368 return idr_find(&client->idr, handle->id) == handle;
371 static int ion_handle_add(struct ion_client *client, struct ion_handle *handle)
374 struct rb_node **p = &client->handles.rb_node;
375 struct rb_node *parent = NULL;
376 struct ion_handle *entry;
378 id = idr_alloc(&client->idr, handle, 1, 0, GFP_KERNEL);
386 entry = rb_entry(parent, struct ion_handle, node);
388 if (handle->buffer < entry->buffer)
390 else if (handle->buffer > entry->buffer)
393 WARN(1, "%s: buffer already found.", __func__);
396 rb_link_node(&handle->node, parent, p);
397 rb_insert_color(&handle->node, &client->handles);
402 struct ion_handle *ion_alloc(struct ion_client *client, size_t len,
403 size_t align, unsigned int heap_id_mask,
406 struct ion_handle *handle;
407 struct ion_device *dev = client->dev;
408 struct ion_buffer *buffer = NULL;
409 struct ion_heap *heap;
412 pr_debug("%s: len %zu align %zu heap_id_mask %u flags %x\n", __func__,
413 len, align, heap_id_mask, flags);
415 * traverse the list of heaps available in this system in priority
416 * order. If the heap type is supported by the client, and matches the
417 * request of the caller, allocate from it. Repeat until allocate has
418 * succeeded or all heaps have been tried
420 len = PAGE_ALIGN(len);
423 return ERR_PTR(-EINVAL);
425 down_read(&dev->lock);
426 plist_for_each_entry(heap, &dev->heaps, node) {
427 /* if the caller didn't specify this heap id */
428 if (!((1 << heap->id) & heap_id_mask))
430 buffer = ion_buffer_create(heap, dev, len, align, flags);
437 return ERR_PTR(-ENODEV);
440 return ERR_CAST(buffer);
442 handle = ion_handle_create(client, buffer);
445 * ion_buffer_create will create a buffer with a ref_cnt of 1,
446 * and ion_handle_create will take a second reference, drop one here
448 ion_buffer_put(buffer);
453 mutex_lock(&client->lock);
454 ret = ion_handle_add(client, handle);
455 mutex_unlock(&client->lock);
457 ion_handle_put(handle);
458 handle = ERR_PTR(ret);
463 EXPORT_SYMBOL(ion_alloc);
465 void ion_free_nolock(struct ion_client *client,
466 struct ion_handle *handle)
468 if (!ion_handle_validate(client, handle)) {
469 WARN(1, "%s: invalid handle passed to free.\n", __func__);
472 ion_handle_put_nolock(handle);
475 void ion_free(struct ion_client *client, struct ion_handle *handle)
477 BUG_ON(client != handle->client);
479 mutex_lock(&client->lock);
480 ion_free_nolock(client, handle);
481 mutex_unlock(&client->lock);
483 EXPORT_SYMBOL(ion_free);
485 static void *ion_buffer_kmap_get(struct ion_buffer *buffer)
489 if (buffer->kmap_cnt) {
491 return buffer->vaddr;
493 vaddr = buffer->heap->ops->map_kernel(buffer->heap, buffer);
494 if (WARN_ONCE(vaddr == NULL,
495 "heap->ops->map_kernel should return ERR_PTR on error"))
496 return ERR_PTR(-EINVAL);
499 buffer->vaddr = vaddr;
504 static void *ion_handle_kmap_get(struct ion_handle *handle)
506 struct ion_buffer *buffer = handle->buffer;
509 if (handle->kmap_cnt) {
511 return buffer->vaddr;
513 vaddr = ion_buffer_kmap_get(buffer);
520 static void ion_buffer_kmap_put(struct ion_buffer *buffer)
523 if (!buffer->kmap_cnt) {
524 buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
525 buffer->vaddr = NULL;
529 static void ion_handle_kmap_put(struct ion_handle *handle)
531 struct ion_buffer *buffer = handle->buffer;
533 if (!handle->kmap_cnt) {
534 WARN(1, "%s: Double unmap detected! bailing...\n", __func__);
538 if (!handle->kmap_cnt)
539 ion_buffer_kmap_put(buffer);
542 void *ion_map_kernel(struct ion_client *client, struct ion_handle *handle)
544 struct ion_buffer *buffer;
547 mutex_lock(&client->lock);
548 if (!ion_handle_validate(client, handle)) {
549 pr_err("%s: invalid handle passed to map_kernel.\n",
551 mutex_unlock(&client->lock);
552 return ERR_PTR(-EINVAL);
555 buffer = handle->buffer;
557 if (!handle->buffer->heap->ops->map_kernel) {
558 pr_err("%s: map_kernel is not implemented by this heap.\n",
560 mutex_unlock(&client->lock);
561 return ERR_PTR(-ENODEV);
564 mutex_lock(&buffer->lock);
565 vaddr = ion_handle_kmap_get(handle);
566 mutex_unlock(&buffer->lock);
567 mutex_unlock(&client->lock);
570 EXPORT_SYMBOL(ion_map_kernel);
572 void ion_unmap_kernel(struct ion_client *client, struct ion_handle *handle)
574 struct ion_buffer *buffer;
576 mutex_lock(&client->lock);
577 buffer = handle->buffer;
578 mutex_lock(&buffer->lock);
579 ion_handle_kmap_put(handle);
580 mutex_unlock(&buffer->lock);
581 mutex_unlock(&client->lock);
583 EXPORT_SYMBOL(ion_unmap_kernel);
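/*
 * Illustrative sketch, not part of the original driver: how an in-kernel
 * client would typically combine ion_alloc()/ion_map_kernel() above.  The
 * function name and the heap_id_mask parameter are assumptions for the
 * example; heap ids depend on how the platform registered its heaps.
 */
static int __maybe_unused ion_example_fill(struct ion_client *client,
					   unsigned int heap_id_mask)
{
	struct ion_handle *handle;
	void *vaddr;

	/* one page-aligned page from any heap in heap_id_mask, CPU-cached */
	handle = ion_alloc(client, PAGE_SIZE, PAGE_SIZE, heap_id_mask,
			   ION_FLAG_CACHED);
	if (IS_ERR(handle))
		return PTR_ERR(handle);

	vaddr = ion_map_kernel(client, handle);
	if (IS_ERR(vaddr)) {
		ion_free(client, handle);
		return PTR_ERR(vaddr);
	}

	memset(vaddr, 0, PAGE_SIZE);	/* touch the buffer from the CPU */

	ion_unmap_kernel(client, handle);
	ion_free(client, handle);
	return 0;
}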
585 static struct mutex debugfs_mutex;
586 static struct rb_root *ion_root_client;
587 static int is_client_alive(struct ion_client *client)
589 struct rb_node *node;
590 struct ion_client *tmp;
591 struct ion_device *dev;
593 node = ion_root_client->rb_node;
594 dev = container_of(ion_root_client, struct ion_device, clients);
596 down_read(&dev->lock);
598 tmp = rb_entry(node, struct ion_client, node);
600 node = node->rb_left;
601 } else if (client > tmp) {
602 node = node->rb_right;
613 static int ion_debug_client_show(struct seq_file *s, void *unused)
615 struct ion_client *client = s->private;
617 size_t sizes[ION_NUM_HEAP_IDS] = {0};
618 const char *names[ION_NUM_HEAP_IDS] = {NULL};
621 mutex_lock(&debugfs_mutex);
622 if (!is_client_alive(client)) {
623 seq_printf(s, "ion_client 0x%p dead, can't dump its buffers\n",
625 mutex_unlock(&debugfs_mutex);
629 mutex_lock(&client->lock);
630 for (n = rb_first(&client->handles); n; n = rb_next(n)) {
631 struct ion_handle *handle = rb_entry(n, struct ion_handle,
633 unsigned int id = handle->buffer->heap->id;
636 names[id] = handle->buffer->heap->name;
637 sizes[id] += handle->buffer->size;
639 mutex_unlock(&client->lock);
640 mutex_unlock(&debugfs_mutex);
642 seq_printf(s, "%16.16s: %16.16s\n", "heap_name", "size_in_bytes");
643 for (i = 0; i < ION_NUM_HEAP_IDS; i++) {
646 seq_printf(s, "%16.16s: %16zu\n", names[i], sizes[i]);
651 static int ion_debug_client_open(struct inode *inode, struct file *file)
653 return single_open(file, ion_debug_client_show, inode->i_private);
656 static const struct file_operations debug_client_fops = {
657 .open = ion_debug_client_open,
660 .release = single_release,
663 static int ion_get_client_serial(const struct rb_root *root,
664 const unsigned char *name)
667 struct rb_node *node;
669 for (node = rb_first(root); node; node = rb_next(node)) {
670 struct ion_client *client = rb_entry(node, struct ion_client,
673 if (strcmp(client->name, name))
675 serial = max(serial, client->display_serial);
680 struct ion_client *ion_client_create(struct ion_device *dev,
683 struct ion_client *client;
684 struct task_struct *task;
686 struct rb_node *parent = NULL;
687 struct ion_client *entry;
691 pr_err("%s: Name cannot be null\n", __func__);
692 return ERR_PTR(-EINVAL);
695 get_task_struct(current->group_leader);
696 task_lock(current->group_leader);
697 pid = task_pid_nr(current->group_leader);
699 * don't bother to store task struct for kernel threads,
700 * they can't be killed anyway
702 if (current->group_leader->flags & PF_KTHREAD) {
703 put_task_struct(current->group_leader);
706 task = current->group_leader;
708 task_unlock(current->group_leader);
710 client = kzalloc(sizeof(*client), GFP_KERNEL);
712 goto err_put_task_struct;
715 client->handles = RB_ROOT;
716 idr_init(&client->idr);
717 mutex_init(&client->lock);
720 client->name = kstrdup(name, GFP_KERNEL);
722 goto err_free_client;
724 down_write(&dev->lock);
725 client->display_serial = ion_get_client_serial(&dev->clients, name);
726 client->display_name = kasprintf(
727 GFP_KERNEL, "%s-%d", name, client->display_serial);
728 if (!client->display_name) {
729 up_write(&dev->lock);
730 goto err_free_client_name;
732 p = &dev->clients.rb_node;
735 entry = rb_entry(parent, struct ion_client, node);
739 else if (client > entry)
742 rb_link_node(&client->node, parent, p);
743 rb_insert_color(&client->node, &dev->clients);
745 client->debug_root = debugfs_create_file(client->display_name, 0664,
746 dev->clients_debug_root,
747 client, &debug_client_fops);
748 if (!client->debug_root) {
749 char buf[256], *path;
751 path = dentry_path(dev->clients_debug_root, buf, 256);
752 pr_err("Failed to create client debugfs at %s/%s\n",
753 path, client->display_name);
756 up_write(&dev->lock);
760 err_free_client_name:
766 put_task_struct(current->group_leader);
767 return ERR_PTR(-ENOMEM);
769 EXPORT_SYMBOL(ion_client_create);
771 void ion_client_destroy(struct ion_client *client)
773 struct ion_device *dev = client->dev;
776 pr_debug("%s: %d\n", __func__, __LINE__);
777 mutex_lock(&debugfs_mutex);
778 while ((n = rb_first(&client->handles))) {
779 struct ion_handle *handle = rb_entry(n, struct ion_handle,
781 ion_handle_destroy(&handle->ref);
784 idr_destroy(&client->idr);
786 down_write(&dev->lock);
788 put_task_struct(client->task);
789 rb_erase(&client->node, &dev->clients);
790 debugfs_remove_recursive(client->debug_root);
791 up_write(&dev->lock);
793 kfree(client->display_name);
796 mutex_unlock(&debugfs_mutex);
798 EXPORT_SYMBOL(ion_client_destroy);
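/*
 * Illustrative sketch, not part of the original driver: the usual lifetime
 * of an in-kernel client.  The ion_device pointer would come from whatever
 * platform code called ion_device_create(); the function name here is an
 * assumption for the example.
 */
static int __maybe_unused ion_example_client(struct ion_device *idev)
{
	struct ion_client *client;

	client = ion_client_create(idev, "example-driver");
	if (IS_ERR(client))
		return PTR_ERR(client);

	/* allocate/map/free as in the sketch after ion_unmap_kernel() above */

	ion_client_destroy(client);
	return 0;
}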
800 static void ion_buffer_sync_for_device(struct ion_buffer *buffer,
802 enum dma_data_direction direction);
804 static struct sg_table *ion_map_dma_buf(struct dma_buf_attachment *attachment,
805 enum dma_data_direction direction)
807 struct dma_buf *dmabuf = attachment->dmabuf;
808 struct ion_buffer *buffer = dmabuf->priv;
810 ion_buffer_sync_for_device(buffer, attachment->dev, direction);
811 return buffer->sg_table;
814 static void ion_unmap_dma_buf(struct dma_buf_attachment *attachment,
815 struct sg_table *table,
816 enum dma_data_direction direction)
820 void ion_pages_sync_for_device(struct device *dev, struct page *page,
821 size_t size, enum dma_data_direction dir)
823 struct scatterlist sg;
825 sg_init_table(&sg, 1);
826 sg_set_page(&sg, page, size, 0);
828 * This is not correct - sg_dma_address needs a dma_addr_t that is valid
829 * for the targeted device, but this works on the currently targeted
830 * hardware.
832 sg_dma_address(&sg) = page_to_phys(page);
833 dma_sync_sg_for_device(dev, &sg, 1, dir);
836 struct ion_vma_list {
837 struct list_head list;
838 struct vm_area_struct *vma;
841 static void ion_buffer_sync_for_device(struct ion_buffer *buffer,
843 enum dma_data_direction dir)
845 struct ion_vma_list *vma_list;
846 int pages = PAGE_ALIGN(buffer->size) / PAGE_SIZE;
849 pr_debug("%s: syncing for device %s\n", __func__,
850 dev ? dev_name(dev) : "null");
852 if (!ion_buffer_fault_user_mappings(buffer))
855 mutex_lock(&buffer->lock);
856 for (i = 0; i < pages; i++) {
857 struct page *page = buffer->pages[i];
859 if (ion_buffer_page_is_dirty(page))
860 ion_pages_sync_for_device(dev, ion_buffer_page(page),
863 ion_buffer_page_clean(buffer->pages + i);
865 list_for_each_entry(vma_list, &buffer->vmas, list) {
866 struct vm_area_struct *vma = vma_list->vma;
868 zap_page_range(vma, vma->vm_start, vma->vm_end - vma->vm_start,
871 mutex_unlock(&buffer->lock);
874 static int ion_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
876 struct ion_buffer *buffer = vma->vm_private_data;
880 mutex_lock(&buffer->lock);
881 ion_buffer_page_dirty(buffer->pages + vmf->pgoff);
882 BUG_ON(!buffer->pages || !buffer->pages[vmf->pgoff]);
884 pfn = page_to_pfn(ion_buffer_page(buffer->pages[vmf->pgoff]));
885 ret = vm_insert_pfn(vma, vmf->address, pfn);
886 mutex_unlock(&buffer->lock);
888 return VM_FAULT_ERROR;
890 return VM_FAULT_NOPAGE;
893 static void ion_vm_open(struct vm_area_struct *vma)
895 struct ion_buffer *buffer = vma->vm_private_data;
896 struct ion_vma_list *vma_list;
898 vma_list = kmalloc(sizeof(*vma_list), GFP_KERNEL);
902 mutex_lock(&buffer->lock);
903 list_add(&vma_list->list, &buffer->vmas);
904 mutex_unlock(&buffer->lock);
905 pr_debug("%s: adding %p\n", __func__, vma);
908 static void ion_vm_close(struct vm_area_struct *vma)
910 struct ion_buffer *buffer = vma->vm_private_data;
911 struct ion_vma_list *vma_list, *tmp;
913 pr_debug("%s\n", __func__);
914 mutex_lock(&buffer->lock);
915 list_for_each_entry_safe(vma_list, tmp, &buffer->vmas, list) {
916 if (vma_list->vma != vma)
918 list_del(&vma_list->list);
920 pr_debug("%s: deleting %p\n", __func__, vma);
923 mutex_unlock(&buffer->lock);
926 static const struct vm_operations_struct ion_vma_ops = {
928 .close = ion_vm_close,
929 .fault = ion_vm_fault,
932 static int ion_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
934 struct ion_buffer *buffer = dmabuf->priv;
937 if (!buffer->heap->ops->map_user) {
938 pr_err("%s: this heap does not define a method for mapping to userspace\n",
943 if (ion_buffer_fault_user_mappings(buffer)) {
944 vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND |
946 vma->vm_private_data = buffer;
947 vma->vm_ops = &ion_vma_ops;
952 if (!(buffer->flags & ION_FLAG_CACHED))
953 vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
955 mutex_lock(&buffer->lock);
956 /* now map it to userspace */
957 ret = buffer->heap->ops->map_user(buffer->heap, buffer, vma);
958 mutex_unlock(&buffer->lock);
961 pr_err("%s: failure mapping buffer to userspace\n",
967 static void ion_dma_buf_release(struct dma_buf *dmabuf)
969 struct ion_buffer *buffer = dmabuf->priv;
971 ion_buffer_put(buffer);
974 static void *ion_dma_buf_kmap(struct dma_buf *dmabuf, unsigned long offset)
976 struct ion_buffer *buffer = dmabuf->priv;
978 return buffer->vaddr + offset * PAGE_SIZE;
981 static void ion_dma_buf_kunmap(struct dma_buf *dmabuf, unsigned long offset,
986 static int ion_dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
987 enum dma_data_direction direction)
989 struct ion_buffer *buffer = dmabuf->priv;
992 if (!buffer->heap->ops->map_kernel) {
993 pr_err("%s: map kernel is not implemented by this heap.\n",
998 mutex_lock(&buffer->lock);
999 vaddr = ion_buffer_kmap_get(buffer);
1000 mutex_unlock(&buffer->lock);
1001 return PTR_ERR_OR_ZERO(vaddr);
1004 static int ion_dma_buf_end_cpu_access(struct dma_buf *dmabuf,
1005 enum dma_data_direction direction)
1007 struct ion_buffer *buffer = dmabuf->priv;
1009 mutex_lock(&buffer->lock);
1010 ion_buffer_kmap_put(buffer);
1011 mutex_unlock(&buffer->lock);
1016 static const struct dma_buf_ops dma_buf_ops = {
1017 .map_dma_buf = ion_map_dma_buf,
1018 .unmap_dma_buf = ion_unmap_dma_buf,
1020 .release = ion_dma_buf_release,
1021 .begin_cpu_access = ion_dma_buf_begin_cpu_access,
1022 .end_cpu_access = ion_dma_buf_end_cpu_access,
1023 .kmap_atomic = ion_dma_buf_kmap,
1024 .kunmap_atomic = ion_dma_buf_kunmap,
1025 .kmap = ion_dma_buf_kmap,
1026 .kunmap = ion_dma_buf_kunmap,
1029 struct dma_buf *ion_share_dma_buf(struct ion_client *client,
1030 struct ion_handle *handle)
1032 DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
1033 struct ion_buffer *buffer;
1034 struct dma_buf *dmabuf;
1037 mutex_lock(&client->lock);
1038 valid_handle = ion_handle_validate(client, handle);
1039 if (!valid_handle) {
1040 WARN(1, "%s: invalid handle passed to share.\n", __func__);
1041 mutex_unlock(&client->lock);
1042 return ERR_PTR(-EINVAL);
1044 buffer = handle->buffer;
1045 ion_buffer_get(buffer);
1046 mutex_unlock(&client->lock);
1048 exp_info.ops = &dma_buf_ops;
1049 exp_info.size = buffer->size;
1050 exp_info.flags = O_RDWR;
1051 exp_info.priv = buffer;
1053 dmabuf = dma_buf_export(&exp_info);
1054 if (IS_ERR(dmabuf)) {
1055 ion_buffer_put(buffer);
1061 EXPORT_SYMBOL(ion_share_dma_buf);
1063 int ion_share_dma_buf_fd(struct ion_client *client, struct ion_handle *handle)
1065 struct dma_buf *dmabuf;
1068 dmabuf = ion_share_dma_buf(client, handle);
1070 return PTR_ERR(dmabuf);
1072 fd = dma_buf_fd(dmabuf, O_CLOEXEC);
1074 dma_buf_put(dmabuf);
1078 EXPORT_SYMBOL(ion_share_dma_buf_fd);
1080 struct ion_handle *ion_import_dma_buf(struct ion_client *client,
1081 struct dma_buf *dmabuf)
1083 struct ion_buffer *buffer;
1084 struct ion_handle *handle;
1087 /* if this memory came from ion */
1089 if (dmabuf->ops != &dma_buf_ops) {
1090 pr_err("%s: can not import dmabuf from another exporter\n",
1092 return ERR_PTR(-EINVAL);
1094 buffer = dmabuf->priv;
1096 mutex_lock(&client->lock);
1097 /* if a handle exists for this buffer just take a reference to it */
1098 handle = ion_handle_lookup(client, buffer);
1099 if (!IS_ERR(handle)) {
1100 ion_handle_get(handle);
1101 mutex_unlock(&client->lock);
1105 handle = ion_handle_create(client, buffer);
1106 if (IS_ERR(handle)) {
1107 mutex_unlock(&client->lock);
1111 ret = ion_handle_add(client, handle);
1112 mutex_unlock(&client->lock);
1114 ion_handle_put(handle);
1115 handle = ERR_PTR(ret);
1121 EXPORT_SYMBOL(ion_import_dma_buf);
1123 struct ion_handle *ion_import_dma_buf_fd(struct ion_client *client, int fd)
1125 struct dma_buf *dmabuf;
1126 struct ion_handle *handle;
1128 dmabuf = dma_buf_get(fd);
1130 return ERR_CAST(dmabuf);
1132 handle = ion_import_dma_buf(client, dmabuf);
1133 dma_buf_put(dmabuf);
1136 EXPORT_SYMBOL(ion_import_dma_buf_fd);
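/*
 * Illustrative sketch, not part of the original driver: exporting a handle
 * as a dma-buf fd (as the ION_IOC_SHARE ioctl does) and importing such an
 * fd back into a client.  Names are assumptions for the example; error
 * handling is abbreviated.
 */
static int __maybe_unused ion_example_share(struct ion_client *client,
					    struct ion_handle *handle)
{
	struct ion_handle *imported;
	int fd;

	fd = ion_share_dma_buf_fd(client, handle);
	if (fd < 0)
		return fd;

	/*
	 * Any client (this one or another) handed the fd can get its own
	 * handle to the same underlying ion_buffer.
	 */
	imported = ion_import_dma_buf_fd(client, fd);
	if (IS_ERR(imported))
		return PTR_ERR(imported);

	ion_free(client, imported);
	return fd;
}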
1138 int ion_sync_for_device(struct ion_client *client, int fd)
1140 struct dma_buf *dmabuf;
1141 struct ion_buffer *buffer;
1143 dmabuf = dma_buf_get(fd);
1145 return PTR_ERR(dmabuf);
1147 /* if this memory came from ion */
1148 if (dmabuf->ops != &dma_buf_ops) {
1149 pr_err("%s: can not sync dmabuf from another exporter\n",
1151 dma_buf_put(dmabuf);
1154 buffer = dmabuf->priv;
1156 dma_sync_sg_for_device(NULL, buffer->sg_table->sgl,
1157 buffer->sg_table->nents, DMA_BIDIRECTIONAL);
1158 dma_buf_put(dmabuf);
1162 int ion_query_heaps(struct ion_client *client, struct ion_heap_query *query)
1164 struct ion_device *dev = client->dev;
1165 struct ion_heap_data __user *buffer = u64_to_user_ptr(query->heaps);
1166 int ret = -EINVAL, cnt = 0, max_cnt;
1167 struct ion_heap *heap;
1168 struct ion_heap_data hdata;
1170 memset(&hdata, 0, sizeof(hdata));
1172 down_read(&dev->lock);
1174 query->cnt = dev->heap_cnt;
1179 if (query->cnt <= 0)
1182 max_cnt = query->cnt;
1184 plist_for_each_entry(heap, &dev->heaps, node) {
1185 strncpy(hdata.name, heap->name, MAX_HEAP_NAME);
1186 hdata.name[sizeof(hdata.name) - 1] = '\0';
1187 hdata.type = heap->type;
1188 hdata.heap_id = heap->id;
1190 if (copy_to_user(&buffer[cnt], &hdata, sizeof(hdata))) {
1202 up_read(&dev->lock);
1206 static int ion_release(struct inode *inode, struct file *file)
1208 struct ion_client *client = file->private_data;
1210 pr_debug("%s: %d\n", __func__, __LINE__);
1211 ion_client_destroy(client);
1215 static int ion_open(struct inode *inode, struct file *file)
1217 struct miscdevice *miscdev = file->private_data;
1218 struct ion_device *dev = container_of(miscdev, struct ion_device, dev);
1219 struct ion_client *client;
1220 char debug_name[64];
1222 pr_debug("%s: %d\n", __func__, __LINE__);
1223 snprintf(debug_name, 64, "%u", task_pid_nr(current->group_leader));
1224 client = ion_client_create(dev, debug_name);
1226 return PTR_ERR(client);
1227 file->private_data = client;
1232 static const struct file_operations ion_fops = {
1233 .owner = THIS_MODULE,
1235 .release = ion_release,
1236 .unlocked_ioctl = ion_ioctl,
1237 .compat_ioctl = compat_ion_ioctl,
1240 static size_t ion_debug_heap_total(struct ion_client *client,
1246 mutex_lock(&client->lock);
1247 for (n = rb_first(&client->handles); n; n = rb_next(n)) {
1248 struct ion_handle *handle = rb_entry(n,
1251 if (handle->buffer->heap->id == id)
1252 size += handle->buffer->size;
1254 mutex_unlock(&client->lock);
1258 static int ion_debug_heap_show(struct seq_file *s, void *unused)
1260 struct ion_heap *heap = s->private;
1261 struct ion_device *dev = heap->dev;
1263 size_t total_size = 0;
1264 size_t total_orphaned_size = 0;
1266 seq_printf(s, "%16s %16s %16s\n", "client", "pid", "size");
1267 seq_puts(s, "----------------------------------------------------\n");
1269 mutex_lock(&debugfs_mutex);
1270 for (n = rb_first(&dev->clients); n; n = rb_next(n)) {
1271 struct ion_client *client = rb_entry(n, struct ion_client,
1273 size_t size = ion_debug_heap_total(client, heap->id);
1278 char task_comm[TASK_COMM_LEN];
1280 get_task_comm(task_comm, client->task);
1281 seq_printf(s, "%16s %16u %16zu\n", task_comm,
1284 seq_printf(s, "%16s %16u %16zu\n", client->name,
1288 mutex_unlock(&debugfs_mutex);
1290 seq_puts(s, "----------------------------------------------------\n");
1291 seq_puts(s, "orphaned allocations (info is from last known client):\n");
1292 mutex_lock(&dev->buffer_lock);
1293 for (n = rb_first(&dev->buffers); n; n = rb_next(n)) {
1294 struct ion_buffer *buffer = rb_entry(n, struct ion_buffer,
1296 if (buffer->heap->id != heap->id)
1298 total_size += buffer->size;
1299 if (!buffer->handle_count) {
1300 seq_printf(s, "%16s %16u %16zu %d %d\n",
1301 buffer->task_comm, buffer->pid,
1302 buffer->size, buffer->kmap_cnt,
1303 kref_read(&buffer->ref));
1304 total_orphaned_size += buffer->size;
1307 mutex_unlock(&dev->buffer_lock);
1308 seq_puts(s, "----------------------------------------------------\n");
1309 seq_printf(s, "%16s %16zu\n", "total orphaned",
1310 total_orphaned_size);
1311 seq_printf(s, "%16s %16zu\n", "total ", total_size);
1312 if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
1313 seq_printf(s, "%16s %16zu\n", "deferred free",
1314 heap->free_list_size);
1315 seq_puts(s, "----------------------------------------------------\n");
1317 if (heap->debug_show)
1318 heap->debug_show(heap, s, unused);
1323 static int ion_debug_heap_open(struct inode *inode, struct file *file)
1325 return single_open(file, ion_debug_heap_show, inode->i_private);
1328 static const struct file_operations debug_heap_fops = {
1329 .open = ion_debug_heap_open,
1331 .llseek = seq_lseek,
1332 .release = single_release,
1335 static int debug_shrink_set(void *data, u64 val)
1337 struct ion_heap *heap = data;
1338 struct shrink_control sc;
1341 sc.gfp_mask = GFP_HIGHUSER;
1342 sc.nr_to_scan = val;
1345 objs = heap->shrinker.count_objects(&heap->shrinker, &sc);
1346 sc.nr_to_scan = objs;
1349 heap->shrinker.scan_objects(&heap->shrinker, &sc);
1353 static int debug_shrink_get(void *data, u64 *val)
1355 struct ion_heap *heap = data;
1356 struct shrink_control sc;
1359 sc.gfp_mask = GFP_HIGHUSER;
1362 objs = heap->shrinker.count_objects(&heap->shrinker, &sc);
1367 DEFINE_SIMPLE_ATTRIBUTE(debug_shrink_fops, debug_shrink_get,
1368 debug_shrink_set, "%llu\n");
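/*
 * The "<heap name>_shrink" file backed by this attribute (created below,
 * typically under /sys/kernel/debug/ion/heaps/) reports the shrinker's
 * current object count on read; writing a count asks the shrinker to scan
 * that many objects, and writing 0 asks it to scan everything it reports.
 */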
1370 void ion_device_add_heap(struct ion_device *dev, struct ion_heap *heap)
1372 struct dentry *debug_file;
1374 if (!heap->ops->allocate || !heap->ops->free)
1375 pr_err("%s: can not add heap with invalid ops struct.\n",
1378 spin_lock_init(&heap->free_lock);
1379 heap->free_list_size = 0;
1381 if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
1382 ion_heap_init_deferred_free(heap);
1384 if ((heap->flags & ION_HEAP_FLAG_DEFER_FREE) || heap->ops->shrink)
1385 ion_heap_init_shrinker(heap);
1388 down_write(&dev->lock);
1390 * use negative heap->id to reverse the priority -- when traversing
1391 * the list later attempt higher id numbers first
1393 plist_node_init(&heap->node, -heap->id);
1394 plist_add(&heap->node, &dev->heaps);
1395 debug_file = debugfs_create_file(heap->name, 0664,
1396 dev->heaps_debug_root, heap,
1400 char buf[256], *path;
1402 path = dentry_path(dev->heaps_debug_root, buf, 256);
1403 pr_err("Failed to create heap debugfs at %s/%s\n",
1407 if (heap->shrinker.count_objects && heap->shrinker.scan_objects) {
1408 char debug_name[64];
1410 snprintf(debug_name, 64, "%s_shrink", heap->name);
1411 debug_file = debugfs_create_file(
1412 debug_name, 0644, dev->heaps_debug_root, heap,
1413 &debug_shrink_fops);
1415 char buf[256], *path;
1417 path = dentry_path(dev->heaps_debug_root, buf, 256);
1418 pr_err("Failed to create heap shrinker debugfs at %s/%s\n",
1424 up_write(&dev->lock);
1426 EXPORT_SYMBOL(ion_device_add_heap);
1428 struct ion_device *ion_device_create(long (*custom_ioctl)
1429 (struct ion_client *client,
1433 struct ion_device *idev;
1436 idev = kzalloc(sizeof(*idev), GFP_KERNEL);
1438 return ERR_PTR(-ENOMEM);
1440 idev->dev.minor = MISC_DYNAMIC_MINOR;
1441 idev->dev.name = "ion";
1442 idev->dev.fops = &ion_fops;
1443 idev->dev.parent = NULL;
1444 ret = misc_register(&idev->dev);
1446 pr_err("ion: failed to register misc device.\n");
1448 return ERR_PTR(ret);
1451 idev->debug_root = debugfs_create_dir("ion", NULL);
1452 if (!idev->debug_root) {
1453 pr_err("ion: failed to create debugfs root directory.\n");
1456 idev->heaps_debug_root = debugfs_create_dir("heaps", idev->debug_root);
1457 if (!idev->heaps_debug_root) {
1458 pr_err("ion: failed to create debugfs heaps directory.\n");
1461 idev->clients_debug_root = debugfs_create_dir("clients",
1463 if (!idev->clients_debug_root)
1464 pr_err("ion: failed to create debugfs clients directory.\n");
1468 idev->custom_ioctl = custom_ioctl;
1469 idev->buffers = RB_ROOT;
1470 mutex_init(&idev->buffer_lock);
1471 init_rwsem(&idev->lock);
1472 plist_head_init(&idev->heaps);
1473 idev->clients = RB_ROOT;
1474 ion_root_client = &idev->clients;
1475 mutex_init(&debugfs_mutex);
1478 EXPORT_SYMBOL(ion_device_create);
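/*
 * Illustrative sketch, not part of the original driver: how platform code
 * would typically bring the device up once it has constructed a heap (for
 * example from ion platform data).  The function name is an assumption for
 * the example; passing NULL means no custom ioctl handler is installed.
 */
static int __maybe_unused ion_example_register(struct ion_heap *heap)
{
	struct ion_device *idev;

	idev = ion_device_create(NULL);
	if (IS_ERR(idev))
		return PTR_ERR(idev);

	ion_device_add_heap(idev, heap);
	return 0;
}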
1480 void ion_device_destroy(struct ion_device *dev)
1482 misc_deregister(&dev->dev);
1483 debugfs_remove_recursive(dev->debug_root);
1484 /* XXX need to free the heaps and clients ? */
1487 EXPORT_SYMBOL(ion_device_destroy);