1 /*
2  *
3  * drivers/staging/android/ion/ion.c
4  *
5  * Copyright (C) 2011 Google, Inc.
6  *
7  * This software is licensed under the terms of the GNU General Public
8  * License version 2, as published by the Free Software Foundation, and
9  * may be copied, distributed, and modified under those terms.
10  *
11  * This program is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14  * GNU General Public License for more details.
15  *
16  */
17
18 #include <linux/device.h>
19 #include <linux/err.h>
20 #include <linux/file.h>
21 #include <linux/freezer.h>
22 #include <linux/fs.h>
23 #include <linux/anon_inodes.h>
24 #include <linux/kthread.h>
25 #include <linux/list.h>
26 #include <linux/memblock.h>
27 #include <linux/miscdevice.h>
28 #include <linux/export.h>
29 #include <linux/mm.h>
30 #include <linux/mm_types.h>
31 #include <linux/rbtree.h>
32 #include <linux/slab.h>
33 #include <linux/seq_file.h>
34 #include <linux/uaccess.h>
35 #include <linux/vmalloc.h>
36 #include <linux/debugfs.h>
37 #include <linux/dma-buf.h>
38 #include <linux/idr.h>
39
40 #include "ion.h"
41 #include "ion_priv.h"
42 #include "compat_ion.h"
43
44 /**
45  * struct ion_device - the metadata of the ion device node
46  * @dev:                the actual misc device
47  * @buffers:            an rb tree of all the existing buffers
48  * @buffer_lock:        lock protecting the tree of buffers
49  * @lock:               rwsem protecting the tree of heaps and clients
50  * @heaps:              list of all the heaps in the system
51  * @clients:            an rb tree of all the existing clients
52  */
53 struct ion_device {
54         struct miscdevice dev;
55         struct rb_root buffers;
56         struct mutex buffer_lock;
57         struct rw_semaphore lock;
58         struct plist_head heaps;
59         long (*custom_ioctl)(struct ion_client *client, unsigned int cmd,
60                              unsigned long arg);
61         struct rb_root clients;
62         struct dentry *debug_root;
63         struct dentry *heaps_debug_root;
64         struct dentry *clients_debug_root;
65 };
66
67 /**
68  * struct ion_client - a process/hw block local address space
69  * @node:               node in the tree of all clients
70  * @dev:                backpointer to ion device
71  * @handles:            an rb tree of all the handles in this client
72  * @idr:                an idr space for allocating handle ids
73  * @lock:               lock protecting the tree of handles
74  * @name:               used for debugging
75  * @display_name:       used for debugging (unique version of @name)
76  * @display_serial:     used for debugging (to make display_name unique)
77  * @task:               used for debugging
78  *
79  * A client represents a list of buffers this client may access.
80  * The mutex stored here is used to protect both the tree of handles
81  * as well as the handles themselves, and should be held while modifying either.
82  */
83 struct ion_client {
84         struct rb_node node;
85         struct ion_device *dev;
86         struct rb_root handles;
87         struct idr idr;
88         struct mutex lock;
89         const char *name;
90         char *display_name;
91         int display_serial;
92         struct task_struct *task;
93         pid_t pid;
94         struct dentry *debug_root;
95 };
96
97 /**
98  * ion_handle - a client local reference to a buffer
99  * @ref:                reference count
100  * @client:             back pointer to the client the buffer resides in
101  * @buffer:             pointer to the buffer
102  * @node:               node in the client's handle rbtree
103  * @kmap_cnt:           count of times this client has mapped to kernel
104  * @id:                 client-unique id allocated by client->idr
105  *
106  * Modifications to node and kmap_cnt should be protected by the
107  * lock in the client.  Other fields are never changed after initialization.
108  */
109 struct ion_handle {
110         struct kref ref;
111         struct ion_client *client;
112         struct ion_buffer *buffer;
113         struct rb_node node;
114         unsigned int kmap_cnt;
115         int id;
116 };
117
118 bool ion_buffer_fault_user_mappings(struct ion_buffer *buffer)
119 {
120         return (buffer->flags & ION_FLAG_CACHED) &&
121                 !(buffer->flags & ION_FLAG_CACHED_NEEDS_SYNC);
122 }
123
124 bool ion_buffer_cached(struct ion_buffer *buffer)
125 {
126         return !!(buffer->flags & ION_FLAG_CACHED);
127 }
128
129 static inline struct page *ion_buffer_page(struct page *page)
130 {
131         return (struct page *)((unsigned long)page & ~(1UL));
132 }
133
134 static inline bool ion_buffer_page_is_dirty(struct page *page)
135 {
136         return !!((unsigned long)page & 1UL);
137 }
138
139 static inline void ion_buffer_page_dirty(struct page **page)
140 {
141         *page = (struct page *)((unsigned long)(*page) | 1UL);
142 }
143
144 static inline void ion_buffer_page_clean(struct page **page)
145 {
146         *page = (struct page *)((unsigned long)(*page) & ~(1UL));
147 }
148
149 /* this function should only be called while dev->lock is held */
150 static void ion_buffer_add(struct ion_device *dev,
151                            struct ion_buffer *buffer)
152 {
153         struct rb_node **p = &dev->buffers.rb_node;
154         struct rb_node *parent = NULL;
155         struct ion_buffer *entry;
156
157         while (*p) {
158                 parent = *p;
159                 entry = rb_entry(parent, struct ion_buffer, node);
160
161                 if (buffer < entry) {
162                         p = &(*p)->rb_left;
163                 } else if (buffer > entry) {
164                         p = &(*p)->rb_right;
165                 } else {
166                         pr_err("%s: buffer already found.\n", __func__);
167                         BUG();
168                 }
169         }
170
171         rb_link_node(&buffer->node, parent, p);
172         rb_insert_color(&buffer->node, &dev->buffers);
173 }
174
175 /* this function should only be called while dev->lock is held */
176 static struct ion_buffer *ion_buffer_create(struct ion_heap *heap,
177                                      struct ion_device *dev,
178                                      unsigned long len,
179                                      unsigned long align,
180                                      unsigned long flags)
181 {
182         struct ion_buffer *buffer;
183         struct sg_table *table;
184         struct scatterlist *sg;
185         int i, ret;
186
187         buffer = kzalloc(sizeof(struct ion_buffer), GFP_KERNEL);
188         if (!buffer)
189                 return ERR_PTR(-ENOMEM);
190
191         buffer->heap = heap;
192         buffer->flags = flags;
193         kref_init(&buffer->ref);
194
195         ret = heap->ops->allocate(heap, buffer, len, align, flags);
196
197         if (ret) {
198                 if (!(heap->flags & ION_HEAP_FLAG_DEFER_FREE))
199                         goto err2;
200
201                 ion_heap_freelist_drain(heap, 0);
202                 ret = heap->ops->allocate(heap, buffer, len, align,
203                                           flags);
204                 if (ret)
205                         goto err2;
206         }
207
208         buffer->dev = dev;
209         buffer->size = len;
210
211         table = heap->ops->map_dma(heap, buffer);
212         if (WARN_ONCE(table == NULL,
213                         "heap->ops->map_dma should return ERR_PTR on error"))
214                 table = ERR_PTR(-EINVAL);
215         if (IS_ERR(table)) {
216                 ret = -EINVAL;
217                 goto err1;
218         }
219
220         buffer->sg_table = table;
221         if (ion_buffer_fault_user_mappings(buffer)) {
222                 int num_pages = PAGE_ALIGN(buffer->size) / PAGE_SIZE;
223                 struct scatterlist *sg;
224                 int i, j, k = 0;
225
226                 buffer->pages = vmalloc(sizeof(struct page *) * num_pages);
227                 if (!buffer->pages) {
228                         ret = -ENOMEM;
229                         goto err;
230                 }
231
232                 for_each_sg(table->sgl, sg, table->nents, i) {
233                         struct page *page = sg_page(sg);
234
235                         for (j = 0; j < sg->length / PAGE_SIZE; j++)
236                                 buffer->pages[k++] = page++;
237                 }
238         }
239
240         buffer->dev = dev;
241         buffer->size = len;
242         INIT_LIST_HEAD(&buffer->vmas);
243         mutex_init(&buffer->lock);
244         /*
245          * This will set up dma addresses for the sglist -- it is not
246          * technically correct as per the dma api -- a specific
247          * device isn't really taking ownership here.  However, in practice on
248          * our systems the only dma_address space is physical addresses.
249          * Additionally, we can't afford the overhead of invalidating every
250          * allocation via dma_map_sg.  The implicit contract here is that
251          * memory coming from the heaps is ready for dma, i.e. if it has a
252          * cached mapping, that mapping has been invalidated.
253          */
254         for_each_sg(buffer->sg_table->sgl, sg, buffer->sg_table->nents, i)
255                 sg_dma_address(sg) = sg_phys(sg);
256         mutex_lock(&dev->buffer_lock);
257         ion_buffer_add(dev, buffer);
258         mutex_unlock(&dev->buffer_lock);
259         return buffer;
260
261 err:
262         heap->ops->unmap_dma(heap, buffer);
263 err1:
264         heap->ops->free(buffer);
265 err2:
266         kfree(buffer);
267         return ERR_PTR(ret);
268 }
269
270 void ion_buffer_destroy(struct ion_buffer *buffer)
271 {
272         if (WARN_ON(buffer->kmap_cnt > 0))
273                 buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
274         buffer->heap->ops->unmap_dma(buffer->heap, buffer);
275         buffer->heap->ops->free(buffer);
276         vfree(buffer->pages);
277         kfree(buffer);
278 }
279
280 static void _ion_buffer_destroy(struct kref *kref)
281 {
282         struct ion_buffer *buffer = container_of(kref, struct ion_buffer, ref);
283         struct ion_heap *heap = buffer->heap;
284         struct ion_device *dev = buffer->dev;
285
286         mutex_lock(&dev->buffer_lock);
287         rb_erase(&buffer->node, &dev->buffers);
288         mutex_unlock(&dev->buffer_lock);
289
290         if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
291                 ion_heap_freelist_add(heap, buffer);
292         else
293                 ion_buffer_destroy(buffer);
294 }
295
296 static void ion_buffer_get(struct ion_buffer *buffer)
297 {
298         kref_get(&buffer->ref);
299 }
300
301 static int ion_buffer_put(struct ion_buffer *buffer)
302 {
303         return kref_put(&buffer->ref, _ion_buffer_destroy);
304 }
305
306 static void ion_buffer_add_to_handle(struct ion_buffer *buffer)
307 {
308         mutex_lock(&buffer->lock);
309         buffer->handle_count++;
310         mutex_unlock(&buffer->lock);
311 }
312
313 static void ion_buffer_remove_from_handle(struct ion_buffer *buffer)
314 {
315         /*
316          * When a buffer is removed from a handle and is not in
317          * any other handles, copy the taskcomm and the pid of the
318          * process it's being removed from into the buffer.  At this
319          * point there will be no way to track what processes this buffer
320          * is being used by; it only exists as a dma_buf file descriptor.
321          * The taskcomm and pid can provide a debug hint as to where this
322          * fd is in the system.
323          */
324         mutex_lock(&buffer->lock);
325         buffer->handle_count--;
326         BUG_ON(buffer->handle_count < 0);
327         if (!buffer->handle_count) {
328                 struct task_struct *task;
329
330                 task = current->group_leader;
331                 get_task_comm(buffer->task_comm, task);
332                 buffer->pid = task_pid_nr(task);
333         }
334         mutex_unlock(&buffer->lock);
335 }
336
337 static struct ion_handle *ion_handle_create(struct ion_client *client,
338                                      struct ion_buffer *buffer)
339 {
340         struct ion_handle *handle;
341
342         handle = kzalloc(sizeof(struct ion_handle), GFP_KERNEL);
343         if (!handle)
344                 return ERR_PTR(-ENOMEM);
345         kref_init(&handle->ref);
346         RB_CLEAR_NODE(&handle->node);
347         handle->client = client;
348         ion_buffer_get(buffer);
349         ion_buffer_add_to_handle(buffer);
350         handle->buffer = buffer;
351
352         return handle;
353 }
354
355 static void ion_handle_kmap_put(struct ion_handle *);
356
357 static void ion_handle_destroy(struct kref *kref)
358 {
359         struct ion_handle *handle = container_of(kref, struct ion_handle, ref);
360         struct ion_client *client = handle->client;
361         struct ion_buffer *buffer = handle->buffer;
362
363         mutex_lock(&buffer->lock);
364         while (handle->kmap_cnt)
365                 ion_handle_kmap_put(handle);
366         mutex_unlock(&buffer->lock);
367
368         idr_remove(&client->idr, handle->id);
369         if (!RB_EMPTY_NODE(&handle->node))
370                 rb_erase(&handle->node, &client->handles);
371
372         ion_buffer_remove_from_handle(buffer);
373         ion_buffer_put(buffer);
374
375         kfree(handle);
376 }
377
378 struct ion_buffer *ion_handle_buffer(struct ion_handle *handle)
379 {
380         return handle->buffer;
381 }
382
383 static void ion_handle_get(struct ion_handle *handle)
384 {
385         kref_get(&handle->ref);
386 }
387
388 static int ion_handle_put(struct ion_handle *handle)
389 {
390         struct ion_client *client = handle->client;
391         int ret;
392
393         mutex_lock(&client->lock);
394         ret = kref_put(&handle->ref, ion_handle_destroy);
395         mutex_unlock(&client->lock);
396
397         return ret;
398 }
399
400 static struct ion_handle *ion_handle_lookup(struct ion_client *client,
401                                             struct ion_buffer *buffer)
402 {
403         struct rb_node *n = client->handles.rb_node;
404
405         while (n) {
406                 struct ion_handle *entry = rb_entry(n, struct ion_handle, node);
407
408                 if (buffer < entry->buffer)
409                         n = n->rb_left;
410                 else if (buffer > entry->buffer)
411                         n = n->rb_right;
412                 else
413                         return entry;
414         }
415         return ERR_PTR(-EINVAL);
416 }
417
418 static struct ion_handle *ion_handle_get_by_id(struct ion_client *client,
419                                                 int id)
420 {
421         struct ion_handle *handle;
422
423         mutex_lock(&client->lock);
424         handle = idr_find(&client->idr, id);
425         if (handle)
426                 ion_handle_get(handle);
427         mutex_unlock(&client->lock);
428
429         return handle ? handle : ERR_PTR(-EINVAL);
430 }
431
432 static bool ion_handle_validate(struct ion_client *client,
433                                 struct ion_handle *handle)
434 {
435         WARN_ON(!mutex_is_locked(&client->lock));
436         return idr_find(&client->idr, handle->id) == handle;
437 }
438
439 static int ion_handle_add(struct ion_client *client, struct ion_handle *handle)
440 {
441         int id;
442         struct rb_node **p = &client->handles.rb_node;
443         struct rb_node *parent = NULL;
444         struct ion_handle *entry;
445
446         id = idr_alloc(&client->idr, handle, 1, 0, GFP_KERNEL);
447         if (id < 0)
448                 return id;
449
450         handle->id = id;
451
452         while (*p) {
453                 parent = *p;
454                 entry = rb_entry(parent, struct ion_handle, node);
455
456                 if (handle->buffer < entry->buffer)
457                         p = &(*p)->rb_left;
458                 else if (handle->buffer > entry->buffer)
459                         p = &(*p)->rb_right;
460                 else
461                         WARN(1, "%s: buffer already found.\n", __func__);
462         }
463
464         rb_link_node(&handle->node, parent, p);
465         rb_insert_color(&handle->node, &client->handles);
466
467         return 0;
468 }
469
470 struct ion_handle *ion_alloc(struct ion_client *client, size_t len,
471                              size_t align, unsigned int heap_id_mask,
472                              unsigned int flags)
473 {
474         struct ion_handle *handle;
475         struct ion_device *dev = client->dev;
476         struct ion_buffer *buffer = NULL;
477         struct ion_heap *heap;
478         int ret;
479
480         pr_debug("%s: len %zu align %zu heap_id_mask %u flags %x\n", __func__,
481                  len, align, heap_id_mask, flags);
482         /*
483          * Traverse the list of heaps available in this system in priority
484          * order.  If the heap type is supported by the client and matches
485          * the request of the caller, allocate from it.  Repeat until the
486          * allocation has succeeded or all heaps have been tried.
487          */
488         len = PAGE_ALIGN(len);
489
490         if (!len)
491                 return ERR_PTR(-EINVAL);
492
493         down_read(&dev->lock);
494         plist_for_each_entry(heap, &dev->heaps, node) {
495                 /* if the caller didn't specify this heap id */
496                 if (!((1 << heap->id) & heap_id_mask))
497                         continue;
498                 buffer = ion_buffer_create(heap, dev, len, align, flags);
499                 if (!IS_ERR(buffer))
500                         break;
501         }
502         up_read(&dev->lock);
503
504         if (buffer == NULL)
505                 return ERR_PTR(-ENODEV);
506
507         if (IS_ERR(buffer))
508                 return ERR_CAST(buffer);
509
510         handle = ion_handle_create(client, buffer);
511
512         /*
513          * ion_buffer_create will create a buffer with a ref_cnt of 1,
514          * and ion_handle_create will take a second reference, drop one here
515          */
516         ion_buffer_put(buffer);
517
518         if (IS_ERR(handle))
519                 return handle;
520
521         mutex_lock(&client->lock);
522         ret = ion_handle_add(client, handle);
523         mutex_unlock(&client->lock);
524         if (ret) {
525                 ion_handle_put(handle);
526                 handle = ERR_PTR(ret);
527         }
528
529         return handle;
530 }
531 EXPORT_SYMBOL(ion_alloc);
532
533 void ion_free(struct ion_client *client, struct ion_handle *handle)
534 {
535         bool valid_handle;
536
537         BUG_ON(client != handle->client);
538
539         mutex_lock(&client->lock);
540         valid_handle = ion_handle_validate(client, handle);
541
542         if (!valid_handle) {
543                 WARN(1, "%s: invalid handle passed to free.\n", __func__);
544                 mutex_unlock(&client->lock);
545                 return;
546         }
547         mutex_unlock(&client->lock);
548         ion_handle_put(handle);
549 }
550 EXPORT_SYMBOL(ion_free);
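/*
 * Illustrative sketch (not part of this file): a minimal in-kernel
 * allocation round trip.  "idev" is assumed to be an ion_device created
 * elsewhere and "heap_id_mask" a mask with the bit of a registered heap
 * set; error handling is abbreviated.
 *
 *	struct ion_client *client;
 *	struct ion_handle *handle;
 *
 *	client = ion_client_create(idev, "example-client");
 *	if (IS_ERR(client))
 *		return PTR_ERR(client);
 *
 *	handle = ion_alloc(client, SZ_64K, PAGE_SIZE, heap_id_mask,
 *			   ION_FLAG_CACHED);
 *	if (IS_ERR(handle)) {
 *		ion_client_destroy(client);
 *		return PTR_ERR(handle);
 *	}
 *
 *	ion_free(client, handle);
 *	ion_client_destroy(client);
 */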
551
552 int ion_phys(struct ion_client *client, struct ion_handle *handle,
553              ion_phys_addr_t *addr, size_t *len)
554 {
555         struct ion_buffer *buffer;
556         int ret;
557
558         mutex_lock(&client->lock);
559         if (!ion_handle_validate(client, handle)) {
560                 mutex_unlock(&client->lock);
561                 return -EINVAL;
562         }
563
564         buffer = handle->buffer;
565
566         if (!buffer->heap->ops->phys) {
567                 pr_err("%s: ion_phys is not implemented by this heap (name=%s, type=%d).\n",
568                         __func__, buffer->heap->name, buffer->heap->type);
569                 mutex_unlock(&client->lock);
570                 return -ENODEV;
571         }
572         mutex_unlock(&client->lock);
573         ret = buffer->heap->ops->phys(buffer->heap, buffer, addr, len);
574         return ret;
575 }
576 EXPORT_SYMBOL(ion_phys);
577
578 static void *ion_buffer_kmap_get(struct ion_buffer *buffer)
579 {
580         void *vaddr;
581
582         if (buffer->kmap_cnt) {
583                 buffer->kmap_cnt++;
584                 return buffer->vaddr;
585         }
586         vaddr = buffer->heap->ops->map_kernel(buffer->heap, buffer);
587         if (WARN_ONCE(vaddr == NULL,
588                         "heap->ops->map_kernel should return ERR_PTR on error"))
589                 return ERR_PTR(-EINVAL);
590         if (IS_ERR(vaddr))
591                 return vaddr;
592         buffer->vaddr = vaddr;
593         buffer->kmap_cnt++;
594         return vaddr;
595 }
596
597 static void *ion_handle_kmap_get(struct ion_handle *handle)
598 {
599         struct ion_buffer *buffer = handle->buffer;
600         void *vaddr;
601
602         if (handle->kmap_cnt) {
603                 handle->kmap_cnt++;
604                 return buffer->vaddr;
605         }
606         vaddr = ion_buffer_kmap_get(buffer);
607         if (IS_ERR(vaddr))
608                 return vaddr;
609         handle->kmap_cnt++;
610         return vaddr;
611 }
612
613 static void ion_buffer_kmap_put(struct ion_buffer *buffer)
614 {
615         buffer->kmap_cnt--;
616         if (!buffer->kmap_cnt) {
617                 buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
618                 buffer->vaddr = NULL;
619         }
620 }
621
622 static void ion_handle_kmap_put(struct ion_handle *handle)
623 {
624         struct ion_buffer *buffer = handle->buffer;
625
626         if (!handle->kmap_cnt) {
627                 WARN(1, "%s: Double unmap detected! bailing...\n", __func__);
628                 return;
629         }
630         handle->kmap_cnt--;
631         if (!handle->kmap_cnt)
632                 ion_buffer_kmap_put(buffer);
633 }
634
635 void *ion_map_kernel(struct ion_client *client, struct ion_handle *handle)
636 {
637         struct ion_buffer *buffer;
638         void *vaddr;
639
640         mutex_lock(&client->lock);
641         if (!ion_handle_validate(client, handle)) {
642                 pr_err("%s: invalid handle passed to map_kernel.\n",
643                        __func__);
644                 mutex_unlock(&client->lock);
645                 return ERR_PTR(-EINVAL);
646         }
647
648         buffer = handle->buffer;
649
650         if (!handle->buffer->heap->ops->map_kernel) {
651                 pr_err("%s: map_kernel is not implemented by this heap.\n",
652                        __func__);
653                 mutex_unlock(&client->lock);
654                 return ERR_PTR(-ENODEV);
655         }
656
657         mutex_lock(&buffer->lock);
658         vaddr = ion_handle_kmap_get(handle);
659         mutex_unlock(&buffer->lock);
660         mutex_unlock(&client->lock);
661         return vaddr;
662 }
663 EXPORT_SYMBOL(ion_map_kernel);
664
665 void ion_unmap_kernel(struct ion_client *client, struct ion_handle *handle)
666 {
667         struct ion_buffer *buffer;
668
669         mutex_lock(&client->lock);
670         buffer = handle->buffer;
671         mutex_lock(&buffer->lock);
672         ion_handle_kmap_put(handle);
673         mutex_unlock(&buffer->lock);
674         mutex_unlock(&client->lock);
675 }
676 EXPORT_SYMBOL(ion_unmap_kernel);
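/*
 * Illustrative sketch (not part of this file): using the kernel mapping
 * helpers above on a handle obtained from ion_alloc(); "client", "handle"
 * and the buffer length "len" are assumed to exist already.
 *
 *	void *vaddr = ion_map_kernel(client, handle);
 *
 *	if (IS_ERR(vaddr))
 *		return PTR_ERR(vaddr);
 *	memset(vaddr, 0, len);
 *	ion_unmap_kernel(client, handle);
 */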
677
678 static int ion_debug_client_show(struct seq_file *s, void *unused)
679 {
680         struct ion_client *client = s->private;
681         struct rb_node *n;
682         size_t sizes[ION_NUM_HEAP_IDS] = {0};
683         const char *names[ION_NUM_HEAP_IDS] = {NULL};
684         int i;
685
686         mutex_lock(&client->lock);
687         for (n = rb_first(&client->handles); n; n = rb_next(n)) {
688                 struct ion_handle *handle = rb_entry(n, struct ion_handle,
689                                                      node);
690                 unsigned int id = handle->buffer->heap->id;
691
692                 if (!names[id])
693                         names[id] = handle->buffer->heap->name;
694                 sizes[id] += handle->buffer->size;
695         }
696         mutex_unlock(&client->lock);
697
698         seq_printf(s, "%16.16s: %16.16s\n", "heap_name", "size_in_bytes");
699         for (i = 0; i < ION_NUM_HEAP_IDS; i++) {
700                 if (!names[i])
701                         continue;
702                 seq_printf(s, "%16.16s: %16zu\n", names[i], sizes[i]);
703         }
704         return 0;
705 }
706
707 static int ion_debug_client_open(struct inode *inode, struct file *file)
708 {
709         return single_open(file, ion_debug_client_show, inode->i_private);
710 }
711
712 static const struct file_operations debug_client_fops = {
713         .open = ion_debug_client_open,
714         .read = seq_read,
715         .llseek = seq_lseek,
716         .release = single_release,
717 };
718
719 static int ion_get_client_serial(const struct rb_root *root,
720                                         const unsigned char *name)
721 {
722         int serial = -1;
723         struct rb_node *node;
724
725         for (node = rb_first(root); node; node = rb_next(node)) {
726                 struct ion_client *client = rb_entry(node, struct ion_client,
727                                                 node);
728
729                 if (strcmp(client->name, name))
730                         continue;
731                 serial = max(serial, client->display_serial);
732         }
733         return serial + 1;
734 }
735
736 struct ion_client *ion_client_create(struct ion_device *dev,
737                                      const char *name)
738 {
739         struct ion_client *client;
740         struct task_struct *task;
741         struct rb_node **p;
742         struct rb_node *parent = NULL;
743         struct ion_client *entry;
744         pid_t pid;
745
746         if (!name) {
747                 pr_err("%s: Name cannot be null\n", __func__);
748                 return ERR_PTR(-EINVAL);
749         }
750
751         get_task_struct(current->group_leader);
752         task_lock(current->group_leader);
753         pid = task_pid_nr(current->group_leader);
754         /*
755          * Don't bother to store the task struct for kernel threads;
756          * they can't be killed anyway.
757          */
758         if (current->group_leader->flags & PF_KTHREAD) {
759                 put_task_struct(current->group_leader);
760                 task = NULL;
761         } else {
762                 task = current->group_leader;
763         }
764         task_unlock(current->group_leader);
765
766         client = kzalloc(sizeof(struct ion_client), GFP_KERNEL);
767         if (!client)
768                 goto err_put_task_struct;
769
770         client->dev = dev;
771         client->handles = RB_ROOT;
772         idr_init(&client->idr);
773         mutex_init(&client->lock);
774         client->task = task;
775         client->pid = pid;
776         client->name = kstrdup(name, GFP_KERNEL);
777         if (!client->name)
778                 goto err_free_client;
779
780         down_write(&dev->lock);
781         client->display_serial = ion_get_client_serial(&dev->clients, name);
782         client->display_name = kasprintf(
783                 GFP_KERNEL, "%s-%d", name, client->display_serial);
784         if (!client->display_name) {
785                 up_write(&dev->lock);
786                 goto err_free_client_name;
787         }
788         p = &dev->clients.rb_node;
789         while (*p) {
790                 parent = *p;
791                 entry = rb_entry(parent, struct ion_client, node);
792
793                 if (client < entry)
794                         p = &(*p)->rb_left;
795                 else if (client > entry)
796                         p = &(*p)->rb_right;
797         }
798         rb_link_node(&client->node, parent, p);
799         rb_insert_color(&client->node, &dev->clients);
800
801         client->debug_root = debugfs_create_file(client->display_name, 0664,
802                                                 dev->clients_debug_root,
803                                                 client, &debug_client_fops);
804         if (!client->debug_root) {
805                 char buf[256], *path;
806
807                 path = dentry_path(dev->clients_debug_root, buf, 256);
808                 pr_err("Failed to create client debugfs at %s/%s\n",
809                         path, client->display_name);
810         }
811
812         up_write(&dev->lock);
813
814         return client;
815
816 err_free_client_name:
817         kfree(client->name);
818 err_free_client:
819         kfree(client);
820 err_put_task_struct:
821         if (task)
822                 put_task_struct(current->group_leader);
823         return ERR_PTR(-ENOMEM);
824 }
825 EXPORT_SYMBOL(ion_client_create);
826
827 void ion_client_destroy(struct ion_client *client)
828 {
829         struct ion_device *dev = client->dev;
830         struct rb_node *n;
831
832         pr_debug("%s: %d\n", __func__, __LINE__);
833         while ((n = rb_first(&client->handles))) {
834                 struct ion_handle *handle = rb_entry(n, struct ion_handle,
835                                                      node);
836                 ion_handle_destroy(&handle->ref);
837         }
838
839         idr_destroy(&client->idr);
840
841         down_write(&dev->lock);
842         if (client->task)
843                 put_task_struct(client->task);
844         rb_erase(&client->node, &dev->clients);
845         debugfs_remove_recursive(client->debug_root);
846         up_write(&dev->lock);
847
848         kfree(client->display_name);
849         kfree(client->name);
850         kfree(client);
851 }
852 EXPORT_SYMBOL(ion_client_destroy);
853
854 struct sg_table *ion_sg_table(struct ion_client *client,
855                               struct ion_handle *handle)
856 {
857         struct ion_buffer *buffer;
858         struct sg_table *table;
859
860         mutex_lock(&client->lock);
861         if (!ion_handle_validate(client, handle)) {
862                 pr_err("%s: invalid handle passed to map_dma.\n",
863                        __func__);
864                 mutex_unlock(&client->lock);
865                 return ERR_PTR(-EINVAL);
866         }
867         buffer = handle->buffer;
868         table = buffer->sg_table;
869         mutex_unlock(&client->lock);
870         return table;
871 }
872 EXPORT_SYMBOL(ion_sg_table);
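/*
 * Note that the sg_table returned by ion_sg_table() is owned by the
 * underlying buffer: it was created by the heap's map_dma() in
 * ion_buffer_create() and is only released in ion_buffer_destroy(), so
 * callers must not free it and should keep the handle (and therefore the
 * buffer) referenced for as long as they use the table.
 */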
873
874 static void ion_buffer_sync_for_device(struct ion_buffer *buffer,
875                                        struct device *dev,
876                                        enum dma_data_direction direction);
877
878 static struct sg_table *ion_map_dma_buf(struct dma_buf_attachment *attachment,
879                                         enum dma_data_direction direction)
880 {
881         struct dma_buf *dmabuf = attachment->dmabuf;
882         struct ion_buffer *buffer = dmabuf->priv;
883
884         ion_buffer_sync_for_device(buffer, attachment->dev, direction);
885         return buffer->sg_table;
886 }
887
888 static void ion_unmap_dma_buf(struct dma_buf_attachment *attachment,
889                               struct sg_table *table,
890                               enum dma_data_direction direction)
891 {
892 }
893
894 void ion_pages_sync_for_device(struct device *dev, struct page *page,
895                 size_t size, enum dma_data_direction dir)
896 {
897         struct scatterlist sg;
898
899         sg_init_table(&sg, 1);
900         sg_set_page(&sg, page, size, 0);
901         /*
902          * This is not correct - sg_dma_address needs a dma_addr_t that is valid
903          * for the targeted device, but this works on the currently targeted
904          * hardware.
905          */
906         sg_dma_address(&sg) = page_to_phys(page);
907         dma_sync_sg_for_device(dev, &sg, 1, dir);
908 }
909
910 struct ion_vma_list {
911         struct list_head list;
912         struct vm_area_struct *vma;
913 };
914
915 static void ion_buffer_sync_for_device(struct ion_buffer *buffer,
916                                        struct device *dev,
917                                        enum dma_data_direction dir)
918 {
919         struct ion_vma_list *vma_list;
920         int pages = PAGE_ALIGN(buffer->size) / PAGE_SIZE;
921         int i;
922
923         pr_debug("%s: syncing for device %s\n", __func__,
924                  dev ? dev_name(dev) : "null");
925
926         if (!ion_buffer_fault_user_mappings(buffer))
927                 return;
928
929         mutex_lock(&buffer->lock);
930         for (i = 0; i < pages; i++) {
931                 struct page *page = buffer->pages[i];
932
933                 if (ion_buffer_page_is_dirty(page))
934                         ion_pages_sync_for_device(dev, ion_buffer_page(page),
935                                                         PAGE_SIZE, dir);
936
937                 ion_buffer_page_clean(buffer->pages + i);
938         }
939         list_for_each_entry(vma_list, &buffer->vmas, list) {
940                 struct vm_area_struct *vma = vma_list->vma;
941
942                 zap_page_range(vma, vma->vm_start, vma->vm_end - vma->vm_start,
943                                NULL);
944         }
945         mutex_unlock(&buffer->lock);
946 }
947
948 static int ion_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
949 {
950         struct ion_buffer *buffer = vma->vm_private_data;
951         unsigned long pfn;
952         int ret;
953
954         mutex_lock(&buffer->lock);
955         ion_buffer_page_dirty(buffer->pages + vmf->pgoff);
956         BUG_ON(!buffer->pages || !buffer->pages[vmf->pgoff]);
957
958         pfn = page_to_pfn(ion_buffer_page(buffer->pages[vmf->pgoff]));
959         ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn);
960         mutex_unlock(&buffer->lock);
961         if (ret)
962                 return VM_FAULT_ERROR;
963
964         return VM_FAULT_NOPAGE;
965 }
966
967 static void ion_vm_open(struct vm_area_struct *vma)
968 {
969         struct ion_buffer *buffer = vma->vm_private_data;
970         struct ion_vma_list *vma_list;
971
972         vma_list = kmalloc(sizeof(struct ion_vma_list), GFP_KERNEL);
973         if (!vma_list)
974                 return;
975         vma_list->vma = vma;
976         mutex_lock(&buffer->lock);
977         list_add(&vma_list->list, &buffer->vmas);
978         mutex_unlock(&buffer->lock);
979         pr_debug("%s: adding %p\n", __func__, vma);
980 }
981
982 static void ion_vm_close(struct vm_area_struct *vma)
983 {
984         struct ion_buffer *buffer = vma->vm_private_data;
985         struct ion_vma_list *vma_list, *tmp;
986
987         pr_debug("%s\n", __func__);
988         mutex_lock(&buffer->lock);
989         list_for_each_entry_safe(vma_list, tmp, &buffer->vmas, list) {
990                 if (vma_list->vma != vma)
991                         continue;
992                 list_del(&vma_list->list);
993                 kfree(vma_list);
994                 pr_debug("%s: deleting %p\n", __func__, vma);
995                 break;
996         }
997         mutex_unlock(&buffer->lock);
998 }
999
1000 static const struct vm_operations_struct ion_vma_ops = {
1001         .open = ion_vm_open,
1002         .close = ion_vm_close,
1003         .fault = ion_vm_fault,
1004 };
1005
1006 static int ion_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
1007 {
1008         struct ion_buffer *buffer = dmabuf->priv;
1009         int ret = 0;
1010
1011         if (!buffer->heap->ops->map_user) {
1012                 pr_err("%s: this heap does not define a method for mapping to userspace\n",
1013                         __func__);
1014                 return -EINVAL;
1015         }
1016
1017         if (ion_buffer_fault_user_mappings(buffer)) {
1018                 vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND |
1019                                                         VM_DONTDUMP;
1020                 vma->vm_private_data = buffer;
1021                 vma->vm_ops = &ion_vma_ops;
1022                 ion_vm_open(vma);
1023                 return 0;
1024         }
1025
1026         if (!(buffer->flags & ION_FLAG_CACHED))
1027                 vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
1028
1029         mutex_lock(&buffer->lock);
1030         /* now map it to userspace */
1031         ret = buffer->heap->ops->map_user(buffer->heap, buffer, vma);
1032         mutex_unlock(&buffer->lock);
1033
1034         if (ret)
1035                 pr_err("%s: failure mapping buffer to userspace\n",
1036                        __func__);
1037
1038         return ret;
1039 }
1040
1041 static void ion_dma_buf_release(struct dma_buf *dmabuf)
1042 {
1043         struct ion_buffer *buffer = dmabuf->priv;
1044
1045         ion_buffer_put(buffer);
1046 }
1047
1048 static void *ion_dma_buf_kmap(struct dma_buf *dmabuf, unsigned long offset)
1049 {
1050         struct ion_buffer *buffer = dmabuf->priv;
1051
1052         return buffer->vaddr + offset * PAGE_SIZE;
1053 }
1054
1055 static void ion_dma_buf_kunmap(struct dma_buf *dmabuf, unsigned long offset,
1056                                void *ptr)
1057 {
1058 }
1059
1060 static int ion_dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
1061                                         enum dma_data_direction direction)
1062 {
1063         struct ion_buffer *buffer = dmabuf->priv;
1064         void *vaddr;
1065
1066         if (!buffer->heap->ops->map_kernel) {
1067                 pr_err("%s: map kernel is not implemented by this heap.\n",
1068                        __func__);
1069                 return -ENODEV;
1070         }
1071
1072         mutex_lock(&buffer->lock);
1073         vaddr = ion_buffer_kmap_get(buffer);
1074         mutex_unlock(&buffer->lock);
1075         return PTR_ERR_OR_ZERO(vaddr);
1076 }
1077
1078 static void ion_dma_buf_end_cpu_access(struct dma_buf *dmabuf,
1079                                        enum dma_data_direction direction)
1080 {
1081         struct ion_buffer *buffer = dmabuf->priv;
1082
1083         mutex_lock(&buffer->lock);
1084         ion_buffer_kmap_put(buffer);
1085         mutex_unlock(&buffer->lock);
1086 }
1087
1088 static struct dma_buf_ops dma_buf_ops = {
1089         .map_dma_buf = ion_map_dma_buf,
1090         .unmap_dma_buf = ion_unmap_dma_buf,
1091         .mmap = ion_mmap,
1092         .release = ion_dma_buf_release,
1093         .begin_cpu_access = ion_dma_buf_begin_cpu_access,
1094         .end_cpu_access = ion_dma_buf_end_cpu_access,
1095         .kmap_atomic = ion_dma_buf_kmap,
1096         .kunmap_atomic = ion_dma_buf_kunmap,
1097         .kmap = ion_dma_buf_kmap,
1098         .kunmap = ion_dma_buf_kunmap,
1099 };
1100
1101 struct dma_buf *ion_share_dma_buf(struct ion_client *client,
1102                                                 struct ion_handle *handle)
1103 {
1104         DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
1105         struct ion_buffer *buffer;
1106         struct dma_buf *dmabuf;
1107         bool valid_handle;
1108
1109         mutex_lock(&client->lock);
1110         valid_handle = ion_handle_validate(client, handle);
1111         if (!valid_handle) {
1112                 WARN(1, "%s: invalid handle passed to share.\n", __func__);
1113                 mutex_unlock(&client->lock);
1114                 return ERR_PTR(-EINVAL);
1115         }
1116         buffer = handle->buffer;
1117         ion_buffer_get(buffer);
1118         mutex_unlock(&client->lock);
1119
1120         exp_info.ops = &dma_buf_ops;
1121         exp_info.size = buffer->size;
1122         exp_info.flags = O_RDWR;
1123         exp_info.priv = buffer;
1124
1125         dmabuf = dma_buf_export(&exp_info);
1126         if (IS_ERR(dmabuf)) {
1127                 ion_buffer_put(buffer);
1128                 return dmabuf;
1129         }
1130
1131         return dmabuf;
1132 }
1133 EXPORT_SYMBOL(ion_share_dma_buf);
1134
1135 int ion_share_dma_buf_fd(struct ion_client *client, struct ion_handle *handle)
1136 {
1137         struct dma_buf *dmabuf;
1138         int fd;
1139
1140         dmabuf = ion_share_dma_buf(client, handle);
1141         if (IS_ERR(dmabuf))
1142                 return PTR_ERR(dmabuf);
1143
1144         fd = dma_buf_fd(dmabuf, O_CLOEXEC);
1145         if (fd < 0)
1146                 dma_buf_put(dmabuf);
1147
1148         return fd;
1149 }
1150 EXPORT_SYMBOL(ion_share_dma_buf_fd);
1151
1152 struct ion_handle *ion_import_dma_buf(struct ion_client *client, int fd)
1153 {
1154         struct dma_buf *dmabuf;
1155         struct ion_buffer *buffer;
1156         struct ion_handle *handle;
1157         int ret;
1158
1159         dmabuf = dma_buf_get(fd);
1160         if (IS_ERR(dmabuf))
1161                 return ERR_CAST(dmabuf);
1162         /* if this memory came from ion */
1163
1164         if (dmabuf->ops != &dma_buf_ops) {
1165                 pr_err("%s: can not import dmabuf from another exporter\n",
1166                        __func__);
1167                 dma_buf_put(dmabuf);
1168                 return ERR_PTR(-EINVAL);
1169         }
1170         buffer = dmabuf->priv;
1171
1172         mutex_lock(&client->lock);
1173         /* if a handle exists for this buffer just take a reference to it */
1174         handle = ion_handle_lookup(client, buffer);
1175         if (!IS_ERR(handle)) {
1176                 ion_handle_get(handle);
1177                 mutex_unlock(&client->lock);
1178                 goto end;
1179         }
1180
1181         handle = ion_handle_create(client, buffer);
1182         if (IS_ERR(handle)) {
1183                 mutex_unlock(&client->lock);
1184                 goto end;
1185         }
1186
1187         ret = ion_handle_add(client, handle);
1188         mutex_unlock(&client->lock);
1189         if (ret) {
1190                 ion_handle_put(handle);
1191                 handle = ERR_PTR(ret);
1192         }
1193
1194 end:
1195         dma_buf_put(dmabuf);
1196         return handle;
1197 }
1198 EXPORT_SYMBOL(ion_import_dma_buf);
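/*
 * Illustrative sketch (not part of this file): exporting a buffer as a
 * dma-buf fd with ion_share_dma_buf_fd() and importing that fd into a
 * second client; "client_a", "client_b" and "handle" are assumed to
 * exist already.
 *
 *	struct ion_handle *imported;
 *	int fd;
 *
 *	fd = ion_share_dma_buf_fd(client_a, handle);
 *	if (fd < 0)
 *		return fd;
 *	imported = ion_import_dma_buf(client_b, fd);
 *	if (IS_ERR(imported))
 *		return PTR_ERR(imported);
 */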
1199
1200 static int ion_sync_for_device(struct ion_client *client, int fd)
1201 {
1202         struct dma_buf *dmabuf;
1203         struct ion_buffer *buffer;
1204
1205         dmabuf = dma_buf_get(fd);
1206         if (IS_ERR(dmabuf))
1207                 return PTR_ERR(dmabuf);
1208
1209         /* if this memory came from ion */
1210         if (dmabuf->ops != &dma_buf_ops) {
1211                 pr_err("%s: can not sync dmabuf from another exporter\n",
1212                        __func__);
1213                 dma_buf_put(dmabuf);
1214                 return -EINVAL;
1215         }
1216         buffer = dmabuf->priv;
1217
1218         dma_sync_sg_for_device(NULL, buffer->sg_table->sgl,
1219                                buffer->sg_table->nents, DMA_BIDIRECTIONAL);
1220         dma_buf_put(dmabuf);
1221         return 0;
1222 }
1223
1224 /* fix up the cases where the ioctl direction bits are incorrect */
1225 static unsigned int ion_ioctl_dir(unsigned int cmd)
1226 {
1227         switch (cmd) {
1228         case ION_IOC_SYNC:
1229         case ION_IOC_FREE:
1230         case ION_IOC_CUSTOM:
1231                 return _IOC_WRITE;
1232         default:
1233                 return _IOC_DIR(cmd);
1234         }
1235 }
1236
1237 static long ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
1238 {
1239         struct ion_client *client = filp->private_data;
1240         struct ion_device *dev = client->dev;
1241         struct ion_handle *cleanup_handle = NULL;
1242         int ret = 0;
1243         unsigned int dir;
1244
1245         union {
1246                 struct ion_fd_data fd;
1247                 struct ion_allocation_data allocation;
1248                 struct ion_handle_data handle;
1249                 struct ion_custom_data custom;
1250         } data;
1251
1252         dir = ion_ioctl_dir(cmd);
1253
1254         if (_IOC_SIZE(cmd) > sizeof(data))
1255                 return -EINVAL;
1256
1257         if (dir & _IOC_WRITE)
1258                 if (copy_from_user(&data, (void __user *)arg, _IOC_SIZE(cmd)))
1259                         return -EFAULT;
1260
1261         switch (cmd) {
1262         case ION_IOC_ALLOC:
1263         {
1264                 struct ion_handle *handle;
1265
1266                 handle = ion_alloc(client, data.allocation.len,
1267                                                 data.allocation.align,
1268                                                 data.allocation.heap_id_mask,
1269                                                 data.allocation.flags);
1270                 if (IS_ERR(handle))
1271                         return PTR_ERR(handle);
1272
1273                 data.allocation.handle = handle->id;
1274
1275                 cleanup_handle = handle;
1276                 break;
1277         }
1278         case ION_IOC_FREE:
1279         {
1280                 struct ion_handle *handle;
1281
1282                 handle = ion_handle_get_by_id(client, data.handle.handle);
1283                 if (IS_ERR(handle))
1284                         return PTR_ERR(handle);
1285                 ion_free(client, handle);
1286                 ion_handle_put(handle);
1287                 break;
1288         }
1289         case ION_IOC_SHARE:
1290         case ION_IOC_MAP:
1291         {
1292                 struct ion_handle *handle;
1293
1294                 handle = ion_handle_get_by_id(client, data.handle.handle);
1295                 if (IS_ERR(handle))
1296                         return PTR_ERR(handle);
1297                 data.fd.fd = ion_share_dma_buf_fd(client, handle);
1298                 ion_handle_put(handle);
1299                 if (data.fd.fd < 0)
1300                         ret = data.fd.fd;
1301                 break;
1302         }
1303         case ION_IOC_IMPORT:
1304         {
1305                 struct ion_handle *handle;
1306
1307                 handle = ion_import_dma_buf(client, data.fd.fd);
1308                 if (IS_ERR(handle))
1309                         ret = PTR_ERR(handle);
1310                 else
1311                         data.handle.handle = handle->id;
1312                 break;
1313         }
1314         case ION_IOC_SYNC:
1315         {
1316                 ret = ion_sync_for_device(client, data.fd.fd);
1317                 break;
1318         }
1319         case ION_IOC_CUSTOM:
1320         {
1321                 if (!dev->custom_ioctl)
1322                         return -ENOTTY;
1323                 ret = dev->custom_ioctl(client, data.custom.cmd,
1324                                                 data.custom.arg);
1325                 break;
1326         }
1327         default:
1328                 return -ENOTTY;
1329         }
1330
1331         if (dir & _IOC_READ) {
1332                 if (copy_to_user((void __user *)arg, &data, _IOC_SIZE(cmd))) {
1333                         if (cleanup_handle)
1334                                 ion_free(client, cleanup_handle);
1335                         return -EFAULT;
1336                 }
1337         }
1338         return ret;
1339 }
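/*
 * Illustrative sketch (not part of this file): the userspace side of the
 * ioctl interface above, assuming /dev/ion has been opened as "ionfd"
 * and a suitable "heap_id_mask" is known; error handling is abbreviated.
 *
 *	struct ion_allocation_data alloc = {
 *		.len = 4096,
 *		.align = 4096,
 *		.heap_id_mask = heap_id_mask,
 *		.flags = ION_FLAG_CACHED,
 *	};
 *	struct ion_fd_data share;
 *	struct ion_handle_data free_arg;
 *
 *	ioctl(ionfd, ION_IOC_ALLOC, &alloc);
 *	share.handle = alloc.handle;
 *	ioctl(ionfd, ION_IOC_SHARE, &share);
 *	ptr = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED,
 *		   share.fd, 0);
 *	free_arg.handle = alloc.handle;
 *	ioctl(ionfd, ION_IOC_FREE, &free_arg);
 */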
1340
1341 static int ion_release(struct inode *inode, struct file *file)
1342 {
1343         struct ion_client *client = file->private_data;
1344
1345         pr_debug("%s: %d\n", __func__, __LINE__);
1346         ion_client_destroy(client);
1347         return 0;
1348 }
1349
1350 static int ion_open(struct inode *inode, struct file *file)
1351 {
1352         struct miscdevice *miscdev = file->private_data;
1353         struct ion_device *dev = container_of(miscdev, struct ion_device, dev);
1354         struct ion_client *client;
1355         char debug_name[64];
1356
1357         pr_debug("%s: %d\n", __func__, __LINE__);
1358         snprintf(debug_name, 64, "%u", task_pid_nr(current->group_leader));
1359         client = ion_client_create(dev, debug_name);
1360         if (IS_ERR(client))
1361                 return PTR_ERR(client);
1362         file->private_data = client;
1363
1364         return 0;
1365 }
1366
1367 static const struct file_operations ion_fops = {
1368         .owner          = THIS_MODULE,
1369         .open           = ion_open,
1370         .release        = ion_release,
1371         .unlocked_ioctl = ion_ioctl,
1372         .compat_ioctl   = compat_ion_ioctl,
1373 };
1374
1375 static size_t ion_debug_heap_total(struct ion_client *client,
1376                                    unsigned int id)
1377 {
1378         size_t size = 0;
1379         struct rb_node *n;
1380
1381         mutex_lock(&client->lock);
1382         for (n = rb_first(&client->handles); n; n = rb_next(n)) {
1383                 struct ion_handle *handle = rb_entry(n,
1384                                                      struct ion_handle,
1385                                                      node);
1386                 if (handle->buffer->heap->id == id)
1387                         size += handle->buffer->size;
1388         }
1389         mutex_unlock(&client->lock);
1390         return size;
1391 }
1392
1393 static int ion_debug_heap_show(struct seq_file *s, void *unused)
1394 {
1395         struct ion_heap *heap = s->private;
1396         struct ion_device *dev = heap->dev;
1397         struct rb_node *n;
1398         size_t total_size = 0;
1399         size_t total_orphaned_size = 0;
1400
1401         seq_printf(s, "%16s %16s %16s\n", "client", "pid", "size");
1402         seq_puts(s, "----------------------------------------------------\n");
1403
1404         for (n = rb_first(&dev->clients); n; n = rb_next(n)) {
1405                 struct ion_client *client = rb_entry(n, struct ion_client,
1406                                                      node);
1407                 size_t size = ion_debug_heap_total(client, heap->id);
1408
1409                 if (!size)
1410                         continue;
1411                 if (client->task) {
1412                         char task_comm[TASK_COMM_LEN];
1413
1414                         get_task_comm(task_comm, client->task);
1415                         seq_printf(s, "%16s %16u %16zu\n", task_comm,
1416                                    client->pid, size);
1417                 } else {
1418                         seq_printf(s, "%16s %16u %16zu\n", client->name,
1419                                    client->pid, size);
1420                 }
1421         }
1422         seq_puts(s, "----------------------------------------------------\n");
1423         seq_puts(s, "orphaned allocations (info is from last known client):\n");
1424         mutex_lock(&dev->buffer_lock);
1425         for (n = rb_first(&dev->buffers); n; n = rb_next(n)) {
1426                 struct ion_buffer *buffer = rb_entry(n, struct ion_buffer,
1427                                                      node);
1428                 if (buffer->heap->id != heap->id)
1429                         continue;
1430                 total_size += buffer->size;
1431                 if (!buffer->handle_count) {
1432                         seq_printf(s, "%16s %16u %16zu %d %d\n",
1433                                    buffer->task_comm, buffer->pid,
1434                                    buffer->size, buffer->kmap_cnt,
1435                                    atomic_read(&buffer->ref.refcount));
1436                         total_orphaned_size += buffer->size;
1437                 }
1438         }
1439         mutex_unlock(&dev->buffer_lock);
1440         seq_puts(s, "----------------------------------------------------\n");
1441         seq_printf(s, "%16s %16zu\n", "total orphaned",
1442                    total_orphaned_size);
1443         seq_printf(s, "%16s %16zu\n", "total ", total_size);
1444         if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
1445                 seq_printf(s, "%16s %16zu\n", "deferred free",
1446                                 heap->free_list_size);
1447         seq_puts(s, "----------------------------------------------------\n");
1448
1449         if (heap->debug_show)
1450                 heap->debug_show(heap, s, unused);
1451
1452         return 0;
1453 }
1454
1455 static int ion_debug_heap_open(struct inode *inode, struct file *file)
1456 {
1457         return single_open(file, ion_debug_heap_show, inode->i_private);
1458 }
1459
1460 static const struct file_operations debug_heap_fops = {
1461         .open = ion_debug_heap_open,
1462         .read = seq_read,
1463         .llseek = seq_lseek,
1464         .release = single_release,
1465 };
1466
1467 static int debug_shrink_set(void *data, u64 val)
1468 {
1469         struct ion_heap *heap = data;
1470         struct shrink_control sc;
1471         int objs;
1472
1473         sc.gfp_mask = -1;
1474         sc.nr_to_scan = val;
1475
1476         if (!val) {
1477                 objs = heap->shrinker.count_objects(&heap->shrinker, &sc);
1478                 sc.nr_to_scan = objs;
1479         }
1480
1481         heap->shrinker.scan_objects(&heap->shrinker, &sc);
1482         return 0;
1483 }
1484
1485 static int debug_shrink_get(void *data, u64 *val)
1486 {
1487         struct ion_heap *heap = data;
1488         struct shrink_control sc;
1489         int objs;
1490
1491         sc.gfp_mask = -1;
1492         sc.nr_to_scan = 0;
1493
1494         objs = heap->shrinker.count_objects(&heap->shrinker, &sc);
1495         *val = objs;
1496         return 0;
1497 }
1498
1499 DEFINE_SIMPLE_ATTRIBUTE(debug_shrink_fops, debug_shrink_get,
1500                         debug_shrink_set, "%llu\n");
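/*
 * The <heap>_shrink debugfs files created below use these helpers:
 * reading the file reports how many objects the heap's shrinker could
 * free, writing a count scans that many objects, and writing 0 scans
 * everything that is currently freeable (e.g., assuming debugfs is
 * mounted at /sys/kernel/debug:
 * "echo 0 > /sys/kernel/debug/ion/heaps/<heap>_shrink").
 */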
1501
1502 void ion_device_add_heap(struct ion_device *dev, struct ion_heap *heap)
1503 {
1504         struct dentry *debug_file;
1505
1506         if (!heap->ops->allocate || !heap->ops->free || !heap->ops->map_dma ||
1507             !heap->ops->unmap_dma)
1508                 pr_err("%s: can not add heap with invalid ops struct.\n",
1509                        __func__);
1510
1511         spin_lock_init(&heap->free_lock);
1512         heap->free_list_size = 0;
1513
1514         if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
1515                 ion_heap_init_deferred_free(heap);
1516
1517         if ((heap->flags & ION_HEAP_FLAG_DEFER_FREE) || heap->ops->shrink)
1518                 ion_heap_init_shrinker(heap);
1519
1520         heap->dev = dev;
1521         down_write(&dev->lock);
1522         /*
1523          * Use negative heap->id to reverse the priority -- when traversing
1524          * the list later, higher id numbers are attempted first.
1525          */
1526         plist_node_init(&heap->node, -heap->id);
1527         plist_add(&heap->node, &dev->heaps);
1528         debug_file = debugfs_create_file(heap->name, 0664,
1529                                         dev->heaps_debug_root, heap,
1530                                         &debug_heap_fops);
1531
1532         if (!debug_file) {
1533                 char buf[256], *path;
1534
1535                 path = dentry_path(dev->heaps_debug_root, buf, 256);
1536                 pr_err("Failed to create heap debugfs at %s/%s\n",
1537                         path, heap->name);
1538         }
1539
1540         if (heap->shrinker.count_objects && heap->shrinker.scan_objects) {
1541                 char debug_name[64];
1542
1543                 snprintf(debug_name, 64, "%s_shrink", heap->name);
1544                 debug_file = debugfs_create_file(
1545                         debug_name, 0644, dev->heaps_debug_root, heap,
1546                         &debug_shrink_fops);
1547                 if (!debug_file) {
1548                         char buf[256], *path;
1549
1550                         path = dentry_path(dev->heaps_debug_root, buf, 256);
1551                         pr_err("Failed to create heap shrinker debugfs at %s/%s\n",
1552                                 path, debug_name);
1553                 }
1554         }
1555
1556         up_write(&dev->lock);
1557 }
1558 EXPORT_SYMBOL(ion_device_add_heap);
1559
1560 struct ion_device *ion_device_create(long (*custom_ioctl)
1561                                      (struct ion_client *client,
1562                                       unsigned int cmd,
1563                                       unsigned long arg))
1564 {
1565         struct ion_device *idev;
1566         int ret;
1567
1568         idev = kzalloc(sizeof(struct ion_device), GFP_KERNEL);
1569         if (!idev)
1570                 return ERR_PTR(-ENOMEM);
1571
1572         idev->dev.minor = MISC_DYNAMIC_MINOR;
1573         idev->dev.name = "ion";
1574         idev->dev.fops = &ion_fops;
1575         idev->dev.parent = NULL;
1576         ret = misc_register(&idev->dev);
1577         if (ret) {
1578                 pr_err("ion: failed to register misc device.\n");
1579                 kfree(idev);
1580                 return ERR_PTR(ret);
1581         }
1582
1583         idev->debug_root = debugfs_create_dir("ion", NULL);
1584         if (!idev->debug_root) {
1585                 pr_err("ion: failed to create debugfs root directory.\n");
1586                 goto debugfs_done;
1587         }
1588         idev->heaps_debug_root = debugfs_create_dir("heaps", idev->debug_root);
1589         if (!idev->heaps_debug_root) {
1590                 pr_err("ion: failed to create debugfs heaps directory.\n");
1591                 goto debugfs_done;
1592         }
1593         idev->clients_debug_root = debugfs_create_dir("clients",
1594                                                 idev->debug_root);
1595         if (!idev->clients_debug_root)
1596                 pr_err("ion: failed to create debugfs clients directory.\n");
1597
1598 debugfs_done:
1599
1600         idev->custom_ioctl = custom_ioctl;
1601         idev->buffers = RB_ROOT;
1602         mutex_init(&idev->buffer_lock);
1603         init_rwsem(&idev->lock);
1604         plist_head_init(&idev->heaps);
1605         idev->clients = RB_ROOT;
1606         return idev;
1607 }
1608 EXPORT_SYMBOL(ion_device_create);
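/*
 * Illustrative sketch (not part of this file): a platform ion driver
 * typically creates one device and then registers its heaps with it;
 * "my_heap" is assumed to come from a heap-specific constructor and the
 * custom ioctl hook may be NULL.
 *
 *	struct ion_device *idev = ion_device_create(NULL);
 *
 *	if (IS_ERR(idev))
 *		return PTR_ERR(idev);
 *	ion_device_add_heap(idev, my_heap);
 */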
1609
1610 void ion_device_destroy(struct ion_device *dev)
1611 {
1612         misc_deregister(&dev->dev);
1613         debugfs_remove_recursive(dev->debug_root);
1614         /* XXX need to free the heaps and clients ? */
1615         kfree(dev);
1616 }
1617 EXPORT_SYMBOL(ion_device_destroy);
1618
1619 void __init ion_reserve(struct ion_platform_data *data)
1620 {
1621         int i;
1622
1623         for (i = 0; i < data->nr; i++) {
1624                 if (data->heaps[i].size == 0)
1625                         continue;
1626
1627                 if (data->heaps[i].base == 0) {
1628                         phys_addr_t paddr;
1629
1630                         paddr = memblock_alloc_base(data->heaps[i].size,
1631                                                     data->heaps[i].align,
1632                                                     MEMBLOCK_ALLOC_ANYWHERE);
1633                         if (!paddr) {
1634                                 pr_err("%s: error allocating memblock for heap %d\n",
1635                                         __func__, i);
1636                                 continue;
1637                         }
1638                         data->heaps[i].base = paddr;
1639                 } else {
1640                         int ret = memblock_reserve(data->heaps[i].base,
1641                                                data->heaps[i].size);
1642                         if (ret)
1643                                 pr_err("memblock reserve of %zx@%lx failed\n",
1644                                        data->heaps[i].size,
1645                                        data->heaps[i].base);
1646                 }
1647                 pr_info("%s: %s reserved base %lx size %zu\n", __func__,
1648                         data->heaps[i].name,
1649                         data->heaps[i].base,
1650                         data->heaps[i].size);
1651         }
1652 }