staging: android: ion: Get rid of ion_sg_table
drivers/staging/android/ion/ion.c
1 /*
2  *
3  * drivers/staging/android/ion/ion.c
4  *
5  * Copyright (C) 2011 Google, Inc.
6  *
7  * This software is licensed under the terms of the GNU General Public
8  * License version 2, as published by the Free Software Foundation, and
9  * may be copied, distributed, and modified under those terms.
10  *
11  * This program is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14  * GNU General Public License for more details.
15  *
16  */
17
18 #include <linux/device.h>
19 #include <linux/err.h>
20 #include <linux/file.h>
21 #include <linux/freezer.h>
22 #include <linux/fs.h>
23 #include <linux/anon_inodes.h>
24 #include <linux/kthread.h>
25 #include <linux/list.h>
26 #include <linux/memblock.h>
27 #include <linux/miscdevice.h>
28 #include <linux/export.h>
29 #include <linux/mm.h>
30 #include <linux/mm_types.h>
31 #include <linux/rbtree.h>
32 #include <linux/slab.h>
33 #include <linux/seq_file.h>
34 #include <linux/uaccess.h>
35 #include <linux/vmalloc.h>
36 #include <linux/debugfs.h>
37 #include <linux/dma-buf.h>
38 #include <linux/idr.h>
39
40 #include "ion.h"
41 #include "ion_priv.h"
42 #include "compat_ion.h"
43
44 /**
45  * struct ion_device - the metadata of the ion device node
46  * @dev:                the actual misc device
47  * @buffers:            an rb tree of all the existing buffers
48  * @buffer_lock:        lock protecting the tree of buffers
49  * @lock:               rwsem protecting the tree of heaps and clients
50  * @heaps:              list of all the heaps in the system
51  * @clients:            an rb tree of all the existing clients
52  */
53 struct ion_device {
54         struct miscdevice dev;
55         struct rb_root buffers;
56         struct mutex buffer_lock;
57         struct rw_semaphore lock;
58         struct plist_head heaps;
59         long (*custom_ioctl)(struct ion_client *client, unsigned int cmd,
60                              unsigned long arg);
61         struct rb_root clients;
62         struct dentry *debug_root;
63         struct dentry *heaps_debug_root;
64         struct dentry *clients_debug_root;
65 };
66
67 /**
68  * struct ion_client - a process/hw block local address space
69  * @node:               node in the tree of all clients
70  * @dev:                backpointer to ion device
71  * @handles:            an rb tree of all the handles in this client
72  * @idr:                an idr space for allocating handle ids
73  * @lock:               lock protecting the tree of handles
74  * @name:               used for debugging
75  * @display_name:       used for debugging (unique version of @name)
76  * @display_serial:     used for debugging (to make display_name unique)
77  * @task:               used for debugging
78  *
79  * A client represents a list of buffers this client may access.
80  * The mutex stored here protects both the tree of handles and the
81  * handles themselves, and should be held while modifying either.
82  */
83 struct ion_client {
84         struct rb_node node;
85         struct ion_device *dev;
86         struct rb_root handles;
87         struct idr idr;
88         struct mutex lock;
89         const char *name;
90         char *display_name;
91         int display_serial;
92         struct task_struct *task;
93         pid_t pid;
94         struct dentry *debug_root;
95 };
96
97 /**
98  * struct ion_handle - a client local reference to a buffer
99  * @ref:                reference count
100  * @client:             back pointer to the client the buffer resides in
101  * @buffer:             pointer to the buffer
102  * @node:               node in the client's handle rbtree
103  * @kmap_cnt:           count of times this client has mapped to kernel
104  * @id:                 client-unique id allocated by client->idr
105  *
106  * Modifications to node and kmap_cnt should be protected by the
107  * lock in the client.  Other fields are never changed after initialization.
108  */
109 struct ion_handle {
110         struct kref ref;
111         struct ion_client *client;
112         struct ion_buffer *buffer;
113         struct rb_node node;
114         unsigned int kmap_cnt;
115         int id;
116 };
117
118 bool ion_buffer_fault_user_mappings(struct ion_buffer *buffer)
119 {
120         return (buffer->flags & ION_FLAG_CACHED) &&
121                 !(buffer->flags & ION_FLAG_CACHED_NEEDS_SYNC);
122 }
123
124 bool ion_buffer_cached(struct ion_buffer *buffer)
125 {
126         return !!(buffer->flags & ION_FLAG_CACHED);
127 }
128
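/*
 * For buffers whose user mappings are faulted in page by page,
 * buffer->pages[] keeps one struct page pointer per page and borrows the
 * (always zero) low bit of each pointer as a per-page dirty flag.  The
 * helpers below strip the tag when reading the pointer and set or clear
 * it when marking a page dirty or clean.
 */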
129 static inline struct page *ion_buffer_page(struct page *page)
130 {
131         return (struct page *)((unsigned long)page & ~(1UL));
132 }
133
134 static inline bool ion_buffer_page_is_dirty(struct page *page)
135 {
136         return !!((unsigned long)page & 1UL);
137 }
138
139 static inline void ion_buffer_page_dirty(struct page **page)
140 {
141         *page = (struct page *)((unsigned long)(*page) | 1UL);
142 }
143
144 static inline void ion_buffer_page_clean(struct page **page)
145 {
146         *page = (struct page *)((unsigned long)(*page) & ~(1UL));
147 }
148
149 /* this function should only be called while dev->buffer_lock is held */
150 static void ion_buffer_add(struct ion_device *dev,
151                            struct ion_buffer *buffer)
152 {
153         struct rb_node **p = &dev->buffers.rb_node;
154         struct rb_node *parent = NULL;
155         struct ion_buffer *entry;
156
157         while (*p) {
158                 parent = *p;
159                 entry = rb_entry(parent, struct ion_buffer, node);
160
161                 if (buffer < entry) {
162                         p = &(*p)->rb_left;
163                 } else if (buffer > entry) {
164                         p = &(*p)->rb_right;
165                 } else {
166                         pr_err("%s: buffer already found.\n", __func__);
167                         BUG();
168                 }
169         }
170
171         rb_link_node(&buffer->node, parent, p);
172         rb_insert_color(&buffer->node, &dev->buffers);
173 }
174
175 /* this function should only be called while dev->lock is held */
176 static struct ion_buffer *ion_buffer_create(struct ion_heap *heap,
177                                      struct ion_device *dev,
178                                      unsigned long len,
179                                      unsigned long align,
180                                      unsigned long flags)
181 {
182         struct ion_buffer *buffer;
183         struct sg_table *table;
184         struct scatterlist *sg;
185         int i, ret;
186
187         buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
188         if (!buffer)
189                 return ERR_PTR(-ENOMEM);
190
191         buffer->heap = heap;
192         buffer->flags = flags;
193         kref_init(&buffer->ref);
194
195         ret = heap->ops->allocate(heap, buffer, len, align, flags);
196
197         if (ret) {
198                 if (!(heap->flags & ION_HEAP_FLAG_DEFER_FREE))
199                         goto err2;
200
201                 ion_heap_freelist_drain(heap, 0);
202                 ret = heap->ops->allocate(heap, buffer, len, align,
203                                           flags);
204                 if (ret)
205                         goto err2;
206         }
207
208         buffer->dev = dev;
209         buffer->size = len;
210
211         table = heap->ops->map_dma(heap, buffer);
212         if (WARN_ONCE(table == NULL,
213                         "heap->ops->map_dma should return ERR_PTR on error"))
214                 table = ERR_PTR(-EINVAL);
215         if (IS_ERR(table)) {
216                 ret = -EINVAL;
217                 goto err1;
218         }
219
220         buffer->sg_table = table;
221         if (ion_buffer_fault_user_mappings(buffer)) {
222                 int num_pages = PAGE_ALIGN(buffer->size) / PAGE_SIZE;
223                 struct scatterlist *sg;
224                 int i, j, k = 0;
225
226                 buffer->pages = vmalloc(sizeof(struct page *) * num_pages);
227                 if (!buffer->pages) {
228                         ret = -ENOMEM;
229                         goto err;
230                 }
231
232                 for_each_sg(table->sgl, sg, table->nents, i) {
233                         struct page *page = sg_page(sg);
234
235                         for (j = 0; j < sg->length / PAGE_SIZE; j++)
236                                 buffer->pages[k++] = page++;
237                 }
238         }
239
240         buffer->dev = dev;
241         buffer->size = len;
242         INIT_LIST_HEAD(&buffer->vmas);
243         mutex_init(&buffer->lock);
244         /*
245          * This will set up dma addresses for the sglist -- it is not
246          * technically correct as per the dma api -- a specific
247          * device isn't really taking ownership here.  However, in practice on
248          * our systems the only dma_address space is physical addresses.
249          * Additionally, we can't afford the overhead of invalidating every
250          * allocation via dma_map_sg.  The implicit contract here is that
251          * memory coming from the heaps is ready for dma, i.e. if it has a
252          * cached mapping, that mapping has been invalidated.
253          */
254         for_each_sg(buffer->sg_table->sgl, sg, buffer->sg_table->nents, i) {
255                 sg_dma_address(sg) = sg_phys(sg);
256                 sg_dma_len(sg) = sg->length;
257         }
258         mutex_lock(&dev->buffer_lock);
259         ion_buffer_add(dev, buffer);
260         mutex_unlock(&dev->buffer_lock);
261         return buffer;
262
263 err:
264         heap->ops->unmap_dma(heap, buffer);
265 err1:
266         heap->ops->free(buffer);
267 err2:
268         kfree(buffer);
269         return ERR_PTR(ret);
270 }
271
272 void ion_buffer_destroy(struct ion_buffer *buffer)
273 {
274         if (WARN_ON(buffer->kmap_cnt > 0))
275                 buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
276         buffer->heap->ops->unmap_dma(buffer->heap, buffer);
277         buffer->heap->ops->free(buffer);
278         vfree(buffer->pages);
279         kfree(buffer);
280 }
281
282 static void _ion_buffer_destroy(struct kref *kref)
283 {
284         struct ion_buffer *buffer = container_of(kref, struct ion_buffer, ref);
285         struct ion_heap *heap = buffer->heap;
286         struct ion_device *dev = buffer->dev;
287
288         mutex_lock(&dev->buffer_lock);
289         rb_erase(&buffer->node, &dev->buffers);
290         mutex_unlock(&dev->buffer_lock);
291
292         if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
293                 ion_heap_freelist_add(heap, buffer);
294         else
295                 ion_buffer_destroy(buffer);
296 }
297
298 static void ion_buffer_get(struct ion_buffer *buffer)
299 {
300         kref_get(&buffer->ref);
301 }
302
303 static int ion_buffer_put(struct ion_buffer *buffer)
304 {
305         return kref_put(&buffer->ref, _ion_buffer_destroy);
306 }
307
308 static void ion_buffer_add_to_handle(struct ion_buffer *buffer)
309 {
310         mutex_lock(&buffer->lock);
311         buffer->handle_count++;
312         mutex_unlock(&buffer->lock);
313 }
314
315 static void ion_buffer_remove_from_handle(struct ion_buffer *buffer)
316 {
317         /*
318          * When a buffer is removed from a handle, if it is not in
319          * any other handles, copy the taskcomm and the pid of the
320          * process it's being removed from into the buffer.  At this
321          * point there will be no way to track what processes this buffer is
322          * being used by; it only exists as a dma_buf file descriptor.
323          * The taskcomm and pid can provide a debug hint as to where this fd
324          * is in the system.
325          */
326         mutex_lock(&buffer->lock);
327         buffer->handle_count--;
328         BUG_ON(buffer->handle_count < 0);
329         if (!buffer->handle_count) {
330                 struct task_struct *task;
331
332                 task = current->group_leader;
333                 get_task_comm(buffer->task_comm, task);
334                 buffer->pid = task_pid_nr(task);
335         }
336         mutex_unlock(&buffer->lock);
337 }
338
339 static struct ion_handle *ion_handle_create(struct ion_client *client,
340                                      struct ion_buffer *buffer)
341 {
342         struct ion_handle *handle;
343
344         handle = kzalloc(sizeof(*handle), GFP_KERNEL);
345         if (!handle)
346                 return ERR_PTR(-ENOMEM);
347         kref_init(&handle->ref);
348         RB_CLEAR_NODE(&handle->node);
349         handle->client = client;
350         ion_buffer_get(buffer);
351         ion_buffer_add_to_handle(buffer);
352         handle->buffer = buffer;
353
354         return handle;
355 }
356
357 static void ion_handle_kmap_put(struct ion_handle *);
358
359 static void ion_handle_destroy(struct kref *kref)
360 {
361         struct ion_handle *handle = container_of(kref, struct ion_handle, ref);
362         struct ion_client *client = handle->client;
363         struct ion_buffer *buffer = handle->buffer;
364
365         mutex_lock(&buffer->lock);
366         while (handle->kmap_cnt)
367                 ion_handle_kmap_put(handle);
368         mutex_unlock(&buffer->lock);
369
370         idr_remove(&client->idr, handle->id);
371         if (!RB_EMPTY_NODE(&handle->node))
372                 rb_erase(&handle->node, &client->handles);
373
374         ion_buffer_remove_from_handle(buffer);
375         ion_buffer_put(buffer);
376
377         kfree(handle);
378 }
379
380 struct ion_buffer *ion_handle_buffer(struct ion_handle *handle)
381 {
382         return handle->buffer;
383 }
384
385 static void ion_handle_get(struct ion_handle *handle)
386 {
387         kref_get(&handle->ref);
388 }
389
390 static int ion_handle_put_nolock(struct ion_handle *handle)
391 {
392         int ret;
393
394         ret = kref_put(&handle->ref, ion_handle_destroy);
395
396         return ret;
397 }
398
399 static int ion_handle_put(struct ion_handle *handle)
400 {
401         struct ion_client *client = handle->client;
402         int ret;
403
404         mutex_lock(&client->lock);
405         ret = ion_handle_put_nolock(handle);
406         mutex_unlock(&client->lock);
407
408         return ret;
409 }
410
411 static struct ion_handle *ion_handle_lookup(struct ion_client *client,
412                                             struct ion_buffer *buffer)
413 {
414         struct rb_node *n = client->handles.rb_node;
415
416         while (n) {
417                 struct ion_handle *entry = rb_entry(n, struct ion_handle, node);
418
419                 if (buffer < entry->buffer)
420                         n = n->rb_left;
421                 else if (buffer > entry->buffer)
422                         n = n->rb_right;
423                 else
424                         return entry;
425         }
426         return ERR_PTR(-EINVAL);
427 }
428
429 static struct ion_handle *ion_handle_get_by_id_nolock(struct ion_client *client,
430                                                 int id)
431 {
432         struct ion_handle *handle;
433
434         handle = idr_find(&client->idr, id);
435         if (handle)
436                 ion_handle_get(handle);
437
438         return handle ? handle : ERR_PTR(-EINVAL);
439 }
440
441 static struct ion_handle *ion_handle_get_by_id(struct ion_client *client,
442                                                int id)
443 {
444         struct ion_handle *handle;
445
446         mutex_lock(&client->lock);
447         handle = ion_handle_get_by_id_nolock(client, id);
448         mutex_unlock(&client->lock);
449
450         return handle;
451 }
452
453 static bool ion_handle_validate(struct ion_client *client,
454                                 struct ion_handle *handle)
455 {
456         WARN_ON(!mutex_is_locked(&client->lock));
457         return idr_find(&client->idr, handle->id) == handle;
458 }
459
460 static int ion_handle_add(struct ion_client *client, struct ion_handle *handle)
461 {
462         int id;
463         struct rb_node **p = &client->handles.rb_node;
464         struct rb_node *parent = NULL;
465         struct ion_handle *entry;
466
467         id = idr_alloc(&client->idr, handle, 1, 0, GFP_KERNEL);
468         if (id < 0)
469                 return id;
470
471         handle->id = id;
472
473         while (*p) {
474                 parent = *p;
475                 entry = rb_entry(parent, struct ion_handle, node);
476
477                 if (handle->buffer < entry->buffer)
478                         p = &(*p)->rb_left;
479                 else if (handle->buffer > entry->buffer)
480                         p = &(*p)->rb_right;
481                 else
482                         WARN(1, "%s: buffer already found.\n", __func__);
483         }
484
485         rb_link_node(&handle->node, parent, p);
486         rb_insert_color(&handle->node, &client->handles);
487
488         return 0;
489 }
490
491 struct ion_handle *ion_alloc(struct ion_client *client, size_t len,
492                              size_t align, unsigned int heap_id_mask,
493                              unsigned int flags)
494 {
495         struct ion_handle *handle;
496         struct ion_device *dev = client->dev;
497         struct ion_buffer *buffer = NULL;
498         struct ion_heap *heap;
499         int ret;
500
501         pr_debug("%s: len %zu align %zu heap_id_mask %u flags %x\n", __func__,
502                  len, align, heap_id_mask, flags);
503         /*
504          * Traverse the list of heaps available in this system in priority
505          * order.  If the heap type is supported by the client and matches the
506          * request of the caller, allocate from it.  Repeat until allocation
507          * succeeds or all heaps have been tried.
508          */
509         len = PAGE_ALIGN(len);
510
511         if (!len)
512                 return ERR_PTR(-EINVAL);
513
514         down_read(&dev->lock);
515         plist_for_each_entry(heap, &dev->heaps, node) {
516                 /* if the caller didn't specify this heap id */
517                 if (!((1 << heap->id) & heap_id_mask))
518                         continue;
519                 buffer = ion_buffer_create(heap, dev, len, align, flags);
520                 if (!IS_ERR(buffer))
521                         break;
522         }
523         up_read(&dev->lock);
524
525         if (buffer == NULL)
526                 return ERR_PTR(-ENODEV);
527
528         if (IS_ERR(buffer))
529                 return ERR_CAST(buffer);
530
531         handle = ion_handle_create(client, buffer);
532
533         /*
534          * ion_buffer_create will create a buffer with a ref_cnt of 1,
535          * and ion_handle_create will take a second reference; drop one here.
536          */
537         ion_buffer_put(buffer);
538
539         if (IS_ERR(handle))
540                 return handle;
541
542         mutex_lock(&client->lock);
543         ret = ion_handle_add(client, handle);
544         mutex_unlock(&client->lock);
545         if (ret) {
546                 ion_handle_put(handle);
547                 handle = ERR_PTR(ret);
548         }
549
550         return handle;
551 }
552 EXPORT_SYMBOL(ion_alloc);
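
/*
 * Minimal in-kernel usage sketch for ion_alloc()/ion_free().  The client is
 * assumed to already exist (see ion_client_create() below); MY_HEAP_ID is a
 * placeholder for whatever heap id the platform registered, not something
 * defined in this file.
 */
#if 0
static int example_alloc(struct ion_client *client)
{
        struct ion_handle *handle;

        /* one page, page aligned, CPU cached, from the placeholder heap */
        handle = ion_alloc(client, PAGE_SIZE, PAGE_SIZE,
                           1 << MY_HEAP_ID, ION_FLAG_CACHED);
        if (IS_ERR(handle))
                return PTR_ERR(handle);

        /* ... use the buffer via ion_map_kernel()/ion_share_dma_buf_fd() ... */

        ion_free(client, handle);
        return 0;
}
#endif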
553
554 static void ion_free_nolock(struct ion_client *client, struct ion_handle *handle)
555 {
556         bool valid_handle;
557
558         BUG_ON(client != handle->client);
559
560         valid_handle = ion_handle_validate(client, handle);
561
562         if (!valid_handle) {
563                 WARN(1, "%s: invalid handle passed to free.\n", __func__);
564                 return;
565         }
566         ion_handle_put_nolock(handle);
567 }
568
569 void ion_free(struct ion_client *client, struct ion_handle *handle)
570 {
571         BUG_ON(client != handle->client);
572
573         mutex_lock(&client->lock);
574         ion_free_nolock(client, handle);
575         mutex_unlock(&client->lock);
576 }
577 EXPORT_SYMBOL(ion_free);
578
579 int ion_phys(struct ion_client *client, struct ion_handle *handle,
580              ion_phys_addr_t *addr, size_t *len)
581 {
582         struct ion_buffer *buffer;
583         int ret;
584
585         mutex_lock(&client->lock);
586         if (!ion_handle_validate(client, handle)) {
587                 mutex_unlock(&client->lock);
588                 return -EINVAL;
589         }
590
591         buffer = handle->buffer;
592
593         if (!buffer->heap->ops->phys) {
594                 pr_err("%s: ion_phys is not implemented by this heap (name=%s, type=%d).\n",
595                         __func__, buffer->heap->name, buffer->heap->type);
596                 mutex_unlock(&client->lock);
597                 return -ENODEV;
598         }
599         mutex_unlock(&client->lock);
600         ret = buffer->heap->ops->phys(buffer->heap, buffer, addr, len);
601         return ret;
602 }
603 EXPORT_SYMBOL(ion_phys);
604
605 static void *ion_buffer_kmap_get(struct ion_buffer *buffer)
606 {
607         void *vaddr;
608
609         if (buffer->kmap_cnt) {
610                 buffer->kmap_cnt++;
611                 return buffer->vaddr;
612         }
613         vaddr = buffer->heap->ops->map_kernel(buffer->heap, buffer);
614         if (WARN_ONCE(vaddr == NULL,
615                         "heap->ops->map_kernel should return ERR_PTR on error"))
616                 return ERR_PTR(-EINVAL);
617         if (IS_ERR(vaddr))
618                 return vaddr;
619         buffer->vaddr = vaddr;
620         buffer->kmap_cnt++;
621         return vaddr;
622 }
623
624 static void *ion_handle_kmap_get(struct ion_handle *handle)
625 {
626         struct ion_buffer *buffer = handle->buffer;
627         void *vaddr;
628
629         if (handle->kmap_cnt) {
630                 handle->kmap_cnt++;
631                 return buffer->vaddr;
632         }
633         vaddr = ion_buffer_kmap_get(buffer);
634         if (IS_ERR(vaddr))
635                 return vaddr;
636         handle->kmap_cnt++;
637         return vaddr;
638 }
639
640 static void ion_buffer_kmap_put(struct ion_buffer *buffer)
641 {
642         buffer->kmap_cnt--;
643         if (!buffer->kmap_cnt) {
644                 buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
645                 buffer->vaddr = NULL;
646         }
647 }
648
649 static void ion_handle_kmap_put(struct ion_handle *handle)
650 {
651         struct ion_buffer *buffer = handle->buffer;
652
653         if (!handle->kmap_cnt) {
654                 WARN(1, "%s: Double unmap detected! bailing...\n", __func__);
655                 return;
656         }
657         handle->kmap_cnt--;
658         if (!handle->kmap_cnt)
659                 ion_buffer_kmap_put(buffer);
660 }
661
662 void *ion_map_kernel(struct ion_client *client, struct ion_handle *handle)
663 {
664         struct ion_buffer *buffer;
665         void *vaddr;
666
667         mutex_lock(&client->lock);
668         if (!ion_handle_validate(client, handle)) {
669                 pr_err("%s: invalid handle passed to map_kernel.\n",
670                        __func__);
671                 mutex_unlock(&client->lock);
672                 return ERR_PTR(-EINVAL);
673         }
674
675         buffer = handle->buffer;
676
677         if (!handle->buffer->heap->ops->map_kernel) {
678                 pr_err("%s: map_kernel is not implemented by this heap.\n",
679                        __func__);
680                 mutex_unlock(&client->lock);
681                 return ERR_PTR(-ENODEV);
682         }
683
684         mutex_lock(&buffer->lock);
685         vaddr = ion_handle_kmap_get(handle);
686         mutex_unlock(&buffer->lock);
687         mutex_unlock(&client->lock);
688         return vaddr;
689 }
690 EXPORT_SYMBOL(ion_map_kernel);
691
692 void ion_unmap_kernel(struct ion_client *client, struct ion_handle *handle)
693 {
694         struct ion_buffer *buffer;
695
696         mutex_lock(&client->lock);
697         buffer = handle->buffer;
698         mutex_lock(&buffer->lock);
699         ion_handle_kmap_put(handle);
700         mutex_unlock(&buffer->lock);
701         mutex_unlock(&client->lock);
702 }
703 EXPORT_SYMBOL(ion_unmap_kernel);
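
/*
 * Sketch of the kernel mapping path: ion_map_kernel() returns a kernel
 * virtual address (or an ERR_PTR) that stays valid until the matching
 * ion_unmap_kernel().  The handle is assumed to come from ion_alloc().
 */
#if 0
static int example_fill(struct ion_client *client, struct ion_handle *handle,
                        size_t len)
{
        void *vaddr;

        vaddr = ion_map_kernel(client, handle);
        if (IS_ERR(vaddr))
                return PTR_ERR(vaddr);

        memset(vaddr, 0, len);          /* CPU access through the mapping */

        ion_unmap_kernel(client, handle);
        return 0;
}
#endif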
704
705 static struct mutex debugfs_mutex;
706 static struct rb_root *ion_root_client;
707 static int is_client_alive(struct ion_client *client)
708 {
709         struct rb_node *node;
710         struct ion_client *tmp;
711         struct ion_device *dev;
712
713         node = ion_root_client->rb_node;
714         dev = container_of(ion_root_client, struct ion_device, clients);
715
716         down_read(&dev->lock);
717         while (node) {
718                 tmp = rb_entry(node, struct ion_client, node);
719                 if (client < tmp) {
720                         node = node->rb_left;
721                 } else if (client > tmp) {
722                         node = node->rb_right;
723                 } else {
724                         up_read(&dev->lock);
725                         return 1;
726                 }
727         }
728
729         up_read(&dev->lock);
730         return 0;
731 }
732
733 static int ion_debug_client_show(struct seq_file *s, void *unused)
734 {
735         struct ion_client *client = s->private;
736         struct rb_node *n;
737         size_t sizes[ION_NUM_HEAP_IDS] = {0};
738         const char *names[ION_NUM_HEAP_IDS] = {NULL};
739         int i;
740
741         mutex_lock(&debugfs_mutex);
742         if (!is_client_alive(client)) {
743                 seq_printf(s, "ion_client 0x%p dead, can't dump its buffers\n",
744                            client);
745                 mutex_unlock(&debugfs_mutex);
746                 return 0;
747         }
748
749         mutex_lock(&client->lock);
750         for (n = rb_first(&client->handles); n; n = rb_next(n)) {
751                 struct ion_handle *handle = rb_entry(n, struct ion_handle,
752                                                      node);
753                 unsigned int id = handle->buffer->heap->id;
754
755                 if (!names[id])
756                         names[id] = handle->buffer->heap->name;
757                 sizes[id] += handle->buffer->size;
758         }
759         mutex_unlock(&client->lock);
760         mutex_unlock(&debugfs_mutex);
761
762         seq_printf(s, "%16.16s: %16.16s\n", "heap_name", "size_in_bytes");
763         for (i = 0; i < ION_NUM_HEAP_IDS; i++) {
764                 if (!names[i])
765                         continue;
766                 seq_printf(s, "%16.16s: %16zu\n", names[i], sizes[i]);
767         }
768         return 0;
769 }
770
771 static int ion_debug_client_open(struct inode *inode, struct file *file)
772 {
773         return single_open(file, ion_debug_client_show, inode->i_private);
774 }
775
776 static const struct file_operations debug_client_fops = {
777         .open = ion_debug_client_open,
778         .read = seq_read,
779         .llseek = seq_lseek,
780         .release = single_release,
781 };
782
783 static int ion_get_client_serial(const struct rb_root *root,
784                                         const unsigned char *name)
785 {
786         int serial = -1;
787         struct rb_node *node;
788
789         for (node = rb_first(root); node; node = rb_next(node)) {
790                 struct ion_client *client = rb_entry(node, struct ion_client,
791                                                 node);
792
793                 if (strcmp(client->name, name))
794                         continue;
795                 serial = max(serial, client->display_serial);
796         }
797         return serial + 1;
798 }
799
800 struct ion_client *ion_client_create(struct ion_device *dev,
801                                      const char *name)
802 {
803         struct ion_client *client;
804         struct task_struct *task;
805         struct rb_node **p;
806         struct rb_node *parent = NULL;
807         struct ion_client *entry;
808         pid_t pid;
809
810         if (!name) {
811                 pr_err("%s: Name cannot be null\n", __func__);
812                 return ERR_PTR(-EINVAL);
813         }
814
815         get_task_struct(current->group_leader);
816         task_lock(current->group_leader);
817         pid = task_pid_nr(current->group_leader);
818         /*
819          * Don't bother to store the task struct for kernel threads;
820          * they can't be killed anyway.
821          */
822         if (current->group_leader->flags & PF_KTHREAD) {
823                 put_task_struct(current->group_leader);
824                 task = NULL;
825         } else {
826                 task = current->group_leader;
827         }
828         task_unlock(current->group_leader);
829
830         client = kzalloc(sizeof(*client), GFP_KERNEL);
831         if (!client)
832                 goto err_put_task_struct;
833
834         client->dev = dev;
835         client->handles = RB_ROOT;
836         idr_init(&client->idr);
837         mutex_init(&client->lock);
838         client->task = task;
839         client->pid = pid;
840         client->name = kstrdup(name, GFP_KERNEL);
841         if (!client->name)
842                 goto err_free_client;
843
844         down_write(&dev->lock);
845         client->display_serial = ion_get_client_serial(&dev->clients, name);
846         client->display_name = kasprintf(
847                 GFP_KERNEL, "%s-%d", name, client->display_serial);
848         if (!client->display_name) {
849                 up_write(&dev->lock);
850                 goto err_free_client_name;
851         }
852         p = &dev->clients.rb_node;
853         while (*p) {
854                 parent = *p;
855                 entry = rb_entry(parent, struct ion_client, node);
856
857                 if (client < entry)
858                         p = &(*p)->rb_left;
859                 else if (client > entry)
860                         p = &(*p)->rb_right;
861         }
862         rb_link_node(&client->node, parent, p);
863         rb_insert_color(&client->node, &dev->clients);
864
865         client->debug_root = debugfs_create_file(client->display_name, 0664,
866                                                 dev->clients_debug_root,
867                                                 client, &debug_client_fops);
868         if (!client->debug_root) {
869                 char buf[256], *path;
870
871                 path = dentry_path(dev->clients_debug_root, buf, 256);
872                 pr_err("Failed to create client debugfs at %s/%s\n",
873                         path, client->display_name);
874         }
875
876         up_write(&dev->lock);
877
878         return client;
879
880 err_free_client_name:
881         kfree(client->name);
882 err_free_client:
883         kfree(client);
884 err_put_task_struct:
885         if (task)
886                 put_task_struct(current->group_leader);
887         return ERR_PTR(-ENOMEM);
888 }
889 EXPORT_SYMBOL(ion_client_create);
890
891 void ion_client_destroy(struct ion_client *client)
892 {
893         struct ion_device *dev = client->dev;
894         struct rb_node *n;
895
896         pr_debug("%s: %d\n", __func__, __LINE__);
897         mutex_lock(&debugfs_mutex);
898         while ((n = rb_first(&client->handles))) {
899                 struct ion_handle *handle = rb_entry(n, struct ion_handle,
900                                                      node);
901                 ion_handle_destroy(&handle->ref);
902         }
903
904         idr_destroy(&client->idr);
905
906         down_write(&dev->lock);
907         if (client->task)
908                 put_task_struct(client->task);
909         rb_erase(&client->node, &dev->clients);
910         debugfs_remove_recursive(client->debug_root);
911         up_write(&dev->lock);
912
913         kfree(client->display_name);
914         kfree(client->name);
915         kfree(client);
916         mutex_unlock(&debugfs_mutex);
917 }
918 EXPORT_SYMBOL(ion_client_destroy);
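
/*
 * Client lifecycle sketch: an in-kernel user creates a client against the
 * ion_device (obtained from platform code; "my_ion_dev" is a placeholder)
 * and destroys it when done, which also drops any handles still held.
 */
#if 0
static struct ion_client *example_client;

static int example_init(struct ion_device *my_ion_dev)
{
        example_client = ion_client_create(my_ion_dev, "example-driver");
        return PTR_ERR_OR_ZERO(example_client);
}

static void example_exit(void)
{
        ion_client_destroy(example_client);
}
#endif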
919
920 static void ion_buffer_sync_for_device(struct ion_buffer *buffer,
921                                        struct device *dev,
922                                        enum dma_data_direction direction);
923
924 static struct sg_table *ion_map_dma_buf(struct dma_buf_attachment *attachment,
925                                         enum dma_data_direction direction)
926 {
927         struct dma_buf *dmabuf = attachment->dmabuf;
928         struct ion_buffer *buffer = dmabuf->priv;
929
930         ion_buffer_sync_for_device(buffer, attachment->dev, direction);
931         return buffer->sg_table;
932 }
933
934 static void ion_unmap_dma_buf(struct dma_buf_attachment *attachment,
935                               struct sg_table *table,
936                               enum dma_data_direction direction)
937 {
938 }
939
940 void ion_pages_sync_for_device(struct device *dev, struct page *page,
941                 size_t size, enum dma_data_direction dir)
942 {
943         struct scatterlist sg;
944
945         sg_init_table(&sg, 1);
946         sg_set_page(&sg, page, size, 0);
947         /*
948          * This is not correct - sg_dma_address needs a dma_addr_t that is valid
949          * for the targeted device, but this works on the currently targeted
950          * hardware.
951          */
952         sg_dma_address(&sg) = page_to_phys(page);
953         dma_sync_sg_for_device(dev, &sg, 1, dir);
954 }
955
956 struct ion_vma_list {
957         struct list_head list;
958         struct vm_area_struct *vma;
959 };
960
961 static void ion_buffer_sync_for_device(struct ion_buffer *buffer,
962                                        struct device *dev,
963                                        enum dma_data_direction dir)
964 {
965         struct ion_vma_list *vma_list;
966         int pages = PAGE_ALIGN(buffer->size) / PAGE_SIZE;
967         int i;
968
969         pr_debug("%s: syncing for device %s\n", __func__,
970                  dev ? dev_name(dev) : "null");
971
972         if (!ion_buffer_fault_user_mappings(buffer))
973                 return;
974
975         mutex_lock(&buffer->lock);
976         for (i = 0; i < pages; i++) {
977                 struct page *page = buffer->pages[i];
978
979                 if (ion_buffer_page_is_dirty(page))
980                         ion_pages_sync_for_device(dev, ion_buffer_page(page),
981                                                         PAGE_SIZE, dir);
982
983                 ion_buffer_page_clean(buffer->pages + i);
984         }
985         list_for_each_entry(vma_list, &buffer->vmas, list) {
986                 struct vm_area_struct *vma = vma_list->vma;
987
988                 zap_page_range(vma, vma->vm_start, vma->vm_end - vma->vm_start,
989                                NULL);
990         }
991         mutex_unlock(&buffer->lock);
992 }
993
994 static int ion_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
995 {
996         struct ion_buffer *buffer = vma->vm_private_data;
997         unsigned long pfn;
998         int ret;
999
1000         mutex_lock(&buffer->lock);
1001         ion_buffer_page_dirty(buffer->pages + vmf->pgoff);
1002         BUG_ON(!buffer->pages || !buffer->pages[vmf->pgoff]);
1003
1004         pfn = page_to_pfn(ion_buffer_page(buffer->pages[vmf->pgoff]));
1005         ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn);
1006         mutex_unlock(&buffer->lock);
1007         if (ret)
1008                 return VM_FAULT_ERROR;
1009
1010         return VM_FAULT_NOPAGE;
1011 }
1012
1013 static void ion_vm_open(struct vm_area_struct *vma)
1014 {
1015         struct ion_buffer *buffer = vma->vm_private_data;
1016         struct ion_vma_list *vma_list;
1017
1018         vma_list = kmalloc(sizeof(*vma_list), GFP_KERNEL);
1019         if (!vma_list)
1020                 return;
1021         vma_list->vma = vma;
1022         mutex_lock(&buffer->lock);
1023         list_add(&vma_list->list, &buffer->vmas);
1024         mutex_unlock(&buffer->lock);
1025         pr_debug("%s: adding %p\n", __func__, vma);
1026 }
1027
1028 static void ion_vm_close(struct vm_area_struct *vma)
1029 {
1030         struct ion_buffer *buffer = vma->vm_private_data;
1031         struct ion_vma_list *vma_list, *tmp;
1032
1033         pr_debug("%s\n", __func__);
1034         mutex_lock(&buffer->lock);
1035         list_for_each_entry_safe(vma_list, tmp, &buffer->vmas, list) {
1036                 if (vma_list->vma != vma)
1037                         continue;
1038                 list_del(&vma_list->list);
1039                 kfree(vma_list);
1040                 pr_debug("%s: deleting %p\n", __func__, vma);
1041                 break;
1042         }
1043         mutex_unlock(&buffer->lock);
1044 }
1045
1046 static const struct vm_operations_struct ion_vma_ops = {
1047         .open = ion_vm_open,
1048         .close = ion_vm_close,
1049         .fault = ion_vm_fault,
1050 };
1051
1052 static int ion_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
1053 {
1054         struct ion_buffer *buffer = dmabuf->priv;
1055         int ret = 0;
1056
1057         if (!buffer->heap->ops->map_user) {
1058                 pr_err("%s: this heap does not define a method for mapping to userspace\n",
1059                         __func__);
1060                 return -EINVAL;
1061         }
1062
1063         if (ion_buffer_fault_user_mappings(buffer)) {
1064                 vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND |
1065                                                         VM_DONTDUMP;
1066                 vma->vm_private_data = buffer;
1067                 vma->vm_ops = &ion_vma_ops;
1068                 ion_vm_open(vma);
1069                 return 0;
1070         }
1071
1072         if (!(buffer->flags & ION_FLAG_CACHED))
1073                 vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
1074
1075         mutex_lock(&buffer->lock);
1076         /* now map it to userspace */
1077         ret = buffer->heap->ops->map_user(buffer->heap, buffer, vma);
1078         mutex_unlock(&buffer->lock);
1079
1080         if (ret)
1081                 pr_err("%s: failure mapping buffer to userspace\n",
1082                        __func__);
1083
1084         return ret;
1085 }
1086
1087 static void ion_dma_buf_release(struct dma_buf *dmabuf)
1088 {
1089         struct ion_buffer *buffer = dmabuf->priv;
1090
1091         ion_buffer_put(buffer);
1092 }
1093
1094 static void *ion_dma_buf_kmap(struct dma_buf *dmabuf, unsigned long offset)
1095 {
1096         struct ion_buffer *buffer = dmabuf->priv;
1097
1098         return buffer->vaddr + offset * PAGE_SIZE;
1099 }
1100
1101 static void ion_dma_buf_kunmap(struct dma_buf *dmabuf, unsigned long offset,
1102                                void *ptr)
1103 {
1104 }
1105
1106 static int ion_dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
1107                                         enum dma_data_direction direction)
1108 {
1109         struct ion_buffer *buffer = dmabuf->priv;
1110         void *vaddr;
1111
1112         if (!buffer->heap->ops->map_kernel) {
1113                 pr_err("%s: map kernel is not implemented by this heap.\n",
1114                        __func__);
1115                 return -ENODEV;
1116         }
1117
1118         mutex_lock(&buffer->lock);
1119         vaddr = ion_buffer_kmap_get(buffer);
1120         mutex_unlock(&buffer->lock);
1121         return PTR_ERR_OR_ZERO(vaddr);
1122 }
1123
1124 static int ion_dma_buf_end_cpu_access(struct dma_buf *dmabuf,
1125                                       enum dma_data_direction direction)
1126 {
1127         struct ion_buffer *buffer = dmabuf->priv;
1128
1129         mutex_lock(&buffer->lock);
1130         ion_buffer_kmap_put(buffer);
1131         mutex_unlock(&buffer->lock);
1132
1133         return 0;
1134 }
1135
1136 static struct dma_buf_ops dma_buf_ops = {
1137         .map_dma_buf = ion_map_dma_buf,
1138         .unmap_dma_buf = ion_unmap_dma_buf,
1139         .mmap = ion_mmap,
1140         .release = ion_dma_buf_release,
1141         .begin_cpu_access = ion_dma_buf_begin_cpu_access,
1142         .end_cpu_access = ion_dma_buf_end_cpu_access,
1143         .kmap_atomic = ion_dma_buf_kmap,
1144         .kunmap_atomic = ion_dma_buf_kunmap,
1145         .kmap = ion_dma_buf_kmap,
1146         .kunmap = ion_dma_buf_kunmap,
1147 };
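
/*
 * Every ion buffer is exported through the dma_buf_ops above, so a device
 * driver can consume it with the standard dma-buf calls; ion_map_dma_buf()
 * simply hands back buffer->sg_table after syncing any dirty pages.  A rough
 * consumer sketch ("dev" is the consuming device, error handling trimmed):
 */
#if 0
static int example_attach(struct device *dev, int fd)
{
        struct dma_buf *dmabuf = dma_buf_get(fd);
        struct dma_buf_attachment *attach;
        struct sg_table *sgt;

        attach = dma_buf_attach(dmabuf, dev);
        sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);

        /* ... program the device with the sg list's dma addresses ... */

        dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
        dma_buf_detach(dmabuf, attach);
        dma_buf_put(dmabuf);
        return 0;
}
#endif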
1148
1149 struct dma_buf *ion_share_dma_buf(struct ion_client *client,
1150                                                 struct ion_handle *handle)
1151 {
1152         DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
1153         struct ion_buffer *buffer;
1154         struct dma_buf *dmabuf;
1155         bool valid_handle;
1156
1157         mutex_lock(&client->lock);
1158         valid_handle = ion_handle_validate(client, handle);
1159         if (!valid_handle) {
1160                 WARN(1, "%s: invalid handle passed to share.\n", __func__);
1161                 mutex_unlock(&client->lock);
1162                 return ERR_PTR(-EINVAL);
1163         }
1164         buffer = handle->buffer;
1165         ion_buffer_get(buffer);
1166         mutex_unlock(&client->lock);
1167
1168         exp_info.ops = &dma_buf_ops;
1169         exp_info.size = buffer->size;
1170         exp_info.flags = O_RDWR;
1171         exp_info.priv = buffer;
1172
1173         dmabuf = dma_buf_export(&exp_info);
1174         if (IS_ERR(dmabuf)) {
1175                 ion_buffer_put(buffer);
1176                 return dmabuf;
1177         }
1178
1179         return dmabuf;
1180 }
1181 EXPORT_SYMBOL(ion_share_dma_buf);
1182
1183 int ion_share_dma_buf_fd(struct ion_client *client, struct ion_handle *handle)
1184 {
1185         struct dma_buf *dmabuf;
1186         int fd;
1187
1188         dmabuf = ion_share_dma_buf(client, handle);
1189         if (IS_ERR(dmabuf))
1190                 return PTR_ERR(dmabuf);
1191
1192         fd = dma_buf_fd(dmabuf, O_CLOEXEC);
1193         if (fd < 0)
1194                 dma_buf_put(dmabuf);
1195
1196         return fd;
1197 }
1198 EXPORT_SYMBOL(ion_share_dma_buf_fd);
1199
1200 struct ion_handle *ion_import_dma_buf(struct ion_client *client,
1201                                       struct dma_buf *dmabuf)
1202 {
1203         struct ion_buffer *buffer;
1204         struct ion_handle *handle;
1205         int ret;
1206
1207         /* if this memory came from ion */
1208
1209         if (dmabuf->ops != &dma_buf_ops) {
1210                 pr_err("%s: can not import dmabuf from another exporter\n",
1211                        __func__);
1212                 return ERR_PTR(-EINVAL);
1213         }
1214         buffer = dmabuf->priv;
1215
1216         mutex_lock(&client->lock);
1217         /* if a handle exists for this buffer just take a reference to it */
1218         handle = ion_handle_lookup(client, buffer);
1219         if (!IS_ERR(handle)) {
1220                 ion_handle_get(handle);
1221                 mutex_unlock(&client->lock);
1222                 goto end;
1223         }
1224
1225         handle = ion_handle_create(client, buffer);
1226         if (IS_ERR(handle)) {
1227                 mutex_unlock(&client->lock);
1228                 goto end;
1229         }
1230
1231         ret = ion_handle_add(client, handle);
1232         mutex_unlock(&client->lock);
1233         if (ret) {
1234                 ion_handle_put(handle);
1235                 handle = ERR_PTR(ret);
1236         }
1237
1238 end:
1239         return handle;
1240 }
1241 EXPORT_SYMBOL(ion_import_dma_buf);
1242
1243 struct ion_handle *ion_import_dma_buf_fd(struct ion_client *client, int fd)
1244 {
1245         struct dma_buf *dmabuf;
1246         struct ion_handle *handle;
1247
1248         dmabuf = dma_buf_get(fd);
1249         if (IS_ERR(dmabuf))
1250                 return ERR_CAST(dmabuf);
1251
1252         handle = ion_import_dma_buf(client, dmabuf);
1253         dma_buf_put(dmabuf);
1254         return handle;
1255 }
1256 EXPORT_SYMBOL(ion_import_dma_buf_fd);
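
/*
 * Sharing sketch: one client turns a handle into a dma-buf fd, and another
 * client (typically in a different process, the fd having been passed over
 * a socket or binder) imports it to get a handle to the same buffer.
 */
#if 0
static int example_share(struct ion_client *producer,
                         struct ion_client *consumer,
                         struct ion_handle *handle)
{
        struct ion_handle *imported;
        int fd;

        fd = ion_share_dma_buf_fd(producer, handle);
        if (fd < 0)
                return fd;

        imported = ion_import_dma_buf_fd(consumer, fd);
        if (IS_ERR(imported))
                return PTR_ERR(imported);

        /* both handles now reference the same underlying ion_buffer */
        return 0;
}
#endif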
1257
1258 static int ion_sync_for_device(struct ion_client *client, int fd)
1259 {
1260         struct dma_buf *dmabuf;
1261         struct ion_buffer *buffer;
1262
1263         dmabuf = dma_buf_get(fd);
1264         if (IS_ERR(dmabuf))
1265                 return PTR_ERR(dmabuf);
1266
1267         /* if this memory came from ion */
1268         if (dmabuf->ops != &dma_buf_ops) {
1269                 pr_err("%s: can not sync dmabuf from another exporter\n",
1270                        __func__);
1271                 dma_buf_put(dmabuf);
1272                 return -EINVAL;
1273         }
1274         buffer = dmabuf->priv;
1275
1276         dma_sync_sg_for_device(NULL, buffer->sg_table->sgl,
1277                                buffer->sg_table->nents, DMA_BIDIRECTIONAL);
1278         dma_buf_put(dmabuf);
1279         return 0;
1280 }
1281
1282 /* fix up the cases where the ioctl direction bits are incorrect */
1283 static unsigned int ion_ioctl_dir(unsigned int cmd)
1284 {
1285         switch (cmd) {
1286         case ION_IOC_SYNC:
1287         case ION_IOC_FREE:
1288         case ION_IOC_CUSTOM:
1289                 return _IOC_WRITE;
1290         default:
1291                 return _IOC_DIR(cmd);
1292         }
1293 }
1294
1295 static long ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
1296 {
1297         struct ion_client *client = filp->private_data;
1298         struct ion_device *dev = client->dev;
1299         struct ion_handle *cleanup_handle = NULL;
1300         int ret = 0;
1301         unsigned int dir;
1302
1303         union {
1304                 struct ion_fd_data fd;
1305                 struct ion_allocation_data allocation;
1306                 struct ion_handle_data handle;
1307                 struct ion_custom_data custom;
1308         } data;
1309
1310         dir = ion_ioctl_dir(cmd);
1311
1312         if (_IOC_SIZE(cmd) > sizeof(data))
1313                 return -EINVAL;
1314
1315         if (dir & _IOC_WRITE)
1316                 if (copy_from_user(&data, (void __user *)arg, _IOC_SIZE(cmd)))
1317                         return -EFAULT;
1318
1319         switch (cmd) {
1320         case ION_IOC_ALLOC:
1321         {
1322                 struct ion_handle *handle;
1323
1324                 handle = ion_alloc(client, data.allocation.len,
1325                                                 data.allocation.align,
1326                                                 data.allocation.heap_id_mask,
1327                                                 data.allocation.flags);
1328                 if (IS_ERR(handle))
1329                         return PTR_ERR(handle);
1330
1331                 data.allocation.handle = handle->id;
1332
1333                 cleanup_handle = handle;
1334                 break;
1335         }
1336         case ION_IOC_FREE:
1337         {
1338                 struct ion_handle *handle;
1339
1340                 mutex_lock(&client->lock);
1341                 handle = ion_handle_get_by_id_nolock(client, data.handle.handle);
1342                 if (IS_ERR(handle)) {
1343                         mutex_unlock(&client->lock);
1344                         return PTR_ERR(handle);
1345                 }
1346                 ion_free_nolock(client, handle);
1347                 ion_handle_put_nolock(handle);
1348                 mutex_unlock(&client->lock);
1349                 break;
1350         }
1351         case ION_IOC_SHARE:
1352         case ION_IOC_MAP:
1353         {
1354                 struct ion_handle *handle;
1355
1356                 handle = ion_handle_get_by_id(client, data.handle.handle);
1357                 if (IS_ERR(handle))
1358                         return PTR_ERR(handle);
1359                 data.fd.fd = ion_share_dma_buf_fd(client, handle);
1360                 ion_handle_put(handle);
1361                 if (data.fd.fd < 0)
1362                         ret = data.fd.fd;
1363                 break;
1364         }
1365         case ION_IOC_IMPORT:
1366         {
1367                 struct ion_handle *handle;
1368
1369                 handle = ion_import_dma_buf_fd(client, data.fd.fd);
1370                 if (IS_ERR(handle))
1371                         ret = PTR_ERR(handle);
1372                 else
1373                         data.handle.handle = handle->id;
1374                 break;
1375         }
1376         case ION_IOC_SYNC:
1377         {
1378                 ret = ion_sync_for_device(client, data.fd.fd);
1379                 break;
1380         }
1381         case ION_IOC_CUSTOM:
1382         {
1383                 if (!dev->custom_ioctl)
1384                         return -ENOTTY;
1385                 ret = dev->custom_ioctl(client, data.custom.cmd,
1386                                                 data.custom.arg);
1387                 break;
1388         }
1389         default:
1390                 return -ENOTTY;
1391         }
1392
1393         if (dir & _IOC_READ) {
1394                 if (copy_to_user((void __user *)arg, &data, _IOC_SIZE(cmd))) {
1395                         if (cleanup_handle)
1396                                 ion_free(client, cleanup_handle);
1397                         return -EFAULT;
1398                 }
1399         }
1400         return ret;
1401 }
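
/*
 * From userspace the same operations go through /dev/ion.  A rough sketch of
 * the usual allocate-share-mmap sequence (userspace code built against the
 * exported uapi header from drivers/staging/android/uapi/; MY_HEAP_ID is
 * platform specific and error handling is trimmed):
 */
#if 0
/* needs <fcntl.h>, <sys/ioctl.h>, <sys/mman.h> and the ion uapi header */
int example_user_alloc(void)
{
        struct ion_allocation_data alloc_data = {
                .len = 4096,
                .align = 4096,
                .heap_id_mask = 1 << MY_HEAP_ID,
                .flags = 0,
        };
        struct ion_fd_data fd_data;
        struct ion_handle_data free_data;
        int ion_fd = open("/dev/ion", O_RDWR);
        void *ptr;

        ioctl(ion_fd, ION_IOC_ALLOC, &alloc_data);   /* fills alloc_data.handle */

        fd_data.handle = alloc_data.handle;
        ioctl(ion_fd, ION_IOC_SHARE, &fd_data);      /* handle -> dma-buf fd */

        ptr = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED,
                   fd_data.fd, 0);                   /* served by ion_mmap() */

        free_data.handle = alloc_data.handle;
        ioctl(ion_fd, ION_IOC_FREE, &free_data);     /* drop the client handle */

        return fd_data.fd;                           /* buffer lives on via the fd */
}
#endif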
1402
1403 static int ion_release(struct inode *inode, struct file *file)
1404 {
1405         struct ion_client *client = file->private_data;
1406
1407         pr_debug("%s: %d\n", __func__, __LINE__);
1408         ion_client_destroy(client);
1409         return 0;
1410 }
1411
1412 static int ion_open(struct inode *inode, struct file *file)
1413 {
1414         struct miscdevice *miscdev = file->private_data;
1415         struct ion_device *dev = container_of(miscdev, struct ion_device, dev);
1416         struct ion_client *client;
1417         char debug_name[64];
1418
1419         pr_debug("%s: %d\n", __func__, __LINE__);
1420         snprintf(debug_name, 64, "%u", task_pid_nr(current->group_leader));
1421         client = ion_client_create(dev, debug_name);
1422         if (IS_ERR(client))
1423                 return PTR_ERR(client);
1424         file->private_data = client;
1425
1426         return 0;
1427 }
1428
1429 static const struct file_operations ion_fops = {
1430         .owner          = THIS_MODULE,
1431         .open           = ion_open,
1432         .release        = ion_release,
1433         .unlocked_ioctl = ion_ioctl,
1434         .compat_ioctl   = compat_ion_ioctl,
1435 };
1436
1437 static size_t ion_debug_heap_total(struct ion_client *client,
1438                                    unsigned int id)
1439 {
1440         size_t size = 0;
1441         struct rb_node *n;
1442
1443         mutex_lock(&client->lock);
1444         for (n = rb_first(&client->handles); n; n = rb_next(n)) {
1445                 struct ion_handle *handle = rb_entry(n,
1446                                                      struct ion_handle,
1447                                                      node);
1448                 if (handle->buffer->heap->id == id)
1449                         size += handle->buffer->size;
1450         }
1451         mutex_unlock(&client->lock);
1452         return size;
1453 }
1454
1455 static int ion_debug_heap_show(struct seq_file *s, void *unused)
1456 {
1457         struct ion_heap *heap = s->private;
1458         struct ion_device *dev = heap->dev;
1459         struct rb_node *n;
1460         size_t total_size = 0;
1461         size_t total_orphaned_size = 0;
1462
1463         seq_printf(s, "%16s %16s %16s\n", "client", "pid", "size");
1464         seq_puts(s, "----------------------------------------------------\n");
1465
1466         mutex_lock(&debugfs_mutex);
1467         for (n = rb_first(&dev->clients); n; n = rb_next(n)) {
1468                 struct ion_client *client = rb_entry(n, struct ion_client,
1469                                                      node);
1470                 size_t size = ion_debug_heap_total(client, heap->id);
1471
1472                 if (!size)
1473                         continue;
1474                 if (client->task) {
1475                         char task_comm[TASK_COMM_LEN];
1476
1477                         get_task_comm(task_comm, client->task);
1478                         seq_printf(s, "%16s %16u %16zu\n", task_comm,
1479                                    client->pid, size);
1480                 } else {
1481                         seq_printf(s, "%16s %16u %16zu\n", client->name,
1482                                    client->pid, size);
1483                 }
1484         }
1485         mutex_unlock(&debugfs_mutex);
1486
1487         seq_puts(s, "----------------------------------------------------\n");
1488         seq_puts(s, "orphaned allocations (info is from last known client):\n");
1489         mutex_lock(&dev->buffer_lock);
1490         for (n = rb_first(&dev->buffers); n; n = rb_next(n)) {
1491                 struct ion_buffer *buffer = rb_entry(n, struct ion_buffer,
1492                                                      node);
1493                 if (buffer->heap->id != heap->id)
1494                         continue;
1495                 total_size += buffer->size;
1496                 if (!buffer->handle_count) {
1497                         seq_printf(s, "%16s %16u %16zu %d %d\n",
1498                                    buffer->task_comm, buffer->pid,
1499                                    buffer->size, buffer->kmap_cnt,
1500                                    atomic_read(&buffer->ref.refcount));
1501                         total_orphaned_size += buffer->size;
1502                 }
1503         }
1504         mutex_unlock(&dev->buffer_lock);
1505         seq_puts(s, "----------------------------------------------------\n");
1506         seq_printf(s, "%16s %16zu\n", "total orphaned",
1507                    total_orphaned_size);
1508         seq_printf(s, "%16s %16zu\n", "total ", total_size);
1509         if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
1510                 seq_printf(s, "%16s %16zu\n", "deferred free",
1511                                 heap->free_list_size);
1512         seq_puts(s, "----------------------------------------------------\n");
1513
1514         if (heap->debug_show)
1515                 heap->debug_show(heap, s, unused);
1516
1517         return 0;
1518 }
1519
1520 static int ion_debug_heap_open(struct inode *inode, struct file *file)
1521 {
1522         return single_open(file, ion_debug_heap_show, inode->i_private);
1523 }
1524
1525 static const struct file_operations debug_heap_fops = {
1526         .open = ion_debug_heap_open,
1527         .read = seq_read,
1528         .llseek = seq_lseek,
1529         .release = single_release,
1530 };
1531
1532 static int debug_shrink_set(void *data, u64 val)
1533 {
1534         struct ion_heap *heap = data;
1535         struct shrink_control sc;
1536         int objs;
1537
1538         sc.gfp_mask = GFP_HIGHUSER;
1539         sc.nr_to_scan = val;
1540
1541         if (!val) {
1542                 objs = heap->shrinker.count_objects(&heap->shrinker, &sc);
1543                 sc.nr_to_scan = objs;
1544         }
1545
1546         heap->shrinker.scan_objects(&heap->shrinker, &sc);
1547         return 0;
1548 }
1549
1550 static int debug_shrink_get(void *data, u64 *val)
1551 {
1552         struct ion_heap *heap = data;
1553         struct shrink_control sc;
1554         int objs;
1555
1556         sc.gfp_mask = GFP_HIGHUSER;
1557         sc.nr_to_scan = 0;
1558
1559         objs = heap->shrinker.count_objects(&heap->shrinker, &sc);
1560         *val = objs;
1561         return 0;
1562 }
1563
1564 DEFINE_SIMPLE_ATTRIBUTE(debug_shrink_fops, debug_shrink_get,
1565                         debug_shrink_set, "%llu\n");
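
/*
 * These two hooks back the per-heap "<name>_shrink" debugfs file created in
 * ion_device_add_heap() below: reading it reports how many objects the
 * heap's shrinker considers freeable, and writing N asks the shrinker to
 * scan (and free) N objects, with 0 meaning "everything currently counted".
 */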
1566
1567 void ion_device_add_heap(struct ion_device *dev, struct ion_heap *heap)
1568 {
1569         struct dentry *debug_file;
1570
1571         if (!heap->ops->allocate || !heap->ops->free || !heap->ops->map_dma ||
1572             !heap->ops->unmap_dma)
1573                 pr_err("%s: can not add heap with invalid ops struct.\n",
1574                        __func__);
1575
1576         spin_lock_init(&heap->free_lock);
1577         heap->free_list_size = 0;
1578
1579         if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
1580                 ion_heap_init_deferred_free(heap);
1581
1582         if ((heap->flags & ION_HEAP_FLAG_DEFER_FREE) || heap->ops->shrink)
1583                 ion_heap_init_shrinker(heap);
1584
1585         heap->dev = dev;
1586         down_write(&dev->lock);
1587         /*
1588          * Use negative heap->id to reverse the priority -- when traversing
1589          * the list later, attempt higher id numbers first.
1590          */
1591         plist_node_init(&heap->node, -heap->id);
1592         plist_add(&heap->node, &dev->heaps);
1593         debug_file = debugfs_create_file(heap->name, 0664,
1594                                         dev->heaps_debug_root, heap,
1595                                         &debug_heap_fops);
1596
1597         if (!debug_file) {
1598                 char buf[256], *path;
1599
1600                 path = dentry_path(dev->heaps_debug_root, buf, 256);
1601                 pr_err("Failed to create heap debugfs at %s/%s\n",
1602                         path, heap->name);
1603         }
1604
1605         if (heap->shrinker.count_objects && heap->shrinker.scan_objects) {
1606                 char debug_name[64];
1607
1608                 snprintf(debug_name, 64, "%s_shrink", heap->name);
1609                 debug_file = debugfs_create_file(
1610                         debug_name, 0644, dev->heaps_debug_root, heap,
1611                         &debug_shrink_fops);
1612                 if (!debug_file) {
1613                         char buf[256], *path;
1614
1615                         path = dentry_path(dev->heaps_debug_root, buf, 256);
1616                         pr_err("Failed to create heap shrinker debugfs at %s/%s\n",
1617                                 path, debug_name);
1618                 }
1619         }
1620
1621         up_write(&dev->lock);
1622 }
1623 EXPORT_SYMBOL(ion_device_add_heap);
1624
1625 struct ion_device *ion_device_create(long (*custom_ioctl)
1626                                      (struct ion_client *client,
1627                                       unsigned int cmd,
1628                                       unsigned long arg))
1629 {
1630         struct ion_device *idev;
1631         int ret;
1632
1633         idev = kzalloc(sizeof(*idev), GFP_KERNEL);
1634         if (!idev)
1635                 return ERR_PTR(-ENOMEM);
1636
1637         idev->dev.minor = MISC_DYNAMIC_MINOR;
1638         idev->dev.name = "ion";
1639         idev->dev.fops = &ion_fops;
1640         idev->dev.parent = NULL;
1641         ret = misc_register(&idev->dev);
1642         if (ret) {
1643                 pr_err("ion: failed to register misc device.\n");
1644                 kfree(idev);
1645                 return ERR_PTR(ret);
1646         }
1647
1648         idev->debug_root = debugfs_create_dir("ion", NULL);
1649         if (!idev->debug_root) {
1650                 pr_err("ion: failed to create debugfs root directory.\n");
1651                 goto debugfs_done;
1652         }
1653         idev->heaps_debug_root = debugfs_create_dir("heaps", idev->debug_root);
1654         if (!idev->heaps_debug_root) {
1655                 pr_err("ion: failed to create debugfs heaps directory.\n");
1656                 goto debugfs_done;
1657         }
1658         idev->clients_debug_root = debugfs_create_dir("clients",
1659                                                 idev->debug_root);
1660         if (!idev->clients_debug_root)
1661                 pr_err("ion: failed to create debugfs clients directory.\n");
1662
1663 debugfs_done:
1664
1665         idev->custom_ioctl = custom_ioctl;
1666         idev->buffers = RB_ROOT;
1667         mutex_init(&idev->buffer_lock);
1668         init_rwsem(&idev->lock);
1669         plist_head_init(&idev->heaps);
1670         idev->clients = RB_ROOT;
1671         ion_root_client = &idev->clients;
1672         mutex_init(&debugfs_mutex);
1673         return idev;
1674 }
1675 EXPORT_SYMBOL(ion_device_create);
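
/*
 * Registration sketch: platform code creates the ion device once and then
 * adds a heap for each entry in its platform data.  ion_heap_create() comes
 * from ion_priv.h; the "nr"/"heaps" layout follows struct ion_platform_data
 * as also used by ion_reserve() below.
 */
#if 0
static struct ion_device *idev;

static int example_probe(struct ion_platform_data *pdata)
{
        int i;

        idev = ion_device_create(NULL);         /* no custom ioctl */
        if (IS_ERR(idev))
                return PTR_ERR(idev);

        for (i = 0; i < pdata->nr; i++) {
                struct ion_heap *heap = ion_heap_create(&pdata->heaps[i]);

                if (!IS_ERR(heap))
                        ion_device_add_heap(idev, heap);
        }
        return 0;
}
#endif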
1676
1677 void ion_device_destroy(struct ion_device *dev)
1678 {
1679         misc_deregister(&dev->dev);
1680         debugfs_remove_recursive(dev->debug_root);
1681         /* XXX need to free the heaps and clients ? */
1682         kfree(dev);
1683 }
1684 EXPORT_SYMBOL(ion_device_destroy);
1685
1686 void __init ion_reserve(struct ion_platform_data *data)
1687 {
1688         int i;
1689
1690         for (i = 0; i < data->nr; i++) {
1691                 if (data->heaps[i].size == 0)
1692                         continue;
1693
1694                 if (data->heaps[i].base == 0) {
1695                         phys_addr_t paddr;
1696
1697                         paddr = memblock_alloc_base(data->heaps[i].size,
1698                                                     data->heaps[i].align,
1699                                                     MEMBLOCK_ALLOC_ANYWHERE);
1700                         if (!paddr) {
1701                                 pr_err("%s: error allocating memblock for heap %d\n",
1702                                         __func__, i);
1703                                 continue;
1704                         }
1705                         data->heaps[i].base = paddr;
1706                 } else {
1707                         int ret = memblock_reserve(data->heaps[i].base,
1708                                                data->heaps[i].size);
1709                         if (ret)
1710                                 pr_err("memblock reserve of %zx@%lx failed\n",
1711                                        data->heaps[i].size,
1712                                        data->heaps[i].base);
1713                 }
1714                 pr_info("%s: %s reserved base %lx size %zu\n", __func__,
1715                         data->heaps[i].name,
1716                         data->heaps[i].base,
1717                         data->heaps[i].size);
1718         }
1719 }