/*
 * drivers/staging/android/ion/ion_heap.c
 *
 * Copyright (C) 2011 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

#include <linux/err.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/mm.h>
#include <linux/rtmutex.h>
#include <linux/sched.h>
#include <linux/scatterlist.h>
#include <linux/vmalloc.h>
#include "ion.h"
#include "ion_priv.h"
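
/*
 * Map a buffer's backing pages into the kernel address space with
 * vmap().  The pgprot comes from the buffer's cache flags: cached
 * buffers get a normal kernel mapping, uncached buffers get a
 * write-combined one.
 */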
void *ion_heap_map_kernel(struct ion_heap *heap,
                          struct ion_buffer *buffer)
{
        struct scatterlist *sg;
        int i, j;
        void *vaddr;
        pgprot_t pgprot;
        struct sg_table *table = buffer->sg_table;
        int npages = PAGE_ALIGN(buffer->size) / PAGE_SIZE;
        struct page **pages = vmalloc(sizeof(struct page *) * npages);
        struct page **tmp = pages;

        if (!pages)
                return ERR_PTR(-ENOMEM);

        if (buffer->flags & ION_FLAG_CACHED)
                pgprot = PAGE_KERNEL;
        else
                pgprot = pgprot_writecombine(PAGE_KERNEL);

        for_each_sg(table->sgl, sg, table->nents, i) {
                int npages_this_entry = PAGE_ALIGN(sg->length) / PAGE_SIZE;
                struct page *page = sg_page(sg);

                BUG_ON(i >= npages);
                for (j = 0; j < npages_this_entry; j++)
                        *(tmp++) = page++;
        }
        vaddr = vmap(pages, npages, VM_MAP, pgprot);
        vfree(pages);

        if (!vaddr)
                return ERR_PTR(-ENOMEM);

        return vaddr;
}

void ion_heap_unmap_kernel(struct ion_heap *heap,
                           struct ion_buffer *buffer)
{
        vunmap(buffer->vaddr);
}
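
/*
 * Map a buffer into a userspace vma by walking the buffer's sg_table
 * and calling remap_pfn_range() on each contiguous chunk.  The vma's
 * page offset selects where in the buffer the mapping starts.
 */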
int ion_heap_map_user(struct ion_heap *heap, struct ion_buffer *buffer,
                      struct vm_area_struct *vma)
{
        struct sg_table *table = buffer->sg_table;
        unsigned long addr = vma->vm_start;
        unsigned long offset = vma->vm_pgoff * PAGE_SIZE;
        struct scatterlist *sg;
        int i;
        int ret;

        for_each_sg(table->sgl, sg, table->nents, i) {
                struct page *page = sg_page(sg);
                unsigned long remainder = vma->vm_end - addr;
                unsigned long len = sg->length;

                if (offset >= sg->length) {
                        offset -= sg->length;
                        continue;
                } else if (offset) {
                        page += offset / PAGE_SIZE;
                        len = sg->length - offset;
                        offset = 0;
                }
                len = min(len, remainder);
                ret = remap_pfn_range(vma, addr, page_to_pfn(page), len,
                                      vma->vm_page_prot);
                if (ret)
                        return ret;
                addr += len;
                if (addr >= vma->vm_end)
                        return 0;
        }
        return 0;
}
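
/*
 * Zero a batch of pages through a temporary vm_map_ram() mapping so
 * the requested pgprot is honoured regardless of where the pages sit.
 */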
static int ion_heap_clear_pages(struct page **pages, int num, pgprot_t pgprot)
{
        void *addr = vm_map_ram(pages, num, -1, pgprot);

        if (!addr)
                return -ENOMEM;
        memset(addr, 0, PAGE_SIZE * num);
        vm_unmap_ram(addr, num);

        return 0;
}
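
/*
 * Zero every page in a scatterlist, batching up to 32 pages per
 * mapping to keep vm_map_ram() overhead down.
 */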
static int ion_heap_sglist_zero(struct scatterlist *sgl, unsigned int nents,
                                pgprot_t pgprot)
{
        int p = 0;
        int ret = 0;
        struct sg_page_iter piter;
        struct page *pages[32];

        for_each_sg_page(sgl, &piter, nents, 0) {
                pages[p++] = sg_page_iter_page(&piter);
                if (p == ARRAY_SIZE(pages)) {
                        ret = ion_heap_clear_pages(pages, p, pgprot);
                        if (ret)
                                return ret;
                        p = 0;
                }
        }
        if (p)
                ret = ion_heap_clear_pages(pages, p, pgprot);

        return ret;
}
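
/*
 * Zero an entire buffer, using a pgprot that matches the buffer's
 * cache flags.
 */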
int ion_heap_buffer_zero(struct ion_buffer *buffer)
{
        struct sg_table *table = buffer->sg_table;
        pgprot_t pgprot;

        if (buffer->flags & ION_FLAG_CACHED)
                pgprot = PAGE_KERNEL;
        else
                pgprot = pgprot_writecombine(PAGE_KERNEL);

        return ion_heap_sglist_zero(table->sgl, table->nents, pgprot);
}

int ion_heap_pages_zero(struct page *page, size_t size, pgprot_t pgprot)
{
        struct scatterlist sg;

        sg_init_table(&sg, 1);
        sg_set_page(&sg, page, size, 0);
        return ion_heap_sglist_zero(&sg, 1, pgprot);
}
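
/*
 * Queue a freed buffer on the heap's free list and wake the deferred
 * free thread to reap it.
 */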
void ion_heap_freelist_add(struct ion_heap *heap, struct ion_buffer *buffer)
{
        spin_lock(&heap->free_lock);
        list_add(&buffer->list, &heap->free_list);
        heap->free_list_size += buffer->size;
        spin_unlock(&heap->free_lock);
        wake_up(&heap->waitqueue);
}

size_t ion_heap_freelist_size(struct ion_heap *heap)
{
        size_t size;

        spin_lock(&heap->free_lock);
        size = heap->free_list_size;
        spin_unlock(&heap->free_lock);

        return size;
}
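
/*
 * Drain up to @size bytes from the free list, destroying buffers as we
 * go.  A size of zero means "drain everything".  When called from the
 * shrinker, @skip_pools marks each buffer so its pages bypass any page
 * pool and go straight back to the system.
 */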
static size_t _ion_heap_freelist_drain(struct ion_heap *heap, size_t size,
                                       bool skip_pools)
{
        struct ion_buffer *buffer;
        size_t total_drained = 0;

        if (ion_heap_freelist_size(heap) == 0)
                return 0;

        spin_lock(&heap->free_lock);
        if (size == 0)
                size = heap->free_list_size;

        while (!list_empty(&heap->free_list)) {
                if (total_drained >= size)
                        break;
                buffer = list_first_entry(&heap->free_list, struct ion_buffer,
                                          list);
                list_del(&buffer->list);
                heap->free_list_size -= buffer->size;
                if (skip_pools)
                        buffer->private_flags |= ION_PRIV_FLAG_SHRINKER_FREE;
                total_drained += buffer->size;
                spin_unlock(&heap->free_lock);
                ion_buffer_destroy(buffer);
                spin_lock(&heap->free_lock);
        }
        spin_unlock(&heap->free_lock);

        return total_drained;
}
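
/* Public wrappers: a plain drain, and a shrinker drain that skips pools. */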
size_t ion_heap_freelist_drain(struct ion_heap *heap, size_t size)
{
        return _ion_heap_freelist_drain(heap, size, false);
}

size_t ion_heap_freelist_shrink(struct ion_heap *heap, size_t size)
{
        return _ion_heap_freelist_drain(heap, size, true);
}
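
/*
 * Kthread body for deferred freeing: sleep until buffers appear on the
 * free list, then pop and destroy them one at a time.
 */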
static int ion_heap_deferred_free(void *data)
{
        struct ion_heap *heap = data;

        while (true) {
                struct ion_buffer *buffer;

                wait_event_freezable(heap->waitqueue,
                                     ion_heap_freelist_size(heap) > 0);

                spin_lock(&heap->free_lock);
                if (list_empty(&heap->free_list)) {
                        spin_unlock(&heap->free_lock);
                        continue;
                }
                buffer = list_first_entry(&heap->free_list, struct ion_buffer,
                                          list);
                list_del(&buffer->list);
                heap->free_list_size -= buffer->size;
                spin_unlock(&heap->free_lock);
                ion_buffer_destroy(buffer);
        }

        return 0;
}
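
/*
 * Set up the free list and spawn the deferred free thread at the
 * lowest scheduling priority so freeing never competes with real work.
 */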
int ion_heap_init_deferred_free(struct ion_heap *heap)
{
        struct sched_param param = { .sched_priority = 0 };

        INIT_LIST_HEAD(&heap->free_list);
        init_waitqueue_head(&heap->waitqueue);
        heap->task = kthread_run(ion_heap_deferred_free, heap,
                                 "%s", heap->name);
        if (IS_ERR(heap->task)) {
                pr_err("%s: creating thread for deferred free failed\n",
                       __func__);
                return PTR_ERR_OR_ZERO(heap->task);
        }
        sched_setscheduler(heap->task, SCHED_IDLE, &param);
        return 0;
}
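
/*
 * Shrinker "count" callback: report how many pages could be reclaimed,
 * from both the free list and the heap's own pools.
 */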
static unsigned long ion_heap_shrink_count(struct shrinker *shrinker,
                                           struct shrink_control *sc)
{
        struct ion_heap *heap = container_of(shrinker, struct ion_heap,
                                             shrinker);
        int total = 0;

        total = ion_heap_freelist_size(heap) / PAGE_SIZE;
        if (heap->ops->shrink)
                total += heap->ops->shrink(heap, sc->gfp_mask, 0);
        return total;
}
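
/*
 * Shrinker "scan" callback: reclaim from the free list first, then ask
 * the heap to shrink its pools for whatever is still outstanding.
 */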
static unsigned long ion_heap_shrink_scan(struct shrinker *shrinker,
                                          struct shrink_control *sc)
{
        struct ion_heap *heap = container_of(shrinker, struct ion_heap,
                                             shrinker);
        int freed = 0;
        int to_scan = sc->nr_to_scan;

        if (to_scan == 0)
                return 0;

        /*
         * shrink the free list first, no point in zeroing the memory if
         * we're just going to reclaim it. Also, skip any possible page
         * pooling.
         */
        if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
                freed = ion_heap_freelist_shrink(heap, to_scan * PAGE_SIZE) /
                                PAGE_SIZE;

        to_scan -= freed;
        if (to_scan <= 0)
                return freed;

        if (heap->ops->shrink)
                freed += heap->ops->shrink(heap, sc->gfp_mask, to_scan);
        return freed;
}

void ion_heap_init_shrinker(struct ion_heap *heap)
{
        heap->shrinker.count_objects = ion_heap_shrink_count;
        heap->shrinker.scan_objects = ion_heap_shrink_scan;
        heap->shrinker.seeks = DEFAULT_SEEKS;
        heap->shrinker.batch = 0;
        register_shrinker(&heap->shrinker);
}
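
/*
 * Instantiate a heap of the requested type from platform data and fill
 * in its name and id.
 */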
struct ion_heap *ion_heap_create(struct ion_platform_heap *heap_data)
{
        struct ion_heap *heap = NULL;

        switch (heap_data->type) {
        case ION_HEAP_TYPE_SYSTEM_CONTIG:
                heap = ion_system_contig_heap_create(heap_data);
                break;
        case ION_HEAP_TYPE_SYSTEM:
                heap = ion_system_heap_create(heap_data);
                break;
        case ION_HEAP_TYPE_CARVEOUT:
                heap = ion_carveout_heap_create(heap_data);
                break;
        case ION_HEAP_TYPE_CHUNK:
                heap = ion_chunk_heap_create(heap_data);
                break;
        case ION_HEAP_TYPE_DMA:
                heap = ion_cma_heap_create(heap_data);
                break;
        default:
                pr_err("%s: Invalid heap type %d\n", __func__,
                       heap_data->type);
                return ERR_PTR(-EINVAL);
        }

        if (IS_ERR_OR_NULL(heap)) {
                pr_err("%s: error creating heap %s type %d base %lu size %zu\n",
                       __func__, heap_data->name, heap_data->type,
                       heap_data->base, heap_data->size);
                return ERR_PTR(-EINVAL);
        }

        heap->name = heap_data->name;
        heap->id = heap_data->id;
        return heap;
}
EXPORT_SYMBOL(ion_heap_create);
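
/*
 * Tear down a heap through its type-specific destroy routine.  A NULL
 * heap is tolerated so callers can unwind unconditionally.
 */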
void ion_heap_destroy(struct ion_heap *heap)
{
        if (!heap)
                return;

        switch (heap->type) {
        case ION_HEAP_TYPE_SYSTEM_CONTIG:
                ion_system_contig_heap_destroy(heap);
                break;
        case ION_HEAP_TYPE_SYSTEM:
                ion_system_heap_destroy(heap);
                break;
        case ION_HEAP_TYPE_CARVEOUT:
                ion_carveout_heap_destroy(heap);
                break;
        case ION_HEAP_TYPE_CHUNK:
                ion_chunk_heap_destroy(heap);
                break;
        case ION_HEAP_TYPE_DMA:
                ion_cma_heap_destroy(heap);
                break;
        default:
                pr_err("%s: Invalid heap type %d\n", __func__,
                       heap->type);
        }
}
EXPORT_SYMBOL(ion_heap_destroy);