/*
 * drivers/staging/android/ion/ion_heap.c
 *
 * Copyright (C) 2011 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

#include <linux/err.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/mm.h>
#include <linux/rtmutex.h>
#include <linux/sched.h>
#include <uapi/linux/sched/types.h>
#include <linux/scatterlist.h>
#include <linux/vmalloc.h>

#include "ion.h"

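/*
 * Map every page of the buffer's sg_table into one virtually contiguous
 * kernel address range with vmap().  The mapping is cacheable when the
 * buffer was allocated with ION_FLAG_CACHED, write-combined otherwise.
 */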
void *ion_heap_map_kernel(struct ion_heap *heap,
			  struct ion_buffer *buffer)
{
	struct scatterlist *sg;
	int i, j;
	void *vaddr;
	pgprot_t pgprot;
	struct sg_table *table = buffer->sg_table;
	int npages = PAGE_ALIGN(buffer->size) / PAGE_SIZE;
	struct page **pages = vmalloc(sizeof(struct page *) * npages);
	struct page **tmp = pages;

	if (!pages)
		return ERR_PTR(-ENOMEM);

	if (buffer->flags & ION_FLAG_CACHED)
		pgprot = PAGE_KERNEL;
	else
		pgprot = pgprot_writecombine(PAGE_KERNEL);

	for_each_sg(table->sgl, sg, table->nents, i) {
		int npages_this_entry = PAGE_ALIGN(sg->length) / PAGE_SIZE;
		struct page *page = sg_page(sg);

		BUG_ON(i >= npages);
		for (j = 0; j < npages_this_entry; j++)
			*(tmp++) = page++;
	}
	vaddr = vmap(pages, npages, VM_MAP, pgprot);
	vfree(pages);

	if (!vaddr)
		return ERR_PTR(-ENOMEM);

	return vaddr;
}

void ion_heap_unmap_kernel(struct ion_heap *heap,
			   struct ion_buffer *buffer)
{
	vunmap(buffer->vaddr);
}

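/*
 * Map the buffer into a userspace VMA by walking the sg_table and
 * remapping each run of physically contiguous pages with
 * remap_pfn_range().  vma->vm_pgoff selects the starting page offset
 * within the buffer.
 */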
int ion_heap_map_user(struct ion_heap *heap, struct ion_buffer *buffer,
		      struct vm_area_struct *vma)
{
	struct sg_table *table = buffer->sg_table;
	unsigned long addr = vma->vm_start;
	unsigned long offset = vma->vm_pgoff * PAGE_SIZE;
	struct scatterlist *sg;
	int i;
	int ret;

	for_each_sg(table->sgl, sg, table->nents, i) {
		struct page *page = sg_page(sg);
		unsigned long remainder = vma->vm_end - addr;
		unsigned long len = sg->length;

		if (offset >= sg->length) {
			offset -= sg->length;
			continue;
		} else if (offset) {
			page += offset / PAGE_SIZE;
			len = sg->length - offset;
			offset = 0;
		}
		len = min(len, remainder);
		ret = remap_pfn_range(vma, addr, page_to_pfn(page), len,
				      vma->vm_page_prot);
		if (ret)
			return ret;
		addr += len;
		if (addr >= vma->vm_end)
			return 0;
	}

	return 0;
}

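/*
 * Zero a batch of pages by mapping them briefly with vm_map_ram() and
 * clearing the mapped window with memset().
 */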
static int ion_heap_clear_pages(struct page **pages, int num, pgprot_t pgprot)
{
	void *addr = vm_map_ram(pages, num, -1, pgprot);

	if (!addr)
		return -ENOMEM;
	memset(addr, 0, PAGE_SIZE * num);
	vm_unmap_ram(addr, num);
	return 0;
}

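/*
 * Zero an entire scatterlist, feeding ion_heap_clear_pages() batches of
 * up to 32 pages so only a small window is mapped at any one time.
 */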
static int ion_heap_sglist_zero(struct scatterlist *sgl, unsigned int nents,
				pgprot_t pgprot)
{
	int p = 0;
	int ret = 0;
	struct sg_page_iter piter;
	struct page *pages[32];

	for_each_sg_page(sgl, &piter, nents, 0) {
		pages[p++] = sg_page_iter_page(&piter);
		if (p == ARRAY_SIZE(pages)) {
			ret = ion_heap_clear_pages(pages, p, pgprot);
			if (ret)
				return ret;
			p = 0;
		}
	}
	if (p)
		ret = ion_heap_clear_pages(pages, p, pgprot);

	return ret;
}

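/*
 * Zero a whole buffer through a kernel mapping whose cache attributes
 * match how the buffer was allocated.
 */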
int ion_heap_buffer_zero(struct ion_buffer *buffer)
{
	struct sg_table *table = buffer->sg_table;
	pgprot_t pgprot;

	if (buffer->flags & ION_FLAG_CACHED)
		pgprot = PAGE_KERNEL;
	else
		pgprot = pgprot_writecombine(PAGE_KERNEL);

	return ion_heap_sglist_zero(table->sgl, table->nents, pgprot);
}

int ion_heap_pages_zero(struct page *page, size_t size, pgprot_t pgprot)
{
	struct scatterlist sg;

	sg_init_table(&sg, 1);
	sg_set_page(&sg, page, size, 0);
	return ion_heap_sglist_zero(&sg, 1, pgprot);
}

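/*
 * Deferred-free support: instead of being destroyed immediately, freed
 * buffers are parked on heap->free_list and torn down later, either by
 * a low-priority kernel thread or by the shrinker under memory
 * pressure.
 */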
void ion_heap_freelist_add(struct ion_heap *heap, struct ion_buffer *buffer)
{
	spin_lock(&heap->free_lock);
	list_add(&buffer->list, &heap->free_list);
	heap->free_list_size += buffer->size;
	spin_unlock(&heap->free_lock);
	wake_up(&heap->waitqueue);
}

size_t ion_heap_freelist_size(struct ion_heap *heap)
{
	size_t size;

	spin_lock(&heap->free_lock);
	size = heap->free_list_size;
	spin_unlock(&heap->free_lock);
	return size;
}

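/*
 * Drain up to @size bytes of buffers from the free list; @size == 0
 * means drain everything.  When @skip_pools is set, the buffers are
 * tagged ION_PRIV_FLAG_SHRINKER_FREE so heaps can bypass their page
 * pools and return memory to the system.  free_lock is dropped around
 * ion_buffer_destroy() because destruction may sleep.
 */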
static size_t _ion_heap_freelist_drain(struct ion_heap *heap, size_t size,
				       bool skip_pools)
{
	struct ion_buffer *buffer;
	size_t total_drained = 0;

	if (ion_heap_freelist_size(heap) == 0)
		return 0;

	spin_lock(&heap->free_lock);
	if (size == 0)
		size = heap->free_list_size;

	while (!list_empty(&heap->free_list)) {
		if (total_drained >= size)
			break;
		buffer = list_first_entry(&heap->free_list, struct ion_buffer,
					  list);
		list_del(&buffer->list);
		heap->free_list_size -= buffer->size;
		if (skip_pools)
			buffer->private_flags |= ION_PRIV_FLAG_SHRINKER_FREE;
		total_drained += buffer->size;
		spin_unlock(&heap->free_lock);
		ion_buffer_destroy(buffer);
		spin_lock(&heap->free_lock);
	}
	spin_unlock(&heap->free_lock);

	return total_drained;
}

size_t ion_heap_freelist_drain(struct ion_heap *heap, size_t size)
{
	return _ion_heap_freelist_drain(heap, size, false);
}

size_t ion_heap_freelist_shrink(struct ion_heap *heap, size_t size)
{
	return _ion_heap_freelist_drain(heap, size, true);
}

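/*
 * Main loop of the deferred-free thread: sleep until the free list is
 * non-empty, then destroy queued buffers one at a time.
 */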
static int ion_heap_deferred_free(void *data)
{
	struct ion_heap *heap = data;

	while (true) {
		struct ion_buffer *buffer;

		wait_event_freezable(heap->waitqueue,
				     ion_heap_freelist_size(heap) > 0);

		spin_lock(&heap->free_lock);
		if (list_empty(&heap->free_list)) {
			spin_unlock(&heap->free_lock);
			continue;
		}
		buffer = list_first_entry(&heap->free_list, struct ion_buffer,
					  list);
		list_del(&buffer->list);
		heap->free_list_size -= buffer->size;
		spin_unlock(&heap->free_lock);
		ion_buffer_destroy(buffer);
	}

	return 0;
}

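/*
 * Initialize the free list and spawn the deferred-free thread.  The
 * thread runs at SCHED_IDLE so freeing work only consumes CPU time
 * that would otherwise be idle.
 */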
int ion_heap_init_deferred_free(struct ion_heap *heap)
{
	struct sched_param param = { .sched_priority = 0 };

	INIT_LIST_HEAD(&heap->free_list);
	init_waitqueue_head(&heap->waitqueue);
	heap->task = kthread_run(ion_heap_deferred_free, heap,
				 "%s", heap->name);
	if (IS_ERR(heap->task)) {
		pr_err("%s: creating thread for deferred free failed\n",
		       __func__);
		return PTR_ERR_OR_ZERO(heap->task);
	}
	sched_setscheduler(heap->task, SCHED_IDLE, &param);

	return 0;
}

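/*
 * Shrinker callbacks: count_objects reports freeable pages and
 * scan_objects reclaims them, combining the deferred-free list with
 * any heap-specific shrink operation.
 */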
static unsigned long ion_heap_shrink_count(struct shrinker *shrinker,
					   struct shrink_control *sc)
{
	struct ion_heap *heap = container_of(shrinker, struct ion_heap,
					     shrinker);
	int total = 0;

	total = ion_heap_freelist_size(heap) / PAGE_SIZE;
	if (heap->ops->shrink)
		total += heap->ops->shrink(heap, sc->gfp_mask, 0);

	return total;
}

static unsigned long ion_heap_shrink_scan(struct shrinker *shrinker,
					  struct shrink_control *sc)
{
	struct ion_heap *heap = container_of(shrinker, struct ion_heap,
					     shrinker);
	int freed = 0;
	int to_scan = sc->nr_to_scan;

	if (to_scan == 0)
		return 0;

	/*
	 * shrink the free list first, no point in zeroing the memory if we're
	 * just going to reclaim it. Also, skip any possible page pooling.
	 */
	if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
		freed = ion_heap_freelist_shrink(heap, to_scan * PAGE_SIZE) /
				PAGE_SIZE;

	to_scan -= freed;
	if (to_scan <= 0)
		return freed;

	if (heap->ops->shrink)
		freed += heap->ops->shrink(heap, sc->gfp_mask, to_scan);

	return freed;
}

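/*
 * Register the heap with the VM shrinker core so its freeable memory
 * can be reclaimed under memory pressure.
 */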
void ion_heap_init_shrinker(struct ion_heap *heap)
{
	heap->shrinker.count_objects = ion_heap_shrink_count;
	heap->shrinker.scan_objects = ion_heap_shrink_scan;
	heap->shrinker.seeks = DEFAULT_SEEKS;
	heap->shrinker.batch = 0;
	register_shrinker(&heap->shrinker);
}

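/*
 * Create a heap of the type described by @heap_data by dispatching to
 * the matching heap-specific constructor.
 */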
struct ion_heap *ion_heap_create(struct ion_platform_heap *heap_data)
{
	struct ion_heap *heap = NULL;

	switch (heap_data->type) {
	case ION_HEAP_TYPE_SYSTEM_CONTIG:
		heap = ion_system_contig_heap_create(heap_data);
		break;
	case ION_HEAP_TYPE_SYSTEM:
		heap = ion_system_heap_create(heap_data);
		break;
	case ION_HEAP_TYPE_CARVEOUT:
		heap = ion_carveout_heap_create(heap_data);
		break;
	case ION_HEAP_TYPE_CHUNK:
		heap = ion_chunk_heap_create(heap_data);
		break;
	case ION_HEAP_TYPE_DMA:
		heap = ion_cma_heap_create(heap_data);
		break;
	default:
		pr_err("%s: Invalid heap type %d\n", __func__,
		       heap_data->type);
		return ERR_PTR(-EINVAL);
	}

	if (IS_ERR_OR_NULL(heap)) {
		pr_err("%s: error creating heap %s type %d base %lu size %zu\n",
		       __func__, heap_data->name, heap_data->type,
		       heap_data->base, heap_data->size);
		return ERR_PTR(-EINVAL);
	}

	heap->name = heap_data->name;
	heap->id = heap_data->id;

	return heap;
}
EXPORT_SYMBOL(ion_heap_create);

void ion_heap_destroy(struct ion_heap *heap)
{
	if (!heap)
		return;

	switch (heap->type) {
	case ION_HEAP_TYPE_SYSTEM_CONTIG:
		ion_system_contig_heap_destroy(heap);
		break;
	case ION_HEAP_TYPE_SYSTEM:
		ion_system_heap_destroy(heap);
		break;
	case ION_HEAP_TYPE_CARVEOUT:
		ion_carveout_heap_destroy(heap);
		break;
	case ION_HEAP_TYPE_CHUNK:
		ion_chunk_heap_destroy(heap);
		break;
	case ION_HEAP_TYPE_DMA:
		ion_cma_heap_destroy(heap);
		break;
	default:
		pr_err("%s: Invalid heap type %d\n", __func__,
		       heap->type);
	}
}
EXPORT_SYMBOL(ion_heap_destroy);