/*
 * Contiguous Memory Allocator for DMA mapping framework
 * Copyright (c) 2010-2011 by Samsung Electronics.
 * Written by:
 *	Marek Szyprowski <m.szyprowski@samsung.com>
 *	Michal Nazarewicz <mina86@mina86.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of the
 * License or (at your option) any later version.
 */
#define pr_fmt(fmt) "cma: " fmt
#ifdef CONFIG_CMA_DEBUG
#ifndef DEBUG
#  define DEBUG
#endif
#endif

#include <asm/page.h>
#include <asm/dma-contiguous.h>

#include <linux/memblock.h>
#include <linux/err.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/page-isolation.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/mm_types.h>
#include <linux/dma-contiguous.h>
struct cma {
	unsigned long	base_pfn;
	unsigned long	count;
	unsigned long	*bitmap;
	struct mutex	lock;
};

struct cma *dma_contiguous_default_area;
#ifdef CONFIG_CMA_SIZE_MBYTES
#define CMA_SIZE_MBYTES CONFIG_CMA_SIZE_MBYTES
#else
#define CMA_SIZE_MBYTES 0
#endif
/*
 * Default global CMA area size can be defined in the kernel's .config.
 * This is useful mainly for distro maintainers to create a kernel
 * that works correctly for most supported systems.
 * The size can be set in bytes or as a percentage of the total memory
 * in the system.
 *
 * Users who want to set the size of the global CMA area for their system
 * should use the cma= kernel parameter.
 */
static const phys_addr_t size_bytes = CMA_SIZE_MBYTES * SZ_1M;
static phys_addr_t size_cmdline = -1;
static int __init early_cma(char *p)
{
	pr_debug("%s(%s)\n", __func__, p);
	size_cmdline = memparse(p, &p);
	return 0;
}
early_param("cma", early_cma);
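
/*
 * Illustrative example (not part of the original file): booting with
 * "cma=64M" makes memparse() above store 64 << 20 in size_cmdline,
 * which then takes precedence over any CONFIG_CMA_SIZE_* default in
 * dma_contiguous_reserve() below.
 */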
#ifdef CONFIG_CMA_SIZE_PERCENTAGE

static phys_addr_t __init __maybe_unused cma_early_percent_memory(void)
{
	struct memblock_region *reg;
	unsigned long total_pages = 0;

	/*
	 * We cannot use memblock_phys_mem_size() here, because
	 * memblock_analyze() has not been called yet.
	 */
	for_each_memblock(memory, reg)
		total_pages += memblock_region_memory_end_pfn(reg) -
			       memblock_region_memory_base_pfn(reg);

	return (total_pages * CONFIG_CMA_SIZE_PERCENTAGE / 100) << PAGE_SHIFT;
}
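
/*
 * Worked example (assumed numbers): on a 1 GiB machine with 4 KiB pages,
 * total_pages is 262144; with CONFIG_CMA_SIZE_PERCENTAGE=10 this returns
 * (262144 * 10 / 100) << PAGE_SHIFT = 26214 pages, i.e. about 102 MiB.
 */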
#else

static inline __maybe_unused phys_addr_t cma_early_percent_memory(void)
{
	return 0;
}

#endif
/**
 * dma_contiguous_reserve() - reserve area(s) for contiguous memory handling
 * @limit: End address of the reserved memory (optional, 0 for any).
 *
 * This function reserves memory from the early allocator. It should be
 * called by arch specific code once the early allocator (memblock or bootmem)
 * has been activated and all other subsystems have already allocated/reserved
 * memory.
 */
void __init dma_contiguous_reserve(phys_addr_t limit)
{
	phys_addr_t selected_size = 0;

	pr_debug("%s(limit %08lx)\n", __func__, (unsigned long)limit);
	if (size_cmdline != -1) {
		selected_size = size_cmdline;
	} else {
#ifdef CONFIG_CMA_SIZE_SEL_MBYTES
		selected_size = size_bytes;
#elif defined(CONFIG_CMA_SIZE_SEL_PERCENTAGE)
		selected_size = cma_early_percent_memory();
#elif defined(CONFIG_CMA_SIZE_SEL_MIN)
		selected_size = min(size_bytes, cma_early_percent_memory());
#elif defined(CONFIG_CMA_SIZE_SEL_MAX)
		selected_size = max(size_bytes, cma_early_percent_memory());
#endif
	}
	if (selected_size && !dma_contiguous_default_area) {
		pr_debug("%s: reserving %ld MiB for global area\n", __func__,
			 (unsigned long)selected_size / SZ_1M);

		dma_contiguous_reserve_area(selected_size, 0, limit,
					    &dma_contiguous_default_area);
	}
}
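
/*
 * Usage sketch (illustrative): arch setup code calls this once after
 * memblock is initialised, e.g. from an ARM memblock init path:
 *
 *	dma_contiguous_reserve(arm_dma_limit);
 *
 * where the limit keeps the global area within DMA-addressable memory.
 */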
static DEFINE_MUTEX(cma_mutex);
static int __init cma_activate_area(struct cma *cma)
{
	int bitmap_size = BITS_TO_LONGS(cma->count) * sizeof(long);
	unsigned long base_pfn = cma->base_pfn, pfn = base_pfn;
	unsigned i = cma->count >> pageblock_order;
	struct zone *zone;

	cma->bitmap = kzalloc(bitmap_size, GFP_KERNEL);
	if (!cma->bitmap)
		return -ENOMEM;

	WARN_ON_ONCE(!pfn_valid(pfn));
	zone = page_zone(pfn_to_page(pfn));

	do {
		unsigned j;

		base_pfn = pfn;
		for (j = pageblock_nr_pages; j; --j, pfn++) {
			WARN_ON_ONCE(!pfn_valid(pfn));
			/* all pages of an area must live in a single zone */
			if (page_zone(pfn_to_page(pfn)) != zone)
				return -EINVAL;
		}
		init_cma_reserved_pageblock(pfn_to_page(base_pfn));
	} while (--i);

	mutex_init(&cma->lock);
	return 0;
}
static struct cma cma_areas[MAX_CMA_AREAS];
static unsigned cma_area_count;
static int __init cma_init_reserved_areas(void)
{
	int i;

	for (i = 0; i < cma_area_count; i++) {
		int ret = cma_activate_area(&cma_areas[i]);

		if (ret)
			return ret;
	}
	return 0;
}
core_initcall(cma_init_reserved_areas);
/**
 * dma_contiguous_reserve_area() - reserve custom contiguous area
 * @size: Size of the reserved area (in bytes).
 * @base: Base address of the reserved area (optional, use 0 for any).
 * @limit: End address of the reserved memory (optional, 0 for any).
 * @res_cma: Pointer to store the created cma region.
 *
 * This function reserves memory from the early allocator. It should be
 * called by arch specific code once the early allocator (memblock or bootmem)
 * has been activated and all other subsystems have already allocated/reserved
 * memory. It allows the creation of custom reserved areas for specific
 * devices.
 */
int __init dma_contiguous_reserve_area(phys_addr_t size, phys_addr_t base,
				       phys_addr_t limit, struct cma **res_cma)
{
	struct cma *cma = &cma_areas[cma_area_count];
	phys_addr_t alignment;
	int ret = 0;
	pr_debug("%s(size %lx, base %08lx, limit %08lx)\n", __func__,
		 (unsigned long)size, (unsigned long)base,
		 (unsigned long)limit);
	/* Sanity checks */
	if (cma_area_count == ARRAY_SIZE(cma_areas)) {
		pr_err("Not enough slots for CMA reserved regions!\n");
		return -ENOSPC;
	}

	if (!size)
		return -EINVAL;
	/* Sanitise input arguments */
	alignment = PAGE_SIZE << max(MAX_ORDER - 1, pageblock_order);
	base = ALIGN(base, alignment);
	size = ALIGN(size, alignment);
	limit &= ~(alignment - 1);
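
	/*
	 * Worked example (assumed config): with 4 KiB pages, MAX_ORDER = 11
	 * and pageblock_order <= 10, alignment is PAGE_SIZE << 10 = 4 MiB;
	 * a 5 MiB request is therefore rounded up to an 8 MiB area aligned
	 * to a 4 MiB boundary.
	 */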
	/* Reserve memory */
	if (base) {
		if (memblock_is_region_reserved(base, size) ||
		    memblock_reserve(base, size) < 0) {
			ret = -EBUSY;
			goto err;
		}
	} else {
		/*
		 * Use __memblock_alloc_base() since
		 * memblock_alloc_base() panic()s on failure.
		 */
		phys_addr_t addr = __memblock_alloc_base(size, alignment, limit);

		if (!addr) {
			ret = -ENOMEM;
			goto err;
		}
		base = addr;
	}
	/*
	 * Each reserved area must be initialised later, when more kernel
	 * subsystems (like the slab allocator) are available.
	 */
	cma->base_pfn = PFN_DOWN(base);
	cma->count = size >> PAGE_SHIFT;
	*res_cma = cma;
	cma_area_count++;

	pr_info("CMA: reserved %ld MiB at %08lx\n", (unsigned long)size / SZ_1M,
		(unsigned long)base);
	/* Architecture specific contiguous memory fixup. */
	dma_contiguous_early_fixup(base, size);
	return 0;

err:
	pr_err("CMA: failed to reserve %ld MiB\n", (unsigned long)size / SZ_1M);
	return ret;
}
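
/*
 * Usage sketch (illustrative, hypothetical board code; camera_cma and
 * camera_pdev are made-up names): an early-boot machine file could carve
 * out a private area for a single device:
 *
 *	static struct cma *camera_cma;
 *	dma_contiguous_reserve_area(SZ_32M, 0, 0, &camera_cma);
 *	...
 *	dev_set_cma_area(&camera_pdev->dev, camera_cma);
 */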
static void clear_cma_bitmap(struct cma *cma, unsigned long pfn, int count)
{
	mutex_lock(&cma->lock);
	bitmap_clear(cma->bitmap, pfn - cma->base_pfn, count);
	mutex_unlock(&cma->lock);
}
/**
 * dma_alloc_from_contiguous() - allocate pages from contiguous area
 * @dev:   Pointer to device for which the allocation is performed.
 * @count: Requested number of pages.
 * @align: Requested alignment of pages (in PAGE_SIZE order).
 *
 * This function allocates a memory buffer for the specified device. It uses
 * the device specific contiguous memory area if available, or the default
 * global one. Requires the architecture specific dev_get_cma_area() helper
 * function.
 */
struct page *dma_alloc_from_contiguous(struct device *dev, int count,
				       unsigned int align)
{
	unsigned long mask, pfn, pageno, start = 0;
	struct cma *cma = dev_get_cma_area(dev);
	struct page *page = NULL;
	int ret;

	if (!cma || !cma->count)
		return NULL;

	if (align > CONFIG_CMA_ALIGNMENT)
		align = CONFIG_CMA_ALIGNMENT;

	pr_debug("%s(cma %p, count %d, align %d)\n", __func__, (void *)cma,
		 count, align);

	if (!count)
		return NULL;

	mask = (1 << align) - 1;
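
	/*
	 * Example (assumed value): align = 2 gives mask = 3, so the bitmap
	 * search below only returns ranges starting on a 4-page boundary.
	 */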
	for (;;) {
		mutex_lock(&cma->lock);
		pageno = bitmap_find_next_zero_area(cma->bitmap, cma->count,
						    start, count, mask);
		if (pageno >= cma->count) {
			mutex_unlock(&cma->lock);
			break;
		}
		bitmap_set(cma->bitmap, pageno, count);
		/*
		 * It's safe to drop the lock here. We've marked this region for
		 * our exclusive use. If the migration fails we will take the
		 * lock again and unmark it.
		 */
		mutex_unlock(&cma->lock);
		pfn = cma->base_pfn + pageno;
		mutex_lock(&cma_mutex);
		ret = alloc_contig_range(pfn, pfn + count, MIGRATE_CMA);
		mutex_unlock(&cma_mutex);
		if (ret == 0) {
			page = pfn_to_page(pfn);
			break;
		} else if (ret != -EBUSY) {
			clear_cma_bitmap(cma, pfn, count);
			break;
		}
		clear_cma_bitmap(cma, pfn, count);
		pr_debug("%s(): memory range at %p is busy, retrying\n",
			 __func__, pfn_to_page(pfn));
		/* try again with a slightly different memory target */
		start = pageno + mask + 1;
	}

	pr_debug("%s(): returned %p\n", __func__, page);
	return page;
}
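
/*
 * Usage sketch (illustrative): a dma_map_ops backend would typically do
 *
 *	struct page *page = dma_alloc_from_contiguous(dev,
 *				size >> PAGE_SHIFT, get_order(size));
 *
 * and fall back to the normal page allocator when this returns NULL.
 */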
/**
 * dma_release_from_contiguous() - release allocated pages
 * @dev:   Pointer to device for which the pages were allocated.
 * @pages: Allocated pages.
 * @count: Number of allocated pages.
 *
 * This function releases memory allocated by dma_alloc_from_contiguous().
 * It returns false when the provided pages do not belong to the contiguous
 * area, and true otherwise.
 */
bool dma_release_from_contiguous(struct device *dev, struct page *pages,
				 int count)
{
	struct cma *cma = dev_get_cma_area(dev);
	unsigned long pfn;

	if (!cma || !pages)
		return false;

	pr_debug("%s(page %p)\n", __func__, (void *)pages);

	pfn = page_to_pfn(pages);
	if (pfn < cma->base_pfn || pfn >= cma->base_pfn + cma->count)
		return false;

	VM_BUG_ON(pfn + count > cma->base_pfn + cma->count);

	free_contig_range(pfn, count);
	clear_cma_bitmap(cma, pfn, count);

	return true;
}
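
/*
 * Usage sketch (illustrative): pages obtained from
 * dma_alloc_from_contiguous() are returned with the matching call
 *
 *	dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT);
 *
 * using the same page pointer and page count as the allocation.
 */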