/*
 * Contiguous Memory Allocator
 *
 * Copyright (c) 2010-2011 by Samsung Electronics.
 * Copyright IBM Corporation, 2013
 * Copyright LG Electronics Inc., 2014
 * Written by:
 *	Marek Szyprowski <m.szyprowski@samsung.com>
 *	Michal Nazarewicz <mina86@mina86.com>
 *	Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
 *	Joonsoo Kim <iamjoonsoo.kim@lge.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of the
 * License or (at your option) any later version of the license.
 */
#define pr_fmt(fmt) "cma: " fmt

#ifdef CONFIG_CMA_DEBUG
#ifndef DEBUG
#  define DEBUG
#endif
#endif
#define CREATE_TRACE_POINTS

#include <linux/memblock.h>
#include <linux/err.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/log2.h>
#include <linux/cma.h>
#include <linux/highmem.h>
#include <linux/io.h>
#include <trace/events/cma.h>

#include "cma.h"
struct cma cma_areas[MAX_CMA_AREAS];
unsigned cma_area_count;
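/* serialises alloc_contig_range() calls across all CMA areas */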
static DEFINE_MUTEX(cma_mutex);
phys_addr_t cma_get_base(const struct cma *cma)
{
	return PFN_PHYS(cma->base_pfn);
}

unsigned long cma_get_size(const struct cma *cma)
{
	return cma->count << PAGE_SHIFT;
}
static unsigned long cma_bitmap_aligned_mask(const struct cma *cma,
					     int align_order)
{
	if (align_order <= cma->order_per_bit)
		return 0;
	return (1UL << (align_order - cma->order_per_bit)) - 1;
}
/*
 * Find the offset of the first PFN in the area that is aligned to the
 * requested order; the result is expressed in order_per_bit units.
 */
static unsigned long cma_bitmap_aligned_offset(const struct cma *cma,
					       int align_order)
{
	if (align_order <= cma->order_per_bit)
		return 0;

	return (ALIGN(cma->base_pfn, (1UL << align_order))
		- cma->base_pfn) >> cma->order_per_bit;
}
static unsigned long cma_bitmap_pages_to_bits(const struct cma *cma,
					      unsigned long pages)
{
	return ALIGN(pages, 1UL << cma->order_per_bit) >> cma->order_per_bit;
}
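/*
 * Worked example of the granularity helpers above (illustrative values,
 * not from any particular platform): with order_per_bit = 2 each bitmap
 * bit covers 4 pages, so a request for 5 pages needs
 * ALIGN(5, 4) >> 2 = 2 bits, and an allocation aligned to align_order = 4
 * (16 pages) must land on a bitmap index that is a multiple of
 * 1 << (4 - 2) = 4, which is why cma_bitmap_aligned_mask() hands
 * (1 << 2) - 1 = 3 to bitmap_find_next_zero_area_off() as the mask.
 */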
static void cma_clear_bitmap(struct cma *cma, unsigned long pfn,
			     unsigned int count)
{
	unsigned long bitmap_no, bitmap_count;

	bitmap_no = (pfn - cma->base_pfn) >> cma->order_per_bit;
	bitmap_count = cma_bitmap_pages_to_bits(cma, count);

	mutex_lock(&cma->lock);
	bitmap_clear(cma->bitmap, bitmap_no, bitmap_count);
	mutex_unlock(&cma->lock);
}
static int __init cma_activate_area(struct cma *cma)
{
	int bitmap_size = BITS_TO_LONGS(cma_bitmap_maxno(cma)) * sizeof(long);
	unsigned long base_pfn = cma->base_pfn, pfn = base_pfn;
	unsigned i = cma->count >> pageblock_order;
	struct zone *zone;

	cma->bitmap = kzalloc(bitmap_size, GFP_KERNEL);
	if (!cma->bitmap)
		return -ENOMEM;

	WARN_ON_ONCE(!pfn_valid(pfn));
	zone = page_zone(pfn_to_page(pfn));

	do {
		unsigned j;

		base_pfn = pfn;
		for (j = pageblock_nr_pages; j; --j, pfn++) {
			WARN_ON_ONCE(!pfn_valid(pfn));
			/*
			 * alloc_contig_range() requires the pfn range
			 * specified to be in the same zone. Make this
			 * simple by forcing the entire CMA reserved range
			 * to be in the same zone.
			 */
			if (page_zone(pfn_to_page(pfn)) != zone)
				goto err;
		}
		init_cma_reserved_pageblock(pfn_to_page(base_pfn));
	} while (--i);

	mutex_init(&cma->lock);

#ifdef CONFIG_CMA_DEBUGFS
	INIT_HLIST_HEAD(&cma->mem_head);
	spin_lock_init(&cma->mem_head_lock);
#endif
	return 0;

err:
	kfree(cma->bitmap);
	cma->count = 0;
	return -EINVAL;
}
static int __init cma_init_reserved_areas(void)
{
	int i;

	for (i = 0; i < cma_area_count; i++) {
		int ret = cma_activate_area(&cma_areas[i]);

		if (ret)
			return ret;
	}
	return 0;
}
core_initcall(cma_init_reserved_areas);
/**
 * cma_init_reserved_mem() - create custom contiguous area from reserved memory
 * @base: Base address of the reserved area.
 * @size: Size of the reserved area (in bytes).
 * @order_per_bit: Order of pages represented by one bit on bitmap.
 * @res_cma: Pointer to store the created cma region.
 *
 * This function creates a custom contiguous area from already reserved
 * memory. An illustrative caller sketch follows the function body.
 */
int __init cma_init_reserved_mem(phys_addr_t base, phys_addr_t size,
				 unsigned int order_per_bit,
				 struct cma **res_cma)
{
	struct cma *cma;
	phys_addr_t alignment;

	/* Sanity checks */
	if (cma_area_count == ARRAY_SIZE(cma_areas)) {
		pr_err("Not enough slots for CMA reserved regions!\n");
		return -ENOSPC;
	}

	if (!size || !memblock_is_region_reserved(base, size))
		return -EINVAL;
	/* ensure minimal alignment required by mm core */
	alignment = PAGE_SIZE <<
			max_t(unsigned long, MAX_ORDER - 1, pageblock_order);

	/* alignment should be aligned with order_per_bit */
	if (!IS_ALIGNED(alignment >> PAGE_SHIFT, 1 << order_per_bit))
		return -EINVAL;

	if (ALIGN(base, alignment) != base || ALIGN(size, alignment) != size)
		return -EINVAL;

	/*
	 * Each reserved area must be initialised later, when more kernel
	 * subsystems (like slab allocator) are available.
	 */
	cma = &cma_areas[cma_area_count];
	cma->base_pfn = PFN_DOWN(base);
	cma->count = size >> PAGE_SHIFT;
	cma->order_per_bit = order_per_bit;
	*res_cma = cma;
	cma_area_count++;
	totalcma_pages += (size / PAGE_SIZE);

	return 0;
}
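/*
 * Illustrative sketch only (not part of this file): roughly how a
 * reserved-memory setup path might hand an already-reserved block over to
 * CMA. The example_rmem_cma_setup() name and the example_cma pointer are
 * hypothetical, used purely for the example.
 *
 *	static struct cma *example_cma;
 *
 *	static int __init example_rmem_cma_setup(struct reserved_mem *rmem)
 *	{
 *		// rmem->base/rmem->size were reserved in memblock earlier
 *		return cma_init_reserved_mem(rmem->base, rmem->size, 0,
 *					     &example_cma);
 *	}
 */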
/**
 * cma_declare_contiguous() - reserve custom contiguous area
 * @base: Base address of the reserved area (optional, use 0 for any).
 * @size: Size of the reserved area (in bytes).
 * @limit: End address of the reserved memory (optional, 0 for any).
 * @alignment: Alignment for the CMA area, should be power of 2 or zero.
 * @order_per_bit: Order of pages represented by one bit on bitmap.
 * @fixed: if true, reserve the area at exactly @base.
 * @res_cma: Pointer to store the created cma region.
 *
 * This function reserves memory from the early allocator. It should be
 * called by arch specific code once the early allocator (memblock or bootmem)
 * has been activated and all other subsystems have already allocated/reserved
 * memory. This function allows creation of custom reserved areas.
 *
 * If @fixed is true, reserve contiguous area at exactly @base. If false,
 * reserve in range from @base to @limit. An illustrative boot-time caller
 * sketch follows the function body.
 */
int __init cma_declare_contiguous(phys_addr_t base,
			phys_addr_t size, phys_addr_t limit,
			phys_addr_t alignment, unsigned int order_per_bit,
			bool fixed, struct cma **res_cma)
{
	phys_addr_t memblock_end = memblock_end_of_DRAM();
	phys_addr_t highmem_start;
	int ret = 0;
#ifdef CONFIG_X86
	/*
	 * high_memory isn't direct mapped memory so retrieving its physical
	 * address isn't appropriate. But it would be useful to check the
	 * physical address of the highmem boundary so it's justifiable to get
	 * the physical address from it. On x86 there is a validation check
	 * for this case, so the following workaround is needed to avoid it.
	 */
	highmem_start = __pa_nodebug(high_memory);
#else
	highmem_start = __pa(high_memory);
#endif
	pr_debug("%s(size %pa, base %pa, limit %pa alignment %pa)\n",
		__func__, &size, &base, &limit, &alignment);

	if (cma_area_count == ARRAY_SIZE(cma_areas)) {
		pr_err("Not enough slots for CMA reserved regions!\n");
		return -ENOSPC;
	}

	if (!size)
		return -EINVAL;

	if (alignment && !is_power_of_2(alignment))
		return -EINVAL;
	/*
	 * Sanitise input arguments.
	 * Pages at both ends of the CMA area could be merged into adjacent
	 * unmovable-migratetype pages by the buddy allocator. In that case a
	 * contiguous allocation would no longer be possible, which is not
	 * what we want.
	 */
	alignment = max(alignment, (phys_addr_t)PAGE_SIZE <<
			max_t(unsigned long, MAX_ORDER - 1, pageblock_order));
	base = ALIGN(base, alignment);
	size = ALIGN(size, alignment);
	limit &= ~(alignment - 1);

	if (!base)
		fixed = false;
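	/*
	 * Worked example of the sanitisation above (illustrative numbers,
	 * assuming 4 KiB pages, MAX_ORDER = 11 and pageblock_order <= 10):
	 * the minimum alignment becomes 4 KiB << 10 = 4 MiB, so a request of
	 * base = 56 MiB and size = 5 MiB is rounded to base = 56 MiB and
	 * size = 8 MiB before memblock is asked for the region.
	 */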
	/* size should be aligned with order_per_bit */
	if (!IS_ALIGNED(size >> PAGE_SHIFT, 1 << order_per_bit))
		return -EINVAL;

	/*
	 * If allocating at a fixed base the request region must not cross the
	 * low/high memory boundary.
	 */
	if (fixed && base < highmem_start && base + size > highmem_start) {
		ret = -EINVAL;
		pr_err("Region at %pa defined on low/high memory boundary (%pa)\n",
			&base, &highmem_start);
		goto err;
	}
	/*
	 * If the limit is unspecified or above the memblock end, its effective
	 * value will be the memblock end. Set it explicitly to simplify further
	 * checks.
	 */
	if (limit == 0 || limit > memblock_end)
		limit = memblock_end;
	/* Reserve memory */
	if (fixed) {
		if (memblock_is_region_reserved(base, size) ||
		    memblock_reserve(base, size) < 0) {
			ret = -EBUSY;
			goto err;
		}
	} else {
		phys_addr_t addr = 0;

		/*
		 * All pages in the reserved area must come from the same zone.
		 * If the requested region crosses the low/high memory boundary,
		 * try allocating from high memory first and fall back to low
		 * memory in case of failure.
		 */
		if (base < highmem_start && limit > highmem_start) {
			addr = memblock_alloc_range(size, alignment,
						    highmem_start, limit,
						    MEMBLOCK_NONE);
			limit = highmem_start;
		}

		if (!addr) {
			addr = memblock_alloc_range(size, alignment, base,
						    limit, MEMBLOCK_NONE);
			if (!addr) {
				ret = -ENOMEM;
				goto err;
			}
		}

		/*
		 * kmemleak scans/reads tracked objects for pointers to other
		 * objects but this address isn't mapped and accessible
		 */
		kmemleak_ignore(phys_to_virt(addr));
		base = addr;
	}
	ret = cma_init_reserved_mem(base, size, order_per_bit, res_cma);
	if (ret)
		goto err;

	pr_info("Reserved %ld MiB at %pa\n", (unsigned long)size / SZ_1M,
		&base);
	return 0;

err:
	pr_err("Failed to reserve %ld MiB\n", (unsigned long)size / SZ_1M);
	return ret;
}
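/*
 * Illustrative sketch only (not part of this file): roughly how an arch or
 * board setup path might reserve a 64 MiB area at boot, letting memblock
 * choose the placement. The example_reserve_cma() name and example_cma
 * pointer are hypothetical, used purely for the example.
 *
 *	static struct cma *example_cma;
 *
 *	static void __init example_reserve_cma(void)
 *	{
 *		// base = 0 and fixed = false: place anywhere below the limit
 *		if (cma_declare_contiguous(0, SZ_64M, 0, 0, 0, false,
 *					   &example_cma))
 *			pr_warn("example CMA reservation failed\n");
 *	}
 */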
/**
 * cma_alloc() - allocate pages from contiguous area
 * @cma:   Contiguous memory region for which the allocation is performed.
 * @count: Requested number of pages.
 * @align: Requested alignment of pages (in PAGE_SIZE order).
 *
 * This function allocates a contiguous run of @count pages from the given
 * contiguous memory area. An illustrative caller sketch follows the
 * function body.
 */
struct page *cma_alloc(struct cma *cma, size_t count, unsigned int align)
{
	unsigned long mask, offset;
	unsigned long pfn = -1;
	unsigned long start = 0;
	unsigned long bitmap_maxno, bitmap_no, bitmap_count;
	struct page *page = NULL;
	int ret;

	if (!cma || !cma->count)
		return NULL;

	pr_debug("%s(cma %p, count %zu, align %d)\n", __func__, (void *)cma,
		 count, align);

	if (!count)
		return NULL;

	mask = cma_bitmap_aligned_mask(cma, align);
	offset = cma_bitmap_aligned_offset(cma, align);
	bitmap_maxno = cma_bitmap_maxno(cma);
	bitmap_count = cma_bitmap_pages_to_bits(cma, count);
	for (;;) {
		mutex_lock(&cma->lock);
		bitmap_no = bitmap_find_next_zero_area_off(cma->bitmap,
				bitmap_maxno, start, bitmap_count, mask,
				offset);
		if (bitmap_no >= bitmap_maxno) {
			mutex_unlock(&cma->lock);
			break;
		}
		bitmap_set(cma->bitmap, bitmap_no, bitmap_count);
		/*
		 * It's safe to drop the lock here. We've marked this region for
		 * our exclusive use. If the migration fails we will take the
		 * lock again and unmark it.
		 */
		mutex_unlock(&cma->lock);

		pfn = cma->base_pfn + (bitmap_no << cma->order_per_bit);
		mutex_lock(&cma_mutex);
		ret = alloc_contig_range(pfn, pfn + count, MIGRATE_CMA);
		mutex_unlock(&cma_mutex);
		if (ret == 0) {
			page = pfn_to_page(pfn);
			break;
		}

		cma_clear_bitmap(cma, pfn, count);
		if (ret != -EBUSY)
			break;

		pr_debug("%s(): memory range at %p is busy, retrying\n",
			 __func__, pfn_to_page(pfn));
		/* try again with a bit different memory target */
		start = bitmap_no + mask + 1;
	}
	trace_cma_alloc(pfn, page, count, align);

	pr_debug("%s(): returned %p\n", __func__, page);
	return page;
}
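/*
 * Illustrative sketch only (not part of this file): roughly how a driver
 * that owns a CMA region might pair cma_alloc() with cma_release() for a
 * 1 MiB buffer. example_cma and nr_pages are hypothetical names used
 * purely for the example.
 *
 *	size_t nr_pages = SZ_1M >> PAGE_SHIFT;
 *	struct page *buf;
 *
 *	buf = cma_alloc(example_cma, nr_pages, get_order(SZ_1M));
 *	if (!buf)
 *		return -ENOMEM;
 *	// ... map and use the pages ...
 *	cma_release(example_cma, buf, nr_pages);
 */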
/**
 * cma_release() - release allocated pages
 * @cma:   Contiguous memory region for which the allocation was performed.
 * @pages: Allocated pages.
 * @count: Number of allocated pages.
 *
 * This function releases memory allocated by cma_alloc().
 * It returns false when provided pages do not belong to contiguous area and
 * true otherwise.
 */
bool cma_release(struct cma *cma, const struct page *pages, unsigned int count)
{
	unsigned long pfn;

	if (!cma || !pages)
		return false;

	pr_debug("%s(page %p)\n", __func__, (void *)pages);

	pfn = page_to_pfn(pages);
	if (pfn < cma->base_pfn || pfn >= cma->base_pfn + cma->count)
		return false;

	VM_BUG_ON(pfn + count > cma->base_pfn + cma->count);

	free_contig_range(pfn, count);
	cma_clear_bitmap(cma, pfn, count);
	trace_cma_release(pfn, pages, count);

	return true;
}