/*
 * Procedures for maintaining information about logical memory blocks.
 *
 * Peter Bergner, IBM Corp.	June 2001.
 * Copyright (C) 2001 Peter Bergner.
 *
 *      This program is free software; you can redistribute it and/or
 *      modify it under the terms of the GNU General Public License
 *      as published by the Free Software Foundation; either version
 *      2 of the License, or (at your option) any later version.
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/bitops.h>
#include <linux/poison.h>
#include <linux/pfn.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/memblock.h>

struct memblock memblock __initdata_memblock;

int memblock_debug __initdata_memblock;
int memblock_can_resize __initdata_memblock;
static struct memblock_region memblock_memory_init_regions[INIT_MEMBLOCK_REGIONS + 1] __initdata_memblock;
static struct memblock_region memblock_reserved_init_regions[INIT_MEMBLOCK_REGIONS + 1] __initdata_memblock;

/* inline so we don't get a warning when pr_debug is compiled out */
static inline const char *memblock_type_name(struct memblock_type *type)
{
	if (type == &memblock.memory)
		return "memory";
	else if (type == &memblock.reserved)
		return "reserved";
	else
		return "unknown";
}

/*
 * Address comparison utilities
 */
static unsigned long __init_memblock memblock_addrs_overlap(phys_addr_t base1, phys_addr_t size1,
				       phys_addr_t base2, phys_addr_t size2)
{
	return ((base1 < (base2 + size2)) && (base2 < (base1 + size1)));
}

static long __init_memblock memblock_overlaps_region(struct memblock_type *type,
					phys_addr_t base, phys_addr_t size)
{
	unsigned long i;

	for (i = 0; i < type->cnt; i++) {
		phys_addr_t rgnbase = type->regions[i].base;
		phys_addr_t rgnsize = type->regions[i].size;
		if (memblock_addrs_overlap(base, size, rgnbase, rgnsize))
			break;
	}

	return (i < type->cnt) ? i : -1;
}

/*
 * Find, allocate, deallocate or reserve unreserved regions. All allocations
 * are top-down.
 */

static phys_addr_t __init_memblock memblock_find_region(phys_addr_t start, phys_addr_t end,
					  phys_addr_t size, phys_addr_t align)
{
	phys_addr_t base, res_base;
	long j;

	/* In case a huge size is requested */
	if (end < size)
		return 0;

	base = round_down(end - size, align);

	/* Prevent allocations returning 0 as it's also used to
	 * indicate an allocation failure
	 */
	if (start == 0)
		start = PAGE_SIZE;

	while (start <= base) {
		j = memblock_overlaps_region(&memblock.reserved, base, size);
		if (j < 0)
			return base;
		res_base = memblock.reserved.regions[j].base;
		if (res_base < size)
			break;
		base = round_down(res_base - size, align);
	}

	return 0;
}

/*
 * Find a free area with specified alignment in a specific range.
 */
phys_addr_t __init_memblock memblock_find_in_range(phys_addr_t start, phys_addr_t end,
					phys_addr_t size, phys_addr_t align)
{
	long i;

	BUG_ON(0 == size);

	/* Pump up max_addr */
	if (end == MEMBLOCK_ALLOC_ACCESSIBLE)
		end = memblock.current_limit;

	/* We do a top-down search, this tends to limit memory
	 * fragmentation by keeping early boot allocs near the
	 * top of memory
	 */
	for (i = memblock.memory.cnt - 1; i >= 0; i--) {
		phys_addr_t memblockbase = memblock.memory.regions[i].base;
		phys_addr_t memblocksize = memblock.memory.regions[i].size;
		phys_addr_t bottom, top, found;

		if (memblocksize < size)
			continue;
		if ((memblockbase + memblocksize) <= start)
			break;
		bottom = max(memblockbase, start);
		top = min(memblockbase + memblocksize, end);
		if (bottom >= top)
			continue;
		found = memblock_find_region(bottom, top, size, align);
		if (found)
			return found;
	}
	return 0;
}
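
/*
 * Usage sketch (editor's addition, not part of the original file): callers
 * typically pair the search with an explicit reservation, e.g. to place a
 * 1MB table below 4GB at 1MB alignment (the addresses are arbitrary):
 *
 *	phys_addr_t addr;
 *
 *	addr = memblock_find_in_range(0, 0x100000000ULL, 0x100000, 0x100000);
 *	if (addr)
 *		memblock_reserve(addr, 0x100000);
 *
 * A return value of 0 means no suitable free range was found.
 */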

/*
 * Free memblock.reserved.regions
 */
int __init_memblock memblock_free_reserved_regions(void)
{
	if (memblock.reserved.regions == memblock_reserved_init_regions)
		return 0;

	return memblock_free(__pa(memblock.reserved.regions),
		 sizeof(struct memblock_region) * memblock.reserved.max);
}

/*
 * Reserve memblock.reserved.regions
 */
int __init_memblock memblock_reserve_reserved_regions(void)
{
	if (memblock.reserved.regions == memblock_reserved_init_regions)
		return 0;

	return memblock_reserve(__pa(memblock.reserved.regions),
		 sizeof(struct memblock_region) * memblock.reserved.max);
}

static void __init_memblock memblock_remove_region(struct memblock_type *type, unsigned long r)
{
	memmove(&type->regions[r], &type->regions[r + 1],
		(type->cnt - (r + 1)) * sizeof(type->regions[r]));
	type->cnt--;

	/* Special case for empty arrays */
	if (type->cnt == 0) {
		type->cnt = 1;
		type->regions[0].base = 0;
		type->regions[0].size = 0;
		memblock_set_region_node(&type->regions[0], MAX_NUMNODES);
	}
}

static int __init_memblock memblock_double_array(struct memblock_type *type)
{
	struct memblock_region *new_array, *old_array;
	phys_addr_t old_size, new_size, addr;
	int use_slab = slab_is_available();

	/* We don't allow resizing until we know about the reserved regions
	 * of memory that aren't suitable for allocation
	 */
	if (!memblock_can_resize)
		return -1;

	/* Calculate new doubled size */
	old_size = type->max * sizeof(struct memblock_region);
	new_size = old_size << 1;

	/* Try to find some space for it.
	 *
	 * WARNING: We assume that either slab_is_available() and we use it or
	 * we use MEMBLOCK for allocations. That means that this is unsafe to use
	 * when bootmem is currently active (unless bootmem itself is implemented
	 * on top of MEMBLOCK which isn't the case yet)
	 *
	 * This should however not be an issue for now, as we currently only
	 * call into MEMBLOCK while it's still active, or much later when slab is
	 * active for memory hotplug operations
	 */
	if (use_slab) {
		new_array = kmalloc(new_size, GFP_KERNEL);
		addr = new_array ? __pa(new_array) : 0;
	} else
		addr = memblock_find_in_range(0, MEMBLOCK_ALLOC_ACCESSIBLE, new_size, sizeof(phys_addr_t));
	if (!addr) {
		pr_err("memblock: Failed to double %s array from %ld to %ld entries !\n",
		       memblock_type_name(type), type->max, type->max * 2);
		return -1;
	}
	new_array = __va(addr);

	memblock_dbg("memblock: %s array is doubled to %ld at [%#010llx-%#010llx]",
		 memblock_type_name(type), type->max * 2, (u64)addr, (u64)addr + new_size - 1);

	/* Found space, we now need to move the array over before
	 * we add the reserved region since it may be our reserved
	 * array itself that is full.
	 */
	memcpy(new_array, type->regions, old_size);
	memset(new_array + type->max, 0, old_size);
	old_array = type->regions;
	type->regions = new_array;
	type->max <<= 1;

	/* If we use SLAB that's it, we are done */
	if (use_slab)
		return 0;

	/* Add the new reserved region now. Should not fail ! */
	BUG_ON(memblock_reserve(addr, new_size));

	/* If the array wasn't our static init one, then free it. We only do
	 * that before SLAB is available as later on, we don't know whether
	 * to use kfree or free_bootmem_pages(). Shouldn't be a big deal
	 * anyways
	 */
	if (old_array != memblock_memory_init_regions &&
	    old_array != memblock_reserved_init_regions)
		memblock_free(__pa(old_array), old_size);

	return 0;
}
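
/*
 * Worked example (editor's note): with INIT_MEMBLOCK_REGIONS entries to
 * start with, old_size is max * sizeof(struct memblock_region) and
 * new_size is exactly twice that, doubling the usable entries.  The
 * memset() above only needs to clear the new upper half, because the
 * lower half was just filled by the memcpy().
 */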

/**
 * memblock_merge_regions - merge neighboring compatible regions
 * @type: memblock type to scan
 *
 * Scan @type and merge neighboring compatible regions.
 */
static void __init_memblock memblock_merge_regions(struct memblock_type *type)
{
	int i = 0;

	/* cnt never goes below 1 */
	while (i < type->cnt - 1) {
		struct memblock_region *this = &type->regions[i];
		struct memblock_region *next = &type->regions[i + 1];

		if (this->base + this->size != next->base ||
		    memblock_get_region_node(this) !=
		    memblock_get_region_node(next)) {
			BUG_ON(this->base + this->size > next->base);
			i++;
			continue;
		}

		this->size += next->size;
		memmove(next, next + 1, (type->cnt - (i + 1)) * sizeof(*next));
		type->cnt--;
	}
}
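
/*
 * Example (editor's sketch): two abutting same-node regions such as
 * [0x1000-0x2000) and [0x2000-0x3000) collapse into a single
 * [0x1000-0x3000) entry; a hole between them or differing node IDs
 * keeps them separate.
 */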

/**
 * memblock_insert_region - insert new memblock region
 * @type: memblock type to insert into
 * @idx: index for the insertion point
 * @base: base address of the new region
 * @size: size of the new region
 * @nid: node id of the new region
 *
 * Insert new memblock region [@base,@base+@size) into @type at @idx.
 * @type must already have extra room to accommodate the new region.
 */
static void __init_memblock memblock_insert_region(struct memblock_type *type,
						   int idx, phys_addr_t base,
						   phys_addr_t size, int nid)
{
	struct memblock_region *rgn = &type->regions[idx];

	BUG_ON(type->cnt >= type->max);
	memmove(rgn + 1, rgn, (type->cnt - idx) * sizeof(*rgn));
	rgn->base = base;
	rgn->size = size;
	memblock_set_region_node(rgn, nid);
	type->cnt++;
}

/**
 * memblock_add_region - add new memblock region
 * @type: memblock type to add new region into
 * @base: base address of the new region
 * @size: size of the new region
 *
 * Add new memblock region [@base,@base+@size) into @type.  The new region
 * is allowed to overlap with existing ones - overlaps don't affect already
 * existing regions.  @type is guaranteed to be minimal (all neighboring
 * compatible regions are merged) after the addition.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
static int __init_memblock memblock_add_region(struct memblock_type *type,
					       phys_addr_t base, phys_addr_t size)
{
	bool insert = false;
	phys_addr_t obase = base, end = base + size;
	int i, nr_new;

	/* special case for empty array */
	if (type->regions[0].size == 0) {
		WARN_ON(type->cnt != 1);
		type->regions[0].base = base;
		type->regions[0].size = size;
		memblock_set_region_node(&type->regions[0], MAX_NUMNODES);
		return 0;
	}
repeat:
	/*
	 * The following is executed twice.  Once with %false @insert and
	 * then with %true.  The first counts the number of regions needed
	 * to accommodate the new area.  The second actually inserts them.
	 */
	base = obase;
	nr_new = 0;

	for (i = 0; i < type->cnt; i++) {
		struct memblock_region *rgn = &type->regions[i];
		phys_addr_t rbase = rgn->base;
		phys_addr_t rend = rbase + rgn->size;

		if (rbase >= end)
			break;
		if (rend <= base)
			continue;
		/*
		 * @rgn overlaps.  If it separates the lower part of new
		 * area, insert that portion.
		 */
		if (rbase > base) {
			nr_new++;
			if (insert)
				memblock_insert_region(type, i++, base,
						       rbase - base, MAX_NUMNODES);
		}
		/* area below @rend is dealt with, forget about it */
		base = min(rend, end);
	}

	/* insert the remaining portion */
	if (base < end) {
		nr_new++;
		if (insert)
			memblock_insert_region(type, i, base, end - base,
					       MAX_NUMNODES);
	}

	/*
	 * If this was the first round, resize array and repeat for actual
	 * insertions; otherwise, merge and return.
	 */
	if (!insert) {
		while (type->cnt + nr_new > type->max)
			if (memblock_double_array(type) < 0)
				return -ENOMEM;
		insert = true;
		goto repeat;
	} else {
		memblock_merge_regions(type);
		return 0;
	}
}
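
/*
 * Example (editor's sketch): because overlaps are tolerated, adding
 *
 *	memblock_add(0x1000, 0x1000);	->  memory: [0x1000-0x2000)
 *	memblock_add(0x1800, 0x1000);	->  memory: [0x1000-0x2800)
 *
 * only inserts the non-overlapping tail and then merges, leaving one
 * region rather than two overlapping entries.
 */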

int __init_memblock memblock_add(phys_addr_t base, phys_addr_t size)
{
	return memblock_add_region(&memblock.memory, base, size);
}

static int __init_memblock __memblock_remove(struct memblock_type *type,
					     phys_addr_t base, phys_addr_t size)
{
	phys_addr_t end = base + size;
	int i;

	/* Walk through the array for collisions */
	for (i = 0; i < type->cnt; i++) {
		struct memblock_region *rgn = &type->regions[i];
		phys_addr_t rend = rgn->base + rgn->size;

		/* Nothing more to do, exit */
		if (rgn->base > end || rgn->size == 0)
			break;

		/* If we fully enclose the block, drop it */
		if (base <= rgn->base && end >= rend) {
			memblock_remove_region(type, i--);
			continue;
		}

		/* If we are fully enclosed within a block
		 * then we need to split it and we are done
		 */
		if (base > rgn->base && end < rend) {
			rgn->size = base - rgn->base;
			if (!memblock_add_region(type, end, rend - end))
				return 0;
			/* Failure to split is bad, we at least
			 * restore the block before erroring
			 */
			rgn->size = rend - rgn->base;
			WARN_ON(1);
			return -1;
		}

		/* Check if we need to trim the bottom of a block */
		if (rgn->base < end && rend > end) {
			rgn->size -= end - rgn->base;
			rgn->base = end;
			break;
		}

		/* And check if we need to trim the top of a block */
		if (base < rend)
			rgn->size -= rend - base;
	}
	return 0;
}
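
/*
 * Example (editor's sketch): removing [0x2000-0x3000) from a single
 * region [0x1000-0x4000) takes the middle-split path above, leaving
 * [0x1000-0x2000) and [0x3000-0x4000).  The top half is re-added via
 * memblock_add_region(), which is why the split can fail when the
 * region array is full and cannot be resized.
 */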

int __init_memblock memblock_remove(phys_addr_t base, phys_addr_t size)
{
	return __memblock_remove(&memblock.memory, base, size);
}

int __init_memblock memblock_free(phys_addr_t base, phys_addr_t size)
{
	memblock_dbg("   memblock_free: [%#016llx-%#016llx] %pF\n",
		     (unsigned long long)base,
		     (unsigned long long)base + size,
		     (void *)_RET_IP_);

	return __memblock_remove(&memblock.reserved, base, size);
}

int __init_memblock memblock_reserve(phys_addr_t base, phys_addr_t size)
{
	struct memblock_type *_rgn = &memblock.reserved;

	memblock_dbg("memblock_reserve: [%#016llx-%#016llx] %pF\n",
		     (unsigned long long)base,
		     (unsigned long long)base + size,
		     (void *)_RET_IP_);
	BUG_ON(0 == size);

	return memblock_add_region(_rgn, base, size);
}
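
/*
 * Usage sketch (editor's addition): boot code reserves ranges it must
 * preserve and frees them once consumed, e.g. for an initrd image whose
 * physical start/size came from the architecture or bootloader:
 *
 *	memblock_reserve(initrd_pa, initrd_size);
 *	...
 *	memblock_free(initrd_pa, initrd_size);
 *
 * initrd_pa/initrd_size are placeholders, not symbols from this file.
 */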

/**
 * __next_free_mem_range - next function for for_each_free_mem_range()
 * @idx: pointer to u64 loop variable
 * @nid: node selector, %MAX_NUMNODES for all nodes
 * @out_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @out_end: ptr to phys_addr_t for end address of the range, can be %NULL
 * @out_nid: ptr to int for nid of the range, can be %NULL
 *
 * Find the first free area from *@idx which matches @nid, fill the out
 * parameters, and update *@idx for the next iteration.  The lower 32bit of
 * *@idx contains index into memory region and the upper 32bit indexes the
 * areas before each reserved region.  For example, if reserved regions
 * look like the following,
 *
 *	0:[0-16), 1:[32-48), 2:[128-130)
 *
 * The upper 32bit indexes the following regions.
 *
 *	0:[0-0), 1:[16-32), 2:[48-128), 3:[130-MAX)
 *
 * As both region arrays are sorted, the function advances the two indices
 * in lockstep and returns each intersection.
 */
void __init_memblock __next_free_mem_range(u64 *idx, int nid,
					   phys_addr_t *out_start,
					   phys_addr_t *out_end, int *out_nid)
{
	struct memblock_type *mem = &memblock.memory;
	struct memblock_type *rsv = &memblock.reserved;
	int mi = *idx & 0xffffffff;
	int ri = *idx >> 32;

	for ( ; mi < mem->cnt; mi++) {
		struct memblock_region *m = &mem->regions[mi];
		phys_addr_t m_start = m->base;
		phys_addr_t m_end = m->base + m->size;

		/* only memory regions are associated with nodes, check it */
		if (nid != MAX_NUMNODES && nid != memblock_get_region_node(m))
			continue;

		/* scan areas before each reservation for intersection */
		for ( ; ri < rsv->cnt + 1; ri++) {
			struct memblock_region *r = &rsv->regions[ri];
			phys_addr_t r_start = ri ? r[-1].base + r[-1].size : 0;
			phys_addr_t r_end = ri < rsv->cnt ? r->base : ULLONG_MAX;

			/* if ri advanced past mi, break out to advance mi */
			if (r_start >= m_end)
				break;
			/* if the two regions intersect, we're done */
			if (m_start < r_end) {
				if (out_start)
					*out_start = max(m_start, r_start);
				if (out_end)
					*out_end = min(m_end, r_end);
				if (out_nid)
					*out_nid = memblock_get_region_node(m);
				/*
				 * The region which ends first is advanced
				 * for the next iteration.
				 */
				if (m_end <= r_end)
					mi++;
				else
					ri++;
				*idx = (u32)mi | (u64)ri << 32;
				return;
			}
		}
	}

	/* signal end of iteration */
	*idx = ULLONG_MAX;
}
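
/*
 * Usage sketch (editor's addition): this iterator backs the
 * for_each_free_mem_range() macro in <linux/memblock.h>:
 *
 *	phys_addr_t start, end;
 *	u64 i;
 *
 *	for_each_free_mem_range(i, MAX_NUMNODES, &start, &end, NULL)
 *		pr_info("free: [%#llx-%#llx)\n",
 *			(unsigned long long)start, (unsigned long long)end);
 *
 * which visits every chunk of memory not covered by a reserved region.
 */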

#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
/*
 * Common iterator interface used to define for_each_mem_pfn_range().
 */
void __init_memblock __next_mem_pfn_range(int *idx, int nid,
				unsigned long *out_start_pfn,
				unsigned long *out_end_pfn, int *out_nid)
{
	struct memblock_type *type = &memblock.memory;
	struct memblock_region *r;

	while (++*idx < type->cnt) {
		r = &type->regions[*idx];

		if (PFN_UP(r->base) >= PFN_DOWN(r->base + r->size))
			continue;
		if (nid == MAX_NUMNODES || nid == r->nid)
			break;
	}
	if (*idx >= type->cnt) {
		*idx = -1;
		return;
	}

	if (out_start_pfn)
		*out_start_pfn = PFN_UP(r->base);
	if (out_end_pfn)
		*out_end_pfn = PFN_DOWN(r->base + r->size);
	if (out_nid)
		*out_nid = r->nid;
}
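
/*
 * Usage sketch (editor's addition): the matching for_each_mem_pfn_range()
 * macro walks page-aligned ranges, optionally filtered by node.  E.g. to
 * count the pages registered for node 0:
 *
 *	unsigned long start_pfn, end_pfn, pages = 0;
 *	int i;
 *
 *	for_each_mem_pfn_range(i, 0, &start_pfn, &end_pfn, NULL)
 *		pages += end_pfn - start_pfn;
 */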

/**
 * memblock_set_node - set node ID on memblock regions
 * @base: base of area to set node ID for
 * @size: size of area to set node ID for
 * @nid: node ID to set
 *
 * Set the nid of memblock memory regions in [@base,@base+@size) to @nid.
 * Regions which cross the area boundaries are split as necessary.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
int __init_memblock memblock_set_node(phys_addr_t base, phys_addr_t size,
				      int nid)
{
	struct memblock_type *type = &memblock.memory;
	phys_addr_t end = base + size;
	int i;

	/* we'll create at most two more regions */
	while (type->cnt + 2 > type->max)
		if (memblock_double_array(type) < 0)
			return -ENOMEM;

	for (i = 0; i < type->cnt; i++) {
		struct memblock_region *rgn = &type->regions[i];
		phys_addr_t rbase = rgn->base;
		phys_addr_t rend = rbase + rgn->size;

		if (rbase >= end)
			break;
		if (rend <= base)
			continue;

		if (rbase < base) {
			/*
			 * @rgn intersects from below.  Split and continue
			 * to process the next region - the new top half.
			 */
			rgn->base = base;
			rgn->size = rend - rgn->base;
			memblock_insert_region(type, i, rbase, base - rbase,
					       rgn->nid);
		} else if (rend > end) {
			/*
			 * @rgn intersects from above.  Split and redo the
			 * current region - the new bottom half.
			 */
			rgn->base = end;
			rgn->size = rend - rgn->base;
			memblock_insert_region(type, i--, rbase, end - rbase,
					       rgn->nid);
		} else {
			/* @rgn is fully contained, set ->nid */
			rgn->nid = nid;
		}
	}

	memblock_merge_regions(type);
	return 0;
}
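
/*
 * Example (editor's sketch): given one region [0x0-0x8000) with no node
 * set, memblock_set_node(0x2000, 0x2000, 1) splits it into [0x0-0x2000),
 * [0x2000-0x4000) on node 1 and [0x4000-0x8000) - two extra regions,
 * which is exactly the headroom the doubling loop above guarantees.
 */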
#endif	/* CONFIG_HAVE_MEMBLOCK_NODE_MAP */

phys_addr_t __init __memblock_alloc_base(phys_addr_t size, phys_addr_t align, phys_addr_t max_addr)
{
	phys_addr_t found;

	/* We align the size to limit fragmentation. Without this, a lot of
	 * small allocs quickly eat up the whole reserve array on sparc
	 */
	size = round_up(size, align);

	found = memblock_find_in_range(0, max_addr, size, align);
	if (found && !memblock_reserve(found, size))
		return found;

	return 0;
}

phys_addr_t __init memblock_alloc_base(phys_addr_t size, phys_addr_t align, phys_addr_t max_addr)
{
	phys_addr_t alloc;

	alloc = __memblock_alloc_base(size, align, max_addr);

	if (alloc == 0)
		panic("ERROR: Failed to allocate 0x%llx bytes below 0x%llx.\n",
		      (unsigned long long) size, (unsigned long long) max_addr);

	return alloc;
}

phys_addr_t __init memblock_alloc(phys_addr_t size, phys_addr_t align)
{
	return memblock_alloc_base(size, align, MEMBLOCK_ALLOC_ACCESSIBLE);
}
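
/*
 * Usage sketch (editor's addition): memblock_alloc() returns a physical
 * address (and panics on failure via memblock_alloc_base()), so callers
 * convert it before use; "my_table" is a hypothetical structure:
 *
 *	struct my_table *tbl;
 *
 *	tbl = __va(memblock_alloc(sizeof(*tbl), SMP_CACHE_BYTES));
 */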

/*
 * Additional node-local top-down allocators.
 *
 * WARNING: Only available after early_node_map[] has been populated,
 * on some architectures, that is after all the calls to add_active_range()
 * have been done to populate it.
 */

static phys_addr_t __init memblock_nid_range_rev(phys_addr_t start,
						 phys_addr_t end, int *nid)
{
#ifdef CONFIG_ARCH_POPULATES_NODE_MAP
	unsigned long start_pfn, end_pfn;
	int i;

	for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, nid)
		if (end > PFN_PHYS(start_pfn) && end <= PFN_PHYS(end_pfn))
			return max(start, PFN_PHYS(start_pfn));
#endif
	*nid = 0;
	return start;
}

phys_addr_t __init memblock_find_in_range_node(phys_addr_t start,
					       phys_addr_t end,
					       phys_addr_t size,
					       phys_addr_t align, int nid)
{
	struct memblock_type *mem = &memblock.memory;
	int i;

	BUG_ON(0 == size);

	/* Pump up max_addr */
	if (end == MEMBLOCK_ALLOC_ACCESSIBLE)
		end = memblock.current_limit;

	for (i = mem->cnt - 1; i >= 0; i--) {
		struct memblock_region *r = &mem->regions[i];
		phys_addr_t base = max(start, r->base);
		phys_addr_t top = min(end, r->base + r->size);

		while (base < top) {
			phys_addr_t tbase, ret;
			int tnid;

			tbase = memblock_nid_range_rev(base, top, &tnid);
			if (nid == MAX_NUMNODES || tnid == nid) {
				ret = memblock_find_region(tbase, top, size, align);
				if (ret)
					return ret;
			}
			top = tbase;
		}
	}

	return 0;
}

phys_addr_t __init memblock_alloc_nid(phys_addr_t size, phys_addr_t align, int nid)
{
	phys_addr_t found;

	/*
	 * We align the size to limit fragmentation. Without this, a lot of
	 * small allocs quickly eat up the whole reserve array on sparc
	 */
	size = round_up(size, align);

	found = memblock_find_in_range_node(0, MEMBLOCK_ALLOC_ACCESSIBLE,
					    size, align, nid);
	if (found && !memblock_reserve(found, size))
		return found;

	return 0;
}

phys_addr_t __init memblock_alloc_try_nid(phys_addr_t size, phys_addr_t align, int nid)
{
	phys_addr_t res = memblock_alloc_nid(size, align, nid);

	if (res)
		return res;
	return memblock_alloc_base(size, align, MEMBLOCK_ALLOC_ACCESSIBLE);
}
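
/*
 * Usage sketch (editor's addition): NUMA-aware callers prefer node-local
 * memory but would rather fall back than fail outright:
 *
 *	phys_addr_t pa = memblock_alloc_try_nid(size, PAGE_SIZE, nid);
 *
 * pa may come from any node when @nid has no suitable free range.
 */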

/*
 * Remaining API functions
 */

/* You must call memblock_analyze() before this. */
phys_addr_t __init memblock_phys_mem_size(void)
{
	return memblock.memory_size;
}

/* lowest address */
phys_addr_t __init_memblock memblock_start_of_DRAM(void)
{
	return memblock.memory.regions[0].base;
}

phys_addr_t __init_memblock memblock_end_of_DRAM(void)
{
	int idx = memblock.memory.cnt - 1;

	return (memblock.memory.regions[idx].base + memblock.memory.regions[idx].size);
}

/* You must call memblock_analyze() after this. */
void __init memblock_enforce_memory_limit(phys_addr_t memory_limit)
{
	unsigned long i;
	phys_addr_t limit;
	struct memblock_region *p;

	if (!memory_limit)
		return;

	/* Truncate the memblock regions to satisfy the memory limit. */
	limit = memory_limit;
	for (i = 0; i < memblock.memory.cnt; i++) {
		if (limit > memblock.memory.regions[i].size) {
			limit -= memblock.memory.regions[i].size;
			continue;
		}

		memblock.memory.regions[i].size = limit;
		memblock.memory.cnt = i + 1;
		break;
	}

	memory_limit = memblock_end_of_DRAM();

	/* And truncate any reserves above the limit also. */
	for (i = 0; i < memblock.reserved.cnt; i++) {
		p = &memblock.reserved.regions[i];

		if (p->base > memory_limit)
			p->size = 0;
		else if ((p->base + p->size) > memory_limit)
			p->size = memory_limit - p->base;

		if (p->size == 0) {
			memblock_remove_region(&memblock.reserved, i);
			i--;
		}
	}
}

static int __init_memblock memblock_search(struct memblock_type *type, phys_addr_t addr)
{
	unsigned int left = 0, right = type->cnt;

	do {
		unsigned int mid = (right + left) / 2;

		if (addr < type->regions[mid].base)
			right = mid;
		else if (addr >= (type->regions[mid].base +
				  type->regions[mid].size))
			left = mid + 1;
		else
			return mid;
	} while (left < right);
	return -1;
}
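
/*
 * Worked example (editor's note): searching for addr 40 in the regions
 * [0-16), [32-48), [128-130) starts at mid = 1; 40 lies inside [32-48),
 * so index 1 is returned immediately.  An address in a hole shrinks the
 * interval to nothing and yields -1.
 */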

int __init memblock_is_reserved(phys_addr_t addr)
{
	return memblock_search(&memblock.reserved, addr) != -1;
}

int __init_memblock memblock_is_memory(phys_addr_t addr)
{
	return memblock_search(&memblock.memory, addr) != -1;
}

int __init_memblock memblock_is_region_memory(phys_addr_t base, phys_addr_t size)
{
	int idx = memblock_search(&memblock.memory, base);

	if (idx == -1)
		return 0;
	return memblock.memory.regions[idx].base <= base &&
		(memblock.memory.regions[idx].base +
		 memblock.memory.regions[idx].size) >= (base + size);
}

int __init_memblock memblock_is_region_reserved(phys_addr_t base, phys_addr_t size)
{
	return memblock_overlaps_region(&memblock.reserved, base, size) >= 0;
}


void __init_memblock memblock_set_current_limit(phys_addr_t limit)
{
	memblock.current_limit = limit;
}

static void __init_memblock memblock_dump(struct memblock_type *type, char *name)
{
	unsigned long long base, size;
	int i;

	pr_info(" %s.cnt = 0x%lx\n", name, type->cnt);

	for (i = 0; i < type->cnt; i++) {
		struct memblock_region *rgn = &type->regions[i];
		char nid_buf[32] = "";

		base = rgn->base;
		size = rgn->size;
#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
		if (memblock_get_region_node(rgn) != MAX_NUMNODES)
			snprintf(nid_buf, sizeof(nid_buf), " on node %d",
				 memblock_get_region_node(rgn));
#endif
		pr_info(" %s[%#x]\t[%#016llx-%#016llx], %#llx bytes%s\n",
			name, i, base, base + size - 1, size, nid_buf);
	}
}

void __init_memblock __memblock_dump_all(void)
{
	pr_info("MEMBLOCK configuration:\n");
	pr_info(" memory size = 0x%llx\n", (unsigned long long)memblock.memory_size);

	memblock_dump(&memblock.memory, "memory");
	memblock_dump(&memblock.reserved, "reserved");
}

void __init memblock_analyze(void)
{
	int i;

	/* Check marker in the unused last array entry */
	WARN_ON(memblock_memory_init_regions[INIT_MEMBLOCK_REGIONS].base
		!= MEMBLOCK_INACTIVE);
	WARN_ON(memblock_reserved_init_regions[INIT_MEMBLOCK_REGIONS].base
		!= MEMBLOCK_INACTIVE);

	memblock.memory_size = 0;

	for (i = 0; i < memblock.memory.cnt; i++)
		memblock.memory_size += memblock.memory.regions[i].size;

	/* We allow resizing from there */
	memblock_can_resize = 1;
}

void __init memblock_init(void)
{
	static int init_done __initdata = 0;

	if (init_done)
		return;
	init_done = 1;

	/* Hookup the initial arrays */
	memblock.memory.regions = memblock_memory_init_regions;
	memblock.memory.max = INIT_MEMBLOCK_REGIONS;
	memblock.reserved.regions = memblock_reserved_init_regions;
	memblock.reserved.max = INIT_MEMBLOCK_REGIONS;

	/* Write a marker in the unused last array entry */
	memblock.memory.regions[INIT_MEMBLOCK_REGIONS].base = MEMBLOCK_INACTIVE;
	memblock.reserved.regions[INIT_MEMBLOCK_REGIONS].base = MEMBLOCK_INACTIVE;

	/* Create a dummy zero size MEMBLOCK which will get coalesced away later.
	 * This simplifies the memblock_add() code below...
	 */
	memblock.memory.regions[0].base = 0;
	memblock.memory.regions[0].size = 0;
	memblock_set_region_node(&memblock.memory.regions[0], MAX_NUMNODES);
	memblock.memory.cnt = 1;

	/* Ditto. */
	memblock.reserved.regions[0].base = 0;
	memblock.reserved.regions[0].size = 0;
	memblock_set_region_node(&memblock.reserved.regions[0], MAX_NUMNODES);
	memblock.reserved.cnt = 1;

	memblock.current_limit = MEMBLOCK_ALLOC_ANYWHERE;
}
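
/*
 * Usage sketch (editor's addition): the expected early boot sequence is
 *
 *	memblock_init();		hook up the static arrays
 *	memblock_add(base, size);	once per RAM bank from firmware
 *	memblock_analyze();		compute memory_size, allow resizing
 *
 * after which memblock_alloc() and friends are usable; base/size stand
 * for whatever ranges the architecture discovers.
 */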

static int __init early_memblock(char *p)
{
	if (p && strstr(p, "debug"))
		memblock_debug = 1;
	return 0;
}
early_param("memblock", early_memblock);

#if defined(CONFIG_DEBUG_FS) && !defined(CONFIG_ARCH_DISCARD_MEMBLOCK)

static int memblock_debug_show(struct seq_file *m, void *private)
{
	struct memblock_type *type = m->private;
	struct memblock_region *reg;
	int i;

	for (i = 0; i < type->cnt; i++) {
		reg = &type->regions[i];
		seq_printf(m, "%4d: ", i);
		if (sizeof(phys_addr_t) == 4)
			seq_printf(m, "0x%08lx..0x%08lx\n",
				   (unsigned long)reg->base,
				   (unsigned long)(reg->base + reg->size - 1));
		else
			seq_printf(m, "0x%016llx..0x%016llx\n",
				   (unsigned long long)reg->base,
				   (unsigned long long)(reg->base + reg->size - 1));
	}
	return 0;
}

static int memblock_debug_open(struct inode *inode, struct file *file)
{
	return single_open(file, memblock_debug_show, inode->i_private);
}

static const struct file_operations memblock_debug_fops = {
	.open = memblock_debug_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static int __init memblock_init_debugfs(void)
{
	struct dentry *root = debugfs_create_dir("memblock", NULL);
	if (!root)
		return -ENXIO;
	debugfs_create_file("memory", S_IRUGO, root, &memblock.memory, &memblock_debug_fops);
	debugfs_create_file("reserved", S_IRUGO, root, &memblock.reserved, &memblock_debug_fops);

	return 0;
}
__initcall(memblock_init_debugfs);

#endif /* CONFIG_DEBUG_FS */