/*
 * sparse memory mappings.
 */
#include <linux/config.h>
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/bootmem.h>
#include <linux/module.h>
#include <linux/spinlock.h>
/*
 * Permanent SPARSEMEM data:
 *
 * 1) mem_section	- memory sections, mem_map's for valid memory
 */
#ifdef CONFIG_SPARSEMEM_EXTREME
struct mem_section *mem_section[NR_SECTION_ROOTS]
	____cacheline_maxaligned_in_smp;
#else
struct mem_section mem_section[NR_SECTION_ROOTS][SECTIONS_PER_ROOT]
	____cacheline_maxaligned_in_smp;
#endif
EXPORT_SYMBOL(mem_section);
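/*
 * With SPARSEMEM_EXTREME the table above is two-level: each root
 * pointer is filled in lazily (see sparse_index_init()) with a block
 * of SECTIONS_PER_ROOT mem_sections, so an entirely-absent root costs
 * only one pointer. The flat variant trades that space saving for one
 * less indirection on every lookup.
 */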
#ifdef CONFIG_SPARSEMEM_EXTREME
static struct mem_section *sparse_index_alloc(int nid)
{
	struct mem_section *section = NULL;
	unsigned long array_size = SECTIONS_PER_ROOT *
				   sizeof(struct mem_section);
	section = alloc_bootmem_node(NODE_DATA(nid), array_size);

	if (section)
		memset(section, 0, array_size);

	return section;
}
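/*
 * sparse_index_init() allocates its root array outside the lock and
 * re-checks the slot under it, so only the first caller installs an
 * array for a given root; a racing loser's bootmem block is simply
 * left unused.
 */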
static int sparse_index_init(unsigned long section_nr, int nid)
{
	static spinlock_t index_init_lock = SPIN_LOCK_UNLOCKED;
	unsigned long root = SECTION_NR_TO_ROOT(section_nr);
	struct mem_section *section;
	int ret = 0;
	if (mem_section[root])
		return -EEXIST;
	section = sparse_index_alloc(nid);
	/*
	 * This lock keeps two concurrent callers from installing an
	 * array for the same root index.
	 */
	spin_lock(&index_init_lock);
	if (mem_section[root]) {
		ret = -EEXIST;
		goto out;
	}

	mem_section[root] = section;
out:
	spin_unlock(&index_init_lock);
	return ret;
}
#else /* !SPARSEMEM_EXTREME */
static inline int sparse_index_init(unsigned long section_nr, int nid)
{
	return 0;
}
#endif
/*
 * Although written for the SPARSEMEM_EXTREME case, this happens
 * to also work for the flat array case because
 * NR_SECTION_ROOTS==NR_MEM_SECTIONS.
 */
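/*
 * Example with made-up numbers: if SECTIONS_PER_ROOT were 256 and @ms
 * pointed at offset 5 within the root found at root_nr 2, the result
 * would be 2 * 256 + 5 == 517.
 */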
int __section_nr(struct mem_section* ms)
{
	unsigned long root_nr;
	struct mem_section* root;

	for (root_nr = 0; root_nr < NR_SECTION_ROOTS; root_nr++) {
		root = __nr_to_section(root_nr * SECTIONS_PER_ROOT);
		if (!root)
			continue;

		if ((ms >= root) && (ms < (root + SECTIONS_PER_ROOT)))
			break;
	}

	return (root_nr * SECTIONS_PER_ROOT) + (ms - root);
}
/* Record a memory area against a node. */
void memory_present(int nid, unsigned long start, unsigned long end)
{
	unsigned long pfn;

	start &= PAGE_SECTION_MASK;
	for (pfn = start; pfn < end; pfn += PAGES_PER_SECTION) {
		unsigned long section = pfn_to_section_nr(pfn);
		struct mem_section *ms;

		sparse_index_init(section, nid);

		ms = __nr_to_section(section);
		if (!ms->section_mem_map)
			ms->section_mem_map = SECTION_MARKED_PRESENT;
	}
}
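/*
 * Arch boot code is expected to call memory_present() once for every
 * physical range it discovers (e.g. per e820/SRAT entry), before
 * sparse_init() runs; the exact call sites are architecture-specific.
 */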
/*
 * Only used by the i386 NUMA architectures, but relatively
 * cheap to keep around for everyone.
 */
unsigned long __init node_memmap_size_bytes(int nid, unsigned long start_pfn,
					    unsigned long end_pfn)
{
	unsigned long pfn;
	unsigned long nr_pages = 0;

	for (pfn = start_pfn; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
		if (nid != early_pfn_to_nid(pfn))
			continue;

		nr_pages += PAGES_PER_SECTION;
	}

	return nr_pages * sizeof(struct page);
}
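/*
 * E.g. (made-up numbers): a node covering four sections of 2^14 pages
 * each reports 4 * 16384 * sizeof(struct page) bytes of mem_map.
 */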
/*
 * Subtle: we encode the real starting pfn into the mem_map pointer,
 * so that for any page in the section, page - section_mem_map yields
 * its actual physical page frame number.
 */
static unsigned long sparse_encode_mem_map(struct page *mem_map, unsigned long pnum)
{
	return (unsigned long)(mem_map - section_nr_to_pfn(pnum));
}
/*
 * We need this if we ever free the mem_maps. While not implemented yet,
 * this function is included for parity with its sibling.
 */
static __attribute__((unused))
struct page *sparse_decode_mem_map(unsigned long coded_mem_map, unsigned long pnum)
{
	return ((struct page *)coded_mem_map) + section_nr_to_pfn(pnum);
}
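/*
 * Round trip (made-up numbers): with 2^14 pages per section, encoding
 * for pnum 3 subtracts pfn 3 * 16384 == 49152 from the mem_map base;
 * decoding adds it back, so decode(encode(map, 3), 3) == map.
 */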
static int sparse_init_one_section(struct mem_section *ms,
		unsigned long pnum, struct page *mem_map)
{
	if (!valid_section(ms))
		return -EINVAL;

	ms->section_mem_map |= sparse_encode_mem_map(mem_map, pnum);

	return 1;
}
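/*
 * The |= above preserves SECTION_MARKED_PRESENT: the coded mem_map
 * value keeps its low flag bits clear (struct page is comfortably
 * more aligned than the flag bits need), so flags and pointer share
 * the single section_mem_map word.
 */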
static struct page *sparse_early_mem_map_alloc(unsigned long pnum)
{
	struct page *map;
	int nid = early_pfn_to_nid(section_nr_to_pfn(pnum));
	struct mem_section *ms = __nr_to_section(pnum);

	map = alloc_remap(nid, sizeof(struct page) * PAGES_PER_SECTION);
	if (map)
		return map;

	map = alloc_bootmem_node(NODE_DATA(nid),
			sizeof(struct page) * PAGES_PER_SECTION);
	if (map)
		return map;

	printk(KERN_WARNING "%s: allocation failed\n", __FUNCTION__);
	ms->section_mem_map = 0;
	return NULL;
}
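/*
 * The ordering above matters: alloc_remap() serves the i386 NUMA
 * reserved-KVA remap area where it exists (a stub returning NULL on
 * other architectures), and only then do we fall back to node-local
 * bootmem. On failure the section is un-marked, so pfn_valid() stays
 * false for it.
 */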
/*
 * Allocate the accumulated non-linear sections, allocate a mem_map
 * for each and record the physical to section mapping.
 */
void sparse_init(void)
{
	unsigned long pnum;
	struct page *map;

	for (pnum = 0; pnum < NR_MEM_SECTIONS; pnum++) {
		if (!valid_section_nr(pnum))
			continue;

		map = sparse_early_mem_map_alloc(pnum);
		if (!map)
			continue;

		sparse_init_one_section(__nr_to_section(pnum), pnum, map);
	}
}
/*
 * returns the number of sections whose mem_maps were properly
 * set. If this is <=0, then that means that the passed-in
 * map was not consumed and must be freed.
 */
int sparse_add_one_section(unsigned long start_pfn, int nr_pages, struct page *map)
{
	struct mem_section *ms = __pfn_to_section(start_pfn);

	if (ms->section_mem_map & SECTION_MARKED_PRESENT)
		return -EEXIST;

	ms->section_mem_map |= SECTION_MARKED_PRESENT;

	return sparse_init_one_section(ms, pfn_to_section_nr(start_pfn), map);
}
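/*
 * Caller-side sketch (hypothetical helper name): a memory-hotplug
 * path must free the map itself whenever it is not consumed:
 *
 *	ret = sparse_add_one_section(start_pfn, nr_pages, map);
 *	if (ret <= 0)
 *		free_section_memmap(map);	// hypothetical
 */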