/*
 * Copyright IBM Corp. 2006
 * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>
 */

#include <linux/bootmem.h>
#include <linux/pfn.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/hugetlb.h>
#include <linux/slab.h>
#include <linux/memblock.h>
#include <asm/cacheflush.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/setup.h>
#include <asm/tlbflush.h>
#include <asm/sections.h>
#include <asm/set_memory.h>
static DEFINE_MUTEX(vmem_mutex);
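
/*
 * Bookkeeping for the 1:1 mapping: every mapped physical range is
 * tracked as a memory_segment on the mem_segs list, guarded by
 * vmem_mutex, so that overlapping hotplug requests can be rejected.
 */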
struct memory_segment {
	struct list_head list;
	unsigned long start;
	unsigned long size;
};

static LIST_HEAD(mem_segs);
static void __ref *vmem_alloc_pages(unsigned int order)
{
	unsigned long size = PAGE_SIZE << order;

	/* Use the buddy allocator once slab is up, memblock before that. */
	if (slab_is_available())
		return (void *)__get_free_pages(GFP_KERNEL, order);
	return (void *) memblock_alloc(size, size);
}
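
/*
 * Note: region-third (pud level) and segment (pmd level) tables on s390
 * hold 2048 entries of 8 bytes each, i.e. 16KB, which is why the
 * allocators below request an order-2 block and clear PAGE_SIZE * 4
 * bytes.
 */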
static inline pud_t *vmem_pud_alloc(void)
{
	pud_t *pud = NULL;

	pud = vmem_alloc_pages(2);
	if (!pud)
		return NULL;
	clear_table((unsigned long *) pud, _REGION3_ENTRY_EMPTY, PAGE_SIZE * 4);
	return pud;
}
pmd_t *vmem_pmd_alloc(void)
{
	pmd_t *pmd = NULL;

	pmd = vmem_alloc_pages(2);
	if (!pmd)
		return NULL;
	clear_table((unsigned long *) pmd, _SEGMENT_ENTRY_EMPTY, PAGE_SIZE * 4);
	return pmd;
}
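
/*
 * Page tables on s390 are only 2KB (PTRS_PER_PTE == 256 entries of 8
 * bytes). Once slab is available, page_table_alloc() hands out such 2KB
 * fractions of a page; before that a full 2KB memblock chunk is used.
 */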
pte_t __ref *vmem_pte_alloc(void)
{
	unsigned long size = PTRS_PER_PTE * sizeof(pte_t);
	pte_t *pte;

	if (slab_is_available())
		pte = (pte_t *) page_table_alloc(&init_mm);
	else
		pte = (pte_t *) memblock_alloc(size, size);
	if (!pte)
		return NULL;
	clear_table((unsigned long *) pte, _PAGE_INVALID, size);
	return pte;
}

/*
 * Add a physical memory range to the 1:1 mapping.
 */
static int vmem_add_mem(unsigned long start, unsigned long size)
{
	unsigned long pgt_prot, sgt_prot, r3_prot;
	unsigned long pages4k, pages1m, pages2g;
	unsigned long end = start + size;
	unsigned long address = start;
	pgd_t *pg_dir;
	pud_t *pu_dir;
	pmd_t *pm_dir;
	pte_t *pt_dir;
	int ret = -ENOMEM;

	pgt_prot = pgprot_val(PAGE_KERNEL);
	sgt_prot = pgprot_val(SEGMENT_KERNEL);
	r3_prot = pgprot_val(REGION3_KERNEL);
	/* Without the NX facility the NOEXEC bits must not be used. */
	if (!MACHINE_HAS_NX) {
		pgt_prot &= ~_PAGE_NOEXEC;
		sgt_prot &= ~_SEGMENT_ENTRY_NOEXEC;
		r3_prot &= ~_REGION_ENTRY_NOEXEC;
	}
	pages4k = pages1m = pages2g = 0;
	while (address < end) {
		pg_dir = pgd_offset_k(address);
		if (pgd_none(*pg_dir)) {
			pu_dir = vmem_pud_alloc();
			if (!pu_dir)
				goto out;
			pgd_populate(&init_mm, pg_dir, pu_dir);
		}
		pu_dir = pud_offset(pg_dir, address);
		if (MACHINE_HAS_EDAT2 && pud_none(*pu_dir) && address &&
		    !(address & ~PUD_MASK) && (address + PUD_SIZE <= end) &&
		    !debug_pagealloc_enabled()) {
			pud_val(*pu_dir) = address | r3_prot;
			address += PUD_SIZE;
			pages2g++;
			continue;
		}
		if (pud_none(*pu_dir)) {
			pm_dir = vmem_pmd_alloc();
			if (!pm_dir)
				goto out;
			pud_populate(&init_mm, pu_dir, pm_dir);
		}
		pm_dir = pmd_offset(pu_dir, address);
		if (MACHINE_HAS_EDAT1 && pmd_none(*pm_dir) && address &&
		    !(address & ~PMD_MASK) && (address + PMD_SIZE <= end) &&
		    !debug_pagealloc_enabled()) {
			pmd_val(*pm_dir) = address | sgt_prot;
			address += PMD_SIZE;
			pages1m++;
			continue;
		}
		if (pmd_none(*pm_dir)) {
			pt_dir = vmem_pte_alloc();
			if (!pt_dir)
				goto out;
			pmd_populate(&init_mm, pm_dir, pt_dir);
		}
		pt_dir = pte_offset_kernel(pm_dir, address);
		pte_val(*pt_dir) = address | pgt_prot;
		address += PAGE_SIZE;
		pages4k++;
	}
	ret = 0;
out:
	update_page_count(PG_DIRECT_MAP_4K, pages4k);
	update_page_count(PG_DIRECT_MAP_1M, pages1m);
	update_page_count(PG_DIRECT_MAP_2G, pages2g);
	return ret;
}
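
/*
 * A minimal sketch of how the loop above carves up a range, assuming an
 * EDAT2 machine without debug_pagealloc: mapping [2GB, 4GB + 1MB) uses
 * one 2GB region-third entry for [2GB, 4GB) and one 1MB segment entry
 * for the remainder, bumping PG_DIRECT_MAP_2G and PG_DIRECT_MAP_1M
 * accordingly. Address 0 is never mapped with a large frame because of
 * the "address &&" checks.
 */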

/*
 * Remove a physical memory range from the 1:1 mapping.
 * Currently only invalidates page table entries.
 */
static void vmem_remove_range(unsigned long start, unsigned long size)
{
	unsigned long pages4k, pages1m, pages2g;
	unsigned long end = start + size;
	unsigned long address = start;
	pgd_t *pg_dir;
	pud_t *pu_dir;
	pmd_t *pm_dir;
	pte_t *pt_dir;

	pages4k = pages1m = pages2g = 0;
	while (address < end) {
		pg_dir = pgd_offset_k(address);
		if (pgd_none(*pg_dir)) {
			address += PGDIR_SIZE;
			continue;
		}
		pu_dir = pud_offset(pg_dir, address);
		if (pud_none(*pu_dir)) {
			address += PUD_SIZE;
			continue;
		}
		if (pud_large(*pu_dir)) {
			pud_clear(pu_dir);
			address += PUD_SIZE;
			pages2g++;
			continue;
		}
		pm_dir = pmd_offset(pu_dir, address);
		if (pmd_none(*pm_dir)) {
			address += PMD_SIZE;
			continue;
		}
		if (pmd_large(*pm_dir)) {
			pmd_clear(pm_dir);
			address += PMD_SIZE;
			pages1m++;
			continue;
		}
		pt_dir = pte_offset_kernel(pm_dir, address);
		pte_clear(&init_mm, address, pt_dir);
		address += PAGE_SIZE;
		pages4k++;
	}
	flush_tlb_kernel_range(start, end);
	update_page_count(PG_DIRECT_MAP_4K, -pages4k);
	update_page_count(PG_DIRECT_MAP_1M, -pages1m);
	update_page_count(PG_DIRECT_MAP_2G, -pages2g);
}
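
/*
 * Note: the loop above only invalidates leaf entries; the region,
 * segment and page tables themselves are not freed, so a later
 * vmem_add_mem() over the same range simply repopulates them.
 */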

/*
 * Add a backed mem_map array to the virtual mem_map array.
 */
int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node)
{
	unsigned long pgt_prot, sgt_prot;
	unsigned long address = start;
	pgd_t *pg_dir;
	pud_t *pu_dir;
	pmd_t *pm_dir;
	pte_t *pt_dir;
	int ret = -ENOMEM;

	pgt_prot = pgprot_val(PAGE_KERNEL);
	sgt_prot = pgprot_val(SEGMENT_KERNEL);
	if (!MACHINE_HAS_NX) {
		pgt_prot &= ~_PAGE_NOEXEC;
		sgt_prot &= ~_SEGMENT_ENTRY_NOEXEC;
	}
	for (address = start; address < end;) {
		pg_dir = pgd_offset_k(address);
		if (pgd_none(*pg_dir)) {
			pu_dir = vmem_pud_alloc();
			if (!pu_dir)
				goto out;
			pgd_populate(&init_mm, pg_dir, pu_dir);
		}

		pu_dir = pud_offset(pg_dir, address);
		if (pud_none(*pu_dir)) {
			pm_dir = vmem_pmd_alloc();
			if (!pm_dir)
				goto out;
			pud_populate(&init_mm, pu_dir, pm_dir);
		}

		pm_dir = pmd_offset(pu_dir, address);
		if (pmd_none(*pm_dir)) {
			/* Use 1MB frames for vmemmap if available. We
			 * always use large frames, even if they are only
			 * partially used. Otherwise we would also have
			 * page tables, since vmemmap_populate gets called
			 * for each section separately. */
			if (MACHINE_HAS_EDAT1) {
				void *new_page;

				new_page = vmemmap_alloc_block(PMD_SIZE, node);
				if (!new_page)
					goto out;
				pmd_val(*pm_dir) = __pa(new_page) | sgt_prot;
				address = (address + PMD_SIZE) & PMD_MASK;
				continue;
			}
			pt_dir = vmem_pte_alloc();
			if (!pt_dir)
				goto out;
			pmd_populate(&init_mm, pm_dir, pt_dir);
		} else if (pmd_large(*pm_dir)) {
			address = (address + PMD_SIZE) & PMD_MASK;
			continue;
		}

		pt_dir = pte_offset_kernel(pm_dir, address);
		if (pte_none(*pt_dir)) {
			void *new_page;

			new_page = vmemmap_alloc_block(PAGE_SIZE, node);
			if (!new_page)
				goto out;
			pte_val(*pt_dir) = __pa(new_page) | pgt_prot;
		}
		address += PAGE_SIZE;
	}
	ret = 0;
out:
	return ret;
}
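
/*
 * Tearing down the vmemmap backing store is not implemented on s390, so
 * the function below is intentionally an empty stub.
 */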
void vmemmap_free(unsigned long start, unsigned long end)
{
}

/*
 * Add memory segment to the segment list if it doesn't overlap with
 * an already present segment.
 */
static int insert_memory_segment(struct memory_segment *seg)
{
	struct memory_segment *tmp;

	if (seg->start + seg->size > VMEM_MAX_PHYS ||
	    seg->start + seg->size < seg->start)
		return -ERANGE;

	list_for_each_entry(tmp, &mem_segs, list) {
		if (seg->start >= tmp->start + tmp->size)
			continue;
		if (seg->start + seg->size <= tmp->start)
			continue;
		return -ENOSPC;
	}
	list_add(&seg->list, &mem_segs);
	return 0;
}
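
/*
 * The two "continue" checks above form a standard interval overlap
 * test: a new segment is accepted only if it lies entirely below or
 * entirely above every existing segment; anything else is -ENOSPC.
 */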

/*
 * Remove memory segment from the segment list.
 */
static void remove_memory_segment(struct memory_segment *seg)
{
	list_del(&seg->list);
}
static void __remove_shared_memory(struct memory_segment *seg)
{
	remove_memory_segment(seg);
	vmem_remove_range(seg->start, seg->size);
}
int vmem_remove_mapping(unsigned long start, unsigned long size)
{
	struct memory_segment *seg;
	int ret;

	mutex_lock(&vmem_mutex);

	ret = -ENOENT;
	list_for_each_entry(seg, &mem_segs, list) {
		if (seg->start == start && seg->size == size)
			break;
	}

	if (seg->start != start || seg->size != size)
		goto out;

	ret = 0;
	__remove_shared_memory(seg);
	kfree(seg);
out:
	mutex_unlock(&vmem_mutex);
	return ret;
}
int vmem_add_mapping(unsigned long start, unsigned long size)
{
	struct memory_segment *seg;
	int ret;

	mutex_lock(&vmem_mutex);
	ret = -ENOMEM;
	seg = kzalloc(sizeof(*seg), GFP_KERNEL);
	if (!seg)
		goto out;
	seg->start = start;
	seg->size = size;

	ret = insert_memory_segment(seg);
	if (ret)
		goto out_free;

	ret = vmem_add_mem(start, size);
	if (ret)
		goto out_remove;
	goto out;

out_remove:
	__remove_shared_memory(seg);
out_free:
	kfree(seg);
out:
	mutex_unlock(&vmem_mutex);
	return ret;
}
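
/*
 * Simplified usage sketch (assuming the memory hotplug caller in
 * arch/s390/mm/init.c): the mapping is created first and torn down
 * again if adding the pages fails:
 *
 *	rc = vmem_add_mapping(start, size);
 *	if (rc)
 *		return rc;
 *	rc = __add_pages(...);
 *	if (rc)
 *		vmem_remove_mapping(start, size);
 */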

/*
 * Map the whole physical memory into virtual memory (identity mapping).
 * We reserve enough space in the vmalloc area for vmemmap to hotplug
 * additional memory segments.
 */
void __init vmem_map_init(void)
{
	struct memblock_region *reg;

	for_each_memblock(memory, reg)
		vmem_add_mem(reg->base, reg->size);
	__set_memory((unsigned long) _stext,
		     (_etext - _stext) >> PAGE_SHIFT,
		     SET_MEMORY_RO | SET_MEMORY_X);
	__set_memory((unsigned long) _etext,
		     (_eshared - _etext) >> PAGE_SHIFT,
		     SET_MEMORY_RO);
	__set_memory((unsigned long) _sinittext,
		     (_einittext - _sinittext) >> PAGE_SHIFT,
		     SET_MEMORY_RO | SET_MEMORY_X);
	pr_info("Write protected kernel read-only data: %luk\n",
		(_eshared - _stext) >> 10);
}
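
/*
 * After the calls above, kernel text and init text are read-only and
 * executable, and everything from _etext to _eshared is read-only; the
 * pr_info() reports the total write-protected range.
 */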

/*
 * Convert memblock.memory to a memory segment list so there is a single
 * list that contains all memory segments.
 */
static int __init vmem_convert_memory_chunk(void)
{
	struct memblock_region *reg;
	struct memory_segment *seg;

	mutex_lock(&vmem_mutex);
	for_each_memblock(memory, reg) {
		seg = kzalloc(sizeof(*seg), GFP_KERNEL);
		if (!seg)
			panic("Out of memory...\n");
		seg->start = reg->base;
		seg->size = reg->size;
		insert_memory_segment(seg);
	}
	mutex_unlock(&vmem_mutex);
	return 0;
}
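
/*
 * core_initcall runs early in boot, well before device driver
 * initcalls, so the segment list already describes all boot memory by
 * the time any memory hotplug path can call vmem_add_mapping().
 */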
core_initcall(vmem_convert_memory_chunk);