/*
 * Copyright IBM Corp. 2006
 * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>
 */

#include <linux/bootmem.h>
#include <linux/pfn.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/hugetlb.h>
#include <linux/slab.h>
#include <linux/memblock.h>
#include <asm/cacheflush.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/setup.h>
#include <asm/tlbflush.h>
#include <asm/sections.h>
#include <asm/set_memory.h>
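
/* Serializes updates to the 1:1 mapping and to the memory segment list. */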
static DEFINE_MUTEX(vmem_mutex);

struct memory_segment {
	struct list_head list;
	unsigned long start;
	unsigned long size;
};

static LIST_HEAD(mem_segs);
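
/*
 * Page table pages come from the buddy allocator once the slab subsystem
 * is up, and from memblock during early boot. The __ref annotation covers
 * the reference to the init-time memblock allocator.
 */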
static void __ref *vmem_alloc_pages(unsigned int order)
{
	unsigned long size = PAGE_SIZE << order;

	if (slab_is_available())
		return (void *)__get_free_pages(GFP_KERNEL, order);
	return (void *) memblock_alloc(size, size);
}
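
/*
 * Region and segment tables are 2048 eight-byte entries, i.e. 16KB or
 * four pages, hence the order-2 allocations below.
 */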
static inline pud_t *vmem_pud_alloc(void)
{
	pud_t *pud = NULL;

	pud = vmem_alloc_pages(2);
	if (!pud)
		return NULL;
	clear_table((unsigned long *) pud, _REGION3_ENTRY_EMPTY, PAGE_SIZE * 4);
	return pud;
}

pmd_t *vmem_pmd_alloc(void)
{
	pmd_t *pmd = NULL;

	pmd = vmem_alloc_pages(2);
	if (!pmd)
		return NULL;
	clear_table((unsigned long *) pmd, _SEGMENT_ENTRY_EMPTY, PAGE_SIZE * 4);
	return pmd;
}

pte_t __ref *vmem_pte_alloc(void)
{
	unsigned long size = PTRS_PER_PTE * sizeof(pte_t);
	pte_t *pte;

	if (slab_is_available())
		pte = (pte_t *) page_table_alloc(&init_mm);
	else
		pte = (pte_t *) memblock_alloc(size, size);
	if (!pte)
		return NULL;
	clear_table((unsigned long *) pte, _PAGE_INVALID, size);
	return pte;
}

/*
 * Add a physical memory range to the 1:1 mapping.
 */
static int vmem_add_mem(unsigned long start, unsigned long size)
{
	unsigned long pgt_prot, sgt_prot, r3_prot;
	unsigned long pages4k, pages1m, pages2g;
	unsigned long end = start + size;
	unsigned long address = start;
	pgd_t *pg_dir;
	pud_t *pu_dir;
	pmd_t *pm_dir;
	pte_t *pt_dir;
	int ret = -ENOMEM;

	pgt_prot = pgprot_val(PAGE_KERNEL);
	sgt_prot = pgprot_val(SEGMENT_KERNEL);
	r3_prot = pgprot_val(REGION3_KERNEL);
	if (!MACHINE_HAS_NX) {
		pgt_prot &= ~_PAGE_NOEXEC;
		sgt_prot &= ~_SEGMENT_ENTRY_NOEXEC;
		r3_prot &= ~_REGION_ENTRY_NOEXEC;
	}
	pages4k = pages1m = pages2g = 0;
	while (address < end) {
		pg_dir = pgd_offset_k(address);
		if (pgd_none(*pg_dir)) {
			pu_dir = vmem_pud_alloc();
			if (!pu_dir)
				goto out;
			pgd_populate(&init_mm, pg_dir, pu_dir);
		}
		pu_dir = pud_offset(pg_dir, address);
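		/*
		 * With EDAT2 a 2GB chunk can be mapped by a single region
		 * third table entry, provided the range is large enough,
		 * properly aligned and page debugging does not need 4K
		 * mappings.
		 */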
		if (MACHINE_HAS_EDAT2 && pud_none(*pu_dir) && address &&
		    !(address & ~PUD_MASK) && (address + PUD_SIZE <= end) &&
		    !debug_pagealloc_enabled()) {
			pud_val(*pu_dir) = address | r3_prot;
			address += PUD_SIZE;
			pages2g++;
			continue;
		}
		if (pud_none(*pu_dir)) {
			pm_dir = vmem_pmd_alloc();
			if (!pm_dir)
				goto out;
			pud_populate(&init_mm, pu_dir, pm_dir);
		}
		pm_dir = pmd_offset(pu_dir, address);
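		/*
		 * With EDAT1 a whole 1MB segment can be mapped by a single
		 * large segment table entry, avoiding the page table level.
		 */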
		if (MACHINE_HAS_EDAT1 && pmd_none(*pm_dir) && address &&
		    !(address & ~PMD_MASK) && (address + PMD_SIZE <= end) &&
		    !debug_pagealloc_enabled()) {
			pmd_val(*pm_dir) = address | sgt_prot;
			address += PMD_SIZE;
			pages1m++;
			continue;
		}
		if (pmd_none(*pm_dir)) {
			pt_dir = vmem_pte_alloc();
			if (!pt_dir)
				goto out;
			pmd_populate(&init_mm, pm_dir, pt_dir);
		}

		pt_dir = pte_offset_kernel(pm_dir, address);
		pte_val(*pt_dir) = address | pgt_prot;
		address += PAGE_SIZE;
		pages4k++;
	}
	ret = 0;
out:
	update_page_count(PG_DIRECT_MAP_4K, pages4k);
	update_page_count(PG_DIRECT_MAP_1M, pages1m);
	update_page_count(PG_DIRECT_MAP_2G, pages2g);
	return ret;
}

/*
 * Remove a physical memory range from the 1:1 mapping.
 * Currently only invalidates page table entries.
 */
static void vmem_remove_range(unsigned long start, unsigned long size)
{
	unsigned long pages4k, pages1m, pages2g;
	unsigned long end = start + size;
	unsigned long address = start;
	pgd_t *pg_dir;
	pud_t *pu_dir;
	pmd_t *pm_dir;
	pte_t *pt_dir;

	pages4k = pages1m = pages2g = 0;
	while (address < end) {
		pg_dir = pgd_offset_k(address);
		if (pgd_none(*pg_dir)) {
			address += PGDIR_SIZE;
			continue;
		}
		pu_dir = pud_offset(pg_dir, address);
		if (pud_none(*pu_dir)) {
			address += PUD_SIZE;
			continue;
		}
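		/*
		 * A large entry maps 2GB (region third) or 1MB (segment)
		 * directly; clearing the single entry unmaps the whole area.
		 */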
		if (pud_large(*pu_dir)) {
			pud_clear(pu_dir);
			address += PUD_SIZE;
			pages2g++;
			continue;
		}
		pm_dir = pmd_offset(pu_dir, address);
		if (pmd_none(*pm_dir)) {
			address += PMD_SIZE;
			continue;
		}
		if (pmd_large(*pm_dir)) {
			pmd_clear(pm_dir);
			address += PMD_SIZE;
			pages1m++;
			continue;
		}
		pt_dir = pte_offset_kernel(pm_dir, address);
		pte_clear(&init_mm, address, pt_dir);
		address += PAGE_SIZE;
		pages4k++;
	}
	flush_tlb_kernel_range(start, end);
	update_page_count(PG_DIRECT_MAP_4K, -pages4k);
	update_page_count(PG_DIRECT_MAP_1M, -pages1m);
	update_page_count(PG_DIRECT_MAP_2G, -pages2g);
}

/*
 * Add a backed mem_map array to the virtual mem_map array.
 */
int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node)
{
	unsigned long pgt_prot, sgt_prot;
	unsigned long address = start;
	pgd_t *pg_dir;
	pud_t *pu_dir;
	pmd_t *pm_dir;
	pte_t *pt_dir;
	int ret = -ENOMEM;

	pgt_prot = pgprot_val(PAGE_KERNEL);
	sgt_prot = pgprot_val(SEGMENT_KERNEL);
	if (!MACHINE_HAS_NX) {
		pgt_prot &= ~_PAGE_NOEXEC;
		sgt_prot &= ~_SEGMENT_ENTRY_NOEXEC;
	}
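	/*
	 * Walk the range, allocating any missing region, segment and page
	 * tables on the way; with EDAT1 the array is backed by 1MB frames
	 * instead of individual 4K pages.
	 */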
	for (address = start; address < end;) {
		pg_dir = pgd_offset_k(address);
		if (pgd_none(*pg_dir)) {
			pu_dir = vmem_pud_alloc();
			if (!pu_dir)
				goto out;
			pgd_populate(&init_mm, pg_dir, pu_dir);
		}

		pu_dir = pud_offset(pg_dir, address);
		if (pud_none(*pu_dir)) {
			pm_dir = vmem_pmd_alloc();
			if (!pm_dir)
				goto out;
			pud_populate(&init_mm, pu_dir, pm_dir);
		}

		pm_dir = pmd_offset(pu_dir, address);
		if (pmd_none(*pm_dir)) {
			/* Use 1MB frames for vmemmap if available. We always
			 * use large frames even if they are only partially
			 * used.
			 * Otherwise we would have also page tables since
			 * vmemmap_populate gets called for each section
			 * separately. */
			if (MACHINE_HAS_EDAT1) {
				void *new_page;

				new_page = vmemmap_alloc_block(PMD_SIZE, node);
				if (!new_page)
					goto out;
				pmd_val(*pm_dir) = __pa(new_page) | sgt_prot;
				address = (address + PMD_SIZE) & PMD_MASK;
				continue;
			}
			pt_dir = vmem_pte_alloc();
			if (!pt_dir)
				goto out;
			pmd_populate(&init_mm, pm_dir, pt_dir);
		} else if (pmd_large(*pm_dir)) {
			address = (address + PMD_SIZE) & PMD_MASK;
			continue;
		}

		pt_dir = pte_offset_kernel(pm_dir, address);
		if (pte_none(*pt_dir)) {
			void *new_page;

			new_page = vmemmap_alloc_block(PAGE_SIZE, node);
			if (!new_page)
				goto out;
			pte_val(*pt_dir) = __pa(new_page) | pgt_prot;
		}
		address += PAGE_SIZE;
	}
	ret = 0;
out:
	return ret;
}

void vmemmap_free(unsigned long start, unsigned long end)
{
	/* Backing pages of the virtual mem_map are never freed here. */
}

/*
 * Add memory segment to the segment list if it doesn't overlap with
 * an already present segment.
 */
static int insert_memory_segment(struct memory_segment *seg)
{
	struct memory_segment *tmp;

	if (seg->start + seg->size > VMEM_MAX_PHYS ||
	    seg->start + seg->size < seg->start)
		return -ERANGE;
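
	/* Reject the segment if it intersects an already inserted one. */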
	list_for_each_entry(tmp, &mem_segs, list) {
		if (seg->start >= tmp->start + tmp->size)
			continue;
		if (seg->start + seg->size <= tmp->start)
			continue;
		return -ENOSPC;
	}
	list_add(&seg->list, &mem_segs);
	return 0;
}

/*
 * Remove memory segment from the segment list.
 */
static void remove_memory_segment(struct memory_segment *seg)
{
	list_del(&seg->list);
}

static void __remove_shared_memory(struct memory_segment *seg)
{
	remove_memory_segment(seg);
	vmem_remove_range(seg->start, seg->size);
}

int vmem_remove_mapping(unsigned long start, unsigned long size)
{
	struct memory_segment *seg;
	int ret;

	mutex_lock(&vmem_mutex);

	ret = -ENOENT;
	list_for_each_entry(seg, &mem_segs, list) {
		if (seg->start == start && seg->size == size)
			break;
	}

	if (seg->start != start || seg->size != size)
		goto out;

	ret = 0;
	__remove_shared_memory(seg);
	kfree(seg);
out:
	mutex_unlock(&vmem_mutex);
	return ret;
}

int vmem_add_mapping(unsigned long start, unsigned long size)
{
	struct memory_segment *seg;
	int ret;

	mutex_lock(&vmem_mutex);
	ret = -ENOMEM;
	seg = kzalloc(sizeof(*seg), GFP_KERNEL);
	if (!seg)
		goto out;
	seg->start = start;
	seg->size = size;

	ret = insert_memory_segment(seg);
	if (ret)
		goto out_free;

	ret = vmem_add_mem(start, size);
	if (ret)
		goto out_remove;
	goto out;

out_remove:
	__remove_shared_memory(seg);
out_free:
	kfree(seg);
out:
	mutex_unlock(&vmem_mutex);
	return ret;
}

/*
 * map whole physical memory to virtual memory (identity mapping)
 * we reserve enough space in the vmalloc area for vmemmap to hotplug
 * additional memory segments.
 */
void __init vmem_map_init(void)
{
	struct memblock_region *reg;

	for_each_memblock(memory, reg)
		vmem_add_mem(reg->base, reg->size);
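	/*
	 * Make kernel text and init text read-only and executable, and
	 * the read-only data between _etext and _eshared non-executable.
	 */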
	__set_memory((unsigned long) _stext,
		     (_etext - _stext) >> PAGE_SHIFT,
		     SET_MEMORY_RO | SET_MEMORY_X);
	__set_memory((unsigned long) _etext,
		     (_eshared - _etext) >> PAGE_SHIFT,
		     SET_MEMORY_RO);
	__set_memory((unsigned long) _sinittext,
		     (_einittext - _sinittext) >> PAGE_SHIFT,
		     SET_MEMORY_RO | SET_MEMORY_X);
	pr_info("Write protected kernel read-only data: %luk\n",
		(_eshared - _stext) >> 10);
}

/*
 * Convert memblock.memory to a memory segment list so there is a single
 * list that contains all memory segments.
 */
static int __init vmem_convert_memory_chunk(void)
{
	struct memblock_region *reg;
	struct memory_segment *seg;

	mutex_lock(&vmem_mutex);
	for_each_memblock(memory, reg) {
		seg = kzalloc(sizeof(*seg), GFP_KERNEL);
		if (!seg)
			panic("Out of memory...\n");
		seg->start = reg->base;
		seg->size = reg->size;
		insert_memory_segment(seg);
	}
	mutex_unlock(&vmem_mutex);
	return 0;
}
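
/* Runs at core initcall time, once the slab allocator is available. */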
core_initcall(vmem_convert_memory_chunk);