/*
 * Copyright IBM Corp. 2006
 * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>
 */

#include <linux/bootmem.h>
#include <linux/pfn.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/hugetlb.h>
#include <linux/slab.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/setup.h>
#include <asm/tlbflush.h>
#include <asm/sections.h>

static DEFINE_MUTEX(vmem_mutex);
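
/*
 * Each memory_segment describes one contiguous range of physical memory
 * that is present in the 1:1 mapping. All segments are kept on the
 * mem_segs list, which is protected by vmem_mutex.
 */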
struct memory_segment {
	struct list_head list;
	unsigned long start;
	unsigned long size;
};

static LIST_HEAD(mem_segs);
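
/*
 * Allocate 2^order pages: from the page allocator once the slab
 * allocator is available, from bootmem during early boot.
 */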
static void __ref *vmem_alloc_pages(unsigned int order)
{
	if (slab_is_available())
		return (void *)__get_free_pages(GFP_KERNEL, order);
	return alloc_bootmem_pages((1 << order) * PAGE_SIZE);
}
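
/*
 * Region-third (pud) and segment (pmd) tables are four pages (2048
 * entries) each, hence the order-2 allocations; all entries are
 * initialized to empty.
 */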
static inline pud_t *vmem_pud_alloc(void)
{
	pud_t *pud = NULL;

#ifdef CONFIG_64BIT
	pud = vmem_alloc_pages(2);
	if (!pud)
		return NULL;
	clear_table((unsigned long *) pud, _REGION3_ENTRY_EMPTY, PAGE_SIZE * 4);
#endif
	return pud;
}

static inline pmd_t *vmem_pmd_alloc(void)
{
	pmd_t *pmd = NULL;

#ifdef CONFIG_64BIT
	pmd = vmem_alloc_pages(2);
	if (!pmd)
		return NULL;
	clear_table((unsigned long *) pmd, _SEGMENT_ENTRY_EMPTY, PAGE_SIZE * 4);
#endif
	return pmd;
}
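
/*
 * Allocate and initialize a page table: via the page table allocator of
 * init_mm once slab is available, otherwise from bootmem.
 */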
static pte_t __ref *vmem_pte_alloc(unsigned long address)
{
	pte_t *pte;

	if (slab_is_available())
		pte = (pte_t *) page_table_alloc(&init_mm, address);
	else
		pte = alloc_bootmem(PTRS_PER_PTE * sizeof(pte_t));
	if (!pte)
		return NULL;
	clear_table((unsigned long *) pte, _PAGE_TYPE_EMPTY,
		    PTRS_PER_PTE * sizeof(pte_t));
	return pte;
}

/*
 * Add a physical memory range to the 1:1 mapping.
 * A non-zero "ro" argument maps the range read-only.
 */
static int vmem_add_mem(unsigned long start, unsigned long size, int ro)
{
	unsigned long end = start + size;
	unsigned long address = start;
	pgd_t *pg_dir;
	pud_t *pu_dir;
	pmd_t *pm_dir;
	pte_t *pt_dir;
	pte_t  pte;
	int ret = -ENOMEM;

	while (address < end) {
		pg_dir = pgd_offset_k(address);
		if (pgd_none(*pg_dir)) {
			pu_dir = vmem_pud_alloc();
			if (!pu_dir)
				goto out;
			pgd_populate(&init_mm, pg_dir, pu_dir);
		}

		pu_dir = pud_offset(pg_dir, address);
		if (pud_none(*pu_dir)) {
			pm_dir = vmem_pmd_alloc();
			if (!pm_dir)
				goto out;
			pud_populate(&init_mm, pu_dir, pm_dir);
		}

		pte = mk_pte_phys(address, __pgprot(ro ? _PAGE_RO : 0));
		pm_dir = pmd_offset(pu_dir, address);

#if defined(CONFIG_64BIT) && !defined(CONFIG_DEBUG_PAGEALLOC)
		/* Use a 1MB segment mapping if EDAT1 is available and a
		 * full, segment aligned 1MB chunk fits into the range. */
		if (MACHINE_HAS_EDAT1 && pmd_none(*pm_dir) && address &&
		    !(address & ~PMD_MASK) && (address + PMD_SIZE <= end)) {
			pte_val(pte) |= _SEGMENT_ENTRY_LARGE;
			pmd_val(*pm_dir) = pte_val(pte);
			address += PMD_SIZE;
			continue;
		}
#endif
		if (pmd_none(*pm_dir)) {
			pt_dir = vmem_pte_alloc(address);
			if (!pt_dir)
				goto out;
			pmd_populate(&init_mm, pm_dir, pt_dir);
		}

		pt_dir = pte_offset_kernel(pm_dir, address);
		*pt_dir = pte;
		address += PAGE_SIZE;
	}
	ret = 0;
out:
	flush_tlb_kernel_range(start, end);
	return ret;
}

/*
 * Remove a physical memory range from the 1:1 mapping.
 * Currently only invalidates page table entries.
 */
static void vmem_remove_range(unsigned long start, unsigned long size)
{
	unsigned long end = start + size;
	unsigned long address = start;
	pgd_t *pg_dir;
	pud_t *pu_dir;
	pmd_t *pm_dir;
	pte_t *pt_dir;
	pte_t  pte;

	pte_val(pte) = _PAGE_TYPE_EMPTY;
	while (address < end) {
		pg_dir = pgd_offset_k(address);
		if (pgd_none(*pg_dir)) {
			address += PGDIR_SIZE;
			continue;
		}
		pu_dir = pud_offset(pg_dir, address);
		if (pud_none(*pu_dir)) {
			address += PUD_SIZE;
			continue;
		}
		pm_dir = pmd_offset(pu_dir, address);
		if (pmd_none(*pm_dir)) {
			address += PMD_SIZE;
			continue;
		}
		if (pmd_large(*pm_dir)) {
			pmd_clear(pm_dir);
			address += PMD_SIZE;
			continue;
		}
		pt_dir = pte_offset_kernel(pm_dir, address);
		*pt_dir = pte;
		address += PAGE_SIZE;
	}
	flush_tlb_kernel_range(start, end);
}

/*
 * Add a backed mem_map array to the virtual mem_map array.
 */
int __meminit vmemmap_populate(struct page *start, unsigned long nr, int node)
{
	unsigned long address, start_addr, end_addr;
	pgd_t *pg_dir;
	pud_t *pu_dir;
	pmd_t *pm_dir;
	pte_t *pt_dir;
	pte_t  pte;
	int ret = -ENOMEM;

	start_addr = (unsigned long) start;
	end_addr = (unsigned long) (start + nr);

	for (address = start_addr; address < end_addr; address += PAGE_SIZE) {
		pg_dir = pgd_offset_k(address);
		if (pgd_none(*pg_dir)) {
			pu_dir = vmem_pud_alloc();
			if (!pu_dir)
				goto out;
			pgd_populate(&init_mm, pg_dir, pu_dir);
		}

		pu_dir = pud_offset(pg_dir, address);
		if (pud_none(*pu_dir)) {
			pm_dir = vmem_pmd_alloc();
			if (!pm_dir)
				goto out;
			pud_populate(&init_mm, pu_dir, pm_dir);
		}

		pm_dir = pmd_offset(pu_dir, address);
		if (pmd_none(*pm_dir)) {
			pt_dir = vmem_pte_alloc(address);
			if (!pt_dir)
				goto out;
			pmd_populate(&init_mm, pm_dir, pt_dir);
		}

		pt_dir = pte_offset_kernel(pm_dir, address);
		if (pte_none(*pt_dir)) {
			unsigned long new_page;

			new_page = __pa(vmem_alloc_pages(0));
			if (!new_page)
				goto out;
			pte = pfn_pte(new_page >> PAGE_SHIFT, PAGE_KERNEL);
			*pt_dir = pte;
		}
	}
	memset(start, 0, nr * sizeof(struct page));
	ret = 0;
out:
	flush_tlb_kernel_range(start_addr, end_addr);
	return ret;
}

/*
 * Add memory segment to the segment list if it doesn't overlap with
 * an already present segment.
 */
static int insert_memory_segment(struct memory_segment *seg)
{
	struct memory_segment *tmp;

	if (seg->start + seg->size > VMEM_MAX_PHYS ||
	    seg->start + seg->size < seg->start)
		return -ERANGE;

	list_for_each_entry(tmp, &mem_segs, list) {
		if (seg->start >= tmp->start + tmp->size)
			continue;
		if (seg->start + seg->size <= tmp->start)
			continue;
		return -ENOSPC;
	}
	list_add(&seg->list, &mem_segs);
	return 0;
}

/*
 * Remove memory segment from the segment list.
 */
static void remove_memory_segment(struct memory_segment *seg)
{
	list_del(&seg->list);
}
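
/*
 * Remove a segment from the segment list and invalidate its pages in
 * the 1:1 mapping. The caller must hold vmem_mutex.
 */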
static void __remove_shared_memory(struct memory_segment *seg)
{
	remove_memory_segment(seg);
	vmem_remove_range(seg->start, seg->size);
}
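
/*
 * Remove a previously added mapping again. The given range must exactly
 * match a segment that was added with vmem_add_mapping().
 */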
int vmem_remove_mapping(unsigned long start, unsigned long size)
{
	struct memory_segment *seg;
	int ret;

	mutex_lock(&vmem_mutex);

	ret = -ENOENT;
	list_for_each_entry(seg, &mem_segs, list) {
		if (seg->start == start && seg->size == size)
			break;
	}

	if (seg->start != start || seg->size != size)
		goto out;

	ret = 0;
	__remove_shared_memory(seg);
	kfree(seg);
out:
	mutex_unlock(&vmem_mutex);
	return ret;
}
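
/*
 * Add a memory range, e.g. a shared memory segment, to the 1:1 mapping
 * and track it on the segment list. Fails if the range overlaps an
 * already present segment or extends beyond VMEM_MAX_PHYS.
 */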
int vmem_add_mapping(unsigned long start, unsigned long size)
{
	struct memory_segment *seg;
	int ret;

	mutex_lock(&vmem_mutex);
	ret = -ENOMEM;
	seg = kzalloc(sizeof(*seg), GFP_KERNEL);
	if (!seg)
		goto out;
	seg->start = start;
	seg->size = size;

	ret = insert_memory_segment(seg);
	if (ret)
		goto out_free;

	ret = vmem_add_mem(start, size, 0);
	if (ret)
		goto out_remove;
	goto out;

out_remove:
	__remove_shared_memory(seg);
out_free:
	kfree(seg);
out:
	mutex_unlock(&vmem_mutex);
	return ret;
}

/*
 * map whole physical memory to virtual memory (identity mapping)
 * we reserve enough space in the vmalloc area for vmemmap to hotplug
 * additional memory segments.
 */
void __init vmem_map_init(void)
{
	unsigned long ro_start, ro_end;
	unsigned long start, end;
	int i;

	/* Kernel text and read-only data, i.e. _stext up to _eshared,
	 * gets mapped read-only. */
	ro_start = PFN_ALIGN((unsigned long)&_stext);
	ro_end = (unsigned long)&_eshared & PAGE_MASK;
	for (i = 0; i < MEMORY_CHUNKS && memory_chunk[i].size > 0; i++) {
		if (memory_chunk[i].type == CHUNK_CRASHK ||
		    memory_chunk[i].type == CHUNK_OLDMEM)
			continue;
		start = memory_chunk[i].addr;
		end = memory_chunk[i].addr + memory_chunk[i].size;
		if (start >= ro_end || end <= ro_start)
			vmem_add_mem(start, end - start, 0);
		else if (start >= ro_start && end <= ro_end)
			vmem_add_mem(start, end - start, 1);
		else if (start >= ro_start) {
			vmem_add_mem(start, ro_end - start, 1);
			vmem_add_mem(ro_end, end - ro_end, 0);
		} else if (end < ro_end) {
			vmem_add_mem(start, ro_start - start, 0);
			vmem_add_mem(ro_start, end - ro_start, 1);
		} else {
			vmem_add_mem(start, ro_start - start, 0);
			vmem_add_mem(ro_start, ro_end - ro_start, 1);
			vmem_add_mem(ro_end, end - ro_end, 0);
		}
	}
}

/*
 * Convert memory chunk array to a memory segment list so there is a single
 * list that contains both r/w memory and shared memory segments.
 */
static int __init vmem_convert_memory_chunk(void)
{
	struct memory_segment *seg;
	int i;

	mutex_lock(&vmem_mutex);
	for (i = 0; i < MEMORY_CHUNKS; i++) {
		if (!memory_chunk[i].size)
			continue;
		if (memory_chunk[i].type == CHUNK_CRASHK ||
		    memory_chunk[i].type == CHUNK_OLDMEM)
			continue;
		seg = kzalloc(sizeof(*seg), GFP_KERNEL);
		if (!seg)
			panic("Out of memory...\n");
		seg->start = memory_chunk[i].addr;
		seg->size = memory_chunk[i].size;
		insert_memory_segment(seg);
	}
	mutex_unlock(&vmem_mutex);
	return 0;
}

core_initcall(vmem_convert_memory_chunk);