/*
 *  Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 *  Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
 *  and Cort Dougan (PReP) (cort@cs.nmt.edu)
 *  Copyright (C) 1996 Paul Mackerras
 *  PPC44x/36-bit changes by Matt Porter (mporter@mvista.com)
 *
 *  Derived from "arch/i386/mm/init.c"
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 */
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/stddef.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/highmem.h>
#include <linux/initrd.h>
#include <linux/pagemap.h>
#include <linux/suspend.h>
#include <linux/lmb.h>
#include <linux/hugetlb.h>

#include <asm/pgalloc.h>
#include <asm/io.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/mmu.h>
#include <asm/machdep.h>
#include <asm/btext.h>
#include <asm/tlb.h>
#include <asm/sections.h>
#include <asm/sparsemem.h>
#include <asm/fixmap.h>
#include <asm/swiotlb.h>

#include "mmu_decl.h"

#ifndef CPU_FTR_COHERENT_ICACHE
#define CPU_FTR_COHERENT_ICACHE	0	/* XXX for now */
#define CPU_FTR_NOEXECUTE	0
#endif

int init_bootmem_done;
int mem_init_done;
phys_addr_t memory_limit;

#ifdef CONFIG_HIGHMEM
pte_t *kmap_pte;
pgprot_t kmap_prot;

EXPORT_SYMBOL(kmap_prot);
EXPORT_SYMBOL(kmap_pte);
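
/*
 * Walk the kernel page tables (pgd -> pud -> pmd -> pte) down to the
 * PTE mapping a kernel virtual address.  Used below to locate the
 * PKMAP and kmap_atomic PTEs set up by paging_init().
 */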
static inline pte_t *virt_to_kpte(unsigned long vaddr)
{
	return pte_offset_kernel(pmd_offset(pud_offset(pgd_offset_k(vaddr),
			vaddr), vaddr), vaddr);
}
#endif /* CONFIG_HIGHMEM */

int page_is_ram(unsigned long pfn)
{
#ifndef CONFIG_PPC64	/* XXX for now */
	return pfn < max_pfn;
#else
	unsigned long paddr = (pfn << PAGE_SHIFT);
	int i;

	for (i = 0; i < lmb.memory.cnt; i++) {
		unsigned long base;

		base = lmb.memory.region[i].base;

		if ((paddr >= base) &&
		    (paddr < (base + lmb.memory.region[i].size))) {
			return 1;
		}
	}

	return 0;
#endif
}
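
/*
 * Choose the protections for mapping a physical range into user space
 * (this is the hook used for things like /dev/mem mappings): let the
 * platform override if it wants, otherwise map anything that is not
 * RAM as non-cacheable.
 */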
pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
			      unsigned long size, pgprot_t vma_prot)
{
	if (ppc_md.phys_mem_access_prot)
		return ppc_md.phys_mem_access_prot(file, pfn, size, vma_prot);

	if (!page_is_ram(pfn))
		vma_prot = pgprot_noncached(vma_prot);

	return vma_prot;
}
EXPORT_SYMBOL(phys_mem_access_prot);

#ifdef CONFIG_MEMORY_HOTPLUG

#ifdef CONFIG_NUMA
int memory_add_physaddr_to_nid(u64 start)
{
	return hot_add_scn_to_nid(start);
}
#endif

int arch_add_memory(int nid, u64 start, u64 size)
{
	struct pglist_data *pgdata;
	struct zone *zone;
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;

	pgdata = NODE_DATA(nid);

	/* Map the new range into the kernel linear mapping ... */
	start = (unsigned long)__va(start);
	create_section_mapping(start, start + size);

	/* this should work for most non-highmem platforms */
	zone = pgdata->node_zones;

	/* ... then hand its pages over to the page allocator */
	return __add_pages(nid, zone, start_pfn, nr_pages);
}
#endif /* CONFIG_MEMORY_HOTPLUG */

/*
 * walk_system_ram_range() needs to make sure there are no holes in a
 * given memory range.  PPC64 does not maintain the memory layout in
 * /proc/iomem; instead it maintains it in lmb.memory structures.  Walk
 * through the memory regions, find the holes and call back for the
 * contiguous regions.
 */
int
walk_system_ram_range(unsigned long start_pfn, unsigned long nr_pages,
		void *arg, int (*func)(unsigned long, unsigned long, void *))
{
	struct lmb_property res;
	unsigned long pfn, len;
	u64 end;
	int ret = -1;

	res.base = (u64) start_pfn << PAGE_SHIFT;
	res.size = (u64) nr_pages << PAGE_SHIFT;

	end = res.base + res.size - 1;
	while ((res.base < end) && (lmb_find(&res) >= 0)) {
		pfn = (unsigned long)(res.base >> PAGE_SHIFT);
		len = (unsigned long)(res.size >> PAGE_SHIFT);
		ret = (*func)(pfn, len, arg);
		if (ret)
			break;
		res.base += (res.size + 1);
		res.size = (end - res.base + 1);
	}
	return ret;
}
EXPORT_SYMBOL_GPL(walk_system_ram_range);
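
/*
 * Illustrative use only: a caller can sum the RAM pages in a range with
 * a callback like the one below.  Returning non-zero from the callback
 * stops the walk.  ("count_ram_pages" is a hypothetical helper, not
 * part of this file.)
 *
 *	static int count_ram_pages(unsigned long pfn, unsigned long nr,
 *				   void *arg)
 *	{
 *		*(unsigned long *)arg += nr;
 *		return 0;
 *	}
 *
 *	unsigned long nr_ram = 0;
 *	walk_system_ram_range(0, max_pfn, &nr_ram, count_ram_pages);
 */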

/*
 * Initialize the bootmem system and give it all the memory we
 * have available.  If we are using highmem, we only put the
 * lowmem into the bootmem system.
 */
#ifndef CONFIG_NEED_MULTIPLE_NODES
void __init do_init_bootmem(void)
{
	unsigned long i;
	unsigned long start, bootmap_pages;
	unsigned long total_pages;
	int boot_mapsize;

	max_low_pfn = max_pfn = lmb_end_of_DRAM() >> PAGE_SHIFT;
	total_pages = (lmb_end_of_DRAM() - memstart_addr) >> PAGE_SHIFT;
#ifdef CONFIG_HIGHMEM
	total_pages = total_lowmem >> PAGE_SHIFT;
	max_low_pfn = lowmem_end_addr >> PAGE_SHIFT;
#endif

	/*
	 * Find an area to use for the bootmem bitmap.  Calculate the size of
	 * bitmap required as (Total Memory) / PAGE_SIZE / BITS_PER_BYTE.
	 * Add 1 additional page in case the address isn't page-aligned.
	 */
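	/*
	 * Worked example (illustrative numbers): with 512MB of lowmem and
	 * 4KB pages there are 131072 page frames, so the bitmap needs
	 * 131072 / 8 = 16KB, i.e. 4 pages, and bootmem_bootmap_pages()
	 * may report one more page to cover misalignment.
	 */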
	bootmap_pages = bootmem_bootmap_pages(total_pages);

	start = lmb_alloc(bootmap_pages << PAGE_SHIFT, PAGE_SIZE);

	min_low_pfn = MEMORY_START >> PAGE_SHIFT;
	boot_mapsize = init_bootmem_node(NODE_DATA(0), start >> PAGE_SHIFT,
					 min_low_pfn, max_low_pfn);

	/* Add active regions with valid PFNs */
	for (i = 0; i < lmb.memory.cnt; i++) {
		unsigned long start_pfn, end_pfn;
		start_pfn = lmb.memory.region[i].base >> PAGE_SHIFT;
		end_pfn = start_pfn + lmb_size_pages(&lmb.memory, i);
		add_active_range(0, start_pfn, end_pfn);
	}

	/* Add all physical memory to the bootmem map, mark each area
	 * present.
	 */
#ifdef CONFIG_HIGHMEM
	free_bootmem_with_active_regions(0, lowmem_end_addr >> PAGE_SHIFT);

	/* reserve the sections we're already using */
	for (i = 0; i < lmb.reserved.cnt; i++) {
		unsigned long addr = lmb.reserved.region[i].base +
				     lmb_size_bytes(&lmb.reserved, i) - 1;

		/* The region lies entirely below lowmem_end_addr ... */
		if (addr < lowmem_end_addr)
			reserve_bootmem(lmb.reserved.region[i].base,
					lmb_size_bytes(&lmb.reserved, i),
					BOOTMEM_DEFAULT);
		/* ... or straddles it, so clip the reservation to lowmem */
		else if (lmb.reserved.region[i].base < lowmem_end_addr) {
			unsigned long adjusted_size = lowmem_end_addr -
				      lmb.reserved.region[i].base;
			reserve_bootmem(lmb.reserved.region[i].base,
					adjusted_size, BOOTMEM_DEFAULT);
		}
	}
#else
	free_bootmem_with_active_regions(0, max_pfn);

	/* reserve the sections we're already using */
	for (i = 0; i < lmb.reserved.cnt; i++)
		reserve_bootmem(lmb.reserved.region[i].base,
				lmb_size_bytes(&lmb.reserved, i),
				BOOTMEM_DEFAULT);
#endif

	/* XXX need to clip this if using highmem? */
	sparse_memory_present_with_active_regions(0);

	init_bootmem_done = 1;
}

/* mark pages that don't exist as nosave */
static int __init mark_nonram_nosave(void)
{
	unsigned long lmb_next_region_start_pfn,
		      lmb_region_max_pfn;
	int i;

	for (i = 0; i < lmb.memory.cnt - 1; i++) {
		lmb_region_max_pfn =
			(lmb.memory.region[i].base >> PAGE_SHIFT) +
			(lmb.memory.region[i].size >> PAGE_SHIFT);
		lmb_next_region_start_pfn =
			lmb.memory.region[i+1].base >> PAGE_SHIFT;

		/* If there is a gap before the next region, mark it nosave */
		if (lmb_region_max_pfn < lmb_next_region_start_pfn)
			register_nosave_region(lmb_region_max_pfn,
					       lmb_next_region_start_pfn);
	}

	return 0;
}
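
/*
 * For example (illustrative numbers): with RAM at 0x00000000-0x1fffffff
 * and 0x80000000-0x9fffffff, the pfns covering the hole in between are
 * registered as nosave so hibernation does not try to save them.
 */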

/*
 * paging_init() sets up the page tables - in fact we've already done this.
 */
void __init paging_init(void)
{
	unsigned long total_ram = lmb_phys_mem_size();
	phys_addr_t top_of_ram = lmb_end_of_DRAM();
	unsigned long max_zone_pfns[MAX_NR_ZONES];

#ifdef CONFIG_PPC32
	unsigned long v = __fix_to_virt(__end_of_fixed_addresses - 1);
	unsigned long end = __fix_to_virt(FIX_HOLE);

	/* Instantiate page tables covering the whole fixmap range */
	for (; v < end; v += PAGE_SIZE)
		map_page(v, 0, 0); /* XXX gross */
#endif

#ifdef CONFIG_HIGHMEM
	map_page(PKMAP_BASE, 0, 0);	/* XXX gross */
	pkmap_page_table = virt_to_kpte(PKMAP_BASE);

	kmap_pte = virt_to_kpte(__fix_to_virt(FIX_KMAP_BEGIN));
	kmap_prot = PAGE_KERNEL;
#endif /* CONFIG_HIGHMEM */

	printk(KERN_DEBUG "Top of RAM: 0x%llx, Total RAM: 0x%lx\n",
	       (unsigned long long)top_of_ram, total_ram);
	printk(KERN_DEBUG "Memory hole size: %ldMB\n",
	       (long int)((top_of_ram - total_ram) >> 20));
	memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
#ifdef CONFIG_HIGHMEM
	max_zone_pfns[ZONE_DMA] = lowmem_end_addr >> PAGE_SHIFT;
	max_zone_pfns[ZONE_HIGHMEM] = top_of_ram >> PAGE_SHIFT;
#else
	max_zone_pfns[ZONE_DMA] = top_of_ram >> PAGE_SHIFT;
#endif
	free_area_init_nodes(max_zone_pfns);

	mark_nonram_nosave();
}
#endif /* ! CONFIG_NEED_MULTIPLE_NODES */

void __init mem_init(void)
{
#ifdef CONFIG_NEED_MULTIPLE_NODES
	int nid;
#endif
	pg_data_t *pgdat;
	unsigned long i;
	struct page *page;
	unsigned long reservedpages = 0, codesize, initsize, datasize, bsssize;

#ifdef CONFIG_SWIOTLB
	if (ppc_swiotlb_enable)
		swiotlb_init(1);
#endif

	num_physpages = lmb.memory.size >> PAGE_SHIFT;
	high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);

#ifdef CONFIG_NEED_MULTIPLE_NODES
	for_each_online_node(nid) {
		if (NODE_DATA(nid)->node_spanned_pages != 0) {
			printk("freeing bootmem node %d\n", nid);
			totalram_pages +=
				free_all_bootmem_node(NODE_DATA(nid));
		}
	}
#else
	max_mapnr = max_pfn;
	totalram_pages += free_all_bootmem();
#endif
	for_each_online_pgdat(pgdat) {
		for (i = 0; i < pgdat->node_spanned_pages; i++) {
			if (!pfn_valid(pgdat->node_start_pfn + i))
				continue;
			page = pgdat_page_nr(pgdat, i);
			if (PageReserved(page))
				reservedpages++;
		}
	}

	codesize = (unsigned long)&_sdata - (unsigned long)&_stext;
	datasize = (unsigned long)&_edata - (unsigned long)&_sdata;
	initsize = (unsigned long)&__init_end - (unsigned long)&__init_begin;
	bsssize = (unsigned long)&__bss_stop - (unsigned long)&__bss_start;

#ifdef CONFIG_HIGHMEM
	{
		unsigned long pfn, highmem_mapnr;

		highmem_mapnr = lowmem_end_addr >> PAGE_SHIFT;
		for (pfn = highmem_mapnr; pfn < max_mapnr; ++pfn) {
			struct page *page = pfn_to_page(pfn);
			if (lmb_is_reserved(pfn << PAGE_SHIFT))
				continue;
			ClearPageReserved(page);
			init_page_count(page);
			__free_page(page);
			totalhigh_pages++;
			reservedpages--;
		}
		totalram_pages += totalhigh_pages;
		printk(KERN_DEBUG "High memory: %luk\n",
		       totalhigh_pages << (PAGE_SHIFT-10));
	}
#endif /* CONFIG_HIGHMEM */

	printk(KERN_INFO "Memory: %luk/%luk available (%luk kernel code, "
	       "%luk reserved, %luk data, %luk bss, %luk init)\n",
		nr_free_pages() << (PAGE_SHIFT-10),
		num_physpages << (PAGE_SHIFT-10),
		codesize >> 10,
		reservedpages << (PAGE_SHIFT-10),
		datasize >> 10,
		bsssize >> 10,
		initsize >> 10);
391 pr_info("Kernel virtual memory layout:\n");
392 pr_info(" * 0x%08lx..0x%08lx : fixmap\n", FIXADDR_START, FIXADDR_TOP);
393 #ifdef CONFIG_HIGHMEM
394 pr_info(" * 0x%08lx..0x%08lx : highmem PTEs\n",
395 PKMAP_BASE, PKMAP_ADDR(LAST_PKMAP));
396 #endif /* CONFIG_HIGHMEM */
397 #ifdef CONFIG_NOT_COHERENT_CACHE
398 pr_info(" * 0x%08lx..0x%08lx : consistent mem\n",
399 IOREMAP_TOP, IOREMAP_TOP + CONFIG_CONSISTENT_SIZE);
400 #endif /* CONFIG_NOT_COHERENT_CACHE */
401 pr_info(" * 0x%08lx..0x%08lx : early ioremap\n",
402 ioremap_bot, IOREMAP_TOP);
403 pr_info(" * 0x%08lx..0x%08lx : vmalloc & ioremap\n",
404 VMALLOC_START, VMALLOC_END);
405 #endif /* CONFIG_PPC32 */

/*
 * This is called when a page has been modified by the kernel.
 * It just marks the page as not i-cache clean.  We do the i-cache
 * flush later when the page is given to a user process, if necessary.
 */
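/*
 * (PG_arch_1 serves as an "icache clean" bit on powerpc: clearing it
 * below records that the icache may be stale for this page.)
 */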
void flush_dcache_page(struct page *page)
{
	if (cpu_has_feature(CPU_FTR_COHERENT_ICACHE))
		return;
	/* avoid an atomic op if possible */
	if (test_bit(PG_arch_1, &page->flags))
		clear_bit(PG_arch_1, &page->flags);
}
EXPORT_SYMBOL(flush_dcache_page);

void flush_dcache_icache_page(struct page *page)
{
#ifdef CONFIG_HUGETLB_PAGE
	if (PageCompound(page)) {
		flush_dcache_icache_hugepage(page);
		return;
	}
#endif
#ifdef CONFIG_BOOKE
	{
		void *start = kmap_atomic(page, KM_PPC_SYNC_ICACHE);
		__flush_dcache_icache(start);
		kunmap_atomic(start, KM_PPC_SYNC_ICACHE);
	}
#elif defined(CONFIG_8xx) || defined(CONFIG_PPC64)
	/* On 8xx there is no need to kmap since highmem is not supported */
	__flush_dcache_icache(page_address(page));
#else
	__flush_dcache_icache_phys(page_to_pfn(page) << PAGE_SHIFT);
#endif
}

void clear_user_page(void *page, unsigned long vaddr, struct page *pg)
{
	clear_page(page);

	/*
	 * We shouldn't have to do this, but some versions of glibc
	 * require it (ld.so assumes zero filled pages are icache clean)
	 * - Anton
	 */
	flush_dcache_page(pg);
}
EXPORT_SYMBOL(clear_user_page);

void copy_user_page(void *vto, void *vfrom, unsigned long vaddr,
		    struct page *pg)
{
	copy_page(vto, vfrom);

	/*
	 * We should be able to use the following optimisation, however
	 * there are two problems.
	 * Firstly a bug in some versions of binutils meant PLT sections
	 * were not marked executable.
	 * Secondly the first word in the GOT section is blrl, used
	 * to establish the GOT address.  Until recently the GOT was
	 * not marked executable.
	 * - Anton
	 */
#if 0
	if (!vma->vm_file && ((vma->vm_flags & VM_EXEC) == 0))
		return;
#endif

	flush_dcache_page(pg);
}

void flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
			     unsigned long addr, int len)
{
	unsigned long maddr;

	maddr = (unsigned long) kmap(page) + (addr & ~PAGE_MASK);
	flush_icache_range(maddr, maddr + len);
	kunmap(page);
}
EXPORT_SYMBOL(flush_icache_user_range);

/*
 * This is called at the end of handling a user page fault, when the
 * fault has been handled by updating a PTE in the linux page tables.
 * We use it to preload an HPTE into the hash table corresponding to
 * the updated linux PTE.
 *
 * This must always be called with the pte lock held.
 */
void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
		      pte_t *ptep)
{
#ifdef CONFIG_PPC_STD_MMU
	unsigned long access = 0, trap;

	/* We only want HPTEs for linux PTEs that have _PAGE_ACCESSED set */
	if (!pte_young(*ptep) || address >= TASK_SIZE)
		return;

	/* We try to figure out if we are coming from an instruction
	 * access fault and pass that down to __hash_page so we avoid
	 * double-faulting on execution of fresh text.  We have to test
	 * for regs NULL since init will get here first thing at boot.
	 *
	 * We also avoid filling the hash if not coming from a fault.
	 */
	if (current->thread.regs == NULL)
		return;
	trap = TRAP(current->thread.regs);
	if (trap == 0x400)		/* instruction storage fault */
		access |= _PAGE_EXEC;
	else if (trap != 0x300)		/* not a data storage fault either */
		return;
	hash_preload(vma->vm_mm, address, access, trap);
#endif /* CONFIG_PPC_STD_MMU */
}