/*
 *  PowerPC version
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 *  Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
 *  and Cort Dougan (PReP) (cort@cs.nmt.edu)
 *    Copyright (C) 1996 Paul Mackerras
 *  PPC44x/36-bit changes by Matt Porter (mporter@mvista.com)
 *
 *  Derived from "arch/i386/mm/init.c"
 *    Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 *
 */

#include <linux/export.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/gfp.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/stddef.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/highmem.h>
#include <linux/initrd.h>
#include <linux/pagemap.h>
#include <linux/suspend.h>
#include <linux/memblock.h>
#include <linux/hugetlb.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#include <asm/pgalloc.h>
#include <asm/prom.h>
#include <asm/io.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/mmu.h>
#include <asm/smp.h>
#include <asm/machdep.h>
#include <asm/btext.h>
#include <asm/tlb.h>
#include <asm/sections.h>
#include <asm/sparsemem.h>
#include <asm/vdso.h>
#include <asm/fixmap.h>
#include <asm/swiotlb.h>
#include <asm/rtas.h>

#include "mmu_decl.h"

#ifndef CPU_FTR_COHERENT_ICACHE
#define CPU_FTR_COHERENT_ICACHE	0	/* XXX for now */
#define CPU_FTR_NOEXECUTE	0
#endif

int init_bootmem_done;
int mem_init_done;
unsigned long long memory_limit;

#ifdef CONFIG_HIGHMEM
pte_t *kmap_pte;
EXPORT_SYMBOL(kmap_pte);
pgprot_t kmap_prot;
EXPORT_SYMBOL(kmap_prot);

/* Walk the kernel page tables down to the pte that maps a kernel
 * virtual address (pgd -> pud -> pmd -> pte).
 */
static inline pte_t *virt_to_kpte(unsigned long vaddr)
{
	return pte_offset_kernel(pmd_offset(pud_offset(pgd_offset_k(vaddr),
			vaddr), vaddr), vaddr);
}
#endif

int page_is_ram(unsigned long pfn)
{
#ifndef CONFIG_PPC64	/* XXX for now */
	return pfn < max_pfn;
#else
	unsigned long paddr = (pfn << PAGE_SHIFT);
	struct memblock_region *reg;

	for_each_memblock(memory, reg)
		if (paddr >= reg->base && paddr < (reg->base + reg->size))
			return 1;
	return 0;
#endif
}

pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
			      unsigned long size, pgprot_t vma_prot)
{
	if (ppc_md.phys_mem_access_prot)
		return ppc_md.phys_mem_access_prot(file, pfn, size, vma_prot);

	if (!page_is_ram(pfn))
		vma_prot = pgprot_noncached(vma_prot);

	return vma_prot;
}
EXPORT_SYMBOL(phys_mem_access_prot);

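/*
 * Illustrative sketch only (not part of the original file): a character
 * driver's mmap handler could use the helper above to choose page
 * protection before remapping; "foo_mmap" and its driver are hypothetical.
 *
 *	static int foo_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		unsigned long len = vma->vm_end - vma->vm_start;
 *
 *		vma->vm_page_prot = phys_mem_access_prot(file, vma->vm_pgoff,
 *							 len, vma->vm_page_prot);
 *		return remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
 *				       len, vma->vm_page_prot);
 *	}
 */
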
#ifdef CONFIG_MEMORY_HOTPLUG

#ifdef CONFIG_NUMA
int memory_add_physaddr_to_nid(u64 start)
{
	return hot_add_scn_to_nid(start);
}
#endif

int arch_add_memory(int nid, u64 start, u64 size)
{
	struct pglist_data *pgdata;
	struct zone *zone;
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;

	pgdata = NODE_DATA(nid);

	start = (unsigned long)__va(start);
	if (create_section_mapping(start, start + size))
		return -EINVAL;

	/* this should work for most non-highmem platforms */
	zone = pgdata->node_zones +
		zone_for_memory(nid, start, size, 0);

	return __add_pages(nid, zone, start_pfn, nr_pages);
}

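/*
 * For orientation, a rough sketch of how control reaches the hook above
 * (the exact path varies by kernel version):
 *
 *	add_memory(nid, start, size)            [mm/memory_hotplug.c]
 *	  -> arch_add_memory(nid, start, size)  [this file]
 *	       -> create_section_mapping()      map the new range
 *	       -> __add_pages()                 create the memory sections
 */
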
#ifdef CONFIG_MEMORY_HOTREMOVE
int arch_remove_memory(u64 start, u64 size)
{
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;
	struct zone *zone;
	int ret;

	zone = page_zone(pfn_to_page(start_pfn));
	ret = __remove_pages(zone, start_pfn, nr_pages);
	if (ret)
		return ret;

	/* Remove htab bolted mappings for this section of memory */
	start = (unsigned long)__va(start);
	ret = remove_section_mapping(start, start + size);

	/* Ensure all vmalloc mappings are flushed in case they also
	 * hit that section of memory
	 */
	vm_unmap_aliases();

	return ret;
}
#endif
#endif /* CONFIG_MEMORY_HOTPLUG */

/*
 * walk_system_ram_range() needs to make sure there are no holes in a
 * given memory range.  PPC64 does not maintain the memory layout in
 * /proc/iomem.  Instead it maintains it in memblock.memory structures.
 * Walk through the memory regions, find holes and call back for
 * contiguous regions.
 */
int
walk_system_ram_range(unsigned long start_pfn, unsigned long nr_pages,
		void *arg, int (*func)(unsigned long, unsigned long, void *))
{
	struct memblock_region *reg;
	unsigned long end_pfn = start_pfn + nr_pages;
	unsigned long tstart, tend;
	int ret = -1;

	for_each_memblock(memory, reg) {
		tstart = max(start_pfn, memblock_region_memory_base_pfn(reg));
		tend = min(end_pfn, memblock_region_memory_end_pfn(reg));
		if (tstart >= tend)
			continue;
		ret = (*func)(tstart, tend - tstart, arg);
		if (ret)
			break;
	}
	return ret;
}
EXPORT_SYMBOL_GPL(walk_system_ram_range);

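/*
 * Example use (a sketch; "count_ram_pages" is a hypothetical callback):
 * tally how many pfns in [0, max_pfn) are backed by RAM.  The callback
 * gets (start_pfn, nr_pages, arg) per contiguous chunk, and a non-zero
 * return stops the walk.
 *
 *	static int count_ram_pages(unsigned long start_pfn,
 *				   unsigned long nr_pages, void *arg)
 *	{
 *		*(unsigned long *)arg += nr_pages;
 *		return 0;
 *	}
 *
 *	unsigned long nr_ram = 0;
 *	walk_system_ram_range(0, max_pfn, &nr_ram, count_ram_pages);
 */
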
/*
 * Initialize the bootmem system and give it all the memory we
 * have available.  If we are using highmem, we only put the
 * lowmem into the bootmem system.
 */
#ifndef CONFIG_NEED_MULTIPLE_NODES
void __init do_init_bootmem(void)
{
	unsigned long start, bootmap_pages;
	unsigned long total_pages;
	struct memblock_region *reg;
	int boot_mapsize;

	max_low_pfn = max_pfn = memblock_end_of_DRAM() >> PAGE_SHIFT;
	total_pages = (memblock_end_of_DRAM() - memstart_addr) >> PAGE_SHIFT;
#ifdef CONFIG_HIGHMEM
	total_pages = total_lowmem >> PAGE_SHIFT;
	max_low_pfn = lowmem_end_addr >> PAGE_SHIFT;
#endif

	/*
	 * Find an area to use for the bootmem bitmap.  Calculate the size of
	 * bitmap required as (Total Memory) / PAGE_SIZE / BITS_PER_BYTE.
	 * Add 1 additional page in case the address isn't page-aligned.
	 */
	bootmap_pages = bootmem_bootmap_pages(total_pages);

	start = memblock_alloc(bootmap_pages << PAGE_SHIFT, PAGE_SIZE);

	min_low_pfn = MEMORY_START >> PAGE_SHIFT;
	boot_mapsize = init_bootmem_node(NODE_DATA(0), start >> PAGE_SHIFT, min_low_pfn, max_low_pfn);

	/* Place all memblock_regions in the same node and merge contiguous
	 * memblock_regions
	 */
	memblock_set_node(0, (phys_addr_t)ULLONG_MAX, &memblock.memory, 0);

	/* Add all physical memory to the bootmem map, mark each area
	 * present.
	 */
#ifdef CONFIG_HIGHMEM
	free_bootmem_with_active_regions(0, lowmem_end_addr >> PAGE_SHIFT);

	/* reserve the sections we're already using */
	for_each_memblock(reserved, reg) {
		unsigned long top = reg->base + reg->size - 1;
		if (top < lowmem_end_addr)
			reserve_bootmem(reg->base, reg->size, BOOTMEM_DEFAULT);
		else if (reg->base < lowmem_end_addr) {
			unsigned long trunc_size = lowmem_end_addr - reg->base;
			reserve_bootmem(reg->base, trunc_size, BOOTMEM_DEFAULT);
		}
	}
#else
	free_bootmem_with_active_regions(0, max_pfn);

	/* reserve the sections we're already using */
	for_each_memblock(reserved, reg)
		reserve_bootmem(reg->base, reg->size, BOOTMEM_DEFAULT);
#endif
	/* XXX need to clip this if using highmem? */
	sparse_memory_present_with_active_regions(0);

	init_bootmem_done = 1;
}

/* mark pages that don't exist as nosave */
static int __init mark_nonram_nosave(void)
{
	struct memblock_region *reg, *prev = NULL;

	for_each_memblock(memory, reg) {
		if (prev &&
		    memblock_region_memory_end_pfn(prev) < memblock_region_memory_base_pfn(reg))
			register_nosave_region(memblock_region_memory_end_pfn(prev),
					       memblock_region_memory_base_pfn(reg));
		prev = reg;
	}
	return 0;
}
#else /* CONFIG_NEED_MULTIPLE_NODES */
static int __init mark_nonram_nosave(void)
{
	return 0;
}
#endif

static bool zone_limits_final;

static unsigned long max_zone_pfns[MAX_NR_ZONES] = {
	[0 ... MAX_NR_ZONES - 1] = ~0UL
};

/*
 * Restrict the specified zone and all more restrictive zones
 * to be below the specified pfn.  May not be called after
 * paging_init().
 */
void __init limit_zone_pfn(enum zone_type zone, unsigned long pfn_limit)
{
	int i;

	if (WARN_ON(zone_limits_final))
		return;

	for (i = zone; i >= 0; i--) {
		if (max_zone_pfns[i] > pfn_limit)
			max_zone_pfns[i] = pfn_limit;
	}
}

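/*
 * Example (a sketch, not a call made in this file): a platform whose
 * DMA engines can only address the low 2GB could clamp ZONE_DMA during
 * early setup with
 *
 *	limit_zone_pfn(ZONE_DMA, 1UL << (31 - PAGE_SHIFT));
 *
 * capping ZONE_DMA (and any more restrictive zone) at the 2GB pfn.
 */
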
/*
 * Find the least restrictive zone that is entirely below the
 * specified pfn limit.  Returns < 0 if no suitable zone is found.
 *
 * pfn_limit must be u64 because it can exceed 32 bits even on 32-bit
 * systems -- the DMA limit can be higher than any possible real pfn.
 */
int dma_pfn_limit_to_zone(u64 pfn_limit)
{
	enum zone_type top_zone = ZONE_NORMAL;
	int i;

#ifdef CONFIG_HIGHMEM
	top_zone = ZONE_HIGHMEM;
#endif

	for (i = top_zone; i >= 0; i--) {
		if (max_zone_pfns[i] <= pfn_limit)
			return i;
	}

	return -EPERM;
}

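/*
 * Sketch of how a DMA allocation path can consume this (simplified;
 * the real GFP selection lives in the arch DMA code and varies):
 *
 *	int zone = dma_pfn_limit_to_zone(dev->coherent_dma_mask >> PAGE_SHIFT);
 *
 *	if (zone < 0)
 *		return NULL;		(no zone fits this device)
 *	if (zone == ZONE_DMA)
 *		flag |= GFP_DMA;	(restrict the allocation)
 */
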
/*
 * paging_init() sets up the page tables - in fact we've already done this.
 */
void __init paging_init(void)
{
	unsigned long long total_ram = memblock_phys_mem_size();
	phys_addr_t top_of_ram = memblock_end_of_DRAM();
	enum zone_type top_zone;

#ifdef CONFIG_PPC32
	unsigned long v = __fix_to_virt(__end_of_fixed_addresses - 1);
	unsigned long end = __fix_to_virt(FIX_HOLE);

	for (; v < end; v += PAGE_SIZE)
		map_page(v, 0, 0); /* XXX gross */
#endif

#ifdef CONFIG_HIGHMEM
	map_page(PKMAP_BASE, 0, 0);	/* XXX gross */
	pkmap_page_table = virt_to_kpte(PKMAP_BASE);

	kmap_pte = virt_to_kpte(__fix_to_virt(FIX_KMAP_BEGIN));
	kmap_prot = PAGE_KERNEL;
#endif /* CONFIG_HIGHMEM */

	printk(KERN_DEBUG "Top of RAM: 0x%llx, Total RAM: 0x%llx\n",
	       (unsigned long long)top_of_ram, total_ram);
	printk(KERN_DEBUG "Memory hole size: %ldMB\n",
	       (long int)((top_of_ram - total_ram) >> 20));

#ifdef CONFIG_HIGHMEM
	top_zone = ZONE_HIGHMEM;
	limit_zone_pfn(ZONE_NORMAL, lowmem_end_addr >> PAGE_SHIFT);
#else
	top_zone = ZONE_NORMAL;
#endif

	limit_zone_pfn(top_zone, top_of_ram >> PAGE_SHIFT);
	zone_limits_final = true;
	free_area_init_nodes(max_zone_pfns);

	mark_nonram_nosave();
}

static void __init register_page_bootmem_info(void)
{
	int i;

	for_each_online_node(i)
		register_page_bootmem_info_node(NODE_DATA(i));
}

void __init mem_init(void)
{
	/*
	 * book3s is limited to 16 page sizes due to encoding this in
	 * a 4-bit field for slices.
	 */
	BUILD_BUG_ON(MMU_PAGE_COUNT > 16);

#ifdef CONFIG_SWIOTLB
	swiotlb_init(0);
#endif

	register_page_bootmem_info();
	high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);
	set_max_mapnr(max_pfn);
	free_all_bootmem();

#ifdef CONFIG_HIGHMEM
	{
		unsigned long pfn, highmem_mapnr;

		highmem_mapnr = lowmem_end_addr >> PAGE_SHIFT;
		for (pfn = highmem_mapnr; pfn < max_mapnr; ++pfn) {
			phys_addr_t paddr = (phys_addr_t)pfn << PAGE_SHIFT;
			struct page *page = pfn_to_page(pfn);
			if (!memblock_is_reserved(paddr))
				free_highmem_page(page);
		}
	}
#endif /* CONFIG_HIGHMEM */

#if defined(CONFIG_PPC_FSL_BOOK3E) && !defined(CONFIG_SMP)
	/*
	 * If smp is enabled, next_tlbcam_idx is initialized in the cpu up
	 * functions.... do it here for the non-smp case.
	 */
	per_cpu(next_tlbcam_idx, smp_processor_id()) =
		(mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY) - 1;
#endif

	mem_init_print_info(NULL);
#ifdef CONFIG_PPC32
	pr_info("Kernel virtual memory layout:\n");
	pr_info("  * 0x%08lx..0x%08lx  : fixmap\n", FIXADDR_START, FIXADDR_TOP);
#ifdef CONFIG_HIGHMEM
	pr_info("  * 0x%08lx..0x%08lx  : highmem PTEs\n",
		PKMAP_BASE, PKMAP_ADDR(LAST_PKMAP));
#endif /* CONFIG_HIGHMEM */
#ifdef CONFIG_NOT_COHERENT_CACHE
	pr_info("  * 0x%08lx..0x%08lx  : consistent mem\n",
		IOREMAP_TOP, IOREMAP_TOP + CONFIG_CONSISTENT_SIZE);
#endif /* CONFIG_NOT_COHERENT_CACHE */
	pr_info("  * 0x%08lx..0x%08lx  : early ioremap\n",
		ioremap_bot, IOREMAP_TOP);
	pr_info("  * 0x%08lx..0x%08lx  : vmalloc & ioremap\n",
		VMALLOC_START, VMALLOC_END);
#endif /* CONFIG_PPC32 */

	mem_init_done = 1;
}

void free_initmem(void)
{
	ppc_md.progress = ppc_printk_progress;
	free_initmem_default(POISON_FREE_INITMEM);
}

#ifdef CONFIG_BLK_DEV_INITRD
void __init free_initrd_mem(unsigned long start, unsigned long end)
{
	free_reserved_area((void *)start, (void *)end, -1, "initrd");
}
#endif

/*
 * This is called when a page has been modified by the kernel.
 * It just marks the page as not i-cache clean.  We do the i-cache
 * flush later when the page is given to a user process, if necessary.
 */
void flush_dcache_page(struct page *page)
{
	if (cpu_has_feature(CPU_FTR_COHERENT_ICACHE))
		return;
	/* avoid an atomic op if possible */
	if (test_bit(PG_arch_1, &page->flags))
		clear_bit(PG_arch_1, &page->flags);
}
EXPORT_SYMBOL(flush_dcache_page);

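/*
 * Typical pattern (illustrative only): after the kernel writes into a
 * page that may later be mapped into user space, e.g. in a filesystem
 * read path:
 *
 *	void *kaddr = kmap_atomic(page);
 *	memcpy(kaddr, data, len);
 *	kunmap_atomic(kaddr);
 *	flush_dcache_page(page);	(mark the page i-cache dirty)
 */
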
void flush_dcache_icache_page(struct page *page)
{
#ifdef CONFIG_HUGETLB_PAGE
	if (PageCompound(page)) {
		flush_dcache_icache_hugepage(page);
		return;
	}
#endif
#ifdef CONFIG_BOOKE
	{
		void *start = kmap_atomic(page);
		__flush_dcache_icache(start);
		kunmap_atomic(start);
	}
#elif defined(CONFIG_8xx) || defined(CONFIG_PPC64)
	/* On 8xx there is no need to kmap since highmem is not supported */
	__flush_dcache_icache(page_address(page));
#else
	__flush_dcache_icache_phys(page_to_pfn(page) << PAGE_SHIFT);
#endif
}
EXPORT_SYMBOL(flush_dcache_icache_page);

void clear_user_page(void *page, unsigned long vaddr, struct page *pg)
{
	clear_page(page);

	/*
	 * We shouldn't have to do this, but some versions of glibc
	 * require it (ld.so assumes zero filled pages are icache clean)
	 * - Anton
	 */
	flush_dcache_page(pg);
}
EXPORT_SYMBOL(clear_user_page);

void copy_user_page(void *vto, void *vfrom, unsigned long vaddr,
		    struct page *pg)
{
	copy_page(vto, vfrom);

	/*
	 * We should be able to use the following optimisation, however
	 * there are two problems.
	 * Firstly a bug in some versions of binutils meant PLT sections
	 * were not marked executable.
	 * Secondly the first word in the GOT section is blrl, used
	 * to establish the GOT address. Until recently the GOT was
	 * not marked executable.
	 * - Anton
	 */
#if 0
	if (!vma->vm_file && ((vma->vm_flags & VM_EXEC) == 0))
		return;
#endif

	flush_dcache_page(pg);
}

void flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
			     unsigned long addr, int len)
{
	unsigned long maddr;

	maddr = (unsigned long) kmap(page) + (addr & ~PAGE_MASK);
	flush_icache_range(maddr, maddr + len);
	kunmap(page);
}
EXPORT_SYMBOL(flush_icache_user_range);

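/*
 * The main consumer is copy_to_user_page() (see asm/cacheflush.h), used
 * by ptrace and friends when writing into another process's text;
 * roughly:
 *
 *	memcpy(dst, src, len);
 *	flush_icache_user_range(vma, page, vaddr, len);
 */
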
/*
 * This is called at the end of handling a user page fault, when the
 * fault has been handled by updating a PTE in the linux page tables.
 * We use it to preload an HPTE into the hash table corresponding to
 * the updated linux PTE.
 *
 * This must always be called with the pte lock held.
 */
void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
		      pte_t *ptep)
{
#ifdef CONFIG_PPC_STD_MMU
	/*
	 * We don't need to worry about _PAGE_PRESENT here because we are
	 * called with either mm->page_table_lock held or ptl lock held
	 */
	unsigned long access = 0, trap;

	/* We only want HPTEs for linux PTEs that have _PAGE_ACCESSED set */
	if (!pte_young(*ptep) || address >= TASK_SIZE)
		return;

	/* We try to figure out if we are coming from an instruction
	 * access fault and pass that down to __hash_page so we avoid
	 * double-faulting on execution of fresh text. We have to test
	 * for regs NULL since init will get here first thing at boot
	 *
	 * We also avoid filling the hash if not coming from a fault
	 */
	if (current->thread.regs == NULL)
		return;
	trap = TRAP(current->thread.regs);
	if (trap == 0x400)
		access |= _PAGE_EXEC;
	else if (trap != 0x300)
		return;
	hash_preload(vma->vm_mm, address, access, trap);
#endif /* CONFIG_PPC_STD_MMU */
#if (defined(CONFIG_PPC_BOOK3E_64) || defined(CONFIG_PPC_FSL_BOOK3E)) \
	&& defined(CONFIG_HUGETLB_PAGE)
	if (is_vm_hugetlb_page(vma))
		book3e_hugetlb_preload(vma, address, *ptep);
#endif
}

/*
 * System memory should not be in /proc/iomem but various tools expect it
 * (eg kdump).
 */
static int __init add_system_ram_resources(void)
{
	struct memblock_region *reg;

	for_each_memblock(memory, reg) {
		struct resource *res;
		unsigned long base = reg->base;
		unsigned long size = reg->size;

		res = kzalloc(sizeof(struct resource), GFP_KERNEL);
		WARN_ON(!res);

		if (res) {
			res->name = "System RAM";
			res->start = base;
			res->end = base + size - 1;
			res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
			WARN_ON(request_resource(&iomem_resource, res) < 0);
		}
	}

	return 0;
}
subsys_initcall(add_system_ram_resources);

#ifdef CONFIG_STRICT_DEVMEM
/*
 * devmem_is_allowed(): check to see if /dev/mem access to a certain address
 * is valid. The argument is a physical page number.
 *
 * Access has to be given to non-kernel-ram areas as well; these contain the
 * PCI mmio resources as well as potential bios/acpi data regions.
 */
int devmem_is_allowed(unsigned long pfn)
{
	if (iomem_is_exclusive(pfn << PAGE_SHIFT))
		return 0;
	if (!page_is_ram(pfn))
		return 1;
	if (page_is_rtas_user_buf(pfn))
		return 1;
	return 0;
}
#endif /* CONFIG_STRICT_DEVMEM */